diff --git "a/1329.jsonl" "b/1329.jsonl" new file mode 100644--- /dev/null +++ "b/1329.jsonl" @@ -0,0 +1,921 @@ +{"seq_id":"22873309294","text":"import time\nimport board\nimport displayio\nfrom adafruit_seesaw.tftshield18 import TFTShield18\nfrom adafruit_st7735r import ST7735R\n\n# Release any resources currently in use for the displays\ndisplayio.release_displays()\n\nss = TFTShield18()\n\nspi = board.SPI()\ntft_cs = board.D10\ntft_dc = board.D8\n\ndisplay_bus = displayio.FourWire(spi, command=tft_dc, chip_select=tft_cs)\n\nss.tft_reset()\ndisplay = ST7735R(display_bus, width=160, height=128, rotation=90, bgr=True)\n\nss.set_backlight(True)\n\nwhile True:\n buttons = ss.buttons\n\n if buttons.right:\n print(\"Button RIGHT!\")\n\n if buttons.down:\n print(\"Button DOWN!\")\n\n if buttons.left:\n print(\"Button LEFT!\")\n\n if buttons.up:\n print(\"Button UP!\")\n\n if buttons.select:\n print(\"Button SELECT!\")\n\n if buttons.a:\n print(\"Button A!\")\n\n if buttons.b:\n print(\"Button B!\")\n\n if buttons.c:\n print(\"Button C!\")\n\n time.sleep(0.001)\n","repo_name":"qbalsdon/pico_rgb_keypad_hid","sub_path":"adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/examples/st7735r_18tftshield_buttons.py","file_name":"st7735r_18tftshield_buttons.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"95"} +{"seq_id":"13886113367","text":"import csv\nimport datetime\nimport hashlib\nimport struct\nfrom collections import defaultdict\n\n\ndef Byte(byte_stream, size):\n return byte_stream[:size]\n\n\ndef Char(byte_stream, size):\n return struct.unpack('>' + str(size) + 'c', byte_stream[:size])\n\n\ndef Word(byte_stream, size):\n if size % 2 == 0:\n return struct.unpack('>' + str(size // 2) + 'H', byte_stream[:size])\n else:\n raise IOError('Bytestream not multiple of 2')\n\n\ndef Short(byte_stream, size):\n if size % 2 == 0:\n return struct.unpack('>' + str(size // 2) + 'h', byte_stream[:size])\n else:\n raise IOError('Bytestream not multiple of 2')\n\n\ndef Long(byte_stream, size):\n if size % 4 == 0:\n return struct.unpack('>' + str(size // 4) + 'l', byte_stream[:size])\n else:\n raise IOError('ByteStream not multiple of 4')\n\n\ndef Float(byte_stream, size):\n if size % 4 == 0:\n return struct.unpack('>' + str(size // 4) + 'f', byte_stream[:size])\n else:\n raise IOError('ByteStream not multiple of 4')\n\n\ndef Double(byte_stream, size):\n if size % 8 == 0:\n return struct.unpack('>' + str(size // 8) + 'd', byte_stream[:size])\n else:\n raise IOError('Bytestream not multiple of 8')\n\n\ndef Date(byte_stream, size):\n if size == 4:\n return struct.unpack('>hBB', byte_stream[:size])\n else:\n raise IOError('ByteStream not length of 4')\n\n\ndef Time(byte_stream, size):\n if size == 4:\n return struct.unpack('>BBBB', byte_stream[:size])\n else:\n raise IOError('ByteStream not length of 4')\n\n\ndef pString(byte_stream, size):\n return struct.unpack('>' + str(size) + 'p', byte_stream[:size])\n\n\ndef cString(byte_stream, size):\n return struct.unpack('>' + str(size) + 's', byte_stream[:size])\n\n\ndef Thumb(byte_stream, size):\n if size == 10:\n return struct.unpack('>iiBB', byte_stream[:size])\n else:\n raise IOError('Bytestream not length of 10')\n\n\ndef Bool(byte_stream, size):\n return struct.unpack('>' + str(size) + '?', byte_stream[:size])\n\n\ndef User(byte_stream, size):\n return byte_stream[:size]\n\n\nstructUnpacker = {1: Byte,\n 2: Char,\n 3: Word,\n 4: Short,\n 5: Long,\n 7: Float,\n 
8: Double,\n 10: Date,\n 11: Time,\n 18: pString,\n 19: cString,\n 12: Thumb,\n 13: Bool,\n 0: User}\n\n\nclass FSAFile(object):\n SIGNATURE = b\"ABIF\"\n\n def __init__(self, byte_stream, malform_check=True):\n self._channels = {}\n self.raw = byte_stream\n self.signature = struct.unpack('>4s', byte_stream[0:4])[0]\n self.version = struct.unpack('>h', byte_stream[4:6])[0]\n self._channels = None\n self._hash = None\n\n if self.signature != FSAFile.SIGNATURE:\n raise IOError('WARNING: Not a valid ABIF File.')\n\n self.tdir = FSADir(byte_stream, offset=6)\n\n self.directories = defaultdict(dict)\n\n # Unpack FSA File.\n for i in range(0, self.tdir.numElements):\n directory = FSADir(byte_stream, self.tdir.dataOffset + i * 28)\n self.directories[directory.name][directory.number] = directory\n\n if malform_check:\n for k in ['DyeW', 'DATA']:\n if k not in self.directories:\n raise IOError(\"ABIF Malformed\")\n\n def dump_to_csv(self, filename):\n with open(filename, 'w') as f:\n w = csv.writer(f)\n for dir_key in self.directories:\n directory = self.directories[dir_key]\n for entry_key in directory:\n row = [dir_key, entry_key]\n row += list(directory[entry_key].data)\n w.writerow(row)\n\n def _compute_hash(self):\n return hashlib.md5(self.raw).hexdigest()\n\n @property\n def hash(self):\n if not self._hash:\n self._hash = self._compute_hash()\n return self._hash\n\n @property\n def channels(self):\n if not self._channels:\n # Colors ordered so that as they are popped off they match the given channel.\n colors = ['orange', 'red', 'yellow', 'green', 'blue']\n wavelength_keys = list(self.directories['DyeW'])\n # Backwards compatibility of FSA files requires that the 5th channel, if used, is labeled\n # as 105 in the data directory.\n if len(wavelength_keys) == 5:\n data_keys = [1, 2, 3, 4, 105]\n else:\n data_keys = sorted(wavelength_keys)\n self._channels = [{'data': self.directories['DATA'][data_keys[k]].data,\n 'wavelength': self.directories['DyeW'][wavelength_keys[k]].data[0],\n 'color': colors.pop()\n } for k in range(len(wavelength_keys))]\n return self._channels\n\n @property\n def sample_name(self):\n # Sample Label\n return self.directories['SpNm'][1].data[0].decode('ascii')\n\n @property\n def plate(self):\n # Plate Label\n return self.directories['CTID'][1].data[0].replace(b\"\\x00\", b\"\").decode('ascii')\n\n @property\n def well(self):\n well = self.directories['TUBE'][1].data[0].decode('ascii')\n # Normalize well label so that integer portion is zero-padded.\n if int(well[1:]) < 10:\n well_letter = well[0]\n well_integer = well[1]\n well = f'{well_letter}0{well_integer}'\n return well\n\n @property\n def date_run(self):\n date = datetime.datetime(*(sum((self.directories['RUND'][1].data, self.directories['RUNT'][1].data), ())))\n return date\n\n @property\n def ce_machine(self):\n # Name of CE machine on which the plate was run.\n ce_machine = self.directories['MCHN'][1].data[0].decode('ascii')\n return ce_machine\n\n @property\n def plate_size(self):\n # Size of the plate, either 384 or 96.\n plate_size = self.directories['PSZE'][1].data[0]\n return plate_size\n\n @property\n def offscale_indices(self):\n # Indices of data points detected by the machine where the signal is saturated.\n if 'Satd' in self.directories:\n offscale_indices = list(self.directories['Satd'][1].data)\n else:\n offscale_indices = []\n return offscale_indices\n\n @property\n def polymer_expiration(self):\n exp_date = datetime.datetime.strptime(self.directories['SMED'][1].data[0], '%b %d, %Y')\n return 
exp_date\n\n @property\n def polymer_lot_number(self):\n lot_num = int(self.directories['SMLt'][1].data[0])\n return lot_num\n\n @property\n def voltage(self):\n voltage = list(self.directories['DATA'][5].data)\n return voltage\n\n @property\n def current(self):\n current = list(self.directories['DATA'][6].data)\n return current\n\n @property\n def power(self):\n power = list(self.directories['DATA'][7].data)\n return power\n\n @property\n def temperature(self):\n temperature = list(self.directories['DATA'][8].data)\n return temperature\n\n\nclass FSADir(object):\n \"\"\"\n Given a full bytestream and an offset, unpack the directory found within an FSA file.\n \"\"\"\n def __init__(self, bytestream, offset):\n self.name = struct.unpack('>4s', bytestream[offset: offset + 4])[0].decode('ascii')\n self.number = struct.unpack('>i', bytestream[offset + 4: offset + 8])[0]\n self.elementType = struct.unpack('>h', bytestream[offset + 8: offset + 10])[0]\n self.elementSize = struct.unpack('>h', bytestream[offset + 10: offset + 12])[0]\n self.numElements = struct.unpack('>i', bytestream[offset + 12: offset + 16])[0]\n self.dataSize = struct.unpack('>i', bytestream[offset + 16: offset + 20])[0]\n self.dataOffset = struct.unpack('>i', bytestream[offset + 20: offset + 24])[0]\n self.dataHandle = struct.unpack('>i', bytestream[offset + 24: offset + 28])[0]\n if self.dataSize > 4:\n self.data = structUnpacker.get(self.elementType, User)(bytestream[self.dataOffset:], self.dataSize)\n else:\n self.data = structUnpacker.get(self.elementType, User)(bytestream[offset + 20: offset + 24], self.dataSize)\n\n def __repr__(self):\n if len(self.data) > 25:\n return ''.format(str(self.name) + str(self.number), str(self.data[0:25]) + \"...\")\n else:\n return ''.format(str(self.name) + str(self.number), str(self.data[0:25]))\n","repo_name":"EPPIcenter/MicroSPAT","sub_path":"src/microspat-py/app/microspat/fsa_tools/FSAExtractor.py","file_name":"FSAExtractor.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"15132261378","text":"import sys\nsys.stdin = open(\"input/palindrome_string/input.txt\", \"rt\")\n\nn = int(input())\n\nfor i in range(0, n):\n word = input()\n word = word.upper()\n length = len(word)\n\n for j in range(length // 2):\n if word[j] != word[-1-j]:\n print(f'#{i + 1} NO')\n break\n else:\n print(f'#{i + 1} YES')\n\n","repo_name":"HYE0N1127/Algorithm","sub_path":"Algorithm/Lecture/section3/palindrome_string.py","file_name":"palindrome_string.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41918444729","text":"def find_MHT_roots(nodes, edges):\n if nodes <= 0:\n return []\n\n # with only one node, its indegrees will be 0, so handle it separately\n if nodes == 1:\n return [0]\n\n # a. Initialise the graph\n indegrees = {i: 0 for i in range(nodes)}\n graph = {i: [] for i in range(nodes)}\n\n # b. Build the graph\n for edge in edges:\n n1, n2 = edge[0], edge[1]\n\n # this is an undirected graph, so add a link for both nodes\n graph[n1].append(n2)\n graph[n2].append(n1)\n\n indegrees[n1] += 1\n indegrees[n2] += 1\n\n # c. Find the leaves\n leaves = []\n for node in indegrees:\n if indegrees[node] == 1:\n leaves.append(node)\n\n # d. 
Modified Topological Sort\n # remove leaves level-by-level and subtract each leave's children's indegrees\n # repeat this until we are left with 1 or 2 nodes, which will be our answer\n # any node that has already been a leaf cannot be an MHT root\n total_nodes = nodes\n while total_nodes > 2:\n n_leaves = len(leaves)\n\n total_nodes -= n_leaves\n\n for i in range(0, n_leaves):\n leaf = leaves.pop(0)\n\n # get the leaf's children to decrement their indegrees\n for child in graph[leaf]:\n indegrees[child] -= 1\n\n if indegrees[child] == 1:\n leaves.append(child)\n\n print(indegrees)\n\n return list(leaves)\n\n\ndef main():\n print(\"Roots of MHTs: \" + str(find_MHT_roots(5, [[0, 1], [1, 2], [1, 3], [2, 4]])))\n print(\"Roots of MHTs: \" + str(find_MHT_roots(4, [[0, 1], [0, 2], [2, 3]])))\n print(\"Roots of MHTs: \" + str(find_MHT_roots(4, [[1, 2], [1, 3]])))\n\n\nmain()\n","repo_name":"mitchellnel/technical-interview-prep","sub_path":"gtci/18-topologicalSort/minimumHeightTrees.py","file_name":"minimumHeightTrees.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8525766507","text":"import os\nimport subprocess\nimport shutil\nimport math\nfrom multidena.lib.common import read_seqs_with_complement\n\n\ndef calculate_scores_inmode_thresholds(path_to_inmode, path_to_model, path_to_fasta, path_to_java, tmp_dir):\n container = list()\n append = container.append\n args = [path_to_java, '-Xmx8096m', '-Xms1024m', '-jar', path_to_inmode, 'scan',\n 'i={}'.format(path_to_model),\n 'id={}'.format(path_to_fasta),\n 'b={}'.format('From file'),\n 'd={}'.format(path_to_fasta),\n 'f={}'.format(0.005),\n 'outdir={}'.format(tmp_dir)]\n r = subprocess.run(args, capture_output=True)\n with open(tmp_dir + \"/Motif_hits_from_SequenceScan(0.005).BED\") as file:\n for line in file:\n append(math.log10(float(line.strip().split()[4])))\n return(container)\n\n\ndef get_threshold(scores, number_of_sites, path_out):\n scores.sort(reverse=True) # big -> small\n with open(path_out, \"w\") as file:\n last_score = scores[0]\n for count, score in enumerate(scores[1:], 1):\n if score == last_score:\n continue\n elif count/number_of_sites > 0.0005:\n file.write(\"{0}\\t{1}\\n\".format(last_score, count/number_of_sites))\n break\n elif score != last_score:\n file.write(\"{0}\\t{1}\\n\".format(last_score, count/number_of_sites))\n last_score = score \n file.close()\n return(0)\n\n\ndef get_threshold_for_inmode(path_to_fasta, path_to_model, path_to_inmode,\n length_of_site, path_out, path_to_java='java', tmp_dir='./tmp'):\n tmp_dir = os.getcwd() + '/tmp'\n if not os.path.isdir(tmp_dir):\n os.mkdir(tmp_dir)\n\n peaks = read_seqs_with_complement(path_to_fasta)\n number_of_sites = sum([len(range(len(peak) - length_of_site + 1)) for peak in peaks])\n scores = calculate_scores_inmode_thresholds(path_to_inmode, path_to_model, path_to_fasta, path_to_java, tmp_dir)\n get_threshold(scores, number_of_sites, path_out)\n shutil.rmtree(tmp_dir)\n return(0)\n","repo_name":"ubercomrade/MultiDeNA","sub_path":"multidena/tools/get_threshold_for_inmode.py","file_name":"get_threshold_for_inmode.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"23299741461","text":"import configparser\nimport os\nimport smtplib\nimport sys\nimport time\nfrom datetime import datetime\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import 
MIMEText\nfrom email.utils import formataddr\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass Log:\n # 오늘 일자로 파일 이름을 생성 한다.!!\n def __init__(self):\n self.file_name = f'./log{datetime.today().strftime(\"%Y-%m-%d\")}.txt'\n\n # 현재시간을 기준으로 로그를 파일에 작성 및 \n def add_log(self, comment: str):\n if os.path.isfile(self.file_name) is True:\n with open(self.file_name, 'a', encoding='utf-8') as f:\n log_str = f'{datetime.today().strftime(\"%Y/%m/%d %H:%M:%S : \")}{comment}'\n f.write(log_str + '\\n')\n print(log_str)\n return True\n return False\n\n # 새로운 파일 생성\n def new_log_file(self):\n if os.path.isfile(self.file_name) is False:\n with open(self.file_name, 'w', encoding='utf-8') as f:\n pass\n return True\n return False\n\n\nclass Properties:\n\n def __init__(self):\n self.file_name = f'./config.ini'\n\n def new_config_file(self):\n if os.path.isfile(self.file_name) is False:\n with open(self.file_name, 'w', encoding='utf-8') as f:\n f.write('')\n\n return True\n return False\n\n def set(self):\n config = configparser.ConfigParser() ## 클래스 객체 생성\n\n config[\"DEFAULT\"] = {\"google_gmail_id\": \"myid@gmail.com\", \"google_app_pw\": \"xxxxyyyyzzzzqqqq\"}\n config[\"MAIL_TEXT\"] = {\"title\": \"안녕하세요 OO 입니다.\", \"header\": \"안녕하세요 OO 입니다.\", \"footer\": \"문의사항 있으면 연락주세요\"}\n\n with open(self.file_name, \"w\", encoding='utf-8') as f:\n config.write(f)\n\n\nclass Boho:\n def send_mail(id: str, pw: str, article: str, new_num: int, to_ad: str, title: str, header: str, footer: str):\n from_addr = formataddr(('SOCH', id))\n\n # 받는사람\n to_addr = formataddr(('담당자', to_ad))\n\n session = None\n try:\n # SMTP 세션 생성\n session = smtplib.SMTP('smtp.gmail.com', 587)\n # session.set_debuglevel(True)\n\n # SMTP 계정 인증 설정\n session.ehlo()\n session.starttls()\n session.login(id, pw)\n\n # 메일 콘텐츠 설정\n message = MIMEMultipart(\"mixed\")\n\n # 메일 송/수신 옵션 설정\n message.set_charset('utf-8')\n message['From'] = from_addr\n message['To'] = to_addr\n message['Subject'] = f\"{title} (\" + str(new_num) + \"건)\"\n # 메일 콘텐츠 - 내용\n body = f\"

{header}


\" + article + \"

\" + footer + \"
\"\n bodyPart = MIMEText(body, 'html', 'utf-8')\n message.attach(bodyPart)\n\n # 메일 발송\n session.sendmail(from_addr, to_addr, message.as_string())\n\n except Exception as e:\n return 9\n\n finally:\n if session is not None:\n session.quit()\n\n def get_text_list(file_name: str):\n if os.path.isfile(file_name) is False:\n nf = open(file_name, 'w', encoding='utf-8')\n nf.close()\n\n f = open(file_name, 'r', encoding='utf-8')\n search = '\\n'\n return_list = [word.strip(search) for word in f.readlines()]\n return return_list if len(return_list) > 0 else None\n\n def file_set_article(file_name: str, articles: list):\n f = open(file_name, 'w', encoding='utf-8')\n for i in articles:\n f.writelines(i + '\\n')\n f.close()\n\n def get_data(url: str):\n response = requests.get(url)\n articles_list = []\n line = ''\n if response.status_code == 200:\n html = response.text.strip()\n soup = BeautifulSoup(html, 'html.parser')\n\n articles = soup.select('table > tbody > tr > td')\n for article in enumerate(articles, start=1):\n\n if int(article[0]) % 5 != 3:\n line += article[1].text.strip() + ' '\n\n if int(article[0]) % 5 == 2:\n line += 'URL : https://www.boho.or.kr' + article[1].find(\"a\")[\"href\"] + ' '\n\n if int(article[0]) % 5 == 0:\n articles_list.append(line)\n line = ''\n continue\n\n return articles_list\n\n def what_is_new_article(article_list: list, new_article_list: list):\n if article_list is None:\n return list(set(new_article_list))\n return sorted(list(set(new_article_list) - set(article_list)))\n\n def article_to_html(newest_article: list):\n text = ''\n for i in newest_article:\n text += i + '
'\n return text\n\n\nlog = Log()\nproperties = Properties()\n\nprint('''\n.______ _______. _______. .______ ______ __ __ ______ \n| _ \\ / | / | | _ \\ / __ \\ | | | | / __ \\ \n| |_) | | (----` | (----` ______| |_) | | | | | | |__| | | | | | \n| / \\ \\ \\ \\ |______| _ < | | | | | __ | | | | | \n| |\\ \\----.----) | .----) | | |_) | | `--' | | | | | | `--' | \n| _| `._____|_______/ |_______/ |______/ \\______/ |__| |__| \\______/ v1.3\n\n''')\n\n\nif log.new_log_file():\n pass\n\nmail_list = Boho.get_text_list(file_name='./mail_list.txt')\nBoho.get_text_list(file_name='./article_lists.txt')\n\nlog.add_log(comment='소스코드 https://github.com/TwoIceFIsh/RSS-Boho')\nlog.add_log(comment='설명 https://twoicefish-secu.tistory.com/428')\n\nif properties.new_config_file() is True:\n log.add_log(comment='[-] 새로운 설정 파일이 생성 되었습니다!')\n log.add_log(comment='[-] 설정 후 실행해 주세요.')\n log.add_log(comment=f'[-] =======================================')\n log.add_log(comment=f'[-] {os.path.join(os.path.dirname(__file__),\"config.ini\")}')\n log.add_log(comment=f'[-] {os.path.join(os.path.dirname(__file__),\"mail_list.txt\")}')\n log.add_log(comment=f'[-] =======================================')\n properties.set()\n os.system('pause')\n sys.exit()\n\npropertiesq = configparser.ConfigParser() ## 클래스 객체 생성\npropertiesq.read('./config.ini', encoding='utf-8')\ndefault = propertiesq['DEFAULT']\nmail_text = propertiesq['MAIL_TEXT']\n\nif 'myid@gmail.com' == default['google_gmail_id'] or 'xxxxyyyyzzzzqqqq' == default['google_app_pw']:\n log.add_log(comment=f'[!] 자신만의 설정값으로 변경해 주세요')\n log.add_log(comment=f'[-] {os.path.join(os.path.dirname(__file__),\"config.ini\")}')\n os.system('pause')\n sys.exit()\n\nif mail_list is None:\n log.add_log(comment=f'[!] 이메일 리스트가 비어 있습니다. 추가해주세요')\n log.add_log(comment=f'[-] {os.path.join(os.path.dirname(__file__),\"mail_list.txt\")}')\n print(f'''\n 작성예시({os.path.join(os.path.dirname(__file__),\"mail_list.txt\")})\n \n asdfadsf@gmail.com\n sdijovjid@test.com\n sdjico@sdco.net\n \n ...\n \n ''')\n os.system('pause')\n sys.exit()\nelse:\n for i in mail_list:\n if '@' not in i or '.' not in i:\n log.add_log(comment=f'[!] {i} 올바른 이메일 형식이 아닙니다. 확인해 주세요')\n print(f'''\n 작성예시({os.path.join(os.path.dirname(__file__),\"mail_list.txt\")})\n \n asdfadsf@gmail.com\n sdijovjid@test.com\n sdjico@sdco.net\n \n ...\n \n ''')\n os.system('pause')\n sys.exit()\n\nwhile True:\n log.add_log(comment=f'[-] ======RSS-Boho Start======')\n\n # 신규 게시물 확인\n article_list = Boho.get_text_list(file_name='./article_lists.txt')\n new_article_list = Boho.get_data(url='https://www.boho.or.kr/kr/bbs/list.do?menuNo=205020&bbsId=B0000133')\n newest_article = Boho.what_is_new_article(article_list=article_list, new_article_list=new_article_list)\n\n # 이메일 목록을 획득 및 메일 발송\n if len(newest_article) > 0:\n log.add_log(comment=f'=====================================')\n log.add_log(comment=f'[-] {len(newest_article)}건의 신규 게시물이 발견 되었습니다 ')\n article_text = Boho.article_to_html(newest_article=newest_article)\n for i in enumerate(newest_article, start=1):\n log.add_log(comment=f'[{i[0]}] {i[1].split(\":\")[0].replace(\" URL \", \"\")}(New)')\n log.add_log(comment=f'=====================================')\n\n # 메일리스트 확인\n mail_list = Boho.get_text_list(file_name='./mail_list.txt')\n if mail_list is None:\n log.add_log(comment=f'[!] 이메일 리스트가 비어 있습니다. 추가해주세요')\n log.add_log(comment=f'[!] 15분후에 메일 발송을 시도 합니다.')\n log.add_log(comment=f'[!] 
(신규 게시글 정보 업데이트 스킵)')\n else:\n\n pid = default['google_gmail_id']\n ppw = default['google_app_pw']\n header = mail_text['header']\n footer = mail_text['footer']\n title = mail_text['title']\n for to in mail_list:\n message = Boho.send_mail(id=pid, pw=ppw, article=article_text, new_num=len(newest_article), to_ad=to,\n title=title, header=header, footer=footer)\n if message == 9:\n log.add_log(comment=f'[!] Google ID 및 Google API PW를 일치하지 않거나 존재하지 않습니다 확인해 주세요')\n log.add_log(comment=f'[-] {os.path.join(os.path.dirname(__file__),\"config.ini\")}')\n os.system('pause')\n sys.exit()\n\n else:\n log.add_log(comment=f'[-] {mail_list}에게 메일을 발송했습니다.')\n Boho.file_set_article(file_name='./article_lists.txt', articles=new_article_list)\n log.add_log(comment=f'[-] 신규 게시글 정보 업데이트를 완료했습니다.')\n log.add_log(comment=f'{os.path.join(os.path.dirname(__file__),\"article_lists.txt\")}')\n log.add_log(comment=f'[-] 루틴 종료 15분후에 재탐색을 실시 합니다.')\n else:\n log.add_log(comment=f'[-] 새롭게 발견된 기사가 없습니다.')\n log.add_log(comment=f'[-] 루틴 종료 15분후에 재탐색을 실시 합니다.')\n\n time.sleep(900)\n","repo_name":"TwoIceFIsh/RSS-Boho","sub_path":"RSS-Boho.py","file_name":"RSS-Boho.py","file_ext":"py","file_size_in_byte":10819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"21360060548","text":"company = {\n \"name\": \"Dummy company name\",\n \"nip\": \"123-123-123\",\n \"regon\": \"348509723045\",\n \"street\": \"Street no\",\n \"city\": \"Dummy city\",\n \"code\": \"333\"\n}\n\nprogram = {\n \"semester_no\": 2,\n \"school_year\": \"2022/2023\",\n \"fruitVeg_price\": 1.5,\n \"dairy_price\": 2.00,\n \"start_date\": \"2023-01-01\",\n \"end_date\": \"2023-12-31\",\n \"dairy_min_per_week\": 2,\n \"fruitVeg_min_per_week\": 3,\n \"dairy_amount\": 12,\n \"fruitVeg_amount\": 21\n}\n\nschool_data = {\n \"nick\": \"My dummy school\"\n}\n\nannex_data = {\n \"validity_date\": \"2023-12-07\",\n \"fruitVeg_products\": 10,\n \"dairy_products\": 1\n}\n\nweek_data = {\n \"start_date\": \"2023-12-01\",\n \"end_date\": \"2023-12-16\",\n \"week_no\": 1\n}\n\n\ndef get_program_data(company_id):\n program[\"company_id\"] = company_id\n return program\n","repo_name":"mariakoszula/programforschools-backend","sub_path":"tests/common_data.py","file_name":"common_data.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72397513274","text":"from common import *\n\n\ndef partOne(instr: str) -> int:\n foods, allergens = parse(instr)\n possibilities = get_possibilities(foods, allergens)\n\n bad_ingredients = set()\n for x in possibilities:\n bad_ingredients.update(possibilities[x])\n\n count = 0\n for food in foods:\n for ingredient in food.ingredients:\n if ingredient not in bad_ingredients:\n count += 1\n\n return count\n","repo_name":"codemicro/adventOfCode","sub_path":"challenges/2020/21-allergenAmusement/python/partOne.py","file_name":"partOne.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"95"} +{"seq_id":"9383483759","text":"class Label:\n def __init__(\n self,\n name: str = \"\",\n symbol: str = \"\",\n unit: str = \"\",\n prefix: str = \"\",\n subscript: str = \"\",\n superscript: str = \"\",\n multiplier: str = \"\",\n ):\n self.name = name\n self.symbol = symbol\n self.unit = unit\n self.prefix = prefix\n self.subscript = subscript\n self.superscript = superscript\n self.multiplier = multiplier\n\n 
@property\n def label(self):\n name = self.name\n if self.prefix:\n name = self.prefix + \" \" + name\n\n symbol = fr\"${self.symbol}\"\n if self.subscript:\n symbol += fr\"_\\mathrm{{{self.subscript}}}\"\n if self.superscript:\n symbol += fr\"^\\mathrm{{{self.superscript}}}\"\n symbol += \"$\"\n\n unit = self.unit\n if self.multiplier:\n unit = self.multiplier + unit\n\n return fr\"{name} {symbol} ({unit})\"\n\n def __repr__(self):\n obj_repr = [f\"{k}='{v}'\" for k, v in vars(self).items()]\n obj_repr = \"Label(\" + \", \".join(obj_repr) + \")\"\n return obj_repr\n\n def __str__(self):\n return self.label\n","repo_name":"giorgionicoli/labberpy","sub_path":"labels.py","file_name":"labels.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"17352181049","text":"'''\n集成分类器方法有:bagging(boosting aggregating,自举汇聚法)、随机森林(random forest)、boosting等\nboosting也可细分为很多种,其中比较流行的一种是AdaBoost(adaptive boosting, 自适应boosting)\nAdaBoost一般流程为:\n1、收集数据\n2、准备数据\n3、分析数据\n4、训练算法:AdaBoost的大部分时间用在训练上,分类器将多次在同一数据集上训练弱分类器\n5、测试算法:计算分类的错误率\n6、使用算法\n以下是利用多个单层决策树和adaboost算法,在小数据上的运用实例\n'''\n\nfrom numpy import *\nimport matplotlib.pyplot as plt\n\ndef loadSimpData():\n datMat = matrix([[ 1. , 2.1],\n [ 2. , 1.1],\n [ 1.3, 1. ],\n [ 1. , 1. ],\n [ 2. , 1. ]])\n classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]\n return datMat,classLabels\n\n# 通过阈值比较对数据进行分类\ndef stumpClassify(dataMatrix,dimen,threshVal,threshIneq):\n \"\"\"\n [summary]:单层决策树分类函数,根据某一特征进行分类\n \n Arguments:\n dataMatrix -- 数据矩阵\n dimen -- 选取第几列,对特征进行抽取\n threshVal -- 阀值\n threshIneq -- 比较关系(lt)\n \n Returns:\n retArray [numpy.ndarray]-- 分类结果\n \"\"\"\n # 初始化retArray为1,m行1列全为1\n retArray = ones((shape(dataMatrix)[0],1))\n # 置于-1,进行分类\n if threshIneq == 'lt':\n retArray[dataMatrix[:,dimen] <= threshVal] = -1.0 # 该列的值<=threshVal的,全部置为-1\n else:\n retArray[dataMatrix[:,dimen] > threshVal] = -1.0\n return retArray\n \n# 构建单层决策树(决策树的简化版本),是一种弱分类器算法\ndef buildStump(dataArr,classLabels,D):\n \"\"\"\n [summary]:找到数据集上最佳的单层决策树\n 将最小错误率minError设为+∞ \n 对数据集中的每一个特征(第一层循环):\n 对每个步长(第二层循环):\n 对每个不等号(第三层循环):\n 建立一棵单层决策树并利用加权数据集对它进行测试\n 如果错误率低于minError,则将当前单层决策树设为最佳单层决策树\n 返回最佳单层决策树\n \n Arguments:\n dataArr -- 数据矩阵\n classLabels -- 数据标签\n D -- 样本权重\n \n Returns:\n bestStump - 最佳单层决策树信息\n minError - 最小误差\n bestClasEst - 最佳的分类结果\n \"\"\"\n dataMatrix = mat(dataArr); labelMat = mat(classLabels).T #.T就是对一个矩阵的转置\n m,n = shape(dataMatrix)\n numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m,1)))\n minError = inf # 最小误差初始化为正无穷大\n for i in range(n):\n rangeMin = dataMatrix[:,i].min(); rangeMax = dataMatrix[:,i].max() # 找到特征中最小的值和最大值\n stepSize = (rangeMax-rangeMin)/numSteps # 步长,按步长选择列的最佳分割值\n for j in range(-1,int(numSteps)+1):# 第二层循环:按一定步长,遍历当前特征的特征值\n for inequal in ['lt', 'gt']: # 大于和小于的情况, 第三层循环:在大于和小于之间切换不等式\n threshVal = (rangeMin + float(j) * stepSize) # 根据阈值对数据进行分类,得到预测分类值\n # 计算分类结果\n predictedVals = stumpClassify(dataMatrix,i,threshVal,inequal) # 结果为m行1列的二维,值为-1或者1\n # 初始化误差矩阵\n errArr = mat(ones((m,1)))\n # 分类正确的,赋值为0,其他依然为1\n errArr[predictedVals == labelMat] = 0\n weightedError = D.T*errArr # 计算总误差乘以D,结果为一个值\n # nn=\"split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f\" % (i, threshVal, inequal, weightedError)\n # print(nn)\n # 找到误差最小的分类方式\n if weightedError < minError:\n minError = weightedError\n bestClasEst = predictedVals.copy()\n bestStump['dim'] = i # 最佳分割维度\n bestStump['thresh'] = threshVal # 最佳分割值\n bestStump['ineq'] = inequal # 
最佳分割方法:le/ge\n return bestStump,minError,bestClasEst\n\ndatMat,classLabels=loadSimpData()\nD=mat(ones((5,1))/5)\nbestStump,minError,bestClasEst=buildStump(datMat,classLabels,D)\n# print(\"***********************\")\n# print(bestStump)\n# print(minError)\n# print(bestClasEst)\n\n#INPUT:dataArr:训练集 classLabels:训练集的标签 numIt:弱分类器最多的个数\n#OUPUT:weakClassArr:弱分类器的线性组合\ndef adaBoostTrainDS(dataArr,classLabels,numIt=40):\n \"\"\"\n [summary]:\n 对每次迭代:\n 利用buildStump()函数找到最佳的单层决策树\n 将最佳单层决策树加入到单层决策树数组\n 计算alpha\n 计算新的权重向量D\n 更新累计类别估计值\n 如果错误率等于0.0,则退出循环\n \n Arguments:\n dataArr {[type]} -- 数据\n classLabels {[type]} -- 标签\n \n Keyword Arguments:\n numIt {int} -- 迭代次数 (default: {40})\n \n Returns:\n weakClassArr\n aggClassEst\n \"\"\"\n weakClassArr = []\n m = shape(dataArr)[0]\n D = mat(ones((m,1))/m) # 初始权重1/m,概率分布向量,元素之和为1。D在迭代中增加错分数据的权重\n aggClassEst = mat(zeros((m,1))) # 记录每个数据点的类别估计累计值\n for i in range(numIt):\n # 构建单层决策树\n bestStump,error,classEst = buildStump(dataArr,classLabels,D)\n # print(\"D:\",D.T)\n # 根据公式计算弱学习算法权重alpha,使error不等于0,因为分母不能为0\n alpha = float(0.5*log((1.0-error)/max(error,1e-16))) # 1/2*In((1-error)/error),分类器的权重。\n bestStump['alpha'] = alpha # 存储弱学习算法权重\n weakClassArr.append(bestStump) # 弱分类器的列表,存储单层决策树 \n # print(\"classEst: \",classEst.T)\n expon = multiply(-1*alpha*mat(classLabels).T,classEst) # 根据数学公式更改权重\n D = multiply(D,exp(expon)) # 为下一次迭代计算新的D\n D = D/D.sum() # 下一个分类的各样本的权重D(i+1)\n # 所有分类器的计算训练错误,如果为0,则提前退出循环(使用中断)\n aggClassEst += alpha*classEst\n # print(\"aggClassEst: \",aggClassEst.T)\n aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T,ones((m,1))) # sign()函数:如果数字为正数,则返回 1;如果数字为 0,则返回零 (0);如果数字为负数,则返回 -1\n errorRate = aggErrors.sum()/m\n # print(\"total error: \",errorRate)\n if errorRate == 0.0: break # 两种情况停止:(1)40个弱分类器的组合 (2)分类误差为0\n # return weakClassArr\n return weakClassArr,aggClassEst # plotROC()函数\n\n# print(adaBoostTrainDS(datMat,classLabels,9))\n\n\n'''\n参数:多个待分类样例 dataToClass,多个弱分类器 classifierArr\n'''\ndef adaClassify(datToClass,classifierArr):\n dataMatrix = mat(datToClass)\n m = shape(dataMatrix)[0]\n aggClassEst = mat(zeros((m,1)))\n for i in range(len(classifierArr)):\n classEst = stumpClassify(dataMatrix,classifierArr[i]['dim'],\\\n classifierArr[i]['thresh'],\\\n classifierArr[i]['ineq']) \n aggClassEst += classifierArr[i]['alpha']*classEst\n # print(aggClassEst)\n return sign(aggClassEst)\n\ndatArr,labelArr=loadSimpData()\nclassifierArr=adaBoostTrainDS(datArr,labelArr,30)\n# print(adaClassify([0,0],classifierArr))\n\ndef loadDataSet(fileName): \n numFeat = len(open(fileName).readline().split('\\t'))\n dataMat = []; labelMat = []\n fr = open(fileName)\n for line in fr.readlines():\n lineArr =[]\n curLine = line.strip().split('\\t')\n for i in range(numFeat-1):\n lineArr.append(float(curLine[i]))\n dataMat.append(lineArr)\n labelMat.append(float(curLine[-1]))\n return dataMat,labelMat\n\n# datArr,labelArr=loadDataSet('horseColicTraining2.txt')\n# classifierArr=adaBoostTrainDS(datArr,labelArr,10)\n\n# testArr,testLabelArr=loadDataSet('horseColicTest2.txt')\n# prediction10=adaClassify(testArr,classifierArr)\n# errArr=mat(ones((67,1))) # 有67行数据\n# sum=errArr[prediction10!=mat(testLabelArr).T].sum()\n# print(sum)\n\n'''\n参数:分类器的预测强度(即每个特征对应的类别累计估计值),数据标签\n'''\ndef plotROC(predStrengths, classLabels):\n import matplotlib.pyplot as plt\n cur = (1.0,1.0) # 绘制光标的位置\n ySum = 0.0 # 用于计算AUC的值\n numPosClas = sum(array(classLabels)==1.0) # 正例的数目,这里是178\n yStep = 1/float(numPosClas); # 纵坐标表示实际正例中被正确识别的概率\n xStep = 
1/float(len(classLabels)-numPosClas) # 横坐标表示实际反例中被错误识别的概率\n sortedIndicies = predStrengths.argsort() # 获取排序索引:由小到大\n fig = plt.figure()\n fig.clf()\n ax = plt.subplot(111)\n print(sortedIndicies.tolist()[0])\n # 循环遍历所有值,在每个点绘制线段\n for index in sortedIndicies.tolist()[0]: # tolist()作用:将矩阵(matrix)和数组(array)转化为列表。\n if classLabels[index] == 1.0: \n delX = 0; delY = yStep # delX是横坐标变化值,delY是纵坐标变化值\n else:\n delX = xStep; delY = 0\n ySum += cur[1] # 把每一小段的y值相加,最后乘以xStep就是面积AUC\n # 从cur到(cur[0]-delX,cur[1]-delY)绘制线\n ax.plot([cur[0],cur[0]-delX],[cur[1],cur[1]-delY], c='b') # 从右上到左下画线\n cur = (cur[0]-delX,cur[1]-delY) # 画完线之后,当前点作为光标起点\n ax.plot([0,1],[0,1],'b--') # 画对角线\n plt.xlabel('False positive rate'); plt.ylabel('True positive rate')\n plt.title('ROC curve for AdaBoost horse colic detection system')\n ax.axis([0,1,0,1]) # 设置坐标轴范围\n plt.savefig('ROC.png')\n plt.show()\n print(\"the Area Under the Curve is: \",ySum*xStep)\n\ndatArr,labelArr=loadDataSet('horseColicTraining2.txt')\nclassifierArr,aggClassEst=adaBoostTrainDS(datArr,labelArr,10)\nplotROC(aggClassEst.T,labelArr)","repo_name":"Zheng-shuang/Machine-Learning-in-Action","sub_path":"AdaBoost/adaboost.py","file_name":"adaboost.py","file_ext":"py","file_size_in_byte":10715,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39205203186","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nfrom util import savefig, get_path\n\nxlim = (-4, 2)\nylim = (-3, 3)\nx1 = np.linspace(*xlim, 500)\ny1 = np.linspace(*ylim, 500)\n\nx, y = np.meshgrid(x1, y1)\n\n\ndef f(y):\n return y\n\n\ndef rk1(f, y, h):\n k1 = f(y)\n y += h * k1\n return y\n\n\n# Midpoint\ndef rk2_midpoint(f, y, h):\n k1 = f(y)\n k2 = f(y + h * k1 / 2)\n y += h * k2\n return y\n\n# Ralston's third-order\ndef rk3_ralston(f, y, h):\n k1 = f(y)\n k2 = f(y + h * k1 / 2)\n k3 = f(y + h * k2 * 3 / 4)\n y += h * (2 * k1 + 3 * k2 + 4 * k3) / 9\n return y\n\n# Heun's second-order\ndef rk2(f, y, h):\n k1 = f(y)\n k2 = f(y + h * k1)\n y += h * (k1 + k2) / 2\n return y\n\n# Heun's third-order\ndef rk3(f, y, h):\n k1 = f(y)\n k2 = f(y + h * k1 / 3)\n k3 = f(y + h * k2 * 2 / 3)\n y += h * (k1 + 3 * k3) / 4\n return y\n\n\n\ndef rk4(f, y, h):\n k1 = f(y)\n k2 = f(y + h * k1 / 2)\n k3 = f(y + h * k2 / 2)\n k4 = f(y + h * k3)\n y += h * (k1 + 2 * k2 + 2 * k3 + k4) / 6\n return y\n\n\ndef rk(p, f, y, h):\n return [rk1, rk2, rk3, rk4][p - 1](f, y, h)\n\n\nfig, ax = plt.subplots(figsize=(2, 2))\nz = x + y * 1j\n\nfor p, c, lbl in [\n (1, 'C0', r'p=1'),\n (2, 'C1', r'p=2'),\n (3, 'C2', r'p=3'),\n (4, 'C3', r'p=4'),\n]:\n r = np.abs(rk(p, f, 1, z))\n ax.plot([], [], c=c, label=lbl)\n ax.contour(x1, y1, r, levels=[1.], colors=c)\n ax.contourf(x1, y1, r, levels=[0., 1.], colors='k', alpha=0.1)\n\nax.set_xlim(*xlim)\nax.set_ylim(*ylim)\nax.set_xlabel(r'$\\mathrm{Re}(h\\lambda)$')\nax.set_ylabel(r'$\\mathrm{Im}(h\\lambda)$')\nax.set_aspect('equal')\nax.set_xticks(range(xlim[0], xlim[1] + 1))\nax.set_yticks(range(ylim[0], ylim[1] + 1))\nax.axvline(x=0, lw=0.5, ls='-', c='k', zorder=-5)\nax.axhline(y=0, lw=0.5, ls='-', c='k', zorder=-5)\nax.legend(bbox_to_anchor=(1.4,1))\nsavefig(fig)\nplt.close(fig)\n","repo_name":"pkarnakov/am205","sub_path":"media/unit3/media/stabregion.py","file_name":"stabregion.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"95"} +{"seq_id":"577938313","text":"from 
src.DataStructures.Predicate import Predicate\nimport copy\nimport numpy as np\n\nclass Placed(Predicate):\n def __init__(self, name, arg_list=None):\n super(Placed, self).__init__(name, arg_list)\n\n def __deepcopy__(self, memodict={}):\n return Placed(copy.deepcopy(self.name), copy.deepcopy(self.arg_list))\n\n def apply(self, generated_values, ll_state):\n next_hl_state = generated_values[\"next_hl_state\"].getTrueProps()\n plank = generated_values[\"plank\"]\n for prop in next_hl_state:\n if \"human_placed\" in prop and plank in prop and \"location1\" in prop:\n t = np.eye(4) # change it to transform of location1\n break\n elif \"human_placed\" in prop and plank in prop and \"location2\" in prop:\n t = np.eye(4) # change it to transform of location1\n break\n p = ll_state.simulator.GetKinBody(plank)\n p.SetTransform()\n","repo_name":"AAIR-lab/ATAM","sub_path":"test_domains/Hangar/Predicates/Placed.py","file_name":"Placed.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"95"} +{"seq_id":"32266670857","text":"import pyHook\r\nimport pythoncom\r\nfrom threading import Thread\r\nfrom win32gui import GetCursorPos\r\n# from multiprocessing import Process\r\n# from mimik_keyboard import press_key, release_key\r\n# from mimik_mouse import move_mouse, click_mouse, wheel_mouse\r\n\r\n\r\nclass InputTracker:\r\n\r\n def __init__(self, communication_handle, update_pointer_pipe,\r\n pc_server_pointed=False):\r\n \"\"\"\r\n this is the initializing function of the tracking process. this process\r\n uses pyhook to \"subscribe\" certain io inputs to functions that will\r\n ultimately send then through pipe to the process which sends input to\r\n the client/controlled pc. the \"pc_server_pointed\" attribute is used\r\n to know if the controlled pc is the one currently running the\r\n session-manager server side component in order to know if to confirm\r\n ot reject the io input, will it be executed or not.another thread that\r\n runs while this process is running is the thread that changes the\r\n pc_server_pointed attribute through a pipe that connects this process\r\n to the process which receives matrix update from the clients.\r\n :param communication_handle: the pipe that io input information is sent\r\n through in order to send it to the controlled pc\r\n :param update_pointer_pipe: the pipe that is receiving updated\r\n \"pc_server_pointed\" attribute in order to change input direction\r\n :param pc_server_pointed: the pointer which is used to confirm or\r\n reject the io input and send it to the client\r\n \"\"\"\r\n self._pc_server_pointed = pc_server_pointed\r\n self._hm = pyHook.HookManager()\r\n self._hm.HookKeyboard()\r\n self._hm.HookMouse()\r\n self._communication_handle = communication_handle\r\n self._hm.MouseAll = self.mouse_event\r\n self._hm.SubscribeMouseLeftDown(self.mouse_left_down)\r\n self._hm.SubscribeMouseMiddleDown(self.mouse_middle_down)\r\n self._hm.SubscribeMouseRightDown(self.mouse_right_down)\r\n self._hm.SubscribeMouseLeftUp(self.mouse_left_up)\r\n self._hm.SubscribeMouseMiddleUp(self.mouse_middle_up)\r\n self._hm.SubscribeMouseRightUp(self.mouse_right_up)\r\n self._hm.KeyUp = self.keyboard_release\r\n self._hm.KeyDown = self.keyboard_press\r\n self._update_pointer_pipe = update_pointer_pipe\r\n self._update_thread = Thread(target=self.update_pointer)\r\n self._update_thread.start()\r\n pythoncom.PumpMessages()\r\n\r\n def update_pointer(self):\r\n while 1:\r\n data_recv = 
self._update_pointer_pipe.recv()\r\n if data_recv:\r\n self._pc_server_pointed = eval(data_recv)\r\n\r\n def keyboard_press(self, event):\r\n if not self._pc_server_pointed:\r\n self._communication_handle.send(\"k|p|\" + str(event.KeyID))\r\n return self._pc_server_pointed\r\n\r\n def keyboard_release(self, event):\r\n if not self._pc_server_pointed:\r\n self._communication_handle.send(\"k|r|\" + str(event.KeyID))\r\n return self._pc_server_pointed\r\n\r\n def mouse_event(self, event):\r\n # print(self._pc_server_pointed)\r\n if not self._pc_server_pointed:\r\n if event.MessageName == \"mouse move\":\r\n new_position = event.Position\r\n old_position = GetCursorPos()\r\n # print(event.Injected, \" injected\")\r\n movement = (new_position[0] - old_position[0],\r\n new_position[1] - old_position[1])\r\n self._communication_handle.send(\"m|m|\" + str(movement))\r\n elif event.MessageName == \"mouse wheel\":\r\n self._communication_handle.send(\"m|w|\" + str(event.Wheel))\r\n return self._pc_server_pointed\r\n\r\n def mouse_left_down(self, event):\r\n if not self._pc_server_pointed:\r\n self._communication_handle.send(\"m|c|l|d\")\r\n return self._pc_server_pointed\r\n\r\n def mouse_left_up(self, event):\r\n if not self._pc_server_pointed:\r\n self._communication_handle.send(\"m|c|l|u\")\r\n return self._pc_server_pointed\r\n\r\n def mouse_middle_down(self, event):\r\n if not self._pc_server_pointed:\r\n self._communication_handle.send(\"m|c|m|d\")\r\n return self._pc_server_pointed\r\n\r\n def mouse_middle_up(self, event):\r\n if not self._pc_server_pointed:\r\n self._communication_handle.send(\"m|c|m|u\")\r\n return self._pc_server_pointed\r\n\r\n def mouse_right_down(self, event):\r\n if not self._pc_server_pointed:\r\n self._communication_handle.send(\"m|c|r|d\")\r\n return self._pc_server_pointed\r\n\r\n def mouse_right_up(self, event):\r\n if not self._pc_server_pointed:\r\n self._communication_handle.send(\"m|c|r|u\")\r\n return self._pc_server_pointed\r\n","repo_name":"shachar731945/final-project-2018-heights-","sub_path":"server/input_tracker.py","file_name":"input_tracker.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"74189361913","text":"\"\"\"\nTools are singular physical objects or process grouped physical\nobjects that are used to perform a specific task\n\nthey have multiple taxonomies\nProcess, what sorta processes this tool can be used for\nAutomation, what level of automation this tool contains for accomplishing the process?\nApproach,\nViewpoint\n\"\"\"\nfrom catalog import Catalog\nfrom colorful.fields import RGBColorField\nfrom django.conf import settings\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_extensions.db.models import TimeStampedModel, TitleDescriptionModel\nfrom markdownx.models import MarkdownxField\nfrom tagulous.models import TagField, TagTreeModel\n\nfrom utils.models import StateMachineMixin\n\nfrom tools.exceptions import ToolAvailabilityException, ToolClearanceException\nfrom tools.querysets import ToolHistoryQuerySet, UserToolQuerySet\n\n\nclass ToolTaxonomy(TagTreeModel):\n \"\"\"A generic way of describing a tool, the top level is the base taxonomy\"\"\"\n\n # order = models.IntegerField(blank=False, default=0)\n\n # Published state\n # Allows users to submit new taxonomies that are evaluated and approved\n class State(Catalog):\n _attrs = \"value\", 
\"label\"\n in_review = 0, _(\"in review\")\n approved = 1, _(\"approved\")\n rejected = 2, _(\"rejected\")\n\n state = models.PositiveSmallIntegerField(\n _(\"State\"), choices=State._zip(\"value\", \"label\"), default=State.in_review.value\n )\n\n color = RGBColorField(null=True, blank=True)\n\n class Meta:\n verbose_name = _(\"Tool Taxonomy\")\n verbose_name_plural = _(\"Tool Taxonomies\")\n\n class TagMeta:\n force_lowercase = False\n space_delimiter = False\n\n def get_color(self):\n if not self.color and self.parent:\n # try to get color from parent (this can cascade)\n return self.parent.get_color()\n else:\n return self.color\n\n def get_absolute_url(self):\n return reverse(\"tools:taxonomy_detail\", kwargs={\"path\": self.path})\n\n\nclass ToolStates(Catalog):\n _attrs = \"value\", \"label\", \"badge_type\"\n none = \"none\", _(\"None\"), None\n available = \"available\", _(\"Available\"), \"success\"\n in_use = \"in_use\", _(\"In Use\"), \"warning\"\n disabled = \"disabled\", _(\"Disabled\"), \"danger\"\n\n\nclass ToolTransitions(Catalog):\n _attrs = \"value\", \"label\", \"source\", \"dest\"\n create = 0, _(\"Create\"), ToolStates.none.value, ToolStates.available.value\n borrow = 1, _(\"Borrow\"), ToolStates.available.value, ToolStates.in_use.value\n return_ = 2, _(\"Return\"), ToolStates.in_use.value, ToolStates.available.value\n decommission = 3, _(\"Decommission\"), \"*\", ToolStates.disabled.value\n reinstate = 4, _(\"Reinstate\"), ToolStates.disabled.value, ToolStates.available.value\n\n\nclass UserTool(StateMachineMixin, TitleDescriptionModel, TimeStampedModel):\n \"\"\"A tool owned by a User\"\"\"\n\n States = ToolStates\n\n Transitions = ToolTransitions\n\n class Visibility(Catalog):\n _attrs = \"value\", \"label\", \"card_class\"\n private = 0, _(\"Private\"), \"tool-private border-danger\"\n cleared = 1, _(\"Cleared Users\"), \"tool-cleared-only border-success\"\n public = 2, _(\"Public\"), \"tool-public\"\n\n class Clearance(Catalog):\n _attrs = \"value\", \"label\"\n none = 0, _(\"Available to all\")\n owner = 1, _(\"Owner cleared users only\")\n cleared = 2, _(\"Cleared users can approve anyone\")\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL, on_delete=models.PROTECT, related_name=\"tools\"\n )\n description = MarkdownxField(blank=True)\n state = models.CharField(\n max_length=10,\n choices=States._zip(\"value\", \"label\"),\n default=States.none.value,\n editable=False,\n )\n taxonomies = TagField(to=ToolTaxonomy, blank=True, related_name=\"tools\")\n visibility = models.PositiveSmallIntegerField(\n _(\"Visibility\"),\n choices=Visibility._zip(\"value\", \"label\"),\n default=Visibility.public.value,\n help_text=_(\"The level of user visibility for this tool\"),\n )\n clearance = models.PositiveSmallIntegerField(\n _(\"Clearance\"),\n choices=Clearance._zip(\"value\", \"label\"),\n default=Clearance.none.value,\n help_text=_(\"Who is allowed to clear a user to use this tool\"),\n )\n\n objects = UserToolQuerySet.as_manager()\n\n class Meta:\n ordering = (\"-created\",)\n get_latest_by = \"created\"\n verbose_name = _(\"Tool\")\n verbose_name_plural = _(\"Tools\")\n\n class StateMachine(StateMachineMixin.StateMachine):\n states = [{\"name\": state.value} for state in ToolStates]\n transitions = [\n {\"trigger\": trigger, \"source\": source, \"dest\": dest}\n for trigger, source, dest in ToolTransitions._zip(\"name\", \"source\", \"dest\")\n ]\n after_state_change = \"record_transition\"\n\n def __str__(self):\n return self.title\n\n def 
get_absolute_url(self):\n return reverse(\"tools:detail\", kwargs={\"pk\": self.pk})\n\n def record_transition(self, event):\n if not event.kwargs.get(\"skip_save\", False):\n self.save()\n self.history.create(\n user=event.kwargs.get(\"user\"), action=self.Transitions(event.event.name, \"name\").value\n )\n\n def check_clearance(self, user):\n return self.permissions.filter(cleared_user=user).exists()\n\n def user_can_grant_clearance(self, user):\n \"\"\"See if we're allowed to grant clearance\"\"\"\n level = self.Clearance(self.clearance)\n if level == self.Clearance.none:\n return True\n if level == self.Clearance.owner:\n return self.user == user\n if level == self.Clearance.cleared:\n return self.user == user or self.check_clearance(user)\n\n def user_can_borrow(self, user):\n return self._meta.model.objects.borrowable_to_user(user).filter(pk=self.pk).exists()\n\n def prepare_borrow(self, event):\n \"\"\"Do validation before allowing a user to borrow a tool\"\"\"\n user = event.kwargs.get(\"user\")\n if not self.user_can_borrow(user):\n raise ToolClearanceException(\"%s isn't allowed to borrow this tool\" % user)\n # Is this needed?\n if not self.is_available():\n raise ToolAvailabilityException()\n\n @property\n def cover_photo(self):\n try:\n return self.photos.latest()\n except ToolPhoto.DoesNotExist:\n return None\n\n\nclass ToolHistory(TimeStampedModel):\n tool = models.ForeignKey(UserTool, on_delete=models.CASCADE, related_name=\"history\")\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n on_delete=models.PROTECT,\n blank=True,\n null=True,\n related_name=\"tool_history\",\n )\n action = models.PositiveSmallIntegerField(choices=UserTool.Transitions._zip(\"value\", \"label\"))\n\n objects = ToolHistoryQuerySet.as_manager()\n\n class Meta:\n ordering = (\"-created\",)\n get_latest_by = \"created\"\n verbose_name = _(\"Tool History\")\n verbose_name_plural = _(\"Tool Histories\")\n\n def __str__(self):\n action = UserTool.Transitions(self.action).label\n return f\"{self.tool} - {action}\"\n\n\nclass ClearancePermission(TimeStampedModel):\n tool = models.ForeignKey(UserTool, on_delete=models.CASCADE, related_name=\"permissions\")\n cleared_by_user = models.ForeignKey(\n settings.AUTH_USER_MODEL, on_delete=models.PROTECT, related_name=\"given_tool_permissions\"\n )\n cleared_user = models.ForeignKey(\n settings.AUTH_USER_MODEL, on_delete=models.PROTECT, related_name=\"tool_permissions\"\n )\n\n class Meta:\n ordering = (\"-created\",)\n get_latest_by = \"created\"\n unique_together = ((\"tool\", \"cleared_user\"),)\n verbose_name = _(\"Clearance\")\n verbose_name_plural = _(\"Clearances\")\n\n def __str__(self):\n return f\"{self.cleared_by_user} cleared {self.cleared_user} ({self.tool})\"\n\n\nclass ToolPhoto(TimeStampedModel):\n tool = models.ForeignKey(UserTool, on_delete=models.CASCADE, related_name=\"photos\")\n uploading_user = models.ForeignKey(\n settings.AUTH_USER_MODEL, on_delete=models.PROTECT, related_name=\"uploaded_photos\"\n )\n file = models.FileField()\n title = models.CharField(max_length=255, blank=True)\n\n class Meta:\n ordering = (\"-created\",)\n get_latest_by = \"created\"\n","repo_name":"bkmakerspace/toolhub","sub_path":"tools/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8419,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"95"} +{"seq_id":"28463637700","text":"import unittest\r\n\r\nimport requests\r\nfrom parameterized import parameterized\r\n\r\nfrom 获取用户信息.api.user_api import 
TestUser\r\nfrom 获取用户信息.read_json.read_user_json import ReadJson\r\n\r\n\r\ndef get_data():\r\n datas = ReadJson(\"use.json\").read_uesr_json()\r\n arr = []\r\n for data in datas.values():\r\n arr.append((data.get(\"headers\"),\r\n data.get(\"params\"),\r\n data.get(\"url\"),\r\n data.get(\"status_code\")))\r\n return arr\r\n\r\n\r\nclass TestUserCase(unittest.TestCase):\r\n @parameterized.expand(get_data())\r\n def test_user(self, headers, params, url, status_code):\r\n # headers = {\r\n # \"Content-Type\": \"application/json\",\r\n # \"Authorization\": \"Token 0f3f4f9e398819eb92f6dae3abe7958c50670338\"\r\n # }\r\n # params = {\"uid\": \"111579943374652448\"}\r\n # url = \"http://test.zaitakugeek.cn:8000/user/profile\"\r\n r = TestUser().user_api(headers, params, url)\r\n # r = requests.get(headers=headers,url=url,params=params)\r\n print(r.json())\r\n print(r.status_code)\r\n try:\r\n self.assertEqual(status_code, r.status_code)\r\n except AssertionError:\r\n raise\r\n\r\n\r\nif __name__ == '__main__':\r\n TestUserCase().test_user()\r\n","repo_name":"cai9987/test_automation","sub_path":"接口自动化/获取用户信息/case/user_case.py","file_name":"user_case.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32053157480","text":"\nimport json\n\nCLOCK = 19520\nADJUST_HIGH = 0\nADJUST_LOW = CLOCK // 2\n\ndef decode(pulses):\n\tresult = []\n\tcurrent_byte = 0\n\tbit_count = 0\n\tlevel = True\n\tfor pulse in pulses:\n\t\tif level:\n\t\t\t# falling edge, round down prev high\n\t\t\tnew_bits = (pulse + ADJUST_HIGH) // CLOCK\n\t\t\tfor _ in range(new_bits):\n\t\t\t\tcurrent_byte <<= 1\n\t\t\t\tcurrent_byte |= 1\n\t\t\tbit_count += new_bits\n\t\telse:\n\t\t\t# rising edge, round up prev low\n\t\t\tnew_bits = (pulse + ADJUST_LOW) // CLOCK\n\t\t\tcurrent_byte <<= new_bits\n\t\t\tbit_count += new_bits\n\t\tlevel = not level\n\t\tif bit_count == 10:\n\t\t\tresult.append((current_byte >> 1) & 0xFF)\n\t\t\tcurrent_byte = 0\n\t\t\tbit_count = 0\n\t\tif bit_count > 10:\n\t\t\tresult.append(0x1000 + bit_count)\n\t\t\treturn result\n\tcurrent_byte <<= 10 - bit_count\n\tresult.append((current_byte >> 1) & 0xFF)\n\treturn result\n\nif __name__ == \"__main__\":\n with open(\"irdata.json\") as f:\n for item in json.load(f)[\"data\"]:\n if item[\"id\"].startswith(\"mw\"):\n decoding = decode(item[\"A\"][1:])\n print(\" \".join(\"%02X\" % x for x in decoding), end=\"\\t\")\n print(item[\"id\"])\n\n","repo_name":"dmcomm/irplot","sub_path":"decode_witches.py","file_name":"decode_witches.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"27934877855","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 10 13:30:21 2019\r\n\r\n@author: Adi\r\n\"\"\"\r\n\r\n\r\n\r\n### Packages\r\n\r\nimport psycopg2\r\n\r\n\r\n### Global variables\r\n\r\n#Database connection parameters\r\nnazwa_bazy = 'database_name'\r\nuzytkownik = 'user_name' \r\nhaslo = 'password' \r\n\r\n\r\n\r\n################################ PART I - HEXAGONAL GRID ##################################\r\n\r\n\r\n#Function arguments\r\n\r\nsrid = 4326 # EPSG Code - Coordinate System\r\noffset_y = 0.1 # Side length of the hexagon\r\nwarstwa_siatki = \"hex_grid_name\" # The name of the grid you want to create\r\nwarstwa_zasiegu = \"coverage_layer\" # The name of the layer for which area you want to draw the grid\r\n\r\n\r\n\r\ndef siatka_heksagonalna(srid, 
offset_y, warstwa_siatki, warstwa_zasiegu):\r\n \r\n \"\"\"\r\n \r\n The function receives from the user following arguments: srid - coordinate system, offset_y - side length of the hexagon, warstwa_siatki - name of the layer storing the grid of regular hexagons, warstwa_zasiegu - the name of the layer for which area you want to draw the grid. \r\n \r\n \r\n \"\"\"\r\n ### Connecting with database\r\n \r\n try:\r\n \r\n conn = psycopg2.connect(host=\"localhost\",database=nazwa_bazy, user=uzytkownik, password=haslo)\r\n print('OK')\r\n \r\n except:\r\n print('Something went wrong...')\r\n \r\n \r\n ### Creating a cursor\r\n \r\n cur = conn.cursor()\r\n \r\n \r\n \r\n ## Query - range\r\n \r\n query3 = \"\"\"select min(ST_XMin(geom)) as minX , max(ST_XMax(geom)) as maxX, min(ST_YMin(geom)) as minY, max(ST_YMax(geom)) as maxY from {warstwa_zasiegu};\"\"\".format(warstwa_zasiegu = warstwa_zasiegu)\r\n \r\n \r\n cur.execute(query3)\r\n b = cur.fetchone()\r\n \r\n minX,maxX,minY,maxY= list(b)\r\n \r\n \r\n \r\n offset_x = offset_y*pow(3,1/2)/2 # offset_x - the height of an equilateral triangle\r\n \r\n offset2_x = offset_x*2 # real shift, horizontal difference between hexagons\r\n \r\n \r\n ldX,ldY = minX - offset_x, minY-0.5*offset_y\r\n lgX,lgY = minX- offset_x, minY+0.5*offset_y\r\n sgX, sgY = minX, minY + offset_y\r\n pgX,pgY = minX + offset_x, minY+0.5*offset_y\r\n pdX,pdY = minX + offset_x, minY-0.5*offset_y\r\n sdX, sdY = minX, minY - offset_y\r\n \r\n \r\n offset2_y = offset_y*3 # real shift - vertical difference between hexagons\r\n \r\n \r\n ## Calculation of the maximum value for the SQL function - generate_series\r\n \r\n zasiegX = abs(maxX - minX) # difference max - min horizontally - layer extent \r\n offsety_w_x = int(round(zasiegX/offset2_x + 1,0)) # the number of regular hexagons that should cover the horizontal layer / 1 in addition\r\n ilosc_x = offsety_w_x * offset2_x # calculation of the boundary range for hexagons - necessary for the SQL function\r\n \r\n zasiegY = abs(maxY - minY) # analogous steps for vertical coverage\r\n offsety_w_y = int(round(zasiegY/offset2_y + 1,0))\r\n ilosc_y = offsety_w_y * offset2_y \r\n \r\n \r\n ## Query creating table in the database storing regular hexagons\r\n \r\n query2= \"\"\"\r\n create TABLE {warstwa_siatki} (gid serial not null primary key);\r\n SELECT addgeometrycolumn('{warstwa_siatki}','geom', {srid}, 'POLYGON', 2);\r\n \r\n INSERT INTO {warstwa_siatki} (geom)\r\n SELECT st_translate(geom, x_series, y_series)\r\n from generate_series(0, {ilosc_x}, {roznica_x}) AS x_series,\r\n generate_series(0, {ilosc_y}, {roznica_y}) as y_series,\r\n \r\n (\r\n SELECT ST_setSRID('POLYGON(({ldX} {ldY},{lgX} {lgY},{sgX} {sgY},{pgX} {pgY},{pdX} {pdY},{sdX} {sdY},{ldX} {ldY}))'::geometry,{srid}) as geom\r\n UNION\r\n SELECT ST_Translate(st_setSRID('POLYGON(({ldX} {ldY},{lgX} {lgY},{sgX} {sgY},{pgX} {pgY},{pdX} {pdY},{sdX} {sdY},{ldX} {ldY}))'::geometry,{srid}), {offset_x}, {offset2_y}) as geom\r\n \r\n ) as two_hex;\r\n \r\n \"\"\".format(srid=srid, minX = minX, maxX=maxX, minY=minY, maxY=maxY,ldX=ldX, ldY=ldY, lgX=lgX, lgY=lgY, sgX=sgX, sgY=sgY, pgX=pgX, pgY=pgY, pdX=pdX,pdY=pdY,sdX=sdX,sdY=sdY, roznica_x = offset2_x, roznica_y = offset2_y, offset_x=offset_x, offset2_y = 1.5*offset_y, ilosc_x = ilosc_x, ilosc_y = ilosc_y, warstwa_siatki = warstwa_siatki)\r\n \r\n \r\n \r\n try:\r\n cur.execute(query2)\r\n print('OK')\r\n except:\r\n print('Something went wrong')\r\n \r\n \r\n \r\n #Commit for table creating operation\r\n conn.commit()\r\n \r\n 
conn.close()\r\n\r\n\r\n############################ PART II ######################################\r\n########### STATISTICAL ANALYSIS BASED ON HEXAGONES ############\r\n\r\n\r\n#Function arguments\r\n\r\n\r\nwarstwa_siatki = \"hex_grid_name\" # The name of the grid you created\r\nwarstwa_zasiegu = \"coverage_layer\" # The name of the layer you want to calculate statistics based on\r\nwarstwa_docelowa = \"target_layer\" # The name of layer you want to create - it will contain collectd hexagonal stats\r\npole_wagowe= 0 # Argument with the default value of zero, do not change it in the first version of the code\r\n\r\n\r\n\r\ndef statystyki_heksagony(warstwa_zasiegu, warstwa_docelowa, pole_wagowe, warstwa_siatki):\r\n \"\"\"\r\n \r\n The function calculates statistics based on the drawn hexagons.\r\n \r\n \r\n \"\"\"\r\n \r\n \r\n #### Connecting with database\r\n \r\n try:\r\n \r\n conn = psycopg2.connect(host=\"localhost\",database=nazwa_bazy, user=uzytkownik, password=haslo)\r\n print('OK')\r\n\r\n except:\r\n print('Something went wrong...')\r\n \r\n \r\n \r\n cur = conn.cursor() # Creating cursor\r\n \r\n \r\n query = \"select distinct(st_geometrytype(geom)) from {};\".format(warstwa_zasiegu) # query retrieving the layer geometry type\r\n cur.execute(query)\r\n typ_warstwy = cur.fetchone()\r\n typ_warstwy=str(list(typ_warstwy)) # string conversion - character-by-character comparison\r\n \r\n \r\n ### POINT LAYER ###\r\n \r\n if typ_warstwy == \"['ST_Point']\" or \"['ST_MultiPoint']\":\r\n if pole_wagowe != 0:\r\n query2 = \" \"\r\n print(\"first if\")\r\n else:\r\n try:\r\n query2 = \"select a.gid, a.geom, count(b.geom) INTO {warstwa_docelowa} from {warstwa_siatki} as a, {warstwa_zasiegu} as b where st_intersects(a.geom, b.geom)=true group by a.gid; alter table {warstwa_docelowa} ADD PRIMARY KEY (gid); \".format(warstwa_docelowa = warstwa_docelowa, warstwa_zasiegu = warstwa_zasiegu, warstwa_siatki = warstwa_siatki)\r\n cur.execute(query2)\r\n conn.commit()\r\n conn.close()\r\n print(\"All rigtht\")\r\n except:\r\n print(\"Something went wrong for point...\")\r\n else: \r\n print('does not see ST_Point...')\r\n \r\n \r\n \r\n ### LINE LAYER ###\r\n \r\n if typ_warstwy == \"['ST_MultiLineString']\":\r\n if pole_wagowe != 0:\r\n query2 = \" \"\r\n print(\"first if\")\r\n else:\r\n try:\r\n query2 = \"select sum(st_length(st_intersection(a.geom,b.geom))),a.geom, a.gid INTO {} from {warstwa_siatki} as a, {} as b where st_intersects(b.geom, a.geom) group by a.gid;\".format(warstwa_docelowa, warstwa_zasiegu, warstwa_siatki=warstwa_siatki)\r\n cur.execute(query2)\r\n conn.commit()\r\n conn.close()\r\n print(\"All right\")\r\n except:\r\n print(\"Something went wrong for line...\")\r\n else: \r\n print('does not see ST_MultiLineString...')\r\n \r\n \r\n \r\n ### POLYGON LAYER ###\r\n \r\n if typ_warstwy == \"['ST_MultiPolygon']\":\r\n if pole_wagowe != 0:\r\n query2 = \" \"\r\n print(\"first if\")\r\n else:\r\n try:\r\n query2 = \"select sum(st_area(st_intersection(a.geom,b.geom))),a.geom, a.gid INTO {warstwa_docelowa} from {warstwa_siatki} as a, {warstwa_zasiegu} as b where st_intersects(b.geom, a.geom) group by a.gid;\".format(warstwa_docelowa = warstwa_docelowa, warstwa_zasiegu = warstwa_zasiegu, warstwa_siatki=warstwa_siatki)\r\n cur.execute(query2)\r\n conn.commit()\r\n conn.close()\r\n print(\"All right\")\r\n except:\r\n print(\"Something went wrong for polygon...\")\r\n else: \r\n print('does not see ST_MultiPolygon...')\r\n \r\n \r\n \r\n \r\n 
\r\n","repo_name":"AdiKom95/hexagons","sub_path":"hexagons_procedural_01.py","file_name":"hexagons_procedural_01.py","file_ext":"py","file_size_in_byte":8285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"17154299402","text":"from flask_restful import Resource, Api\nfrom flask_restful import reqparse\nfrom flask import jsonify\nfrom flask import abort\nimport extra.auth as auth\nfrom models import Character\n\ncharacters_parser = reqparse.RequestParser()\ncharacters_parser.add_argument('title', required=True)\ncharacters_parser.add_argument('name', required=True)\ncharacters_parser.add_argument('city', required=True)\ncharacters_parser.add_argument('age', required=True, type=int)\ncharacters_parser.add_argument('info', required=True)\ncharacters_parser.add_argument('ispublic', required=False, type=bool)\n\n\nclass CharactersListApi(Resource):\n def __init__(self, auth):\n super(CharactersListApi, self).__init__()\n self._auth = auth\n\n def get(self):\n characters = Character.query.all()\n return jsonify(characters=[i.serialize for i in characters])\n\n def post(self):\n if not self._auth.is_authorized():\n abort(401)\n args = characters_parser.parse_args()\n characters = Character.add(args['name'], args['title'], args['city'],\n args['age'], args['info'], args['ispublic'], self._auth.get_user())\n return jsonify(characters.serialize)\n\n\nclass CharactersApi(Resource):\n\n def __init__(self, auth):\n super(CharactersApi, self).__init__()\n self._auth = auth\n\n def get(self, id):\n characters = Character.query.filter_by(id=id).first()\n if not characters:\n abort(404)\n return jsonify(characters.serialize)\n\n def delete(self, id):\n if not self._auth.is_authorized():\n abort(401)\n characters = Character.query.filter_by(id=id).first()\n if characters.user_id != self._auth.get_user().id:\n abort(403)\n Character.delete(characters)\n return jsonify({\"deleted\": True})\n","repo_name":"Maria173/web-server-gameroll","sub_path":"api/news_api.py","file_name":"news_api.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2259817697","text":"import pandas as pd \nimport numpy as np\nimport sys\nimport os\nimport pathlib\n\nanss = [11,12,13,14,15,16,17]\ntotal = pd.DataFrame({'id':range(20000)})\nfor i in anss:\n\ttotal['y'+str(i)] = pd.read_csv('ans/ans_'+str(i)+'.csv')['label'].values\n\ntotal['ans'] = [0]*len(total)\nfor i in anss:\n\ttotal['ans']+=total['y'+str(i)]\n\non_thres = (total['ans'].values == len(anss)/2).astype(np.int)\nfor idx in range(len(on_thres)):\n\tif on_thres[idx]==1:\n\t\tprint(idx)\n\nprint(on_thres.sum())\n\ny_test = (total['ans'].values >= (len(anss)/2) ).astype(np.int)\n\nif os.path.dirname(sys.argv[1])!='': \n\tif not os.path.isdir(os.path.dirname(sys.argv[1])):\n\t\tdirname = os.path.dirname(sys.argv[1])\n\t\todir = pathlib.Path(dirname)\n\t\todir.mkdir(parents=True, exist_ok=True)\n\nwith open(sys.argv[1], 'w') as f:\n\tf.write('id,label')\n\tf.write('\\n')\n\tfor i in range(y_test.shape[0]):\n\t\tf.write(str(i))\n\t\tf.write(',')\n\t\tf.write(str(y_test[i]))\n\t\tf.write('\\n')","repo_name":"zytyz/ML2019SPRING","sub_path":"hw6/voting.py","file_name":"voting.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42492485354","text":"import matplotlib.pyplot\nimport sys\nimport numpy as np\nimport 
os\nimport copy\nimport stitch.mesospim\nimport stitch.Rigid\n\nme = \"b.py\"\nshifts = []\nwhile True:\n sys.argv.pop(0)\n if len(sys.argv) and len(sys.argv[0]) > 1 and sys.argv[0][0] == '-':\n if sys.argv[0][1] == 'h':\n usg()\n elif sys.argv[0][1] == 'f':\n sys.argv.pop(0)\n if len(sys.argv) == 0:\n sys.stderr.write(\"%s: not enough arguments for -f\\n\" % me)\n sys.exit(1)\n shift_path = sys.argv[0]\n with open(shift_path) as file:\n for line in file:\n x, y, z, corr = line.split()\n shifts.append((int(x), int(y), int(z)))\n else:\n sys.stderr.write(\"%s: unknown option '%s'\\n\" % (me, sys.argv[0]))\n sys.exit(2)\n else:\n break\ntry:\n (tx, ty), (nx, ny, nz), (ox,\n oy), path = stitch.mesospim.read_tiles(sys.argv)\nexcept ValueError:\n tx = ty = 2\n nx = ny = nz = 200\n ox = oy = 10\n path = sys.argv\ndtype = np.dtype(\" nz - 1:\n z = nz - 1\n s = src[i][::stride[0], ::stride[1], z]\n y = ty * (ny - oy) - y\n extent = (x, x + nx, y - ny, y)\n cmap = 'Greens' if se[0] == i else 'Greys'\n if art[i] is not None:\n art[i].remove()\n vmin = np.quantile(s, 0.1)\n vmax = np.quantile(s, 0.9)\n art[i] = matplotlib.pyplot.imshow(s.T,\n alpha=0.5,\n cmap=cmap,\n vmin=vmin,\n vmax=vmax,\n extent=extent)\n\n\ndef press(event):\n n = len(src)\n key = event.key\n if key == \"down\":\n se[0] += 1\n if se[0] == n:\n se[0] = 0\n for i in range(n):\n draw(i)\n fig.canvas.draw()\n elif key == \"up\":\n se[0] -= 1\n if se[0] == -1:\n se[0] = n - 1\n for i in range(n):\n draw(i)\n fig.canvas.draw()\n elif key == \"right\":\n zslice[0] += stride[2]\n for i in range(n):\n draw(i)\n fig.canvas.draw()\n elif key == \"left\":\n zslice[0] -= stride[2]\n for i in range(n):\n draw(i)\n fig.canvas.draw()\n elif key == \"h\":\n positions[se[0]][0] -= stride[0]\n draw(se[0])\n fig.canvas.draw()\n elif key == \"l\":\n positions[se[0]][0] += stride[0]\n draw(se[0])\n fig.canvas.draw()\n elif key == \"j\":\n positions[se[0]][1] -= stride[1]\n draw(se[0])\n fig.canvas.draw()\n elif key == \"k\":\n positions[se[0]][1] += stride[1]\n draw(se[0])\n fig.canvas.draw()\n elif key == \"i\":\n positions[se[0]][2] -= stride[2]\n draw(se[0])\n fig.canvas.draw()\n elif key == \"n\":\n positions[se[0]][2] += stride[2]\n draw(se[0])\n fig.canvas.draw()\n elif key == \"z\":\n if art[se[0]] == None:\n draw(se[0])\n fig.canvas.draw()\n else:\n art[se[0]].remove()\n art[se[0]] = None\n fig.canvas.draw()\n elif key == \"R\":\n stride[:] = [2 * e for e in stride]\n for i in range(n):\n draw(i)\n fig.canvas.draw()\n elif key == \"r\":\n stride[:] = [max(1, e // 2) for e in stride]\n for i in range(n):\n draw(i)\n fig.canvas.draw()\n elif key == \"q\":\n sys.exit(0)\n elif key == \"s\":\n print()\n with open(\".shifts\", \"w\") as file:\n for (x, y, z), (x0, y0, z0) in zip(positions, positions0):\n print(x, y, z)\n file.write(\"%d %d %d %.16e\\n\" % (x - x0, y - y0, z - z0, 0.0))\n\n\nfor i in range(len(src)):\n draw(i)\nfig.canvas.mpl_connect('key_press_event', press)\nfig.tight_layout()\nmatplotlib.pyplot.show()\n","repo_name":"cselab/stitch","sub_path":"poc/viewer/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"95"} +{"seq_id":"72589827833","text":"import sys\nimport ratemyprofessor\n\nprofessorName = sys.argv[1] + \" \" + sys.argv[2]\nindex = sys.argv[3] \n\nschool = ratemyprofessor.get_schools_by_name(\"University of Texas at Dallas\")[0]\nprofessor = ratemyprofessor.get_professors_by_school_and_name(school, 
professorName)[(int) (index)]\n\nrecentComment = professor.get_ratings()[0].comment\nscore = 2 * professor.rating - professor.difficulty\nisGood = score > 3\n\nprint(\"Professor: %s (%s)/Rating: %s/Difficulty: %s/Good Overall: %s/Recent Comment: %s\" % (professor.name, professor.department, professor.rating, professor.difficulty, isGood, recentComment))\n","repo_name":"MagnusChase03/Magnus","sub_path":"backEnd/ProfessorSearch.py","file_name":"ProfessorSearch.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"12244357940","text":"\"\"\" \nGiven strings s1, s2, and s3, find whether s3 is formed by an interleaving of s1 and s2.\n\nAn interleaving of two strings s and t is a configuration where s and t are divided into n and m non-empty substrings respectively, \nsuch that:\n\ns = s1 + s2 + ... + sn\nt = t1 + t2 + ... + tm\n|n - m| <= 1\nThe interleaving is s1 + t1 + s2 + t2 + s3 + t3 + ... or t1 + s1 + t2 + s2 + t3 + s3 + ...\nNote: a + b is the concatenation of strings a and b.\n\n \n\nExample 1:\nInput: s1 = \"aabcc\", s2 = \"dbbca\", s3 = \"aadbbcbcac\"\nOutput: true\nExplanation: One way to obtain s3 is:\nSplit s1 into s1 = \"aa\" + \"bc\" + \"c\", and s2 into s2 = \"dbbc\" + \"a\".\nInterleaving the two splits, we get \"aa\" + \"dbbc\" + \"bc\" + \"a\" + \"c\" = \"aadbbcbcac\".\nSince s3 can be obtained by interleaving s1 and s2, we return true.\n\nAlgorithm:\n\n1. Check if the length of s1 + s2 = s3, otherwise return False \n2. Create three pointers, and initialize them to zero\n3. Create a boolean variable that is going to store False to begin with \n4. First: if i1 < len(s1) and s3[i3] == s1[i1] then update found if the recursive call returns True when pointers are incremented. \n5. 
Second, if i2 < len(s2) and s3[i3] == s2[i2] then update found if the recursive call returns True when pointers are incremented\n\"\"\"\n\n\ndef interleaving(s1, s2, s3):\n\n if len(s1) + len(s2) != len(s3):\n return False \n\n return _interleaving(s1, s2, s3, 0, 0, 0)\n\ndef _interleaving(s1, s2, s3, i1, i2, i3):\n if i3 == len(s3):\n return True \n\n found = False \n if i1 < len(s1) and s3[i3] == s1[i1]:\n found = found or _interleaving(s1, s2, s3, i1+1, i2, i3+1)\n if i2 < len(s2) and s3[i3] == s2[i2]:\n found = found or _interleaving(s1, s2, s3, i1, i2+1, i3+1)\n\n return found \n\nif __name__ == \"__main__\":\n\n s1 = \"aabcc\"\n s2 = \"dbbca\"\n s3 = \"aadbbcbcac\"\n print(interleaving(s1,s2,s3))\n\n # Test case 01 \n s1 = \"\" \n s2 = \"\" \n s3 = \"\"\n print(interleaving(s1,s2,s3))\n","repo_name":"monika0603/lovely-python","sub_path":"strings/interleave_strings.py","file_name":"interleave_strings.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18486634883","text":"from selenium import webdriver\n\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service as ChromeService\n\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom selenium.webdriver.firefox.service import Service as FirefoxService\n\n\nLAUNCH_DRIVER = 'remote' # 'local' >> use webdriver_manager :: 'remote' >> use webdriver.Remote - for grid\n\n\nclass DriverFactory:\n\n @staticmethod\n def get_driver(browser):\n if browser == 'chrome':\n options = webdriver.ChromeOptions()\n options.add_argument(\"--start-maximized\")\n options.add_argument(\"--disable-infobars\")\n options.add_argument(\"--disable-extensions\")\n options.add_argument('--disable-notifications') # notification handler\n options.add_argument('--ignore-certificate-errors')\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--verbose\")\n\n if LAUNCH_DRIVER == 'local':\n return webdriver.Chrome(ChromeDriverManager().install(), options=options) # local\n elif LAUNCH_DRIVER == 'remote':\n options.set_capability(\"browserName\", \"chrome\") # grid\n return webdriver.Remote(\"http://localhost:4444/wd/hub\", options=options) # grid\n\n elif browser == 'firefox':\n options = webdriver.FirefoxOptions()\n options.add_argument(\"--start-maximized\")\n options.add_argument(\"--disable-infobars\")\n options.add_argument(\"--disable-extensions\")\n options.add_argument('--disable-notifications') # notification handler\n options.add_argument('--ignore-certificate-errors')\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--verbose\")\n\n if LAUNCH_DRIVER == 'local':\n # service = FirefoxService(executable_path=GeckoDriverManager().install()) # local future\n # return webdriver.Firefox(service=service, options=options) # local future\n return webdriver.Firefox(executable_path=GeckoDriverManager().install(), options=options) # local\n elif LAUNCH_DRIVER == 'remote':\n options.set_capability(\"browserName\", \"firefox\") # grid\n return webdriver.Remote(\"http://localhost:4444/wd/hub\", options=options) # grid\n\n raise Exception('Provide valid driver name')","repo_name":"KarolZajkowski/TAB_UI_testing","sub_path":"Page_Object_Pattern/utils/driver_factory.py","file_name":"driver_factory.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"35692321366","text":"import 
cherrypy\nimport json\n\nfrom ratelimit import *\nfrom services import ShortURL\n\n\n@cherrypy.expose\nclass ShortURLAPIv1(object):\n \"\"\"Create shortened URLs and convert them back.\n\n REST URIs for v1:\n - [GET] /shorturl/v1/ RETURNS data including both URLs, based on short URL.\n - [POST] /shorturl/v1/ RETURNS data including both URLs, based on original URL.\n\n The REST URIs are rate limited (2 requests per second) using the ratelimit module\n found here: https://pypi.python.org/pypi/ratelimit/1.1.0.\n\n Motivation for the REST URI architecture came from GOOG.LE URL shortener. The idea is to let\n METHODS tell us what we need to do (and not nest deeply). Thus, the REST URIs are simple and\n return the same data (with just different inputs). The return data could be modified in future\n versions.\n \"\"\"\n\n ACCEPTED_CONTENT_TYPES = dict(json='application/json', plain='text/plain')\n\n def __init__(self):\n \"\"\"Initialization instantiations.\"\"\"\n self.shorturl = ShortURL()\n\n\n def process_params(self, params):\n \"\"\"Support AJAX requests that are JSON formatted.\n\n This method is needed in order to support AJAX requests if a `Content-Type` header is\n declared. I have opted to require AJAX callers declare this because it is more beneficial\n to be explicit. Wait, explicit, go figure ;)\n \"\"\"\n header_content_type = cherrypy.request.headers.get('Content-Type')\n if not params:\n if header_content_type == self.ACCEPTED_CONTENT_TYPES.get('plain'):\n params = json.loads(cherrypy.request.body.read().decode('utf-8'))\n elif header_content_type == self.ACCEPTED_CONTENT_TYPES.get('json'):\n params = cherrypy.request.json\n else:\n raise cherrypy.HTTPError(400, 'ERROR_INCORRECT_OR_MISSING_PARAM')\n elif params and (header_content_type in self.ACCEPTED_CONTENT_TYPES.values()):\n # supports GETs\n params = json.loads(list(params.keys())[0])\n\n return params\n\n\n def retrieve_param(self, identifier, params):\n \"\"\"Retrieve the specified parameter.\"\"\"\n identified_param = params.get(identifier)\n\n if not identified_param:\n raise cherrypy.HTTPError(400, 'ERROR_INCORRECT_OR_MISSING_PARAM')\n\n return identified_param\n\n\n @rate_limited(2)\n def OPTIONS(self, *args, **kwargs):\n \"\"\"Accept CORS preflight check if 'Content-Type' header is not standard for AJAX.\n \n The BODY of OPTIONS is not important. Accepts any params (needed).\n\n per https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS#Overview\n \"...the specification mandates that browsers \"preflight\" the request, soliciting\n supported methods from the server with an HTTP OPTIONS request method...\"\n\n per http://api.jquery.com/jquery.ajax/ (regarding `contentType`)\n \"Note: For cross-domain requests, setting the content type to anything other than\n application/x-www-form-urlencoded, multipart/form-data, or text/plain will trigger\n the browser to send a preflight OPTIONS request to the server.\"\n\n Important REQEUST HEADERS to look at\n (example per jQuery `contentType: 'application/json'`):\n Request Headers:\n ACCESS-CONTROL-REQUEST-HEADERS: content-type\n ACCESS-CONTROL-REQUEST-METHOD: GET\n\n See corsheaders() in the main configuration. They \"answer\" the \"preflight\" request. 
\n \"\"\"\n return\n\n\n @cherrypy.tools.json_in()\n @cherrypy.tools.json_out()\n @rate_limited(2)\n def GET(self, **kwargs):\n \"\"\"Return a set of data based on the original URL.\n\n successful example:\n\n {\n 'short_url': 'http://52.8.43.12/qM',\n 'original_url': 'http://example.com/hello-there/testing',\n 'created': '2017-01-05 02:57:10.366'\n }\n \"\"\"\n params = kwargs\n try:\n params = self.process_params(params)\n short_url = self.retrieve_param('short_url', params)\n except cherrypy._cperror.HTTPError as e:\n return self.shorturl.standardize_error(e)\n\n return self.shorturl.short_to_original(short_url)\n\n\n @cherrypy.tools.json_in()\n @cherrypy.tools.json_out()\n @rate_limited(2)\n def POST(self, **kwargs):\n \"\"\"Return a set of data based on the original URL.\n\n successful example:\n\n {\n 'short_url': 'http://52.8.43.12/qM',\n 'original_url': 'http://example.com/hello-there/testing',\n 'created': '2017-01-05 02:57:10.366'\n }\n \"\"\"\n params = kwargs\n try:\n params = self.process_params(params)\n original_url = self.retrieve_param('original_url', params)\n except cherrypy._cperror.HTTPError as e:\n return self.shorturl.standardize_error(e)\n\n return self.shorturl.original_to_short(original_url)\n","repo_name":"gsafcik/ShortURL","sub_path":"services/shorturl_api_v1.py","file_name":"shorturl_api_v1.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70066917112","text":"#A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,\n\n#a**2 + b**2 = c**2\n#For example, 3**2 + 4**2 = 9 + 16 = 25 = 5**2.\n\n#There exists exactly one Pythagorean triplet for which a + b + c = 1000.\n#Find the product abc.\n\nimport math\ndef pythagorean_triplet(n):\n num=[]\n for b in range(n):\n for a in range(1, b):\n c = math.sqrt( a * a + b * b)\n suma = a+b+c\n if (c % 1 == 0 and suma==1000):\n print(a, b, int(c))\n print(a*b*c)\n \n \npythagorean_triplet(1000)\n\n","repo_name":"Hiram20buz/projectEuler","sub_path":"9_SpecialPythagoreanTriplet.py","file_name":"9_SpecialPythagoreanTriplet.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5607786393","text":"# -*- encoding: utf-8 -*-\n'''\n@File : cycleGAN.py\n@Time : 2023/03/10 13:14:04\n@Author : zwt \n@Version : 1.0\n@Contact : 1030456532@qq.com\n'''\n\n# here put the import lib\n\nfrom collections import OrderedDict\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass RedisualBlock(nn.Module):\n \"\"\"\n Don't change the input shape.\n \"\"\"\n def __init__(self, input_channel) -> None:\n super().__init__()\n\n conv_block = [\n # Use the boundary as the axis of symmetry reflection to padding it.\n # \n # 5 4 5 6 5\n # 1 2 3 2 1 2 3 2 2 1 2 3 2\n # 4 5 6 => 5 4 5 6 5 => 5 4 5 6 5\n # 7 8 9 8 7 8 9 8 8 7 8 9 8\n # 5 4 5 6 5\n # \n nn.ReflectionPad2d(1), \n nn.Conv2d(input_channel, input_channel, 3),\n # ** Is more suit for style transform missions (cycle gan style transform). 
**\n nn.InstanceNorm2d(input_channel),\n nn.ReLU(inplace=True), \n nn.ReflectionPad2d(1), \n nn.Conv2d(input_channel, input_channel, 3), \n nn.InstanceNorm2d(input_channel)\n ]\n\n self.conv_block = nn.Sequential(*conv_block)\n\n def forward(self, x):\n return x + self.conv_block(x)\n\n\nclass Generator(nn.Module):\n def __init__(self, input_channel, output_channel, redisual_block_nums=9) -> None:\n super().__init__()\n\n # ReflectionPad2d 3 with kernel_size 7. Then the output shape will not changed.\n # input.\n model = [\n nn.ReflectionPad2d(3), \n nn.Conv2d(input_channel, 64, 7), \n nn.InstanceNorm2d(64), \n nn.ReLU(inplace=True)\n ]\n\n # down sample.\n input_channel = 64\n for _ in range(2):\n model += [\n nn.Conv2d(input_channel, input_channel * 2, 3, stride=2, padding=1), \n nn.InstanceNorm2d(input_channel * 2), \n nn.ReLU(inplace=True)\n ]\n input_channel = input_channel * 2\n \n # redisual block.\n for _ in range(redisual_block_nums):\n model += [RedisualBlock(input_channel)]\n\n # up sample.\n for _ in range(2):\n model += [\n nn.ConvTranspose2d(input_channel, input_channel // 2, 3, stride=2, padding=1, output_padding=1), \n nn.InstanceNorm2d(input_channel // 2),\n nn.ReLU(inplace=True)\n ]\n input_channel = input_channel // 2\n \n # output.\n \"\"\"\n saturated neurons: sigmoid , tanh. etc\n\n one-sided saturations: relu , leaky relu. etc\n\n 1. solve the vanishing gradients problems.\n \"\"\"\n model += [\n nn.ReflectionPad2d(3),\n nn.Conv2d(64, output_channel, 7),\n nn.Tanh()\n ]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x):\n return self.model(x)\n\nclass Discriminator(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\nif __name__ == '__main__':\n G = Generator(3, 32)\n R = RedisualBlock(3)\n print(G)\n print(R)\n\n import torch\n input = torch.zeros((4, 3, 256, 256))\n output = G(input)\n r_output = R(input)\n print(f\"input.shape: {input.shape} output.shape: {output.shape} r: {r_output.shape}\")","repo_name":"zwtttttt/GAN_ZOO","sub_path":"cycleGAN/model/cycleGAN.py","file_name":"cycleGAN.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"4405523173","text":"def sortedarraybyparity(nums):\n i = 0\n j = len(nums) - 1\n while i < j:\n if nums[i] % 2 == 1 and nums[j] % 2 == 0:\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n j -= 1\n elif nums[i] % 2 == 0 and nums[j] % 2 == 1:\n i += 1\n j -= 1\n elif nums[i] % 2 == 0:\n i += 1\n elif nums[j] % 2 == 1:\n j -= 1\n return nums\nprint(sortedarraybyparity([3,1,2,4,7,9,4,2]))","repo_name":"sagarverma2602/python_practice","sub_path":"leetcode/905. Sort Array By Parity.py","file_name":"905. 
Sort Array By Parity.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71677510712","text":"import faiss\nimport torch\nimport logging\nimport numpy as np\nfrom tqdm import tqdm\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import Subset\nfrom utils.plotting import process_results_simulation\nfrom h5_transformer import calc_overlap\nfrom model.functional import calculate_psnr\nimport yaml\nimport os\nfrom PIL import Image\nimport shutil\nimport datasets_ws\nimport h5py\n\ndef test_efficient_ram_usage(args, eval_ds, model, test_method=\"hard_resize\"):\n \"\"\"This function gives the same output as test(), but uses much less RAM.\n This can be useful when testing with large descriptors (e.g. NetVLAD) on large datasets (e.g. San Francisco).\n Obviously it is slower than test(), and can't be used with PCA.\n \"\"\"\n\n model = model.eval()\n if test_method == \"nearest_crop\" or test_method == \"maj_voting\":\n distances = np.empty(\n [eval_ds.queries_num * 5, eval_ds.database_num], dtype=np.float32\n )\n else:\n distances = np.empty(\n [eval_ds.queries_num, eval_ds.database_num], dtype=np.float32\n )\n\n with torch.no_grad():\n if test_method == \"nearest_crop\" or test_method == \"maj_voting\":\n queries_features = np.ones(\n (eval_ds.queries_num * 5, args.features_dim), dtype=\"float32\"\n )\n else:\n queries_features = np.ones(\n (eval_ds.queries_num, args.features_dim), dtype=\"float32\"\n )\n logging.debug(\"Extracting queries features for evaluation/testing\")\n queries_infer_batch_size = (\n 1 if test_method == \"single_query\" else args.infer_batch_size\n )\n eval_ds.test_method = test_method\n queries_subset_ds = Subset(\n eval_ds,\n list(\n range(eval_ds.database_num,\n eval_ds.database_num + eval_ds.queries_num)\n ),\n )\n queries_dataloader = DataLoader(\n dataset=queries_subset_ds,\n num_workers=args.num_workers,\n batch_size=queries_infer_batch_size,\n pin_memory=(args.device == \"cuda\"),\n )\n for inputs, indices in tqdm(queries_dataloader, ncols=100):\n if (\n test_method == \"five_crops\"\n or test_method == \"nearest_crop\"\n or test_method == \"maj_voting\"\n ):\n # shape = 5*bs x 3 x 480 x 480\n inputs = torch.cat(tuple(inputs))\n features = model(inputs.to(args.device))\n if test_method == \"five_crops\": # Compute mean along the 5 crops\n features = torch.stack(torch.split(features, 5)).mean(1)\n if test_method == \"nearest_crop\" or test_method == \"maj_voting\":\n start_idx = (indices[0] - eval_ds.database_num) * 5\n end_idx = start_idx + indices.shape[0] * 5\n indices = np.arange(start_idx, end_idx)\n queries_features[indices, :] = features.cpu().numpy()\n else:\n queries_features[\n indices.numpy() - eval_ds.database_num, :\n ] = features.cpu().numpy()\n\n queries_features = torch.tensor(\n queries_features).type(torch.float32).cuda()\n\n logging.debug(\"Extracting database features for evaluation/testing\")\n # For database use \"hard_resize\", although it usually has no effect because database images have same resolution\n eval_ds.test_method = \"hard_resize\"\n database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))\n database_dataloader = DataLoader(\n dataset=database_subset_ds,\n num_workers=args.num_workers,\n batch_size=args.infer_batch_size,\n pin_memory=(args.device == \"cuda\"),\n )\n for inputs, indices in tqdm(database_dataloader, ncols=100):\n inputs = 
inputs.to(args.device)\n features = model(inputs)\n for pn, (index, pred_feature) in enumerate(zip(indices, features)):\n distances[:, index] = (\n ((queries_features - pred_feature) ** 2).sum(1).cpu().numpy()\n )\n del features, queries_features, pred_feature\n\n predictions = distances.argsort(axis=1)[:, : max(args.recall_values)]\n\n if test_method == \"nearest_crop\":\n distances = np.array(\n [distances[row, index] for row, index in enumerate(predictions)]\n )\n distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))\n predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))\n for q in range(eval_ds.queries_num):\n # sort predictions by distance\n sort_idx = np.argsort(distances[q])\n predictions[q] = predictions[q, sort_idx]\n # remove duplicated predictions, i.e. keep only the closest ones\n _, unique_idx = np.unique(predictions[q], return_index=True)\n # unique_idx is sorted based on the unique values, sort it again\n predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]\n predictions = predictions[\n :, :20\n ] # keep only the closer 20 predictions for each\n elif test_method == \"maj_voting\":\n distances = np.array(\n [distances[row, index] for row, index in enumerate(predictions)]\n )\n distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))\n predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))\n for q in range(eval_ds.queries_num):\n # votings, modify distances in-place\n top_n_voting(\"top1\", predictions[q],\n distances[q], args.majority_weight)\n top_n_voting(\"top5\", predictions[q],\n distances[q], args.majority_weight)\n top_n_voting(\"top10\", predictions[q],\n distances[q], args.majority_weight)\n\n # flatten dist and preds from 5, 20 -> 20*5\n # and then proceed as usual to keep only first 20\n dists = distances[q].flatten()\n preds = predictions[q].flatten()\n\n # sort predictions by distance\n sort_idx = np.argsort(dists)\n preds = preds[sort_idx]\n # remove duplicated predictions, i.e. 
keep only the closest ones\n _, unique_idx = np.unique(preds, return_index=True)\n # unique_idx is sorted based on the unique values, sort it again\n # here the row corresponding to the first crop is used as a\n # 'buffer' for each query, and in the end the dimension\n # relative to crops is eliminated\n predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]\n predictions = predictions[\n :, 0, :20\n ] # keep only the closer 20 predictions for each query\n del distances\n\n # For each query, check if the predictions are correct\n positives_per_query = eval_ds.get_positives()\n # args.recall_values by default is [1, 5, 10, 20]\n recalls = np.zeros(len(args.recall_values))\n for query_index, pred in enumerate(predictions):\n for i, n in enumerate(args.recall_values):\n if np.any(np.in1d(pred[:n], positives_per_query[query_index])):\n recalls[i:] += 1\n break\n\n recalls = recalls / eval_ds.queries_num * 100\n recalls_str = \", \".join(\n [f\"R@{val}: {rec:.1f}\" for val,\n rec in zip(args.recall_values, recalls)]\n )\n return recalls, recalls_str\n\n\ndef test(args, eval_ds, model, model_db=None, test_method=\"hard_resize\", pca=None, visualize=False):\n \"\"\"Compute features of the given dataset and compute the recalls.\"\"\"\n\n assert test_method in [\n \"hard_resize\",\n \"single_query\",\n \"central_crop\",\n \"five_crops\",\n \"nearest_crop\",\n \"maj_voting\",\n ], f\"test_method can't be {test_method}\"\n\n if args.efficient_ram_testing:\n if model_db is not None:\n raise NotImplementedError()\n return test_efficient_ram_usage(args, eval_ds, model, test_method)\n\n model = model.eval()\n if model_db is not None:\n model_db = model_db.eval()\n with torch.no_grad():\n logging.debug(\"Extracting database features for evaluation/testing\")\n # For database use \"hard_resize\", although it usually has no effect because database images have same resolution\n eval_ds.test_method = \"hard_resize\"\n database_subset_ds = Subset(eval_ds, list(range(eval_ds.database_num)))\n database_dataloader = DataLoader(\n dataset=database_subset_ds,\n num_workers=args.num_workers,\n batch_size=args.infer_batch_size,\n pin_memory=(args.device == \"cuda\"),\n )\n\n if test_method == \"nearest_crop\" or test_method == \"maj_voting\":\n all_features = np.empty(\n (5 * eval_ds.queries_num + eval_ds.database_num, args.features_dim),\n dtype=\"float32\",\n )\n else:\n all_features = np.empty(\n (len(eval_ds), args.features_dim), dtype=\"float32\")\n\n for inputs, indices in tqdm(database_dataloader, ncols=100):\n if model_db is not None:\n features = model_db(inputs.to(args.device))\n else:\n features = model(inputs.to(args.device))\n features = features.cpu().numpy()\n if pca != None:\n features = pca.transform(features)\n all_features[indices.numpy(), :] = features\n\n logging.debug(\"Extracting queries features for evaluation/testing\")\n queries_infer_batch_size = (\n 1 if test_method == \"single_query\" else args.infer_batch_size\n )\n eval_ds.test_method = test_method\n queries_subset_ds = Subset(\n eval_ds,\n list(\n range(eval_ds.database_num,\n eval_ds.database_num + eval_ds.queries_num)\n ),\n )\n queries_dataloader = DataLoader(\n dataset=queries_subset_ds,\n num_workers=args.num_workers,\n batch_size=queries_infer_batch_size,\n pin_memory=(args.device == \"cuda\"),\n )\n for inputs, indices in tqdm(queries_dataloader, ncols=100):\n if (\n test_method == \"five_crops\"\n or test_method == \"nearest_crop\"\n or test_method == \"maj_voting\"\n ):\n # shape = 5*bs x 3 x 480 x 480\n inputs = 
torch.cat(tuple(inputs))\n features = model(inputs.to(args.device))\n if test_method == \"five_crops\": # Compute mean along the 5 crops\n features = torch.stack(torch.split(features, 5)).mean(1)\n features = features.cpu().numpy()\n if pca != None:\n features = pca.transform(features)\n\n if (\n test_method == \"nearest_crop\" or test_method == \"maj_voting\"\n ): # store the features of all 5 crops\n start_idx = (\n eval_ds.database_num +\n (indices[0] - eval_ds.database_num) * 5\n )\n end_idx = start_idx + indices.shape[0] * 5\n indices = np.arange(start_idx, end_idx)\n all_features[indices, :] = features\n else:\n all_features[indices.numpy(), :] = features\n\n queries_features = all_features[eval_ds.database_num:]\n database_features = all_features[: eval_ds.database_num]\n logging.info(f\"Final feature dim: {queries_features.shape[1]}\")\n \n del all_features\n\n logging.debug(\"Calculating recalls\")\n if args.prior_location_threshold == -1:\n if args.use_faiss_gpu:\n res = faiss.StandardGpuResources()\n faiss_index = faiss.GpuIndexFlatL2(res, args.features_dim)\n else:\n faiss_index = faiss.IndexFlatL2(args.features_dim)\n faiss_index.add(database_features)\n distances, predictions = faiss_index.search(\n queries_features, max(args.recall_values)\n )\n del database_features\n else:\n distances, predictions = [[] for i in range(len(queries_features))], [[] for i in range(len(queries_features))]\n hard_negatives_per_query = eval_ds.get_hard_negatives()\n for query_index in tqdm(range(len(predictions))):\n faiss_index = faiss.IndexFlatL2(args.features_dim)\n faiss_index.add(database_features[hard_negatives_per_query[query_index]])\n distances_single, local_predictions_single = faiss_index.search(\n np.expand_dims(queries_features[query_index], axis=0), max(args.recall_values)\n )\n # logging.debug(f\"distances_single:{distances_single}\")\n # logging.debug(f\"predictions_single:{predictions_single}\")\n distances[query_index] = distances_single\n predictions_single = hard_negatives_per_query[query_index][local_predictions_single]\n predictions[query_index] = predictions_single\n distances = np.concatenate(distances, axis=0)\n predictions = np.concatenate(predictions, axis=0)\n del database_features\n if test_method == \"nearest_crop\":\n distances = np.reshape(distances, (eval_ds.queries_num, 20 * 5))\n predictions = np.reshape(predictions, (eval_ds.queries_num, 20 * 5))\n for q in range(eval_ds.queries_num):\n # sort predictions by distance\n sort_idx = np.argsort(distances[q])\n predictions[q] = predictions[q, sort_idx]\n # remove duplicated predictions, i.e. 
keep only the closest ones\n _, unique_idx = np.unique(predictions[q], return_index=True)\n # unique_idx is sorted based on the unique values, sort it again\n predictions[q, :20] = predictions[q, np.sort(unique_idx)][:20]\n predictions = predictions[\n :, :20\n ] # keep only the closer 20 predictions for each query\n elif test_method == \"maj_voting\":\n distances = np.reshape(distances, (eval_ds.queries_num, 5, 20))\n predictions = np.reshape(predictions, (eval_ds.queries_num, 5, 20))\n for q in range(eval_ds.queries_num):\n # votings, modify distances in-place\n top_n_voting(\"top1\", predictions[q],\n distances[q], args.majority_weight)\n top_n_voting(\"top5\", predictions[q],\n distances[q], args.majority_weight)\n top_n_voting(\"top10\", predictions[q],\n distances[q], args.majority_weight)\n\n # flatten dist and preds from 5, 20 -> 20*5\n # and then proceed as usual to keep only first 20\n dists = distances[q].flatten()\n preds = predictions[q].flatten()\n\n # sort predictions by distance\n sort_idx = np.argsort(dists)\n preds = preds[sort_idx]\n # remove duplicated predictions, i.e. keep only the closest ones\n _, unique_idx = np.unique(preds, return_index=True)\n # unique_idx is sorted based on the unique values, sort it again\n # here the row corresponding to the first crop is used as a\n # 'buffer' for each query, and in the end the dimension\n # relative to crops is eliminated\n predictions[q, 0, :20] = preds[np.sort(unique_idx)][:20]\n predictions = predictions[\n :, 0, :20\n ] # keep only the closer 20 predictions for each query\n\n # For each query, check if the predictions are correct\n positives_per_query = eval_ds.get_positives()\n # args.recall_values by default is [1, 5, 10, 20]\n recalls = np.zeros(len(args.recall_values))\n for query_index, pred in enumerate(predictions):\n for i, n in enumerate(args.recall_values):\n if np.any(np.in1d(pred[:n], positives_per_query[query_index])):\n recalls[i:] += 1\n break\n # Divide by the number of queries*100, so the recalls are in percentages\n recalls = recalls / eval_ds.queries_num * 100\n recalls_str = \", \".join(\n [f\"R@{val}: {rec:.1f}\" for val,\n rec in zip(args.recall_values, recalls)]\n )\n\n if args.use_best_n > 0:\n if visualize:\n if os.path.isdir(\"visual_loc\"):\n shutil.rmtree(\"visual_loc\")\n os.mkdir(\"visual_loc\")\n save_dir = \"visual_loc\"\n # init dataset\n eval_ds.__getitem__(0)\n samples_to_be_used = args.use_best_n\n error_m = []\n position_m = []\n for query_index in tqdm(range(len(predictions))):\n distance = distances[query_index]\n prediction = predictions[query_index]\n sort_idx = np.argsort(distance)\n if args.use_best_n == 1:\n best_position = eval_ds.database_utms[prediction[sort_idx[0]]]\n else:\n if distance[sort_idx[0]] == 0:\n best_position = eval_ds.database_utms[prediction[sort_idx[0]]]\n else:\n mean = distance[sort_idx[0]]\n sigma = distance[sort_idx[0]] / distance[sort_idx[-1]]\n X = np.array(distance[sort_idx[:samples_to_be_used]]).reshape((-1,))\n weights = np.exp(-np.square(X - mean) / (2 * sigma ** 2)) # gauss\n weights = weights / np.sum(weights)\n\n x = y = 0\n for p, w in zip(eval_ds.database_utms[prediction[sort_idx[:samples_to_be_used]]], weights.tolist()):\n y += p[0] * w\n x += p[1] * w\n best_position = (y, x)\n actual_position = eval_ds.queries_utms[query_index]\n error = np.linalg.norm((actual_position[0]-best_position[0], actual_position[1]-best_position[1]))\n if error >= 50 and visualize: # Wrong results\n database_index = prediction[sort_idx[0]]\n database_img = 
eval_ds._find_img_in_h5(database_index, \"database\")\n if args.G_contrast:\n query_img = transforms.functional.adjust_contrast(eval_ds._find_img_in_h5(query_index, \"queries\"), contrast_factor=3)\n else:\n query_img = eval_ds._find_img_in_h5(query_index, \"queries\")\n result = Image.new(database_img.mode, (524, 524), (255, 0, 0))\n result.paste(database_img, (6, 6))\n database_img = result\n database_img.save(f\"{save_dir}/{query_index}_wrong_d.png\")\n query_img.save(f\"{save_dir}/{query_index}_wrong_q.png\")\n elif error <= 35 and visualize: # Wrong results\n database_index = prediction[sort_idx[0]]\n database_img = eval_ds._find_img_in_h5(database_index, \"database\")\n if args.G_contrast:\n query_img = transforms.functional.adjust_contrast(eval_ds._find_img_in_h5(query_index, \"queries\"), contrast_factor=3)\n else:\n query_img = eval_ds._find_img_in_h5(query_index, \"queries\")\n result = Image.new(database_img.mode, (524, 524), (0, 255, 0))\n result.paste(database_img, (6, 6))\n database_img = result\n database_img.save(f\"{save_dir}/{query_index}_correct_d.png\")\n query_img.save(f\"{save_dir}/{query_index}_correct_q.png\")\n elif visualize: # Ambiguous results\n database_index = prediction[sort_idx[0]]\n database_img = eval_ds._find_img_in_h5(database_index, \"database\")\n if args.G_contrast:\n query_img = transforms.functional.adjust_contrast(eval_ds._find_img_in_h5(query_index, \"queries\"), contrast_factor=3)\n else:\n query_img = eval_ds._find_img_in_h5(query_index, \"queries\")\n result = Image.new(database_img.mode, (524, 524), (128, 128, 128))\n result.paste(database_img, (6, 6))\n database_img = result\n database_img.save(f\"{save_dir}/{query_index}_d.png\")\n query_img.save(f\"{save_dir}/{query_index}_q.png\")\n \n error_m.append(error)\n position_m.append(actual_position)\n process_results_simulation(error_m, args.save_dir)\n \n return recalls, recalls_str\n\ndef test_translation_pix2pix(args, eval_ds, model, visual_current=False, visual_image_num=10, epoch_num=None):\n \"\"\"Compute PSNR of the given dataset and compute the recalls.\"\"\"\n \n if args.G_test_norm == \"batch\":\n model.netG = model.netG.eval()\n elif args.G_test_norm == \"instance\":\n model.netG = model.netG.train()\n psnr_sum = 0\n psnr_count = 0\n save_dir = None\n if args.visual_all:\n if os.path.isdir(\"visual_all\"):\n shutil.rmtree(\"visual_all\")\n os.mkdir(\"visual_all\")\n save_dir = \"visual_all\"\n if visual_current:\n if not os.path.isdir(os.path.join(args.save_dir, \"visual_current\")):\n os.mkdir(os.path.join(args.save_dir, \"visual_current\"))\n save_dir = os.path.join(args.save_dir, \"visual_current\")\n with torch.no_grad():\n # For database use \"hard_resize\", although it usually has no effect because database images have same resolution\n eval_ds.test_method = \"hard_resize\"\n\n eval_ds.is_inference = True\n eval_ds.compute_pairs(args)\n eval_ds.is_inference = False\n\n eval_dataloader = DataLoader(\n dataset=eval_ds,\n num_workers=args.num_workers,\n batch_size=1,\n pin_memory=(args.device == \"cuda\"),\n shuffle=False\n )\n\n logging.debug(\"Calculating PSNR\")\n for query, database, query_name, database_name in tqdm(eval_dataloader, ncols=100):\n # Compute features of all images (images contains queries, positives and negatives)\n model.set_input(database, query)\n model.forward()\n output = model.fake_B\n output = torch.clamp(output, min=-1, max=1)\n query_images = query.to(args.device) * 0.5 + 0.5\n output_images = output * 0.5 + 0.5\n database_images = 
database.to(args.device) * 0.5 + 0.5\n if args.visual_all or (visual_current == True and psnr_count < visual_image_num):\n vis_image_1 = transforms.ToPILImage()(output_images[0].cpu())\n vis_image_2 = transforms.ToPILImage()(query_images[0].cpu())\n vis_image_3 = transforms.ToPILImage()(database_images[0].cpu())\n dst = Image.new('RGB', (vis_image_1.width, vis_image_1.height + vis_image_2.height + vis_image_3.height))\n dst.paste(vis_image_1, (0, 0))\n dst.paste(vis_image_2, (0, vis_image_1.height))\n dst.paste(vis_image_3, (0, vis_image_1.height + vis_image_2.height))\n if args.visual_all:\n vis_image_1.save(f\"{save_dir}/{psnr_count}_gen.jpg\")\n vis_image_2.save(f\"{save_dir}/{psnr_count}_gt.jpg\")\n vis_image_3.save(f\"{save_dir}/{psnr_count}_st.jpg\")\n elif visual_current:\n dst.save(f\"{save_dir}/{epoch_num}_{query_name}.jpg\")\n elif visual_current == True and psnr_count >= visual_image_num:\n # early stop\n break\n psnr_sum += calculate_psnr(query_images, output_images)\n psnr_count += 1\n\n psnr_sum /= psnr_count\n\n psnr_str = f\"PSNR: {psnr_sum:.1f}\"\n \n return [psnr_sum], psnr_str\n\ndef test_translation_pix2pix_generate_h5(args, eval_ds, model):\n \"\"\"Compute PSNR of the given dataset and compute the recalls.\"\"\"\n \n if args.G_test_norm == \"batch\":\n model.netG = model.netG.eval()\n elif args.G_test_norm == \"instance\":\n model.netG = model.netG.train()\n \n save_path = os.path.join(args.save_dir, \"train_queries.h5\")\n\n with torch.no_grad():\n # For database use \"hard_resize\", although it usually has no effect because database images have same resolution\n eval_ds.test_method = \"hard_resize\"\n\n eval_ds.is_inference = True\n eval_ds.compute_pairs(args)\n eval_ds.is_inference = False\n \n eval_dataloader = DataLoader(\n dataset=eval_ds,\n num_workers=args.num_workers,\n batch_size=16 if args.G_test_norm == \"batch\" else 1,\n pin_memory=(args.device == \"cuda\"),\n shuffle=False\n )\n with h5py.File(save_path, \"a\") as hf:\n start = False\n img_names = []\n for query, database, query_path, database_path in tqdm(eval_dataloader, ncols=100):\n # Compute features of all images (images contains queries, positives and negatives)\n model.set_input(database, query)\n model.forward()\n output = model.fake_B\n output = torch.clamp(output, min=-1, max=1)\n output_images = output * 0.5 + 0.5\n for i in range(len(database_path)):\n generated_query = transforms.Grayscale(num_output_channels=3)(transforms.Resize(args.resize)(transforms.ToPILImage()(output_images[i].cpu())))\n cood_y = database_path[i].split(\"@\")[1]\n cood_x = database_path[i].split(\"@\")[2]\n name = f\"@{cood_y}@{cood_x}\"\n img_names.append(name)\n img_np = np.array(generated_query)\n img_np = np.expand_dims(img_np, axis=0)\n size_np = np.expand_dims(\n np.array([img_np.shape[1], img_np.shape[2]]), axis=0)\n if not start:\n hf.create_dataset(\n \"image_data\",\n data=img_np,\n chunks=(1, 512, 512, 3),\n maxshape=(None, 512, 512, 3),\n compression=\"lzf\",\n ) # write the data to hdf5 file\n hf.create_dataset(\n \"image_size\",\n data=size_np,\n chunks=True,\n maxshape=(None, 2),\n compression=\"lzf\",\n )\n start = True\n else:\n hf[\"image_data\"].resize(\n hf[\"image_data\"].shape[0] + img_np.shape[0], axis=0\n )\n hf[\"image_data\"][-img_np.shape[0]:] = img_np\n hf[\"image_size\"].resize(\n hf[\"image_size\"].shape[0] + size_np.shape[0], axis=0\n )\n hf[\"image_size\"][-size_np.shape[0]:] = size_np\n t = h5py.string_dtype(encoding=\"utf-8\")\n hf.create_dataset(\"image_name\", data=img_names,\n 
dtype=t, compression=\"lzf\")\n print(\"hdf5 file size: %d bytes\" % os.path.getsize(save_path))\n\n\ndef top_n_voting(topn, predictions, distances, maj_weight):\n if topn == \"top1\":\n n = 1\n selected = 0\n elif topn == \"top5\":\n n = 5\n selected = slice(0, 5)\n elif topn == \"top10\":\n n = 10\n selected = slice(0, 10)\n # find predictions that repeat in the first, first five,\n # or fist ten columns for each crop\n vals, counts = np.unique(predictions[:, selected], return_counts=True)\n # for each prediction that repeats more than once,\n # subtract from its score\n for val, count in zip(vals[counts > 1], counts[counts > 1]):\n mask = predictions[:, selected] == val\n distances[:, selected][mask] -= maj_weight * count / n\n","repo_name":"arplaboratory/satellite-thermal-geo-localization","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":28004,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"95"} +{"seq_id":"21484984203","text":"#!/usr/bin/env python\n# -*- coding=utf8 -*-\n\n\nimport pytest\n\n\nclass Solution:\n\n def trailingZeroes1(self, n: int) -> int:\n \"\"\"\n https://leetcode.com/problems/factorial-trailing-zeroes/discuss/196311/topic\n\n 2和5相乘为0,��质求2和5的个数,最终转换为5的个数,有几对2*5结尾就有几个0。\n\n 递归公式:f(n) = n/5 + f(n/5)\n \"\"\"\n if (n < 5):\n return 0\n if (n < 10):\n return 1\n return n // 5 + self.trailingZeroes1(n // 5)\n\n def trailingZeroes2(self, n: int) -> int:\n res = 0\n while n > 0:\n n //= 5\n res += n\n return res\n\n\n@pytest.mark.parametrize((\"param\", \"ret\"), [(3, 0),\n (5, 1),\n (10, 2),\n (30, 7)])\ndef test1(param, ret):\n solution = Solution()\n assert solution.trailingZeroes1(param) is ret\n assert solution.trailingZeroes2(param) is ret\n","repo_name":"helloocc/algorithm","sub_path":"172_factorial-trailing-zeroes.py","file_name":"172_factorial-trailing-zeroes.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"39587158846","text":"import pandas as pd\nimport torch\nfrom torch.utils.data import Dataset\n\n\ndf = pd.read_csv(\"Data/Clean_Flipkart_Product.csv\")\ndf.dropna(inplace=True)\n\nclass Vocabulary:\n \n '''\n __init__ method is called by default as soon as an object of this class is initiated\n we use this method to initiate our vocab dictionaries\n '''\n def __init__(self, freq_threshold, max_size):\n '''\n freq_threshold : the minimum times a word must occur in corpus to be treated in vocab\n max_size : max source vocab size. Eg. 
if set to 10,000, we pick the top 10,000 most frequent words and discard others\n        '''\n        #initiate the index to token dict\n        ## <PAD> -> padding, used for padding the shorter sentences in a batch to match the length of longest sentence in the batch\n        ## <SOS> -> start token, added in front of each sentence to signify the start of sentence\n        ## <EOS> -> End of sentence token, added to the end of each sentence to signify the end of sentence\n        ## <UNK> -> words which are not found in the vocab are replaced by this token\n        self.itos = {0: '<PAD>', 1:'<SOS>', 2:'<EOS>', 3: '<UNK>'}\n        #initiate the token to index dict\n        self.stoi = {k:j for j,k in self.itos.items()} \n        \n        self.freq_threshold = freq_threshold\n        self.max_size = max_size\n        \n    '''\n    __len__ is used by dataloader later to create batches\n    '''\n    def __len__(self):\n        return len(self.itos)\n    \n    '''\n    a simple tokenizer to split on space and converts the sentence to list of words\n    '''\n    @staticmethod\n    def tokenizer(text):\n        return [tok.lower().strip() for tok in text.split(' ')]\n    \n    '''\n    build the vocab: create a dictionary mapping of index to string (itos) and string to index (stoi)\n    output ex. for stoi -> {'the':5, 'a':6, 'an':7}\n    '''\n    def build_vocabulary(self, sentence_list):\n        #calculate the frequencies of each word first to remove the words with freq < freq_threshold\n        frequencies = {} #init the freq dict\n        idx = 4 #index from which we want our dict to start. We already used 4 indexes for pad, start, end, unk\n        \n        #calculate freq of words\n        for sentence in sentence_list:\n            for word in self.tokenizer(sentence):\n                if word not in frequencies.keys():\n                    frequencies[word]=1\n                else:\n                    frequencies[word]+=1\n                    \n                    \n        #limit vocab by removing low freq words\n        frequencies = {k:v for k,v in frequencies.items() if v>self.freq_threshold} \n        \n        #limit vocab to the max_size specified\n        frequencies = dict(sorted(frequencies.items(), key = lambda x: -x[1])[:self.max_size-idx]) # idx =4 for pad, start, end , unk\n        \n        #create vocab\n        for word in frequencies.keys():\n            self.stoi[word] = idx\n            self.itos[idx] = word\n            idx+=1\n            \n            \n    '''\n    convert the list of words to a list of corresponding indexes\n    '''    \n    def numericalize(self, text):\n        #tokenize text\n        tokenized_text = self.tokenizer(text)\n        numericalized_text = []\n        for token in tokenized_text:\n            if token in self.stoi.keys():\n                numericalized_text.append(self.stoi[token])\n            else: #out-of-vocab words are represented by UNK token index\n                numericalized_text.append(self.stoi['<UNK>'])\n                \n        return numericalized_text\n\n\nclass CustomDataset(Dataset):\n    '''\n    Initiating Variables\n    df: the training dataframe\n    source_column : the name of source text column in the dataframe\n    transform : If we want to add any augmentation\n    freq_threshold : the minimum times a word must occur in corpus to be treated in vocab\n    source_vocab_max_size : max source vocab size\n    '''\n    \n    def __init__(self, df, source_column,freq_threshold = 3,\n                source_vocab_max_size = 10000 , transform=None):\n        \n        self.df = df\n        self.transform = transform\n        \n        #get source and target texts\n        self.source_texts = self.df[source_column]\n        \n        \n        ##VOCAB class has been created above\n        #Initialize source vocab object and build vocabulary\n        self.source_vocab = Vocabulary(freq_threshold, source_vocab_max_size)\n        self.source_vocab.build_vocabulary(self.source_texts.tolist())\n\n        \n    def __len__(self):\n        return len(self.df)\n    \n    '''\n    __getitem__ runs on 1 example at a time. 
Here, we get an example at index and return its numericalized source and\n    target values using the vocabulary objects we created in __init__\n    '''\n    def __getitem__(self, index):\n        source_text = self.source_texts[index]\n        \n        if self.transform is not None:\n            source_text = self.transform(source_text)\n        \n        #numericalize texts ['<SOS>','cat', 'in', 'a', 'bag','<EOS>'] -> [1,12,2,9,24,2]\n        numerialized_source = [self.source_vocab.stoi[\"<SOS>\"]]\n        numerialized_source += self.source_vocab.numericalize(source_text)\n        numerialized_source.append(self.source_vocab.stoi[\"<EOS>\"])\n        \n        #convert the list to tensor and return\n        return torch.tensor(numerialized_source), torch.tensor(self.df.y[index])\n\n\ndataset = CustomDataset(df, \"clean_review\")\n\n\n# Learn the vocabulary for the source language\ndata_words = df['clean_review2'].values.tolist()\n\ndel df\nfrom gensim.models import FastText\nfasttext_model = FastText(data_words, vector_size= 100, window=5, min_count=5, workers=4,sg=1)","repo_name":"Ankit-Gupta-11/Aspect-Based-Sentiment-Analysis","sub_path":"CustomDataset.py","file_name":"CustomDataset.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"10008064181","text":"if __name__ == '__main__':\n    DIAL_CODES = [\n        (86, 'China'),\n        (91, 'India'),\n        (1, 'United States'),\n        (62, 'Indonesia'),\n        (55, 'Brazil'),\n        (92, 'Pakistan'),\n        (880, 'Bangladesh'),\n        (234, 'Nigeria'),\n        (7, 'Russia'),\n        (81, 'Japan')\n    ]\n\n    d1 = dict(DIAL_CODES) # the tuples are ordered by the countries' population ranking\n    print('d:', d1.keys())\n    d2 = dict(sorted(DIAL_CODES)) # the tuples are ordered by the countries' dialing codes\n    print('d2:', d2.keys())\n    d3 = dict(sorted(DIAL_CODES, key=lambda x: x[1])) # the tuples are ordered by the English spelling of the country names\n    print('d3:', d3.keys())\n    assert d1 == d2 == d3 # all three dicts are equal because they contain the same data\n","repo_name":"JackCaiZhang/Fluen-Python","sub_path":"Chapter3_Dictionary&Set/Example 3-17_dialcodes.py","file_name":"Example 3-17_dialcodes.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"14077921119","text":"from django.db import migrations\nfrom django.utils.timezone import now\n\n\ndef update_weather(apps, schema_editor):\n    Weather = apps.get_model(\"weather\", \"Weather\")\n    for weather in Weather.objects.all():\n        weather.is_forecast = weather.date_time > now()\n        weather.save()\n\n\nclass Migration(migrations.Migration):\n    dependencies = [\n        (\"weather\", \"0003_weather_is_forecast\"),\n    ]\n\n    operations = [migrations.RunPython(update_weather)]\n","repo_name":"ollz272/garden-server","sub_path":"apps/weather/migrations/0004_set_default_weather.py","file_name":"0004_set_default_weather.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"72777077434","text":"from paucore.utils.presenters import AbstractPresenter, html\n\nfrom pau.constants import POST_LIMIT\nfrom pau.presenters.widgets import file_upload_progress\n\n\nclass PostCreatePresenter(AbstractPresenter):\n    textarea_name = 'post'\n    char_count = POST_LIMIT\n    success_message = 'Your post has been created.'\n\n    @classmethod\n    def from_data(cls, request, btn_action='Post', post_create_pre_text='', reply_to=None, appended_post_url=None, *args, **kwargs):\n        presenter = cls(*args, **kwargs)\n        presenter.request = request\n        presenter.btn_action = btn_action\n        presenter.in_reply_to_post_id = reply_to.id if reply_to else None\n        if not 
post_create_pre_text and reply_to and reply_to.get('user') and request.user.is_authenticated() and request.user.adn_user.id != reply_to.user.id:\n post_create_pre_text = '@%s ' % (reply_to.user.username)\n\n presenter.post_create_pre_text = post_create_pre_text\n\n presenter.appended_post_url = appended_post_url\n\n return presenter\n\n def photo_url_template(self):\n photo_url_template = 'https://photo.app.net/'\n photo_url_template += '{post_id}/1'\n return photo_url_template\n\n def textarea_data_attrs(self):\n\n data_attrs = {\n 'main-message': 1,\n 'text-line-height': 18,\n 'current-height': 80,\n 'include-attachment': 1,\n 'photo-url': self.photo_url_template()\n }\n\n if self.in_reply_to_post_id:\n data_attrs['in-reply-to-post-id'] = self.in_reply_to_post_id\n\n return data_attrs\n\n def generate_textarea(self, placeholder=''):\n data_attrs = self.textarea_data_attrs()\n return html.textarea(class_='editable input-flex', name=self.textarea_name, placeholder=placeholder, tabindex='1', data=data_attrs, *[self.post_create_pre_text])\n\n def generate_textarea_container(self):\n return html.div(class_='text-area layout-like-p subpixel\"', *[\n self.generate_textarea()\n ])\n\n def generate_in_reply_to_container(self):\n return html.div(class_='hide in-reply-to layout-like-p subpixel', data={'in-reply-to': 1}, *[\n html.div(html.em(*['In Reply To:'])),\n html.div(class_='well-style reply-to', *[\n html.a(href='#', class_='close relative space-5', data={'remove-reply': 1}, *[\n html.i(class_='icon-remove')\n ]),\n html.div(class_='post-container subpixel', data={'post-copy': 1})\n ])\n ])\n\n def generate_append_post_url(self):\n return html.div(class_='ta-left', *[\n html.p(*[\n '%s will automatically be appended to your post.' % (self.appended_post_url)\n ])\n ])\n\n def generate_char_count(self):\n return html.span(class_='char-count', data={'char-counter-for': 'message', 'total-chars': self.char_count, 'current-chars': 0}, *[\n unicode(self.char_count)\n ])\n\n def generate_bottom_row(self):\n file_upload = file_upload_progress()\n success = html.span(class_='text-success hide', data={'success-text': 1}, *[self.success_message])\n add_photo = html.button(class_='btn-attach-file file-related transition-color', data={'attach-btn': 1}, *[\n html.i(class_='icon-picture'),\n u'Add photo\\u2026'\n ])\n char_count = self.generate_char_count()\n create_button = html.button(tabindex='2', data={'submit-button': 1}, class_='btn btn-primary %s-button btn-small disabled' % (self.btn_action.lower()), *[\n self.btn_action\n ])\n\n return html.grid(*[\n html.div(class_='yui3-u-1-4 ta-left m-yui3-u-none', *[char_count]),\n html.div(class_='yui3-u-3-4 ta-right m-yui3-u-1', *[\n file_upload,\n success,\n add_photo,\n create_button,\n ])\n ])\n\n def generate_html(self):\n if not self.request.user.is_authenticated():\n return ''\n\n parts = [self.generate_textarea_container()]\n parts += [self.generate_in_reply_to_container()]\n if self.appended_post_url:\n parts += [self.generate_append_post_url()]\n\n parts += [self.generate_bottom_row()]\n\n return html.div(class_='well-plain well-elevated newpost', data={'message-create': 1}, *parts)\n","repo_name":"appdotnet/alpha","sub_path":"pau/presenters/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"95"} +{"seq_id":"72216135992","text":"#importing art.py file\nimport art\nprint(art.logo)\n\n#start encode function\ndef cipher(text):\n 
to_return=\"\"\n for letter in text:\n if ord(letter)>=97 and ord(letter)<=122:\n if ord(letter)+shift<123:\n to_return+=chr(ord(letter)+shift)\n else:\n add=ord(letter)+shift-123\n to_return+=chr(97+int(add))\n else:\n to_return+=letter\n \n return to_return;\n\n\n#start the main code\n\nrestart='y'\n\nwhile restart=='y':\n\n direction = input(\"Type 'encode' to cipher, type 'decode' to decrypt:\\n\")\n text = input(\"Type your message:\\n\").lower()\n shift = int(input(\"Type the shift number:\\n\"))\n shift%=26\n\n if direction=='encode':\n print(cipher(text))\n elif direction==\"decode\":\n shift=26-shift\n print(cipher(text))\n else:\n print(\"wrong option\")\n exit\n\n restart=input(\"Do you want to restart the program (y/n) \")\n\n","repo_name":"surendraLongre/caesar-cipher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2189697464","text":"import osmosis_aws_driver.data_S3_plugin as ocean_s3\n# General imports\nimport sys\nimport os\n#import glob\nimport pandas as pd\nimport hashlib\n\n#%% Logging\nimport logging\nloggers_dict = logging.Logger.manager.loggerDict\n \nlogger = logging.getLogger()\nlogger.handlers = []\n\n# Set level\nlogger.setLevel(logging.DEBUG)\n\n# Create formatter\n\n#FORMAT = \"%(asctime)s - %(levelno)s - %(module)-15s - %(funcName)-15s - %(message)s\"\nFORMAT = \"%(asctime)s L%(levelno)s: %(message)s\"\n\nDATE_FMT = \"%Y-%m-%d %H:%M:%S\"\nformatter = logging.Formatter(FORMAT, DATE_FMT)\n\n# Create handler and assign\nhandler = logging.StreamHandler(sys.stderr)\nhandler.setFormatter(formatter)\nlogger.handlers = [handler]\nlogger.critical(\"Logging started\")\n\n\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\n\n#%% IO\n\n# The working directory is the repo root\nlogging.debug(\"Current working directory: {}\".format(os.getcwd()))\n\n# The source catalog\nFNAME_SOURCE_CATALOG = \"Original/OceanDataSets_master catalog clean.csv\"\n# The current catalog stores the updated state\nFNAME_CURRENT_CATALOG = r\"Master catalog current.csv\"\nPATH_SOURCE_CATALOGUE = os.path.join(os.getcwd(),'catalog', FNAME_SOURCE_CATALOG)\nPATH_CURRENT_CATALOGUE = os.path.join(os.getcwd(),'catalog', FNAME_CURRENT_CATALOG)\nassert os.path.exists(PATH_SOURCE_CATALOGUE), \"{}\".format(PATH_SOURCE_CATALOGUE)\nassert os.path.exists(PATH_CURRENT_CATALOGUE), \"{}\".format(PATH_CURRENT_CATALOGUE)\n\n#%% Load the data catalogue\ndf = pd.read_csv(PATH_CURRENT_CATALOGUE)\n\ntotal_GB = sum(df.loc[:,'SizeGB'])\nlogging.debug(\"Loaded data catalogue with {} records representing {:0.0f} GB\".format(len(df),total_GB))\nlogging.debug(\"{} files have been flagged as already uploaded to S3.\".format(sum(df['uploaded'])))\nerrors = df[df['error'] != 'No error']['error'].value_counts()\nlogging.debug(\"{} files have been flagged with an upload error.\".format(sum(errors)))\n\nprint(\"Error summary:\")\nfor err in errors.iteritems():\n print('\\t',*err)\n\nres = df.head()\ndf = df[0:5]\n\n\n#%% Create the connection via the wrapper\n\n# The `osmosis-aws-driver`, imported here as `ocean_s3` is a wrapper for Boto3.\n\n\n# config = dict()\n# config['region'] = 'eu-central-1'\nconfig = None\nocn_s3 = ocean_s3.S3_Plugin(config)\n\n#%% List buckets\n\nfor i,b in enumerate(ocn_s3.list_buckets()):\n print(i,b['Name'])\n\n#%% Get the bucket\nbucketname =\"data-catalogue-r00\"\n#bucket = 
ocn_s3.s3_client.head_bucket(Bucket=bucketname)\nbucket = ocn_s3.s3_resource.Bucket(bucketname)\n\n#%% Get the files\ns3files = {obj.key:obj for obj in bucket.objects.all()}\n\n# Select a subset of files\nthese_keys = list(s3files.keys())[:2]\nfor f in these_keys:\n meta_data = s3files[f].Object().metadata\n print(f, meta_data)\n\ntotal_GB=sum([s3files[f].size for f in s3files])/1000/1000/1000\n\nlogging.debug(\"{} files on {}, {:0.2f} GB\".format(len(s3files),bucketname,total_GB))\n\n\n#%%\n\n#%%\nfor row in df.iterrows():\n print(row)\n\n#%%\ndf['uploaded']\n\n#%% Register the dataset onto blockchain","repo_name":"oceanprotocol-archive/mantaray","sub_path":"ipython_scripts/0_notebooks_verify/Superceded/s00_test_connections.py","file_name":"s00_test_connections.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"35804112386","text":"for _ in range(int(input())):\n n=int(input())\n arr=list(map(int,input().split()))\n s1 = arr[0]\n s2 = arr[-1]\n l,r = 0, n-1 \n ans = 0\n while l str:\n p = '{} <--> {}'\n s = p.format(self.sig, self.replace_sigs)\n return s\n\n\nclass inst_syntax():\n def __init__(self, inst_dict=None):\n '''\n inst_dict : {inst_ctgy: insts}\n '''\n if inst_dict is None:\n inst_dict = {}\n self.insts_dict = inst_dict\n\n def insert_insts(self, inst_ctgy, inst_productions):\n if self.insts_dict.get(inst_ctgy) is None:\n self.insts_dict[inst_ctgy] = {}\n target_dict = self.insts_dict[inst_ctgy]\n for p in inst_productions:\n assert isinstance(p, Production)\n if target_dict.get(p.sig) is None:\n target_dict[p.sig] = []\n target_dict[p.sig].extend(p.replace_sigs)\n\n @property\n def productions(self):\n global_ps = {}\n check_ps = {}\n for ctgy, ps in self.insts_dict.items():\n for k, v in ps.items():\n if k == '\\\\instr':\n continue\n assert check_ps.get(k) is None, print(k)\n global_ps[(ctgy, k)] = v\n check_ps[k] = v\n return global_ps\n\n @property\n def productions_without_cgty(self):\n check_ps = {}\n for ctgy, ps in self.insts_dict.items():\n for k, v in ps.items():\n if k == '\\\\instr':\n continue\n assert check_ps.get(k) is None, print(k)\n check_ps[k] = v\n return check_ps\n\n\n def production_keys(self):\n return list(self.productions_without_cgty.keys())\n\n def get_insts(self, ctgy=None):\n if ctgy is not None:\n insts = self.insts_dict[ctgy]['\\\\instr']\n else:\n insts = []\n for v in self.insts_dict.values():\n insts.extend(v['\\\\instr'])\n insts = [inst for inst in insts if inst != '\\\\dots']\n return insts\n\n def init_op_insts(self):\n self.op_insts = {}\n assert self.insts_dict\n for ctgy_data in self.insts_dict.values():\n insts = ctgy_data['\\\\instr']\n for k, op_names in ctgy_data.items():\n if 'op' in k:\n self.op_insts[k] = []\n for inst in insts:\n for name in op_names:\n if name in inst:\n self.op_insts[k].append(inst)\n\n def save_as_json(self, path=None):\n if path is None:\n return self.insts_dict\n else:\n save_json(path, self.insts_dict)\n\n def save_op_inits(self, path=None):\n if path is None:\n return self.op_insts\n else:\n save_json(path, self.op_insts)\n\n\ndef _extract_inst_blocks_from_syx_ct_paragraph(content):\n inst_block_p = r' +\\\\begin\\{array\\}(.*?)\\\\end\\{array\\}'\n inst_block_p = re.compile(inst_block_p, re.S)\n inst_blocks = inst_block_p.findall(content)\n return inst_blocks\n\n\ndef _extract_inst_names_from_block(block):\n production_p = r' \\\\production\\{(.*?)\\}.*?& +(.*?) 
&::=&(.*?)$'\n production_p = re.compile(production_p, re.S)\n paras = []\n lines = block.split('\\n')\n para_lines = []\n for line in lines:\n if re.search(r'^ + \\\\production', line):\n if len(para_lines):\n paras.append('\\n'.join(para_lines))\n para_lines = []\n para_lines.append(line)\n else:\n if para_lines:\n para_lines.append(line)\n paras.append('\\n'.join(para_lines))\n matched_productions = [production_p.findall(para)[0] for para in paras]\n\n ps = []\n for pp in matched_productions:\n sigs = pp[1].split(', ')\n for sig in sigs:\n product = Production.from_extracted(sig, pp[2], name=pp[0])\n ps.append(product)\n extend_Profuctions(ps)\n return ps\n\n\ndef get_inst_names_from_syntax():\n ct_paras = extract_category_paragraph(syntax_inst_rst_path)\n inst_names = inst_syntax()\n other_ps = []\n instr_ps = {}\n ct_names = []\n for ct_name, content in ct_paras:\n ct_names.append(ct_name)\n blocks = _extract_inst_blocks_from_syx_ct_paragraph(content)\n for block in blocks:\n ps = _extract_inst_names_from_block(block)\n for p in ps:\n if p.sig == '\\\\instr':\n pass\n assert instr_ps.get(ct_name) is None\n instr_ps[ct_name] = p\n else:\n other_ps.append(p)\n appended_p = Production('{N}', ['8', '16', '32'], 'append_z')\n other_ps.append(appended_p)\n other_ps_sigs = [x.sig for x in other_ps]\n assert len(other_ps_sigs) == len(set(other_ps_sigs))\n extend_Profuctions(other_ps)\n\n for ct_name, content in ct_paras:\n ct_names.append(ct_name)\n current_ps = []\n blocks = _extract_inst_blocks_from_syx_ct_paragraph(content)\n for block in blocks:\n current_ps.extend(_extract_inst_names_from_block(block))\n current_instr_p = [x for x in current_ps if x.sig == '\\\\instr'][0]\n current_ps_sigs = [x.sig for x in current_ps if x.sig != '\\\\instr']\n current_ps = [x for x in other_ps if x.sig in current_ps_sigs]\n extend_Profuctions(other_ps + [current_instr_p])\n current_ps.append(current_instr_p)\n inst_names.insert_insts(ct_name, current_ps)\n return inst_names\n","repo_name":"erxiaozhou/spec2tcs","sub_path":"combinedInst_util/extract_syntax_names.py","file_name":"extract_syntax_names.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34301381605","text":"\na=input()\ncount=0\nlang={'H','Q','9'}\n \nfor i in range(len(a)):\n if a[i] in lang:\n count=1\n else:\n continue\nif count==1:\n print(\"YES\")\nelse:\n print(\"NO\")\n","repo_name":"f4him/codeforces-python","sub_path":"133A.py","file_name":"133A.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"35882727559","text":"import os\nimport sys\nimport pickle\nimport argparse\nimport ipaddress\nimport socket\nimport time\nfrom pathlib import Path\n\n\nServerPort = 8089\n\ndef recvStr(sock):\n data = sock.recv(4)\n numBytes = int.from_bytes(data, \"big\")\n data = sock.recv(numBytes)\n return data.decode(\"utf-8\")\n\n\ndef sendStr(sock, s):\n data = s.encode(\"utf-8\")\n sent = sock.send(len(data).to_bytes(4, \"big\"))\n assert sent == 4\n sent = sock.send(data)\n assert sent == len(data)\n\n\ndef sendFile(sock, filePath):\n ''' Send a big file over socket. 
Should pair with function recvFile '''\n if not sock:\n raise socket.error\n length = filePath.stat().st_size\n sentByteNum = sock.send(length.to_bytes(4, \"big\"))\n assert sentByteNum == 4\n chunckSize = 64 * 1024\n totalSent = 0\n lastProcess = 0\n startTime = time.perf_counter()\n lastTime = startTime\n lastSent = 0\n with filePath.open(\"rb\") as f:\n while True:\n data = f.read(chunckSize) # Send a small chunk of files\n if not data:\n break\n sendByteNum = sock.send(data)\n assert sendByteNum == len(data)\n totalSent += sendByteNum\n progress = int(totalSent/length * 20)\n if progress > lastProcess:\n curTime = time.perf_counter()\n deltaTime = curTime - lastTime\n deltaSent = totalSent - lastSent\n speed = deltaSent / deltaTime / 1024\n lastSent = totalSent\n lastTime = curTime\n lastProcess = progress\n\n sys.stdout.write('\\r')\n # the exact output you're looking for:\n sys.stdout.write(\"[%-20s] %3d%% %.0f KB/s\" % ('='*progress, 5*progress, speed))\n sys.stdout.flush()\n deltaTime = curTime - startTime\n aveSpeed = length / deltaTime / 1024\n print(\"\\nCost time %.1fs, average speed %.0f KB/s\" % (deltaTime, aveSpeed))\n\n\ndef pickOneHost(hostDict):\n hostIp = None\n phoneChecked = False\n while hostDict:\n ipList = list(hostDict.keys())\n if len(hostDict) > 1:\n print(\"There're %d previous ip scan results:\" % len(hostDict))\n for i, ip in enumerate(ipList):\n print(\"(%d) %s %s\" % (i, ip, hostDict[ip]))\n key = input(\"Please choose one to try:\")\n try:\n idx = int(key)\n ip = ipList[idx]\n except Exception:\n return None\n else:\n ip = ipList[0]\n\n # Try to connect\n while True:\n s = socket.socket()\n try:\n s.connect((ip, ServerPort))\n sendStr(s, \"Identify yourself\")\n recvStr(s)\n hostIp = ip\n break\n except Exception:\n if not phoneChecked:\n print(\"Cannot connect %s %s. Please make sure the phone app is turned on.\" % (ip, hostDict[ip]))\n input(\"Press enter to continue...\")\n phoneChecked = True\n else:\n break\n finally:\n s.close()\n\n if not hostIp:\n print(\"Removed %s %s from scan result\" % (ip, hostDict[ip]))\n hostDict.pop(ip)\n else:\n break\n\n return hostIp\n\n\ndef main(argv):\n # Parse the command line\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-network\", help=\"The ip network to scan\", default=\"192.168.0.0/255.255.255.0\")\n parser.add_argument(\"-ip\", help=\"The ip address to send file\")\n parser.add_argument(\"file\", help=\"file to send\")\n\n args = parser.parse_args(argv)\n filePath = Path(args.file)\n if not filePath.is_file():\n print(\"local file %s doesn't exist\" % str(filePath))\n return 1\n\n hostDict = dict() # Element (ip, hostName)\n cachePath = Path(os.path.expanduser('~'))/\"ipScanCache.pickle\"\n if cachePath.is_file():\n with cachePath.open(\"rb\") as f:\n hostDict = pickle.load(f)\n\n if args.ip:\n s = socket.socket()\n try:\n s.connect((args.ip, ServerPort))\n sendStr(s, \"Identify yourself\")\n msg = recvStr(s)\n if not msg.startswith(\"This is a file receiver at\"):\n return 1\n hostName = msg.split(\"This is a file receiver at\")[1].strip()\n hostDict[args.ip] = hostName\n except Exception:\n print(\"Cannot connect %s\" % args.ip)\n return 1\n finally:\n s.close()\n\n ip = pickOneHost(hostDict)\n\n if not ip:\n # Cannot connect with previous scan result. 
Need a new scan\n for addr in ipaddress.IPv4Network(args.network):\n s = socket.socket()\n s.settimeout(0.2)\n try:\n print(\"Scan %s\" % str(addr))\n s.connect((str(addr), ServerPort))\n s.settimeout(1)\n sendStr(s, \"Identify yourself\")\n msg = recvStr(s)\n if not msg.startswith(\"This is a file receiver at\"):\n s.close()\n continue\n hostName = msg.split(\"This is a file receiver at\")[1]\n hostDict[str(addr)] = hostName\n print(\"Found host %s\" % hostName)\n except Exception:\n pass\n s.close()\n if not hostDict:\n print(\"Cannot find any device in the range %s\" % args.network)\n return 1\n ip = pickOneHost(hostDict)\n assert ip\n # Save the hostDict\n with cachePath.open(\"wb\") as f:\n pickle.dump(hostDict, f)\n\n s = socket.socket()\n try:\n s.connect((ip, ServerPort))\n sendStr(s, \"Please download file\")\n msg = recvStr(s)\n if msg != \"What's the file name?\":\n return 1\n sendStr(s, filePath.name)\n print(\"Sending file to %s %s...\" % (ip, hostDict[ip]))\n sendFile(s, filePath)\n msg = recvStr(s)\n if msg != \"File received\":\n return 1\n print(\"Sent file %s\" % str(filePath))\n except Exception as e:\n print(\"Something wrong happend during transfering file\")\n print(e)\n s.close()\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"Yuhe-Wang/DevTools","sub_path":"scripts/sendfile.py","file_name":"sendfile.py","file_ext":"py","file_size_in_byte":6297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"19951339757","text":"from django.urls import path\nfrom .views import index, products, contractors, documents, operations, storage_items, products_to_xls, \\\n contractors_to_xls, remove_marked_objects, import_products, consolidated_report, motion_report\n\napp_name = 'main'\n\nurlpatterns = [\n path('', index, name='index'),\n path('products/', products, name='products'),\n path('contractors/', contractors, name='contractors'),\n path('documents/', documents, name='documents'),\n path('operations/', operations, name='operations'),\n path('storage_items/', storage_items, name='storage_items'),\n path('products_to_xls/', products_to_xls, name='products_to_xls'),\n path('contractors_to_xls/', contractors_to_xls, name='contractors_to_xls'),\n path('remove_marked_objects/', remove_marked_objects, name='remove_marked_objects'),\n path('import_products/', import_products, name='import_products'),\n path('consolidated_report/', consolidated_report, name='consolidated_report'),\n path('motion_report/', motion_report, name='motion_report')\n]\n","repo_name":"SergeyLebidko/MiniStorage","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"95"} +{"seq_id":"32788304973","text":"from collections import deque\r\n#Breadth First Search\r\ndef BFS(G, node1, node2): #finds if there is a path between 2 nodes\r\n Q = deque([node1])\r\n marked = {node1 : True} #already been to first node\r\n for node in G.adj:\r\n if node != node1:\r\n marked[node] = False #all the other nodes have yet to be visited\r\n while len(Q) != 0: #while queue is not empty\r\n current_node = Q.popleft()\r\n for node in G.adj[current_node]:\r\n if node == node2:\r\n return True\r\n if not marked[node]: #if node is not already visited, add it to the queue to explore its neighbors and mark it as visited\r\n Q.append(node)\r\n marked[node] = True\r\n return 
False","repo_name":"matpetro/python","sub_path":"Graphs/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72026123511","text":"import os\nimport time \nimport subprocess \nfrom BallCatch import BallCatch\nfrom agent import Agent\nfrom torch.utils.tensorboard import SummaryWriter \n\n\ndef agent_train():\n game_name = \"BallCatch\"\n run_time = time.strftime('%m_%d_%H_%M_%S', time.localtime(time.time()))\n log_dir = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop/tensorboard_Data')\n\n port = 6006 \n\n subprocess.Popen(f\"tensorboard --logdir={log_dir} --port={port} --reload_multifile=true\", shell=True)\n\n log_dir = log_dir + '/' + game_name + '_' + str(run_time)\n\n env = BallCatch()\n\n state_size = env.state_n\n action_size = env.action_n\n\n hidden_size = 256\n learning_rate = 0.001 \n memory_size = 10000 \n batch_size = 64\n gamma = 0.99 \n\n agent = Agent(state_size=state_size, action_size=action_size,\n hidden_size=hidden_size, learning_rate=learning_rate,\n memory_size=memory_size, batch_size=batch_size,\n gamma=gamma)\n \n\n # Set up TensorBoard output\n writer = SummaryWriter(log_dir=log_dir)\n\n\n num_episode = 1000\n\n for i_episode in range(num_episode):\n state, info = env.reset()\n done = False\n truncated = False \n total_length = 1 \n total_reward = 0 \n\n while not(done or truncated):\n action = agent.act(state)\n next_state, reward, done, truncated, info = env.step(action)\n\n total_reward += reward \n total_length +=1 \n\n agent.remember(state, action, reward, next_state, done)\n\n state = next_state\n agent.replay()\n\n \n if done:\n agent.decay_epsilon() \n\n\n writer.add_scalar(\"reward\", total_reward, i_episode) \n writer.add_scalar(\"length\", total_length, i_episode)\n writer.add_scalar(\"reward_rate\", total_reward/total_length, i_episode)\n writer.add_scalar(\"epsilion\", agent.epsilon, i_episode)\n\n print(\"Episode: {}, total_reward: {:.2f}, epsilon: {:.2f}, length: {}\".format(i_episode, total_reward, agent.epsilon, total_length))\n\n env.close()\n writer.close() \n\nif __name__ == \"__main__\":\n agent_train() \n","repo_name":"k1seul/comp_RL","sub_path":"agent_train.py","file_name":"agent_train.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"30464139517","text":"'''\n直接把已有的评分当成奖励函数,未评分的电影的奖励函数当成0处理;\n还有种方法是对已有的评分矩阵进行矩阵分解拟合,将拟合后的评分当成奖励函数;\n这样不会存在未评分的情况。\n由于是非个性化的转移概率,所以评分矩阵需要做相应的处理;\n因为有1682部电影,所以每部电影对应一个奖励函数;\n处理为:对于每一部电影而言,统计出所有对他评分的记录的均值作为该电影状态的奖励函数\n'''\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import sparse\n\nratings=np.loadtxt(r'E:\\RQ-MASTER\\recommender system\\mdp-recom\\FPMC-master\\data\\ratings.txt')\nrating_reward=pd.DataFrame(ratings)\ndef mean_nonzero(column):\n sum=0\n times=0\n for i in range(943):\n if column[i]!=0.0:\n times+=1\n sum+=column[i]\n if times!=0.0:\n return 1.0*sum/times\n else:\n return 0.0\n\nrating_mean_list=[]\nfor i in range(1682):\n mean=mean_nonzero(rating_reward[i])\n rating_mean_list.append(float('%.2f' % mean))\n","repo_name":"RQ-XMU/mdp-recom","sub_path":"mdp/get_reward.py","file_name":"get_reward.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29889308106","text":"from __future__ import print_function\n\nimport os.path\nimport subprocess\nimport 
logging\n\nfrom gi.repository import Gtk, GLib, Gio\n\nfrom gtweak.tweakmodel import Tweak\nfrom gtweak.widgets import ListBoxTweakGroup, UI_BOX_SPACING\nfrom gtweak.utils import AutostartManager, AutostartFile\n\ndef _list_header_func(row, before, user_data):\n if before and not row.get_header():\n row.set_header (Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))\n\nclass _AppChooser(Gtk.Dialog):\n def __init__(self, main_window, running_exes):\n Gtk.Dialog.__init__(self, title=_(\"Applications\"))\n\n self._running = {}\n self._all = {}\n\n lb = Gtk.ListBox()\n lb.props.margin = 5\n lb.set_sort_func(self._sort_apps, None)\n lb.set_header_func(_list_header_func, None)\n\n apps = Gio.app_info_get_all()\n for a in apps:\n if a.should_show():\n running = a.get_executable() in running_exes\n w = self._build_widget(\n a,\n _(\"running\") if running else \"\")\n lb.add(w)\n self._all[w] = a\n self._running[w] = running\n\n sw = Gtk.ScrolledWindow()\n sw.props.hscrollbar_policy = Gtk.PolicyType.NEVER\n sw.add(lb)\n\n self.add_button(_(\"_Close\"), Gtk.ResponseType.CLOSE)\n self.add_button(_(\"Add Application\"), Gtk.ResponseType.OK)\n\n self.get_content_area().pack_start(sw, True, True, 0)\n self.set_modal(True)\n self.set_transient_for(main_window)\n self.set_size_request(400,300)\n\n self.listbox = lb\n\n def _sort_apps(self, a, b, user_data):\n if self._running.get(a) and not self._running.get(b):\n return -1\n return 1\n\n def _build_widget(self, a, extra):\n row = Gtk.ListBoxRow()\n g = Gtk.Grid()\n img = Gtk.Image.new_from_gicon(a.get_icon(),Gtk.IconSize.DIALOG)\n g.attach(img, 0, 0, 1, 1)\n img.props.hexpand = False\n lbl = Gtk.Label(a.get_name(), xalign=0)\n g.attach_next_to(lbl,img,Gtk.PositionType.RIGHT,1,1)\n lbl.props.hexpand = True\n lbl.props.halign = Gtk.Align.START\n lbl.props.vexpand = False\n lbl.props.valign = Gtk.Align.CENTER\n if extra:\n g.attach_next_to(\n Gtk.Label(extra),\n lbl,Gtk.PositionType.RIGHT,1,1)\n row.add(g)\n #row.get_style_context().add_class('tweak-white')\n return row\n\n def get_selected_app(self):\n row = self.listbox.get_selected_row()\n if row:\n return self._all.get(row)\n return None\n\nclass _StartupTweak(Gtk.ListBoxRow, Tweak):\n def __init__(self, df, **options):\n\n Gtk.ListBoxRow.__init__(self)\n Tweak.__init__(self, \n df.get_name(),\n df.get_description(),\n **options)\n \n grid = Gtk.Grid(column_spacing=10)\n\n icn = df.get_icon()\n if icn:\n img = Gtk.Image.new_from_gicon(icn,Gtk.IconSize.DIALOG)\n grid.attach(img, 0, 0, 1, 1)\n else:\n img = None #attach_next_to treats this correctly\n\n lbl = Gtk.Label(df.get_name(), xalign=0.0)\n grid.attach_next_to(lbl,img,Gtk.PositionType.RIGHT,1,1)\n lbl.props.hexpand = True\n lbl.props.halign = Gtk.Align.START\n\n btn = Gtk.Button(_(\"Remove\"))\n grid.attach_next_to(btn,lbl,Gtk.PositionType.RIGHT,1,1)\n btn.props.vexpand = False\n btn.props.valign = Gtk.Align.CENTER\n\n self.add(grid)\n\n self.props.margin = 5\n self.get_style_context().add_class('tweak-white')\n\n self.btn = btn\n\nclass AddStartupTweak(Gtk.ListBoxRow, Tweak):\n def __init__(self, **options):\n Gtk.ListBoxRow.__init__(self)\n Tweak.__init__(self, _(\"New startup application\"),\n _(\"Add a new application to be run at startup\"),\n **options)\n\n self.btn = Gtk.Button(\"\")\n self.btn.get_style_context().remove_class(\"button\")\n img = Gtk.Image()\n img.set_from_icon_name(\"list-add-symbolic\", Gtk.IconSize.BUTTON)\n self.btn.set_image(img)\n self.btn.props.always_show_image = True\n self.add(self.btn)\n\nclass 
AutostartListBoxTweakGroup(ListBoxTweakGroup):\n def __init__(self):\n tweaks = []\n\n self.asm = AutostartManager()\n files = self.asm.get_user_autostart_files()\n for f in files:\n try:\n df = Gio.DesktopAppInfo.new_from_filename(f)\n except TypeError:\n logging.warning(\"Error loading desktopfile: %s\" % f)\n continue\n\n sdf = _StartupTweak(df)\n sdf.btn.connect(\"clicked\", self._on_remove_clicked, sdf, df)\n tweaks.append( sdf )\n\n add = AddStartupTweak()\n add.btn.connect(\"clicked\", self._on_add_clicked)\n tweaks.append(add)\n\n ListBoxTweakGroup.__init__(self,\n _(\"Startup Applications\"),\n *tweaks,\n css_class='tweak-group-white')\n self.set_header_func(_list_header_func, None)\n\n def _on_remove_clicked(self, btn, widget, df):\n self.remove(widget)\n AutostartFile(df).update_start_at_login(False)\n\n def _on_add_clicked(self, btn):\n a = _AppChooser(\n self.main_window,\n set(self._get_running_executables()))\n a.show_all()\n resp = a.run()\n if resp == Gtk.ResponseType.OK:\n df = a.get_selected_app()\n if df:\n AutostartFile(df).update_start_at_login(True)\n sdf = _StartupTweak(df)\n sdf.btn.connect(\"clicked\", self._on_remove_clicked, sdf, df)\n self.add_tweak_row(sdf, 0).show_all()\n a.destroy()\n\n def _get_running_executables(self):\n exes = []\n cmd = subprocess.Popen([\n 'ps','-e','-w','-w','-U',\n os.getlogin(),'-o','cmd'],\n stdout=subprocess.PIPE)\n out = cmd.communicate()[0]\n for l in out.split('\\n'):\n exe = l.split(' ')[0]\n if exe and exe[0] != '[': #kernel process\n exes.append( os.path.basename(exe) )\n\n return exes\n\nTWEAK_GROUPS = [\n AutostartListBoxTweakGroup(),\n]\n","repo_name":"nzjrs/gnome-tweak-tool","sub_path":"gtweak/tweaks/tweak_group_startup.py","file_name":"tweak_group_startup.py","file_ext":"py","file_size_in_byte":6363,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"95"} +{"seq_id":"37638587478","text":"#favor manter essa linha\nimport uiscriptlocale\n\nwidth = 176\nheight = 418\n\nwindow = {\n\t\"name\":\"SafeboxWindow\",\n\t\"x\":100,\n\t\"y\":20,\n\t\"style\":(\"movable\", \"float\",),\n\t\"width\":width,\n\t\"height\":height,\n\t\"children\":\n\t(\n\t\t{\n\t\t\t\"name\":\"board\",\n\t\t\t\"type\":\"new_board_with_titlebar\",\n\t\t\t\"title\":uiscriptlocale.SAFE_TITLE,\n\t\t\t\"x\":0,\n\t\t\t\"y\":0,\n\t\t\t\"width\":width,\n\t\t\t\"height\":height,\n\t\t\t\"children\":\n\t\t\t(\n\t\t\t\t{\n\t\t\t\t\t\"name\":\"ChangePasswordButton\",\n\t\t\t\t\t\"type\":\"redbutton\",\n\t\t\t\t\t\"x\":13,\n\t\t\t\t\t\"y\":45,\n\t\t\t\t\t\"width\":90,\n\t\t\t\t\t\"text\":\"Trocar Senha\",\n\t\t\t\t\t\"horizontal_align\":\"left\",\n\t\t\t\t\t\"vertical_align\":\"bottom\",\n\t\t\t\t\t\"children\":\n\t\t\t\t\t(\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\":\"horizontal_separator\",\n\t\t\t\t\t\t\t\"type\":\"horizontalseparator\",\n\t\t\t\t\t\t\t\"width\":width - 14,\n\t\t\t\t\t\t\t\"x\":-6,\n\t\t\t\t\t\t\t\"y\":-10,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"name\":\"top\",\n\t\t\t\t\t\t\t\"type\":\"horizontalseparator\",\n\t\t\t\t\t\t\t\"width\":width - 
14,\n\t\t\t\t\t\t\t\"x\":-6,\n\t\t\t\t\t\t\t\"y\":-46,\n\t\t\t\t\t\t},\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"name\":\"ExitButton\",\n\t\t\t\t\t\"type\":\"redbutton\",\n\t\t\t\t\t\"x\":65,\n\t\t\t\t\t\"y\":45,\n\t\t\t\t\t\"width\":50,\n\t\t\t\t\t\"text\":uiscriptlocale.CLOSE,\n\t\t\t\t\t\"horizontal_align\":\"right\",\n\t\t\t\t\t\"vertical_align\":\"bottom\",\n\t\t\t\t},\n\t\t\t),\n\t\t},\n\t),\n}\n","repo_name":"BrunoNyland/aegon2_cliente_aberto","sub_path":"uiscript/safeboxwindow.py","file_name":"safeboxwindow.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"30636388362","text":"import logging\nfrom django.contrib.gis.geos import Point\nfrom django.test import TestCase\nfrom django.contrib.gis.db.models import functions\nfrom django.contrib.gis.measure import Distance\n\nfrom . import models\n\nlogging.basicConfig(format='%(asctime)s %(levelname)-7s %(thread)-5d %(filename)s:%(lineno)s | %(funcName)s | %(message)s', datefmt='%H:%M:%S')\nlogging.getLogger().setLevel(logging.DEBUG)\nlogging.disable(logging.NOTSET)\nlogging.getLogger('environ').setLevel(logging.INFO)\n\n\nclass GeoTests(TestCase):\n def test_distance(self):\n location_manager = models.Location.objects\n location_manager.create(point=Point(x=float(-119), y=float(35), srid=models.DEFAULT_SRID))\n item = location_manager.create(point=Point(x=float(-118), y=float(34), srid=models.DEFAULT_SRID))\n queryset = location_manager.all()\n\n point = Point(x=float(-119), y=float(34), srid=models.DEFAULT_SRID)\n distance = Distance(km=100)\n\n queryset = queryset.filter(point__distance_lte=(point, distance))\n queryset = queryset.annotate(distance=functions.Distance('point', point))\n\n logging.debug(\"queryset[0].distance: %s\", queryset[0].distance)\n self.assertEqual(Distance(m=92184.53310623), queryset[0].distance)\n self.assertEqual([item], list(queryset))\n\n","repo_name":"wooyek/docker-geodjango","sub_path":"sample/awesome-project/geoapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"95"} +{"seq_id":"2202200716","text":"# Ученые часто славятся своей рассеянностью. И немудрено – уже столько всего открыто, разве упомнишь! 
И вот, когда обнаруживают новый вид каких-нибудь гусениц, нужно придумать им название, но не повториться.\n#\n# Напишите программу, которая проверяет, не было ли название использовано раньше.\n\n\nz = int(input())\nx = []\nfor item in range(z):\n y = input()\n if y not in x:\n print('НЕТ')\n x.append(y)\n else:\n print('ДА')\n","repo_name":"Nlegion/School_Work","sub_path":"ya_lic/2_1_sets/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26458487493","text":"from flask import Flask, request, render_template\n\napp = Flask(__name__)\n\n# Initialize the variables with some default values\nprogress = \"0%\"\nrow = \"n/a\"\nlink = \"n/a\"\nfunction = \"n/a\"\nstart_time = \"n/a\"\n\n@app.route('/update_progress', methods=['POST'])\ndef update_progress():\n global progress\n new_value = request.form['new_value']\n progress = new_value\n return \"progress updated successfully\"\n\n@app.route('/update_row', methods=['POST'])\ndef update_row():\n global row\n new_value = request.form['new_value']\n row = new_value\n return \"row updated successfully\"\n\n@app.route('/update_link', methods=['POST'])\ndef update_link():\n global link\n new_value = request.form['new_value']\n link = new_value\n return \"link updated successfully\"\n\n@app.route('/update_function', methods=['POST'])\ndef update_function():\n global function\n new_value = request.form['new_value']\n function = new_value\n return \"function updated successfully\"\n\n@app.route('/update_starttime', methods=['POST'])\ndef update_starttime():\n global start_time\n new_value = request.form['new_value']\n start_time = new_value\n return \"start time updated successfully\"\n\n@app.route('/display_variables')\ndef display_variables():\n return f\"Progress: {progress}\\nRow: {row}\\nLink: {link}\\nFunction: {function}\\nStart Time: {start_time}\"\n\n@app.route('/')\ndef index():\n rendered = render_template('index.html', progress=progress, row=row, link=link, function=function, start_time=start_time)\n return rendered\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"maxtalwar/Dev-Report","sub_path":"dev_report/github_urls_present/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32256604873","text":"class NaturalResources:\r\n def __init__(self):\r\n self.tree_selected = False\r\n self.rock_selected = False\r\n self.mine_selected = False\r\n self.fish_selected = False\r\n self.comestible_fauna_selected = False\r\n self.fruit_selected = False\r\n self.fishes = [\r\n self.atun,\r\n self.sardina,\r\n self.fish_bank\r\n ]\r\n self.comestible_fauna = [\r\n self.pavo,\r\n self.chicken,\r\n self.sheep,\r\n self.goat,\r\n self.pig,\r\n self.cow\r\n ]\r\n self.fruits = [\r\n self.apple,\r\n self.kiwi,\r\n self.uva,\r\n self.cheer\r\n ]\r\n self.trees = [\r\n self.roble,\r\n self.pino,\r\n self.cipres,\r\n self.palm,\r\n self.nogal,\r\n self.old_tree,\r\n self.big_tree\r\n ]\r\n self.rock = [\r\n self.small_rocks,\r\n self.medium_rocks,\r\n self.big_rocks,\r\n self.megalito\r\n ]\r\n self.mines = [\r\n self.small_mine,\r\n self.medium_mine,\r\n self.big_mine,\r\n self.megalito_mine\r\n ]\r\n self.wild_fauna = [\r\n \"bear\",\r\n \"wolf\",\r\n \"jaguar\",\r\n \"lion\",\r\n \"tiger\",\r\n \"rhino\",\r\n \"hippopotamus\"\r\n ]\r\n self.marine_fauna = [\r\n \"dolphin\",\r\n 
\"shark\",\r\n \"whale\",\r\n \"seal\",\r\n \"turtle\",\r\n \"octopus\",\r\n \"squid\"\r\n ]\r\n\r\nclass Wood(NaturalResources):\r\n def __init__(self):\r\n self.roble = 200\r\n self.pino = 500\r\n self.cipres = 350\r\n self.palm = 300\r\n self.nogal = 400\r\n self.old_tree = 100\r\n self.big_tree = 850\r\n self.apple = 100\r\n self.kiwi = 150\r\n self.uva = 200\r\n self.cheer = 250\r\n \r\nclass Food(NaturalResources):\r\n def __init__(self):\r\n self.apple = 100\r\n self.kiwi = 150\r\n self.uva = 200\r\n self.cheer = 250\r\n self.atun = 100\r\n self.sardina = 50\r\n self.fish_bank = 200\r\n self.chicken = 100\r\n self.pavo = 150\r\n self.sheep = 200\r\n self.goat = 300\r\n self.pig = 350\r\n self.cow = 500\r\n\r\nclass Stone(NaturalResources):\r\n def __init__(self):\r\n self.small_rocks = 250\r\n self.medium_rocks = 450\r\n self.big_rocks = 600\r\n self.megalito = 2000\r\n\r\nclass Metal(NaturalResources):\r\n def __init__(self):\r\n self.small_mine = 300\r\n self.medium_mine = 500\r\n self.big_mine = 700\r\n self.megalito_mine = 2000","repo_name":"jacobjauregui/civis","sub_path":"gaia.py","file_name":"gaia.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2304087064","text":"#FFNN- feed forward neural network\r\n\r\nfrom tensorflow.keras.layers import Input, Dense, Flatten\r\nfrom tensorflow.keras.models import Model\r\n\r\nm=28\r\nn=28 #image size- m-by-n\r\nh=4 #fully connected hidden layers\r\nc=10 #number of output classes\r\n\r\n#input layer\r\ninputs = Input((m, n))\r\nx = Flatten()(inputs) #convert from 2D into 1D(vector)\r\n\r\n#hidden layer\r\nx = Dense(128, activation=\"softmax\")(x)\r\nfor i in range(h-1):\r\n x = Dense(32, activation=\"softmax\")(x)\r\n\r\n#output layer\r\noutputs = Dense(c)(x)\r\n\r\n#model\r\nmodel = Model(inputs, outputs)\r\nmodel.summary()","repo_name":"Rubel2475/Artificial-intelligence-Lab_4Y2S","sub_path":"Lab_final/EXP-9_FFNN.py","file_name":"EXP-9_FFNN.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"72116941744","text":"###### 자바스크립트에서 한글 깨짐 현상 방지용 ################################\nimport sys\nimport io\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\n###########################################################################\n\n# print('삼성전자 전일종가 알려줘')\n\nimport requests\n\n# 네이버 클로바 API - 15초/4원 과금\n\ndata = open('./uploads/' + sys.argv[1], \"rb\") # STT를 진행하고자 하는 음성 파일\n\nLang = \"Kor\" # Kor / Jpn / Chn / Eng\nURL = \"https://naveropenapi.apigw.ntruss.com/recog/v1/stt?lang=\" + Lang\n \nID = \"sb21okbjmh\" # 인증 정보의 Client ID\nSecret = \"iFVd8KWihMzJnXRFGbpBQ6vutUT0FhmLzTSAnklV\" # 인증 정보의 Client Secret\n \nheaders = {\n \"Content-Type\": \"application/octet-stream\", # Fix\n \"X-NCP-APIGW-API-KEY-ID\": ID,\n \"X-NCP-APIGW-API-KEY\": Secret,\n}\nresponse = requests.post(URL, data=data, headers=headers)\nrescode = response.status_code\n\nif(rescode == 200):\n #print (response.text)\n print(response.text.split('\":\"')[1].split('\"')[0].replace(' ',''))\n # 공백 없이 전부\nelse:\n print(\"Error : \" + 
response.text)\n\n","repo_name":"charles098/2021_Fall_Capston_Design2","sub_path":"server/api_codes/naverCloba.py","file_name":"naverCloba.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"21024983278","text":"from collections import Counter\nimport glob\nimport os\nimport re\nfrom math import log2\nimport pandas as pd\nfrom nltk.util import ngrams\nimport csv\n\ndef unigrams(words): return Counter(words)\n\ndef bigrams(words): return Counter(ngrams(words,2))\n\ndef H(elements):\n N = sum(elements)\n return -sum([k * log2(k/N + (k==0)) for k in elements])\n\ndef calcuate_llr(bigram, BIGRAMS,UNIGRAMS, N_BI, N_UNI):\n k_11 = BIGRAMS[bigram] / N_UNI\n px = UNIGRAMS[bigram[0]]/N_BI\n py = UNIGRAMS[bigram[1]]/N_BI\n k_22 = 1 - (px+py-k_11)\n k_21 = px-k_11\n k_12 = py - k_11\n sumOfEvents = k_11+k_12+k_21+k_22;\n H_all = H((k_11,k_12,k_21,k_22))\n H_rows = H((k_11+k_12,k_21+k_22))\n H_cols = H((k_11+k_21,k_12+k_22))\n llr = 2*sumOfEvents * (H_rows + H_cols - H_all)\n return llr;\n\ndataPath = 'dump/'\nregexp = re.compile(\"\\w+\")\nfiles = glob.glob(os.path.join(dataPath,\"*.txt\"))\nWORDS = []\nWORDS2 = []\ni = 0\ngrammarClasses=set([\"subst\", \"depr\", \"num\", \"numcol\", \"adj\", \"ppron12\", \"ppron3\", \"siebie\", \"ger\", \"pact\", \"ppas\", \"prep\"])\nfor file in files:\n print(\"reading csv file: \" + file)\n with open(file) as csvfile:\n reader = csv.reader(csvfile, delimiter=\"\\t\")\n print(\"file read, getting words from csv\")\n for row in reader:\n if len(row) < 3: continue\n word = str(row[1]).lower()\n if regexp.match(word):\n grammarClass = str(row[2]).split(\":\").pop(0)\n word = word+\":\"+grammarClass\n WORDS.append(word)\n if(set([grammarClass]) & grammarClasses != set()):\n WORDS2.append(word)\nprint(\"creating unigrams 1\")\nUNIGRAMS1 = unigrams(WORDS)\nN_UNI1 = sum(UNIGRAMS1.values())\nprint(\"creating bigrams 1\")\nBIGRAMS1 = bigrams(WORDS)\nN_BI1 = sum(BIGRAMS1.values())\n\nprint(\"creating unigrams 2\")\nUNIGRAMS2 = unigrams(WORDS2)\nN_UNI2 = sum(UNIGRAMS2.values())\nprint(\"creating bigrams 2\")\nBIGRAMS2 = bigrams(WORDS2)\nN_BI2 = sum(BIGRAMS2.values())\n\nprint(\"saving results1 to files\")\nbig1 = open(\"bigrams1.txt\",\"w+\")\nllr1 = open(\"llr1.txt\",\"w+\")\nresults = open(\"results.txt\",\"w+\")\nfor bigram in BIGRAMS1.most_common():\n big1.write(\"{}\\t{}\\n\".format(bigram[0], bigram[1]))\n llr = calcuate_llr(bigram[0],BIGRAMS1, UNIGRAMS1, N_BI1, N_UNI1)\n llr1.write(\"{}\\t{}\\n\".format(bigram[0], llr))\n left = bigram[0][0].split(\":\").pop(1).split(\"'\").pop(0)\n right = bigram[0][1].split(\":\").pop(1).split(\"'\").pop(0)\n if(left == \"subst\") and ((right == \"subst\") or (right == \"adj\")):\n results.write(\"{}\\t{}\\n\".format(bigram[0], llr))\nllr1.close()\nbig1.close()\nresults.close()\n\nprint(\"saving results2 to file\")\nbig2 = open(\"bigrams2.txt\",\"w+\")\nllr2 = open(\"llr2.txt\",\"w+\")\nfor bigram in BIGRAMS2.most_common():\n big2.write(\"{}\\t{}\\n\".format(bigram[0], bigram[1]))\n llr = calcuate_llr(bigram[0],BIGRAMS2, UNIGRAMS2, N_BI2, N_UNI2)\n llr2.write(\"{}\\t{}\\n\".format(bigram[0], llr))\nllr2.close()\nbig2.close()","repo_name":"marcinosypka/pjn","sub_path":"5/analyzeText.py","file_name":"analyzeText.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"4936421536","text":"import os\n\nfrom PIL import Image\nimport 
io\nimport base64\n\n\nfrom pubnub.callbacks import SubscribeCallback\nfrom pubnub.enums import PNStatusCategory, PNOperationType\nfrom pubnub.enums import PNReconnectionPolicy\nfrom pubnub.pnconfiguration import PNConfiguration\nfrom pubnub.pubnub import PubNub\n\nENTRY = \"Earth\"\nCHANNEL = \"awesomeChannel\"\n\npnconfig = PNConfiguration()\npnconfig.publish_key = \"pub-c-10b9f92d-a0e2-4d07-bef2-8b84aee0c2a3\"\npnconfig.subscribe_key = \"sub-c-12ea3aa6-ed25-11ea-a728-4ec3aefbf636\"\npnconfig.uuid = \"serverUUID-SUB\"\npnconfig.reconnect_policy = PNReconnectionPolicy.LINEAR\n\npubnub = PubNub(pnconfig)\n\n\nclass MySubscribeCallback(SubscribeCallback):\n def presence(self, pubnub, event):\n print(\"[PRESENCE: {}]\".format(event.event))\n print(\"uuid: {}, channel: {}\".format(event.uuid, event.channel))\n\n def status(self, pubnub, status):\n \n if status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:\n print(\"PN Unexpected Disconnect\")\n pubnub.reconnect()\n\n if status.category == PNStatusCategory.PNConnectedCategory:\n print(\"PN Connected\")\n\n if status.category == PNStatusCategory.PNReconnectedCategory:\n print(\"PN Re-Connected\")\n pubnub.subscribe().channels('devChannel').execute()\n\n if status.category == PNStatusCategory.PNDecryptionErrorCategory:\n print(\"PN Decryption Error\")\n \n \n def message(self, pubnub, event):\n print(\"[MESSAGE received]\")\n\n print(\"message : {}\".format(event.message[\"message\"]))\n\nclass HandleDisconnectsCallback(SubscribeCallback):\n def status(self, pubnub, status):\n if status.category == PNStatusCategory.PNUnexpectedDisconnectCategory:\n print(\"Fuck you\")\n # internet got lost, do some magic and call reconnect when ready\n pubnub.reconnect()\n elif status.category == PNStatusCategory.PNTimeoutCategory:\n # do some magic and call reconnect when ready\n pubnub.reconnect()\n print(\"Fuck you\")\n\n else:\n print(\"Bitch im working\")\n \n def presence(self, pubnub, presence):\n pass\n \n def message(self, pubnub, event):\n print(\"[MESSAGE received]\")\n print(\"{} : {}\".format(event.message[\"name\"], event.message[\"message\"]))\n s = event.message[\"message\"]\n s = s[2:-1]\n f = io.BytesIO(base64.b64decode(s))\n pilimage = Image.open(f)\n pilimage.save(\"recovered.jpg\")\n\n def signal(self, pubnub, signal):\n pass\n\ndisconnect_listener = HandleDisconnectsCallback()\n \npubnub.add_listener(disconnect_listener)\n\n\npubnub.subscribe().channels(CHANNEL).with_presence().execute()\n\nprint(\"***************************************************\")\nprint(\"* Waiting for updates to The Guide about {}... 
*\".format(ENTRY))\nprint(\"***************************************************\")\n","repo_name":"Ravenking7675/Screen-Sharing-using-PubSub-","sub_path":"sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"436484551","text":"from pytz import common_timezones_set\nimport StrategyLearner\nimport ManualStrategy \nimport datetime as dt \nimport time\nimport marketsimcode\nimport pandas as pd\nimport matplotlib.pyplot as plt \t\n\ndef run_experiment(): \n\n \n sd = dt.datetime(2008,1,1)\n ed = dt.datetime(2009,12,31)\n sv = 100000\n\n # Strategy Learner - impact = 0.0005\n qlearner = StrategyLearner.StrategyLearner(verbose = False, impact=0.0005)\n qlearner.add_evidence(symbol=\"JPM\",sd=dt.datetime(2008,1,1),ed=dt.datetime(2009,12,31),sv=100000)\n q_trades = qlearner.testPolicy(symbol=\"JPM\",sd=dt.datetime(2008,1,1),ed=dt.datetime(2009,12,31),sv=100000)\n q_port_vals1 = marketsimcode.compute_portvals(q_trades,'JPM',sv,0,0.0005)\n q_port_vals_normed1=q_port_vals1/q_port_vals1.iloc[0,:]\n\n\n # Strategy Learner - impact = 0.005\n qlearner = StrategyLearner.StrategyLearner(verbose = False, impact=0.005)\n qlearner.add_evidence(symbol=\"JPM\",sd=dt.datetime(2008,1,1),ed=dt.datetime(2009,12,31),sv=100000)\n q_trades = qlearner.testPolicy(symbol=\"JPM\",sd=dt.datetime(2008,1,1),ed=dt.datetime(2009,12,31),sv=100000)\n q_port_vals2 = marketsimcode.compute_portvals(q_trades,'JPM',sv,0,0.005)\n q_port_vals_normed2=q_port_vals2/q_port_vals2.iloc[0,:]\n\n # Strategy Learner - impact = 0.05\n qlearner = StrategyLearner.StrategyLearner(verbose = False, impact=0.05)\n qlearner.add_evidence(symbol=\"JPM\",sd=dt.datetime(2008,1,1),ed=dt.datetime(2009,12,31),sv=100000)\n q_trades = qlearner.testPolicy(symbol=\"JPM\",sd=dt.datetime(2008,1,1),ed=dt.datetime(2009,12,31),sv=100000)\n q_port_vals3 = marketsimcode.compute_portvals(q_trades,'JPM',sv,0,0.05)\n q_port_vals_normed3=q_port_vals3/q_port_vals3.iloc[0,:]\n\n\n \n \n\n\n \n\n # Benchmark_orders = q_trades.copy()\n # Benchmark_orders[:]=0\n # Benchmark_orders.iloc[0]=1000\n\n \n # Benchmark_port_vals = marketsimcode.compute_portvals(Benchmark_orders,'JPM',sv,9.95,0.005)\n # q_port_vals = marketsimcode.compute_portvals(q_trades,'JPM',sv,9.95,0.005)\n # manual_port_vals = marketsimcode.compute_portvals(manual_trades,'JPM',sv,9.95,0.005)\n\n\n q_cr1,q_adr1,q_sddr1,q_sr1 = marketsimcode.get_statistics(q_port_vals1) \n q_port_vals_normed1=q_port_vals1/q_port_vals1.iloc[0,:]\n\n q_cr2,q_adr2,q_sddr2,q_sr2 = marketsimcode.get_statistics(q_port_vals2) \n q_port_vals_normed2=q_port_vals2/q_port_vals2.iloc[0,:]\n\n q_cr3, q_adr3,q_sddr3,q_sr3 = marketsimcode.get_statistics(q_port_vals3) \n q_port_vals_normed3=q_port_vals3/q_port_vals3.iloc[0,:]\n\n \n #sys.stdout = open(\"p6_results.txt\", \"w\")\n print()\n print(f\" IN SAMPLE Date Range: {sd} to {ed}\") \n print('------------------------------------------------------------------------------------------------------')\t\n print(' Startegy Learner Impact 0.0005 0.005 0.05')\n print('------------------------------------------------------------------------------------------------------')\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n print(f\"Sharpe Ratio {'%08.6f'%q_sr1[0]} {'%08.6f'%q_sr2[0]} {'%08.6f'%q_sr3[0]}\") \n print('------------------------------------------------------------------------------------------------------')\t\t\t \t \t\t \t 
\t\t\t \t\t \t\t\t \t\t\t \t \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n print(f\"Cumulative Return {'%08.6f'%q_cr1[0]} {'%08.6f'%q_cr2[0]} {'%08.6f'%q_cr3[0]}\") \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n print('------------------------------------------------------------------------------------------------------')\t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n print(f\"Standard Deviation {'%08.6f'%q_sddr1[0]} {'%08.6f'%q_sddr2[0]} {'%08.6f'%q_sddr3[0]}\")\n print('------------------------------------------------------------------------------------------------------') \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n print(f\"Average Daily Return {'%08.6f'%q_adr1[0]} {'%08.6f'%q_adr2[0]} {'%08.6f'%q_adr3[0]}\") \n print('------------------------------------------------------------------------------------------------------') \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n print(f\"Final Portfolio Value: {'%09.2f'%q_port_vals1.iloc[-1][0]} {'%09.2f'%q_port_vals2.iloc[-1][0]} {'%09.2f'%q_port_vals3.iloc[-1][0]}\")\n print('------------------------------------------------------------------------------------------------------')\t\n \t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n df_temp = pd.concat( \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n [q_port_vals_normed1,q_port_vals_normed2, q_port_vals_normed3], keys=[\"Strategy Learner\",\"Manual Strategy\", \"Benchmark\"], axis=1 \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n ) \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n ax = df_temp.plot(title=\"Strategy Learner vs Impact \",color=['y','r','purple'], grid=True, fontsize=12)\n ax.legend(['Impact = 0.0005',\"Impact = 0.005\",'Impact = 0.05'])\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Normalized daily portfolio value\")\n fig = ax.get_figure()\n fig.savefig('images/impact.png')\n plt.close()\n \n\ndef test():\n print()\n print()\n print('********************** Starting Experiment 2 **********************')\n run_experiment()\n print('********************** End of Experiment 2 **********************')\n print()\n print()\n\n\ndef author(): \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n \"\"\" \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n :return: The GT username of the student \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n :rtype: str \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n \"\"\" \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t\t\t \t \n return \"ybouzekraoui3\"\nif __name__ == \"__main__\":\n test()\n","repo_name":"Younes43/Trading-Strategy-Evaluation","sub_path":"experiment2.py","file_name":"experiment2.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"74650609903","text":"import datetime\nimport logging\nfrom abc import ABC, abstractmethod\n\nfrom lib.models.etl import Pipeline\nfrom lib.providers.state import BaseStateProvider\n\n\nclass Process(ABC):\n \"\"\"\n Процесс загрузки данных оперирующий несколькими входными параметрами:\n - Хранилище состояния, откуда берется сохраненное состояние по каждой итерации\n для начала следующей итерации с нужного места\n - Экстрактор, позволящий достававть данные из хранилища\n - Трансформер, преобразующий данные в пригодный вид для последующей загрузки\n - Загрузчик, склыдвающий в быстрое хранилище подготовленные данные\n \"\"\"\n\n @abstractmethod\n def run(self):\n \"\"\"Основная точка входа для процесса загрузки данных\n Выполняет итерацию 
загрузки данных, основываясь на фильтрах и текущем состоянии\"\"\"\n\n\nclass ETLProcess(Process):\n def __init__(\n self,\n pipeline: Pipeline,\n state_provider: BaseStateProvider,\n logger: logging.Logger = None,\n ):\n self.pipeline = pipeline\n\n self.extractor = pipeline.extractor\n self.transformer = pipeline.transformer\n self.loader = pipeline.loader\n\n self.state = state_provider\n self.logger = logger\n self.__init_logger()\n\n async def run(self):\n ids = set()\n for f in self.pipeline.filters:\n self.log(f\"search by {f.state_key}\")\n state = await self.__get_state(f.state_key)\n self.log(f\"state: {state}\")\n\n # Выборка всех ID сущности по фильтру\n current_filter_result = self.extractor.extract(\n query=f.query, args={f.param: state}\n )\n if current_filter_result:\n # Сборка уникальных ID\n ids.update({r.id for r in current_filter_result})\n\n # Сохранение последней обработанной даты в фильтре\n last_filter_dt = max(\n [r.modified for r in current_filter_result]\n )\n await self.state.set(f.state_key, str(last_filter_dt))\n self.log(f\"{f.state_key} now is {last_filter_dt}\")\n\n self.log(f\"Collected {len(ids)} ids\")\n\n if ids:\n # Выборка пачками через генератор данные по сущностям\n db_gen = self.extractor.extract_generator(\n query=self.pipeline.collect_query,\n args={\"ids\": tuple(str(id_) for id_ in ids)},\n model=self.pipeline.model,\n )\n # Загрузка преобразованных данных\n for chunk in db_gen:\n transformed_data = [\n self.transformer.transform(c) for c in chunk\n ]\n result = self.loader.load(transformed_data)\n self.log(result)\n self.log(f\"Loaded {len(chunk)} rows\")\n\n self.log(\"=\" * 50)\n\n def log(self, msg: str, level=None):\n \"\"\"Обертка логирования для упрощенного доступа\"\"\"\n self.logger.log(level=level or logging.INFO, msg=msg)\n\n async def __get_state(self, state_key: str):\n value = await self.state.get(state_key)\n if value:\n return datetime.datetime.fromisoformat(value)\n return datetime.datetime.min\n\n def __init_logger(self):\n if not self.logger:\n self.logger = logging.getLogger(__name__)\n","repo_name":"async-python/Async_API_sprint_2","sub_path":"etl/lib/etl_process.py","file_name":"etl_process.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"5767034190","text":"import torch\nfrom torch.utils.data import DataLoader\nimport math\nfrom sentence_transformers import models, losses\nfrom sentence_transformers import LoggingHandler, SentenceTransformer, util, InputExample\nfrom sentence_transformers.evaluation import EmbeddingSimilarityEvaluator, LabelAccuracyEvaluator\nimport logging\nfrom datetime import datetime\nimport sys\nimport re\nimport numpy as np\nimport argparse\nfrom utils import read_sts\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--model',dest='name_model', type=str, help='bert/roberta/gpt...', default='bert-base-cased')\nparser.add_argument('--dataset', dest='dataset', type=str, help='name of dataset', default='')\nparser.add_argument('--path-data',dest='path_data', type=str , help='path of folder data', default='')\nparser.add_argument('--pooling',dest='pooling', type=str, help='mean/max', default='mean')\nparser.add_argument('--use-topic',dest='use_topic', type=str, help='Use Trasformer-topic (True/False)', default='False')\nparser.add_argument('--train-topic',dest='train_topic', type=str, help='Update topic embed during training (True/False)', 
default='False')\nparser.add_argument('--train-file',dest='train_file', type=str, default='')\nparser.add_argument('--dev-file', dest='dev_file', type=str, default='')\nparser.add_argument('--test-file',dest='test_file', type=str, default='')\nparser.add_argument('--batch-size', dest='batch_size', type=int, default=8)\nparser.add_argument('--epochs', dest='num_epochs', type=int, default=2)\nparser.add_argument('--num-topics', dest='num_topic', type=int, default=0)\nargs = parser.parse_args()\n\n\n\nuse_topic = True if args.use_topic.lower() != 'false' else False\ntrain_topic = True if args.train_topic.lower() != 'false' else False\n\n# model_save_path = args.path_data+'/'+args.dataset+'_'+args.name_model+'_'+str(args.use_topic)+'_'+str(args.train_topic)+'_'+args.pooling +\"_\"+ str(args.num_topic)\n\n# topic set up\nif use_topic is True:\n model_save_path = args.path_data+'/'+args.dataset+'_'+args.name_model+'_'+str(args.use_topic)+'_'+str(args.train_topic)+'_'+args.pooling +\"_\"+ str(args.num_topic)\n W = torch.tensor(np.load(args.path_data+'/W_gensim_t'+ str(args.num_topic) + '.npy'), dtype = torch.float) \n word_embedding_model = models.Transformer_Topic(args.name_model, topic_weight = W, train_topic = train_topic, max_seq_length = 512)\n transfer_layer = models.Features_transfer(word_embedding_model.get_word_embedding_dimension(), word_embedding_model.get_word_embedding_dimension())\n pooling_model = models.Pooling(transfer_layer.get_word_embedding_dimension(), pooling_mode_mean_tokens=True, pooling_mode_cls_token=False, pooling_mode_max_tokens=False)\n model = SentenceTransformer(modules=[word_embedding_model, transfer_layer, pooling_model])\nelse:\n model_save_path = args.path_data+'/'+args.dataset+'_'+args.name_model+'_'+str(args.use_topic)+'_'+str(args.train_topic)+'_'+args.pooling\n word_embedding_model = models.Transformer(args.name_model, max_seq_length = 256)\n pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode_mean_tokens=True, pooling_mode_cls_token=False, pooling_mode_max_tokens=False)\n model = SentenceTransformer(modules=[word_embedding_model, pooling_model])\n\nmodel.cuda()\n\ntrain_samples = []\ntrain_data = read_sts(args.path_data+'/'+args.train_file)\nfor sample in train_data:\n train_samples.append(InputExample(texts=[sample[1], sample[2]], label=float(sample[0])/5.0))\n \ntrain_dataloader = DataLoader(train_samples, shuffle=True, batch_size=args.batch_size)\ntrain_loss = losses.CosineSimilarityLoss(model=model)\n\ndev_samples = []\nif args.dev_file == '':\n dev_samples = train_samples[0:1000]\nelse:\n train_data = read_sts(args.path_data+'/'+args.dev_file)\n for sample in train_data:\n dev_samples.append(InputExample(texts=[sample[1], sample[2]], label=float(sample[0])/5.0))\n\nevaluator = EmbeddingSimilarityEvaluator.from_input_examples(dev_samples, name='sts-dev')\n\nwarmup_steps = math.ceil(len(train_dataloader) * args.num_epochs * 0.1) #10% of train data for warm-up\nlogging.info(\"Warmup-steps: {}\".format(warmup_steps))\n\n# Train the model\nmodel.fit(train_objectives=[(train_dataloader, train_loss)],\n evaluator=evaluator,\n epochs=args.num_epochs,\n evaluation_steps=1000,\n warmup_steps=warmup_steps,\n output_path=model_save_path\n )\n\n\ntest_samples = []\ntrain_data = read_sts(args.path_data+'/'+args.test_file)\nfor sample in train_data:\n test_samples.append(InputExample(texts=[sample[1], sample[2]], label=float(sample[0])/5.0))\n \nmodel = SentenceTransformer(model_save_path)\nmodel.cuda()\ntest_evaluator = 
EmbeddingSimilarityEvaluator.from_input_examples(test_samples, batch_size=args.batch_size, name='sts-test')\ntest_evaluator(model, output_path=model_save_path)\ntest_evaluator.predict(model, output_path=model_save_path)","repo_name":"binhdt95/SubTST","sub_path":"sts.py","file_name":"sts.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17047223782","text":"from django.conf.urls import url\n\nfrom dashboard.views import *\n\n\nurlpatterns = [\n url(r'^$', dashboard_view, name=\"dashboard_view\"),\n url(r'^settings', setitngs_view, name=\"setitngs_view\"),\n url(r'^save-settings', save_settings_action, name=\"save_settings_action\"),\n url(r'^activate-trade-bot/(?P.+)', activate_trade_bot_action, name=\"activate_trade_bot_action\"),\n url(r'^deactivate-trade-bot/(?P.+)', deactivate_trade_bot_action, name=\"deactivate_trade_bot_action\"),\n url(r'^delete-trade-bot/(?P.+)', delete_trade_bot_action, name=\"delete_trade_bot_action\"),\n url(r'^activate-custom-bot/(?P.+)', activate_custom_bot_action, name=\"activate_custom_bot_action\"),\n url(r'^deactivate-custom-bot/(?P.+)', deactivate_custom_bot_action, name=\"deactivate_custom_bot_action\"),\n url(r'^delete-custom-bot/(?P.+)', delete_custom_bot_action, name=\"delete_custom_bot_action\")\n]\n","repo_name":"jthomaskerr/Haas-Dradis","sub_path":"dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"2950717178","text":"\ndef draw_state(state, board_size):\n x_count = board_size[0]\n y_count = board_size[1]\n\n for x in range(x_count):\n this_row = state.get_for_row(x)\n\n draw_map = {\n \"w\": \"O \",\n \"b\": \"X \"\n }\n\n line_text = [draw_map[this_row[(x, y)][0]] if (x, y) in this_row else \"- \" for y in range(y_count)]\n\n to_print = \"\".join(line_text)\n\n print(\"| \" + to_print + \"|\")\n","repo_name":"steviej08/checkers-python","sub_path":"src/console/Draw.py","file_name":"Draw.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"16244608566","text":"import pandas as pd\r\nimport os\r\nimport json\r\nimport logging\r\n\r\n# Create log file if one doesn't already exist and add to log with each run\r\n# Mk separate log files, basic_config only configures unconfigured root files\r\nlogging.basicConfig(\r\n filename='./logs', # Path to log file\r\n level=logging.INFO, # Log info, warnings, errors, and critical errors\r\n filemode='a',\r\n format='%(asctime)s-%(name)s - %(levelname)s - %(message)s',\r\n datefmt='%d %b %Y %H:%M:%S %Z',\r\n force=True)\r\n\r\n# Access the created logger\r\nlogger = logging.getLogger()\r\n\r\n# Load config.json and get input and output paths\r\n# Config file must be stored in same dir as ingestion.py\r\nwith open('config.json', 'r') as f:\r\n config = json.load(f)\r\n\r\n\r\ndef get_csv_files():\r\n \"\"\"\r\n Get list of csv files in data directory and return their full path.\r\n \"\"\"\r\n # Get full path to csv folder and the csv files inside\r\n input_folder_path = os.path.join(\r\n os.getcwd(), config['input_folder_path'] + '/')\r\n input_data_files = os.listdir(input_folder_path)\r\n csv_files = [f for f in input_data_files if str('.csv') in f]\r\n # Return list of csv filepaths\r\n return [os.path.join(\r\n input_folder_path, csv_file) for csv_file in 
csv_files]\r\n\r\n\r\ndef output_folder():\r\n \"\"\"\r\n Get path to output folder. Create one if it doesn't already exist.\r\n \"\"\"\r\n # Make ingested data directory if one doesn't exist or is not writeable\r\n output_folder_path = os.path.join(\r\n os.getcwd(), config['output_folder_path'] + '/')\r\n if os.path.isdir(output_folder_path):\r\n try:\r\n # Confirm folder can be written to\r\n assert os.access(output_folder_path, os.W_OK | os.X_OK)\r\n except AssertionError:\r\n print(f'{output_folder_path} cannot be written to')\r\n\r\n # Create writeable folder if one doesn't exist\r\n else:\r\n os.umask(0) # Ensure folder can be written to\r\n os.makedirs(output_folder_path)\r\n return output_folder_path\r\n\r\n\r\ncsv_files = get_csv_files()\r\n\r\n\r\ndef merge_multiple_dataframes(csv_files,\r\n save=True,\r\n output_csv='finaldata.csv'):\r\n \"\"\"\r\n Merge multiple data frames and write the output to a new csv file\r\n Shapes and Column names must be the same in all instances\r\n Write the names of the csv file files used to ingestedfiles.txt\r\n Inputs:\r\n csv_files = list of csv file filepaths\r\n save = True if saving new data frame to a new file\r\n output_csv = filename of merged data frame\r\n \"\"\"\r\n # Merge data frames and drop duplicates\r\n full_df = pd.DataFrame()\r\n for csv in csv_files:\r\n df = pd.read_csv(csv)\r\n full_df = pd.concat([df, full_df])\r\n df = full_df.drop_duplicates()\r\n\r\n if save and output_csv is not None:\r\n # Save csv file to output folder\r\n output_csv_folder = output_folder()\r\n output_csv = 'finaldata.csv'\r\n df.to_csv(os.path.join(output_csv_folder, output_csv), index=False)\r\n\r\n # Save list of used data frames to output folder\r\n used_csvs = os.path.join(output_csv_folder, 'ingestedfiles.txt')\r\n with open(used_csvs, 'w') as fp:\r\n fp.write(str(csv_files))\r\n return df\r\n\r\n\r\nif __name__ == '__main__':\r\n df = merge_multiple_dataframes(csv_files)\r\n","repo_name":"Dyrutter/Attrition","sub_path":"ingestion.py","file_name":"ingestion.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12214601130","text":"#inserire la parola nella variabile message (linea 90)\n#insert the word in the variable: message (line 90)\n\n#inserire la chiave di crittografia nella variabile message (linea 91)\n#insert the word in the variable: message (line 91)\n\n#INSERIRE LA PAROLA IN MAIUSCOLO\n#INSERT THE WORD IN CAPS\n\nALPHABET=\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\nletter_to_index=dict(zip(ALPHABET, range(len(ALPHABET))))\nindex_to_letter=dict(zip(range(len(ALPHABET)), ALPHABET))\n\ndef encrypt(message, key):\n encrypted=\"\"\n spazi=[]\n mes=\"\"\n enc=\"\"\n counter=0\n for n in range(len(message)):\n if message[n]== \" \":\n spazi.append(n)\n else:\n mes=mes+message[n]\n\n\n #separa il messaggio nella lunghezza della chiave\n split_message=[mes[i:i + len(key)] for i in range(0, len(mes), len(key))]\n\n #converte il messaggio in indice e aggiunge la chiave\n for each_split in split_message:\n i=0\n \n for letter in each_split:\n \n number=(letter_to_index[letter]+letter_to_index[key[i]])%len(ALPHABET)\n encrypted+=index_to_letter[number]\n i+=1\n \n for l in range(len(encrypted)+len(spazi)):\n if l in spazi:\n enc=enc+\" \"\n else:\n enc=enc+encrypted[counter]\n counter+=1\n \n return enc\n\n\ndef decrypt(cipher,key):\n decrypted=\"\"\n spazi=[]\n mes=\"\"\n dec=\"\"\n counter=0\n for n in range(len(cipher)):\n if cipher[n]== \" \":\n 
spazi.append(n)\n else:\n mes=mes+cipher[n]\n\n\n #separa il messaggio nella lunghezza della chiave\n split_cipher=[mes[i:i + len(key)] for i in range(0, len(mes), len(key))]\n\n #converte il cifrario in indece e sottrarre la chiave\n for each_split in split_cipher:\n i=0\n \n for letter in each_split:\n \n number=(letter_to_index[letter]-letter_to_index[key[i]])%len(ALPHABET)\n decrypted+=index_to_letter[number]\n \n i+=1\n\n for l in range(len(decrypted)+len(spazi)):\n if l in spazi:\n dec=dec+\" \"\n else:\n dec=dec+decrypted[counter]\n counter+=1\n \n return dec\n\n\n\ndef main():\n message=\"WORD\"\n key=\"CLGF\"\n scelta=input(\"Scrivere o per selezionare la modalità preferita: \")\n if scelta==\"criptare\":\n encrypted_message=encrypt(message, key)\n print(encrypted_message)\n elif scelta==\"decriptare\":\n decrypted_message=decrypt(message, key)\n print(decrypted_message)\n\n\n\nmain()\n\n","repo_name":"Tond28/OPS","sub_path":"crittografia/cifrario di Vigenere.py","file_name":"cifrario di Vigenere.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33606040725","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n import sys\n from calculator_1 import add, sub, mul, div\n\n count = len(sys.argv)\n if count != 4:\n print(\"Usage: {} \".format(sys.argv[0]))\n exit(1)\n\n num1 = int(sys.argv[1])\n num2 = int(sys.argv[3])\n op = sys.argv[2]\n\n def not_found():\n print(\"Unknown operator. Available operators: +, -, * and /\")\n exit(1)\n\n options = {\n \"+\": \"{} + {} = {}\".format(num1, num2, add(num1, num2)),\n \"-\": \"{} - {} = {}\".format(num1, num2, sub(num1, num2)),\n \"*\": \"{} * {} = {}\".format(num1, num2, mul(num1, num2)),\n \"/\": \"{} / {} = {}\".format(num1, num2, div(num1, num2))\n }\n\n if op in options.keys():\n print(options[op])\n else:\n not_found()\n","repo_name":"aaronahmid/alx-higher_level_programming","sub_path":"0x02-python-import_modules/100-my_calculator.py","file_name":"100-my_calculator.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37504423696","text":"def chk(s):\n if len(s) <= 1:\n return True\n l = len(s)\n left = s[:l//2]\n right = s[l//2+1:][::-1]\n for i in range(l >> 1):\n if left[i] == right[i]:\n return False\n return chk(left) and chk(right)\n\n\nfor _ in range(int(input())):\n print('YES' if chk(input()) else 'NO')\n","repo_name":"Seungwuk98/Algorithm-Solving-python3","sub_path":"1802BOJ_paper.py","file_name":"1802BOJ_paper.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29657852404","text":"import cv2\r\n\r\nvc = cv2.VideoCapture('/media/root/tmp/2018-09-20_11_50_54.mp4')\r\nc=1\r\na=0\r\nif vc.isOpened():\r\n rval,frame=vc.read()\r\nelse:\r\n rval = False\r\n\r\ntimeF = 3\r\n\r\n# munster _000032_000019_leftImg8bit.png\r\n# lindau _000000_000019_leftImg8bit.png\r\nwhile rval:\r\n rval,frame = vc.read()\r\n if(c%timeF == 0):\r\n strzyx = 'outpic/'+'munster_'+ '%06d' % a +'_000019_leftImg8bit.png'\r\n frame = cv2.resize(frame,(960,480))\r\n cv2.imwrite(str(strzyx),frame)\r\n a = a + 1\r\n c = c+1\r\n # print('1536822933796196.mp4.pic/'+'3_'+str(a) +'.png')\r\n print(a)\r\n if 174==a:\r\n exit(0)\r\n pass\r\n 
cv2.waitKey(1)\r\nvc.release()\r\n","repo_name":"zyxcambridge/data_utils_python","sub_path":"jiequ.py","file_name":"jiequ.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"39173738083","text":"import errno\nimport logging\nimport os.path\nimport sys\n\nfrom hadoop import confparse\nfrom desktop.lib.security_util import get_components\n\nif sys.version_info[0] > 2:\n open_file = open\nelse:\n open_file = file\n\nLOG = logging.getLogger()\n\n\nSITE_PATH = None\nSITE_DICT = None\n\n_CNF_HBASE_THRIFT_KERBEROS_PRINCIPAL = 'hbase.thrift.kerberos.principal'\n_CNF_HBASE_THRIFT_SPNEGO_PRINCIPAL = 'hbase.thrift.spnego.principal'\n_CNF_HBASE_AUTHENTICATION = 'hbase.security.authentication'\n_CNF_HBASE_REGIONSERVER_THRIFT_FRAMED = 'hbase.regionserver.thrift.framed'\n\n_CNF_HBASE_IMPERSONATION_ENABLED = 'hbase.thrift.support.proxyuser'\n_CNF_HBASE_USE_THRIFT_HTTP = 'hbase.regionserver.thrift.http'\n_CNF_HBASE_USE_THRIFT_SSL = 'hbase.thrift.ssl.enabled'\n\n\n\ndef reset():\n global SITE_DICT\n SITE_DICT = None\n\n\ndef get_conf():\n if SITE_DICT is None:\n _parse_site()\n return SITE_DICT\n\n\ndef get_server_principal():\n thrift_principal = get_conf().get(_CNF_HBASE_THRIFT_KERBEROS_PRINCIPAL, None)\n principal = get_conf().get(_CNF_HBASE_THRIFT_SPNEGO_PRINCIPAL, thrift_principal)\n components = get_components(principal)\n if components is not None:\n return components[0]\n\n\ndef get_server_authentication():\n return get_conf().get(_CNF_HBASE_AUTHENTICATION, 'NOSASL').upper()\n\ndef get_thrift_transport():\n use_framed = get_conf().get(_CNF_HBASE_REGIONSERVER_THRIFT_FRAMED)\n if use_framed is not None:\n if use_framed.upper() == \"TRUE\":\n return \"framed\"\n else:\n return \"buffered\"\n else:\n #Avoid circular import\n from hbase.conf import THRIFT_TRANSPORT\n return THRIFT_TRANSPORT.get()\n\ndef is_impersonation_enabled():\n #Avoid circular import\n from hbase.conf import USE_DOAS\n return get_conf().get(_CNF_HBASE_IMPERSONATION_ENABLED, 'FALSE').upper() == 'TRUE' or USE_DOAS.get()\n\ndef is_using_thrift_http():\n #Avoid circular import\n from hbase.conf import USE_DOAS\n return get_conf().get(_CNF_HBASE_USE_THRIFT_HTTP, 'FALSE').upper() == 'TRUE' or USE_DOAS.get()\n\ndef is_using_thrift_ssl():\n return get_conf().get(_CNF_HBASE_USE_THRIFT_SSL, 'FALSE').upper() == 'TRUE'\n\n\ndef _parse_site():\n global SITE_DICT\n global SITE_PATH\n\n #Avoid circular import\n from hbase.conf import HBASE_CONF_DIR\n SITE_PATH = os.path.join(HBASE_CONF_DIR.get(), 'hbase-site.xml')\n try:\n data = open_file(SITE_PATH, 'r').read()\n except IOError as err:\n if err.errno != errno.ENOENT:\n LOG.error('Cannot read from \"%s\": %s' % (SITE_PATH, err))\n return\n data = \"\"\n\n SITE_DICT = confparse.ConfParse(data)\n\n","repo_name":"cloudera/hue","sub_path":"apps/hbase/src/hbase/hbase_site.py","file_name":"hbase_site.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":988,"dataset":"github-code","pt":"91"} +{"seq_id":"16294612973","text":"# #Character Recognition using python's builtn OCR\r\n# imgn = cv2.imread('E:/CUI data/Semester 6/Digital Image Processing/Project/Final/BoardCells/cell1.jpg')\r\nimport pickle\r\nimport numpy as np\r\nfrom skimage.transform import resize\r\nfrom skimage.feature import hog\r\nimport re\r\nimport cv2\r\nimport os\r\nfrom tkinter import messagebox\r\nimport pandas as pd\r\nimport sys \r\nfrom tkinter import * \r\nfrom tkinter import 
ttk\r\nimport tkinter as tk\r\nfrom tkinter.constants import LEFT, TOP\r\n\r\nnumbers = re.compile(r'(\\d+)')\r\ndef numericalSort(value):\r\n parts = numbers.split(value)\r\n parts[1::2] = map(int, parts[1::2])\r\n return parts\r\n \r\nmyLetters = []\r\nfor filename in sorted(os.listdir('BoardCells'), key=numericalSort):\r\n img = cv2.imread(os.path.join('BoardCells/',filename))\r\n \r\n resized_img = resize(img, (16,16)) \r\n fd, hog_image = hog(resized_img, pixels_per_cell=(3,3), cells_per_block=(2, 2), visualize=True, multichannel=True)\r\n \r\n X = fd\r\n nX_test = np.delete(X, -1)\r\n X_test = nX_test.reshape(1,-1)\r\n\r\n pickled_model_svm = pickle.load(open('SVM_Model.pkl', 'rb'))\r\n predicted_svm = pickled_model_svm.predict(X_test)\r\n\r\n\r\n pickled_model_dt = pickle.load(open('DT_Model.pkl', 'rb'))\r\n predicted_dt = pickled_model_dt.predict(X_test)\r\n\r\n\r\n pickled_model_knn = pickle.load(open('KNN_Model.pkl', 'rb'))\r\n predicted_knn = pickled_model_knn.predict(X_test)\r\n\r\n if ((predicted_svm == predicted_knn) or (predicted_svm == predicted_dt)):\r\n myLetters.append(predicted_svm)\r\n elif ((predicted_dt == predicted_knn)):\r\n myLetters.append(predicted_dt)\r\n else:\r\n myLetters.append(predicted_dt)\r\n\r\nindex = 0\r\nletters = np.zeros(shape=(15, 15), dtype=myLetters[0].dtype)\r\n\r\nfor i in range(15):\r\n for j in range(15):\r\n letters[i,j] = myLetters[index]\r\n index = index + 1\r\n\r\n\r\nroot = Tk()\r\n\t\t\t\r\nroot.geometry('1000x500')\t\r\nroot.title(\"Scramble Puzzle \")\r\nLabel(root, text =\"Scramble Puzzle Solver\", font=150).pack()\r\n\r\nclass WordSearch(object):\r\n \r\n def __init__(self):\r\n self.word = StringVar()\r\n\r\n def ViewWords(self, letters):\r\n \r\n dframe = pd.DataFrame(letters)\r\n\r\n txt = Text(root) \r\n txt.pack() \r\n\r\n class PrintToTXT(object): \r\n def write(self, s): \r\n txt.insert(END, s)\r\n def flush(self):\r\n pass\r\n sys.stdout = PrintToTXT() \r\n\r\n print ('Characters found in the image are:') \r\n\r\n print (dframe)\r\n\r\n Label(root, text=\"Enter Word to Search:\").place(x=100, y=500)\r\n w = Entry(root, textvariable=self.word)\r\n w.place(x=250, y=500)\r\n\r\n \r\n submit = Button(root, text = 'Submit', command = lambda: self.FindWords(letters))\r\n submit.place(x=100,y=550) \r\n\r\n \r\n def FindWords(self, letters):\r\n \r\n try: \r\n word = self.word.get()\r\n self.find_word(letters, word)\r\n except:\r\n print('Word not found')\r\n \r\n def find_word (self, wordsearch, word='Abcd'):\r\n \"\"\"Trys to find word in wordsearch and prints result\"\"\"\r\n # Store first character positions in array\r\n print('word is: ', word)\r\n start_pos = []\r\n \r\n first_char = word[0]\r\n\r\n for i in range(0, len(wordsearch)):\r\n for j in range(0, len(wordsearch[i])):\r\n if (wordsearch[i][j] == first_char):\r\n start_pos.append([i,j])\r\n # Check all starting positions for word\r\n for p in start_pos:\r\n if self.check_start(wordsearch, word, p):\r\n # Word found\r\n return\r\n # Word not found\r\n print('Word Not Found')\r\n\r\n def check_start (self, wordsearch, word, start_pos):\r\n \"\"\"Checks if the word starts at the startPos. 
Returns True if word found\"\"\"\r\n directions = [[-1,1], [0,1], [1,1], [-1,0], [1,0], [-1,-1], [0,-1], [1,-1]]\r\n # Iterate through all directions and check each for the word\r\n for d in directions:\r\n if (self.check_dir(wordsearch, word, start_pos, d)):\r\n return True\r\n\r\n def check_dir (self, wordsearch, word, start_pos, dir):\r\n \"\"\"Checks if the word is in a direction dir from the start_pos position in the wordsearch. Returns True and prints result if word found\"\"\"\r\n found_chars = [word[0]] # Characters found in direction. Already found the first character\r\n current_pos = start_pos # Position we are looking at\r\n pos = [start_pos] # Positions we have looked at\r\n while (self.chars_match(found_chars, word)):\r\n if (len(found_chars) == len(word)):\r\n # If found all characters and all characters found are correct, then word has been found\r\n print('')\r\n print('Word Found')\r\n print('')\r\n # Draw wordsearch on command line. Display found characters and '-' everywhere else\r\n for x in range(0, len(wordsearch)):\r\n line = \"\"\r\n for y in range(0, len(wordsearch[x])):\r\n is_pos = False\r\n for z in pos:\r\n if (z[0] == x) and (z[1] == y):\r\n is_pos = True\r\n if (is_pos):\r\n line = line + \" \" + wordsearch[x][y]\r\n else:\r\n line = line + \" -\"\r\n print(line)\r\n print('')\r\n return True;\r\n # Have not found enough letters so look at the next one\r\n current_pos = [current_pos[0] + dir[0], current_pos[1] + dir[1]]\r\n pos.append(current_pos)\r\n if (self.is_valid_index(wordsearch, current_pos[0], current_pos[1])):\r\n found_chars.append(wordsearch[current_pos[0]][current_pos[1]])\r\n else:\r\n # Reached edge of wordsearch and not found word\r\n return\r\n\r\n def chars_match (self, found, word):\r\n \"\"\"Checks if the leters found are the start of the word we are looking for\"\"\"\r\n index = 0\r\n for i in found:\r\n if (i != word[index]):\r\n return False\r\n index += 1\r\n return True\r\n\r\n def is_valid_index (self, wordsearch, line_num, col_num):\r\n \"\"\"Checks if the provided line number and column number are valid\"\"\"\r\n if ((line_num >= 0) and (line_num < len(wordsearch))):\r\n if ((col_num >= 0) and (col_num < len(wordsearch[line_num]))):\r\n return True\r\n return False\r\n\r\na = WordSearch()\r\na.ViewWords(letters)\r\n\r\n\r\nroot.mainloop()","repo_name":"muhammadd7/WordSearch","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8222686775","text":"import torch\nimport csv\nimport json\nimport numpy as np\nimport pandas as pd\nfrom icd_classifier.modeling import models\nfrom icd_classifier.settings import DATA_DIR\nfrom icd_classifier.data import data_utils\nimport logging\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\n\ndef pick_model(args, dicts):\n \"\"\"\n Use args to initialize the appropriate model\n \"\"\"\n logging.info(\"Picking model: {}\".format(args.model))\n number_labels = len(dicts['ind2c'])\n if args.model == \"basic_cnn\":\n filter_size = int(args.filter_size)\n model = models.BasicCNN(\n number_labels, args.embeddings_file, filter_size, args.filter_maps,\n args.gpu,\n dicts, args.embedding_size, args.dropout)\n\n elif args.model == \"rnn\":\n model = models.RNN(\n number_labels, args.embeddings_file, dicts, args.rnn_dim,\n args.rnn_cell_type, args.rnn_layers, args.dropout, args.gpu,\n args.batch_size, args.embedding_size, args.bidirectional)\n\n elif args.model == 
\"caml\":\n filter_size = int(args.filter_size)\n model = models.CAML(\n number_labels, args.embeddings_file, filter_size, args.filter_maps,\n args.lmbda, args.gpu, dicts, embedding_size=args.embedding_size,\n dropout=args.dropout)\n else:\n # rewrite with \"try - except\" pattern\n logging.error(\"ERROR: unknown model '{}'\".format(args.model))\n\n if args.test_model:\n sd = torch.load(args.test_model)\n model.load_state_dict(sd)\n\n if args.gpu:\n model.cuda()\n\n return model\n\n\ndef make_param_dict(args):\n \"\"\"\n Make a list of parameters to save for future reference\n \"\"\"\n param_vals = [\n args.number_labels, args.filter_size, args.dropout, args.filter_maps,\n args.rnn_dim, args.rnn_cell_type, args.rnn_layers, args.lmbda,\n args.command, args.weight_decay, args.data_path,\n args.vocab, args.embeddings_file, args.lr]\n param_names = [\n \"number_labels\", \"filter_size\", \"dropout\", \"filter_maps\", \"rnn_dim\",\n \"rnn_cell_type\", \"rnn_layers\", \"lmbda\", \"command\",\n \"weight_decay\", \"data_path\", \"vocab\", \"embeddings_file\", \"lr\"]\n params = {\n name: val for name, val in zip(\n param_names, param_vals) if val is not None}\n return params\n\n\ndef build_code_vecs(code_inds, dicts):\n \"\"\"\n Get vocab-indexed arrays representing words in\n descriptions of each *unseen* label\n \"\"\"\n logging.info(\"Building code vectors\")\n code_inds = list(code_inds)\n ind2w, ind2c, dv_dict = dicts['ind2w'], dicts['ind2c'], dicts['dv']\n vecs = []\n for c in code_inds:\n code = ind2c[c]\n if code in dv_dict.keys():\n vecs.append(dv_dict[code])\n else:\n # vec is a single UNK token if not in lookup\n vecs.append([len(ind2w) + 1])\n # pad everything\n vecs = data_utils.pad_desc_vecs(vecs)\n long_tensor_code_inds = torch.cuda.LongTensor(code_inds)\n logging.info(\n \"Done building code vectors. 
Shape code_inds: {}, its tensor: {}, \"\n \"vecs: {}\".format(\n len(code_inds), long_tensor_code_inds.shape, len(vecs)))\n\n return (long_tensor_code_inds, vecs)\n\n\ndef save_metrics(metrics_hist_all, model_dir):\n metrics_file = model_dir + \"/metrics.json\"\n logging.info(\"Saving metrics to: {}\".format(metrics_file))\n with open(metrics_file, 'w') as metrics_file:\n # concatenate dev, train metrics into one dict\n data = metrics_hist_all[0].copy()\n data.update(\n {\"%s_te\" % (name):\n val for (name, val) in metrics_hist_all[1].items()})\n data.update(\n {\"%s_tr\" % (name):\n val for (name, val) in metrics_hist_all[2].items()})\n json.dump(data, metrics_file, indent=1)\n\n\ndef save_params_dict(params):\n params_file = params[\"model_dir\"] + \"/params.json\"\n logging.info(\"Saving params to: {}\".format(params_file))\n with open(params_file, 'w') as params_file:\n json.dump(params, params_file, indent=1)\n\n\ndef write_preds(yhat, model_dir, hids, fold, ind2c, yhat_raw=None):\n \"\"\"\n INPUTS:\n yhat: binary predictions matrix\n model_dir: which directory to save in\n hids: list of hadm_id's to save along with predictions\n fold: train, dev, or test\n ind2c: code lookup\n yhat_raw: predicted scores matrix (floats)\n \"\"\"\n preds_file = \"%s/preds_%s.psv\" % (model_dir, fold)\n with open(preds_file, 'w') as f:\n w = csv.writer(f, delimiter='|')\n for yhat_, hid in zip(yhat, hids):\n codes = [ind2c[ind] for ind in np.nonzero(yhat_)[0]]\n if len(codes) == 0:\n w.writerow([hid, ''])\n else:\n w.writerow([hid] + list(codes))\n if fold != 'train' and yhat_raw is not None:\n # write top 100 scores so we can re-do @k metrics later\n # top 100 only - saving the full set of scores\n # is very large (~1G for mimic-3 full test set)\n scores_file = '%s/pred_100_scores_%s.json' % (model_dir, fold)\n scores = {}\n sortd = np.argsort(yhat_raw)[:, ::-1]\n for i, (top_idxs, hid) in enumerate(zip(sortd, hids)):\n scores[int(hid)] = {\n ind2c[idx]:\n float(yhat_raw[i][idx]) for idx in top_idxs[:100]}\n with open(scores_file, 'w') as f:\n json.dump(scores, f, indent=1)\n\n logging.info(\"Saving predictions as {}\".format(preds_file))\n\n return preds_file\n\n\ndef save_everything(args, metrics_hist_all, model, model_dir,\n params, early_stopping_metric, evaluate=False):\n \"\"\"\n Save metrics, model, params all in model_dir\n \"\"\"\n save_metrics(metrics_hist_all, model_dir)\n params['model_dir'] = model_dir\n save_params_dict(params)\n\n if not evaluate:\n # save the model with the best early_stopping_metric metric\n if not np.all(np.isnan(metrics_hist_all[0][early_stopping_metric])):\n if early_stopping_metric == 'loss_dev':\n eval_val = np.nanargmin(\n metrics_hist_all[0][early_stopping_metric])\n else:\n eval_val = np.nanargmax(\n metrics_hist_all[0][early_stopping_metric])\n\n if eval_val == len(metrics_hist_all[0][early_stopping_metric]) - 1:\n # save state dict\n sd = model.cpu().state_dict()\n best_model = model_dir+\"/model_best_%s.pth\" % \\\n early_stopping_metric\n logging.info(\n \"Save best model to file: {}, evaluated with: {}\".format(\n best_model, early_stopping_metric))\n torch.save(sd, best_model)\n if args.gpu:\n model.cuda()\n logging.info(\"Saved metrics, params, model to directory: {}\".format(\n model_dir))\n\n\ndef convert_label_codes_to_idx(data, c2ind):\n \"\"\"\n for all train/test examples, convert code labels to idx labels,\n according to c2ind dict:\n [\n ('801.35', '348.4', '805.06', '807.01', '998.30', '707.24',\n 'E880.9','427.31', '414.01', '401.9', 'V58.61', 
'V43.64',\n '707.00', 'E878.1', '96.71'),\n ('852.25', 'E888.9', '403.90', '585.9', '250.00', '414.00',\n 'V45.81', '96.71')\n ]\n -->\n [\n (6106, 1910, 6204, 6241, 7906, 4683,\n 8160, 2720, 2611, 2534, 8806, 8683,\n 4663, 8140, 7575),\n (6775, 8186, 2545, 4009, 985, 2610,\n 8717, 7575)\n ]\n \"\"\"\n logging.info(\n \"Convert labels from code to idx, according to c2ind of \"\n \"length: {}\".format(len(c2ind)))\n converted_data = []\n for i, item in enumerate(data):\n item_labels = []\n for label in item:\n idx = c2ind.get(label)\n if idx is not None:\n item_labels.append(idx)\n # else:\n # logging.warning(\n # \"label not found: {} in c2ind, data item: {}\".format(\n # label, i))\n converted_data.append(tuple(item_labels))\n logging.info(\n \"Done. First two data items before conversion: {}, and after: {}\"\n \"\".format(data[0:2], converted_data[0:2]))\n logging.info(\n \"Last 5 data items before conversion: {}, and after: {}\"\n \"\".format(data[-5:], converted_data[-5:]))\n\n return converted_data\n\n\ndef get_label_tuples(df, label_name, return_idx=True, c2ind=None):\n logging.info(\n f\"Getting list of labels from df['{label_name}'], \"\n \"return_idx={return_idx}\")\n list_of_lists = []\n for joined_label in df[label_name].tolist():\n list_of_lists.append(tuple(joined_label.split(\";\")))\n if return_idx and c2ind:\n list_of_lists = convert_label_codes_to_idx(list_of_lists, c2ind)\n return list_of_lists\n\n\ndef get_code_to_desc_dict(desc_dict, c2ind):\n \"\"\"\n returns dict with items {code: description}\n where 'code' is from c2ind dict, and\n 'description' is from description dict\n 'desc_dict', where keys are 'code'\n\n {\n '017.21': 'Tuberculosis of peripheral lymph nodes, bacteriological or\n histological examination not done',\n '017.22': 'Tuberculosis of peripheral lymph nodes..',\n }\n \"\"\"\n code_to_desc_dict = {}\n backup_code = None\n for item in c2ind.items():\n code = item[0]\n value = desc_dict.get(code)\n if value is not None:\n code_to_desc_dict[code] = desc_dict.get(code)\n backup_code = code\n else:\n code_to_desc_dict[code] = desc_dict.get(backup_code)\n logging.error(\n f\"No desc for code: {code}, c2ind: {item}. 
Replace it with\"\n f\" the previous code: {backup_code}\")\n return code_to_desc_dict\n\n\ndef prepare_x_z_corpus_files(\n train_df, test_df, code_to_desc_dict, path_x_trn,\n path_x_tst, path_z, path_corpus):\n dfz = pd.DataFrame.from_dict(\n code_to_desc_dict, orient='index',\n columns=['description'], dtype='object')\n dfz.iloc[:, 0].to_csv(\n path_or_buf=path_z, index=False, header=False)\n logging.info(\n f\"Prepared label description file {path_z}, line number \"\n \"corresponds to code in c2ind\")\n train_df.iloc[:, 2].to_csv(\n path_or_buf=path_x_trn, index=False, header=False)\n logging.info(f\"Prepared training text file: {path_x_trn}\")\n test_df.iloc[:, 2].to_csv(\n path_or_buf=path_x_tst, index=False, header=False)\n logging.info(f\"Prepared testing/dev text file: {path_x_tst}\")\n\n # concate Z and X.trn into joint corpus\n # cat ./X.trn.txt Z.all.txt > pecos_50/full_corpus.txt\n corpus = pd.concat(\n [dfz.iloc[:, 0], train_df.iloc[:, 2]], axis=0, join='outer')\n corpus.to_csv(\n path_or_buf=path_corpus, index=False, header=False)\n logging.info(\n f\"Prepared a joint training text and label \\\n description corpus: {path_corpus}\")\n\n\ndef encode_y_labels(train_df, test_df, label_name, c2ind,\n ind_list, prepare_text_files, number_labels):\n # prepare list of all labels:\n # get labels for datesets into a list of tuples\n logging.info(\"get label tuples for train data\")\n labels_tr = get_label_tuples(\n train_df, label_name, return_idx=True, c2ind=c2ind)\n logging.info(\"get label tuples for test data\")\n labels_te = get_label_tuples(\n test_df, label_name, return_idx=True, c2ind=c2ind)\n\n if prepare_text_files:\n path_y_trn = DATA_DIR+'/Y.trn.'+str(number_labels)+'.txt'\n path_y_tst = DATA_DIR+'/Y.tst.'+str(number_labels)+'.txt'\n\n with open(path_y_trn, \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(labels_tr)\n with open(path_y_tst, \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(labels_te)\n\n # create multihot label encoding, CSR matrix\n logging.info(\"Preparing CSR matrices for Y train, test\")\n label_encoder_multilabel = MultiLabelBinarizer(\n classes=ind_list, sparse_output=True)\n Y_trn = label_encoder_multilabel.fit_transform(labels_tr)\n Y_tst = label_encoder_multilabel.fit_transform(labels_te)\n\n # cast as correct dtype\n Y_trn = Y_trn.astype(dtype=np.float32, copy=False)\n Y_tst = Y_tst.astype(dtype=np.float32, copy=False)\n\n # Y_trn is a csr matrix with a shape (47719, 8887) and\n # 745363 non-zero values.\n logging.info(\n f\"Y_trn is a {Y_trn.getformat()} matrix with a shape {Y_trn.shape} \"\n f\"and {Y_trn.nnz} non-zero values.\")\n logging.info(\n f\"Y_tst is a {Y_tst.getformat()} matrix with a shape {Y_tst.shape} \"\n f\"and {Y_tst.nnz} non-zero values.\")\n\n return Y_trn, Y_tst\n","repo_name":"mfilipav/icd-classifier","sub_path":"icd_classifier/modeling/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":12827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"19924829844","text":"# -*- coding: utf-8 -*-\n\"\"\"Django Basic CMS module.\"\"\"\nVERSION = (0, 3, 16)\n__version__ = '.'.join(map(str, VERSION))\n__author__ = \"ArabellaTech\"\n__contact__ = \"geeks@arabel.la\"\n__homepage__ = \"https://github.com/ArabellaTech/django-basic-cms\"\n__docformat__ = \"restructuredtext\"\n__doc__ = 'A tree based Django Basic CMS application'\n__license__ = 'BSD'\n\ndefault_app_config = 
'basic_cms.app_config.BasicCmsConfig'\n","repo_name":"ArabellaTech/django-basic-cms","sub_path":"basic_cms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"91"} +{"seq_id":"24712080110","text":"from dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Optional\n\nfrom sqlalchemy import select\nfrom sqlalchemy.engine import Result\nfrom sqlalchemy.sql.expression import Select\nfrom sqlalchemy.orm import selectinload, joinedload\n\nfrom store.game.models import GameModel\nfrom store.quiz.models import QuestionModel\n\nif TYPE_CHECKING:\n from store.database.database import Database\n from bot_long_poll.dcs import Update\n\n\n@dataclass\nclass GameAccessor:\n database: \"Database\"\n\n async def create_game(self, player=None) -> GameModel:\n if player:\n game_model = GameModel(state=\"Created\",\n players=[player],\n who_next=[player.vk_id])\n else:\n game_model = GameModel(state=\"Created\",\n players=[],\n who_next=[])\n async with self.database.session() as session:\n session.add(game_model)\n await session.commit()\n return game_model\n\n async def _get_one_or_none(self, query: Select) -> Optional[GameModel]:\n async with self.database.session() as session:\n result: Result = await session.execute(query)\n game_model = result.unique().scalar_one_or_none()\n return game_model\n\n async def get_game_by_state(self, state: str) -> Optional[GameModel]:\n query: Select = (select(GameModel)\n .options(selectinload(GameModel.players))\n .where(GameModel.state == state))\n async with self.database.session() as session:\n result: Result = await session.execute(query)\n game_model = result.scalar_one_or_none()\n return game_model\n\n async def get_game_by_id(self, id: int):\n query: Select = (select(GameModel)\n .where(GameModel.id == id))\n async with self.database.session() as session:\n result: Result = await session.execute(query)\n game_model = result.scalar_one()\n return game_model\n\n async def get_started_game_by_player(self, update: \"Update\") -> Optional[GameModel]:\n query: Select = (select(GameModel)\n .options(joinedload(GameModel.players))\n .options(joinedload(GameModel.question)\n .joinedload(QuestionModel.answer))\n .options(joinedload(GameModel.question)\n .joinedload(QuestionModel.theme))\n .where(GameModel.state == \"Started\")\n .where(GameModel.who_next.contains([update.from_id])))\n return await self._get_one_or_none(query)\n\n async def get_created_game_by_player(self, update: \"Update\") -> Optional[GameModel]:\n query: Select = (select(GameModel)\n .options(joinedload(GameModel.players))\n .where(GameModel.state == \"Created\")\n .where(GameModel.who_next.contains([update.from_id])))\n return await self._get_one_or_none(query)\n\n async def get_started_game_id_by_player(self, update: \"Update\") -> Optional[GameModel]:\n query: Select = (select(GameModel.id)\n .where(GameModel.state == \"Started\")\n .where(GameModel.who_next.contains([update.from_id])))\n return await self._get_one_or_none(query)\n\n async def get_created_and_started_game_by_player(self, update: \"Update\"):\n query: Select = (select(GameModel)\n .options(joinedload(GameModel.players))\n .options(joinedload(GameModel.question)\n .joinedload(QuestionModel.answer))\n .options(joinedload(GameModel.question)\n .joinedload(QuestionModel.theme))\n .where(GameModel.state.in_((\"Created\", \"Started\")))\n .where(GameModel.who_next.contains([update.from_id])))\n async with 
self.database.session() as session:\n result: Result = await session.execute(query)\n game_model = result.unique().scalars().all()\n return game_model\n\n async def add_player_in_game(self, game: GameModel) -> GameModel:\n return await self._update_instance(game)\n\n async def cancel_game_request(self, game: GameModel) -> GameModel:\n return await self._update_instance(game)\n\n async def _update_instance(self, game: GameModel) -> GameModel:\n async with self.database.session() as session:\n session.add(game)\n await session.commit()\n return game\n\n async def quite_game_request(self, game: GameModel) -> GameModel:\n return await self._update_instance(game)\n\n async def change_game_state_to_started(self, update: \"Update\") -> Optional[GameModel]:\n game = await self.get_created_game_by_player(update)\n if game:\n game.state = \"Started\"\n return await self._update_instance(game)\n\n async def add_question_in_game(self, game: GameModel, question: \"QuestionModel\") -> GameModel:\n word = \"*\" * len(question.answer.title)\n game.question = question\n game.question_id = question.id\n game.word = word\n return await self._update_instance(game)\n\n async def update_game_instance(self, game: \"GameModel\") -> \"GameModel\":\n return await self._update_instance(game)\n","repo_name":"milovanovmaksim/wheel_of_fortune","sub_path":"store/game/accessors.py","file_name":"accessors.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70597427504","text":"import os\n\n## read input\nshape_weights = {\n 'A': 1,\n 'B': 2,\n 'C': 3,\n}\n\noutcomes = {\n 'X': 0,\n 'Y': 3,\n 'Z': 6,\n}\n\ntransition = {\n 'Y': {\n 'A': 'A',\n 'B': 'B',\n 'C': 'C',\n },\n 'Z': {\n 'A': 'B',\n 'B': 'C',\n 'C': 'A',\n },\n 'X': {\n 'A': 'C',\n 'B': 'A',\n 'C': 'B',\n },\n}\n\nresult = 0\n\n__location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__))\n)\n\nwith open(os.path.join(__location__, 'input'), \"r\") as file:\n for line in file:\n l, r = line.rstrip(\"\\n\").split(' ')\n mine = transition[r][l]\n result = result + outcomes[r] + shape_weights[mine]\n\nprint(result)\n","repo_name":"daydiff/adventofcode","sub_path":"2022/day_2/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17253250799","text":"import requests\nimport pandas as pd\nimport re\nimport pymysql\nimport tushare as ts\nimport datetime\nimport numpy as np\n\n# 更新每日个股信息\n# 获取交易日期,并判断今日是否为交易日\n\n\ndef is_trade_day():\n a = datetime.date.today()\n d = datetime.timedelta(days=31)\n prem = a-d\n a = a.__format__('%Y%m%d')\n prem = prem.__format__('%Y%m%d')\n\n pro = ts.pro_api()\n df = pro.trade_cal(exchange='', start_date=prem, end_date=a)\n\n s = (df[df['cal_date'] == a])\n jud = (s['is_open'] == 1)\n return jud.iloc[0]\n\n\ndef get_trade_day():\n a = datetime.date.today()\n d = datetime.timedelta(days=31)\n prem = a-d\n a = a.__format__('%Y%m%d')\n prem = prem.__format__('%Y%m%d')\n pro = ts.pro_api()\n df = pro.trade_cal(exchange='', start_date=prem, end_date=a)\n\n trade_day = df[df['is_open'] == 1]\n return trade_day\n\n\n# 获取交易日当日每只股票数据\n\n\ndef get_data(csvfile):\n if not is_trade_day():\n return\n else:\n sl = pd.read_csv(csvfile, header=None, dtype=str)\n idx = sl.iloc[1:, 1]\n a = datetime.date.today()\n # d=datetime.timedelta(days=1)\n # a=a-d\n a = a.__format__('%Y-%m-%d')\n td = 
get_trade_day()\n pretd = td.iloc[-2, 1]\n pretd = datetime.datetime.strptime(pretd, \"%Y%m%d\")\n pretd = pretd.__format__('%Y-%m-%d')\n\n daily_data = pd.DataFrame(columns={})\n for code in idx:\n # print(code)\n url = 'http://quotes.money.163.com/trade/ls\\\n jysj_' + str(code) + '.html#'\n wbdata = requests.get(url).text\n sa = re.findall(a+\"(.*?)\"+pretd, wbdata)\n if sa:\n num = re.findall(\">(.*?)<\", sa[0], re.S)\n num = [x for x in num if x != '']\n daily_data.loc[:, str(code)] = num\n\n daily_data = daily_data.T\n return daily_data\n\n# 将最新数据存入数据库\n\n\ndef save_data(csvfie):\n if not is_trade_day():\n return\n else:\n data = get_data('sha_list.csv')\n print(data)\n idx = data.index\n sl = pd.read_csv('sha_list.csv', header=None, dtype=str)\n sl = sl.iloc[1:, 1:3]\n sl = sl.set_index(1)\n\n db = pymysql.connect(\n host='localhost', port=3306,\n charset='utf8',\n user='root', password='lmy6571495')\n cursor = db.cursor()\n cursor.execute(\"use stockdata\")\n\n for code in idx:\n save = data.loc[code]\n dt = datetime.date.today().__format__('%Y/%m/%d')\n co = '\\''+str(code)\n name = str(sl.loc[code, 2])\n spj = np.float64(save[3])\n zgj = np.float64(save[1])\n zdj = np.float64(save[2])\n kpj = np.float64(save[0])\n qsp = np.float64(1)\n zde = str(save[4])\n zdf = str(save[5])\n hsl = np.float64(save[9])\n cjl = save[6]\n cjl = cjl.replace(',', '')\n cjl = np.int64(cjl)\n cje = save[7]\n cje = cje.replace(',', '')\n cje = np.float64(cje)\n record = (\n dt, co, name, spj, zgj, zdj, kpj,\n qsp, zde, zdf, hsl, cjl, cje)\n\n # 插入数据语句\n try:\n print('正在存储stock_%s' % str(code))\n sqlSentence4 = \"insert into stock_%s\" % str(code) + \" (日期, 股票代码, 名称, 收盘价, 最高价, 最低价, 开盘价,\\\n 前收盘, 涨跌额, 涨跌幅, 换手率, 成交量, 成交金额) \\\n values ('%s',%s','%s',%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\" % record\n sqlSentence4 = sqlSentence4.replace('nan', 'null').replace('None', 'null').replace('none', 'null')\n cursor.execute(sqlSentence4)\n db.commit()\n except: # 如果以上插入过程出错,跳过这条数据记录,继续往下进行\n db.rollback()\n break\n cursor.close()\n db.commit()\n db.close()\n\n\nif __name__ == '__main__':\n save_data('sha_list.csv')\n save_data('sza_list.csv')\n","repo_name":"roadlmy/findata","sub_path":"src/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"17047164602","text":"import time\nimport logging\n\nfrom huey import crontab\nfrom huey.contrib.djhuey import periodic_task, task\nfrom huey.contrib.djhuey import db_periodic_task, db_task\n\nfrom managers.HaasManager import HaasManager\nfrom managers.UtilManager import *\n\nfrom managers.BasicAnalysisManager import BasicAnalysisManager\nfrom managers.HistoryRetrievalManager import HistoryRetrievalManager\n\nfrom haasomeapi.enums.EnumPriceSource import EnumPriceSource\n\ndef safeHistoryGet(pricesource: EnumPriceSource, primarycurrency: str, secondarycurrency: str, contractname: str, interval: int, depth: int):\n\n history = None\n historyResult = False\n failCount = 0\n\n while historyResult == False:\n history = HaasManager.haasomeClient.marketDataApi.get_history(pricesource, primarycurrency, secondarycurrency, contractname, interval, depth)\n if len(history.result) > 1:\n historyResult = True\n else:\n failCount = failCount + 1\n time.sleep(1)\n\n if failCount == 60:\n historyResult = True\n\n return history.result\n\n@db_task()\ndef download_history_for_all_markets_task(haasip: str, haasport: int, haassecret: str, accountguid: str, depth: int):\n \n 
logging.info(\"Started the download all market history for exchange task\")\n\n HaasManager.init_haas_manager(haasip, haasport, haassecret)\n\n historyTasks = {}\n historyResults = {}\n\n count = 0\n\n markets = HaasManager.get_all_markets_for_guid(accountguid)\n\n for market in markets:\n task = download_history_for_market_task(haasip, haasport, haassecret, market.priceSource,\n market.primaryCurrency, market.secondaryCurrency, \"\", 1, depth)\n historyTasks[count] = task\n count = count + 1\n\n lastUpdateCount = 0\n\n while len(historyResults) != len(historyTasks):\n for k, v in historyTasks.items():\n result = v.get()\n if result != None:\n if k in historyResults:\n pass\n else:\n historyResults[k] = result\n\n if len(historyResults) > lastUpdateCount:\n HistoryRetrievalManager.update_amount_retrieved(len(historyResults))\n lastUpdateCount = len(historyResults)\n\n time.sleep(1)\n\n HistoryRetrievalManager.mark_completed()\n\n logging.info(\"Completed the download all market history for exchange task\")\n\n@task()\ndef download_history_for_market_task(haasip: str, haasport: int, haassecret: str, pricesource: EnumPriceSource, primarycurrency: str, \n secondarycurrency: str, contractname: str, interval: int, depth: int):\n\n logging.info(\"Started the download history for \" + primarycurrency + \"/\" + secondarycurrency)\n\n HaasManager.init_haas_manager(haasip, haasport, haassecret)\n\n history = safeHistoryGet(pricesource, primarycurrency, secondarycurrency, contractname, interval, depth)\n\n logging.info(\"Completed the download history for \" + primarycurrency + \"/\" + secondarycurrency)\n\n return history\n\n@db_task()\ndef backtest_all_markets_with_bot(haasip: str, haasport: int, haassecret: str, accountguid: str, botguid: str, timeframeinminutes: int, contractname: str, iscustombot: bool):\n \n logging.info(\"Started the backtest of all markets\")\n\n HaasManager.init_haas_manager(haasip, haasport, haassecret)\n\n backtestTasks = {}\n backtestResults = {}\n\n count = 0\n\n markets = HaasManager.get_all_markets_for_guid(accountguid)\n\n accountInfo = HaasManager.get_account_info_for_id(accountguid)\n\n baseBotInfo = None\n\n if iscustombot:\n baseBotInfo = HaasManager.get_custom_bot_by_id(botguid)\n else:\n baseBotInfo = HaasManager.get_trade_bot_by_id(botguid)\n\n logging.info(baseBotInfo)\n logging.info(baseBotInfo.name)\n\n for market in markets:\n task = None\n\n if iscustombot:\n task = backtest_custom_bot_on_market(haasip, haasport, haassecret, accountguid, botguid, \n timeframeinminutes, market.primaryCurrency, market.secondaryCurrency, contractname)\n else: \n task = backtest_trade_bot_on_market(haasip, haasport, haassecret, accountguid, botguid, \n timeframeinminutes, market.primaryCurrency, market.secondaryCurrency, contractname)\n\n backtestTasks[count] = task\n count = count + 1\n\n lastUpdateCount = 0\n\n while len(backtestResults) != len(backtestTasks):\n for k, v in backtestTasks.items():\n result = v.get()\n if result != None:\n if k in backtestResults:\n pass\n else:\n backtestResults[k] = result\n BasicAnalysisManager.create_basic_analysis_results_model(accountguid, EnumPriceSource(accountInfo.connectedPriceSource).name, botguid, baseBotInfo.name, result.priceMarket.primaryCurrency, result.priceMarket.secondaryCurrency, \n result.roi, iscustombot)\n\n if len(backtestResults) > lastUpdateCount:\n BasicAnalysisManager.update_amount_retrieved(len(backtestResults))\n lastUpdateCount = len(backtestResults)\n\n time.sleep(1)\n\n BasicAnalysisManager.mark_completed()\n\n 
logging.info(\"Completed the backtest of all markets task\")\n\n@task()\ndef backtest_trade_bot_on_market(haasip: str, haasport: int, haassecret: str, accountguid: str, botguid: str, timeframeinminutes: int, primarycurrency: str, \n secondarycurrency: str, contractname: str):\n\n logging.info(\"Started backtest on pair \" + primarycurrency + \"/\" + secondarycurrency)\n\n HaasManager.init_haas_manager(haasip, haasport, haassecret)\n \n accountInfo = HaasManager.haasomeClient.accountDataApi.get_account_details(accountguid).result\n\n backTestResult = HaasManager.haasomeClient.customBotApi.backtest_custom_bot_on_market(accountguid, botguid, timeframeinminutes, primarycurrency, secondarycurrency, contractname)\n\n return backTestResult.result\n\n@task()\ndef backtest_custom_bot_on_market(haasip: str, haasport: int, haassecret: str, accountguid: str, botguid: str, timeframeinminutes: int, primarycurrency: str, \n secondarycurrency: str, contractname: str):\n\n logging.info(\"Started backtest on pair \" + primarycurrency + \"/\" + secondarycurrency)\n\n HaasManager.init_haas_manager(haasip, haasport, haassecret)\n \n accountInfo = HaasManager.haasomeClient.accountDataApi.get_account_details(accountguid).result\n\n # Create Template Bot\n backTestResult = HaasManager.haasomeClient.customBotApi.backtest_custom_bot_on_market(accountguid, botguid, timeframeinminutes, primarycurrency, secondarycurrency, contractname)\n\n return backTestResult.result","repo_name":"jthomaskerr/Haas-Dradis","sub_path":"analyzer/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":6647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"71973553583","text":"import os, uuid\nfrom azure.storage.queue import QueueServiceClient, QueueClient, QueueMessage\n\nconnect_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')\n\nqueue_name = \"testqueue\"\nqueue_client1 = QueueClient.from_connection_string(connect_str, \"testqueue1\")\nqueue_client2 = QueueClient.from_connection_string(connect_str, \"testqueue2\")\n# Receive messages one-by-one\nwhile(True):\n messages1 = queue_client1.receive_messages()\n messages2 = queue_client2.receive_messages()\n for msg in messages1:\n print(msg.content)\n # do the task\n queue_client1.delete_message(msg)\n for msg in messages2:\n print(msg.content)\n # do the task\n queue_client2.delete_message(msg)\n\n","repo_name":"GihanMora/Armitage_project","sub_path":"crawl_n_depth/Simplified_System/azure_test1.py","file_name":"azure_test1.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"14760123837","text":"from .prelude import *\nfrom .events import Event\nfrom .renderable import Renderable\n\nfrom typing import Tuple\n\nclass MainLoop:\n\tScreen_Size: Tuple[int, int] = get_screen_size()\n\tExiting = False\n\n\tCursor_End_Pos = (0, 0)\n\tCursor_End_Active = False\n\n\tOn_Main_Loop = Event()\n\tOn_Key_Press = Event()\n\n\t@classmethod\n\tdef set_cursor_pos(cls, x: int, y: int):\n\t\tcls.Cursor_End_Pos = (x, y)\n\n\t@classmethod\n\tdef set_cursor_active(cls, active = bool):\n\t\tcls.Cursor_End_Active = active\n\n\t@classmethod\n\tdef exit(cls):\n\t\tcls.Exiting = True\n\n\t@classmethod\n\tdef mainloop(cls) -> bool:\n\t\tif get_screen_size() != cls.Screen_Size:\n\t\t\tcls.Screen_Size = get_screen_size()\n\t\t\tclear_screen()\n\n\t\t\tRenderable.Ignore_Dirty = True\n\n\t\tkey_pressed = handle_input()\n\t\tif key_pressed != 
None:\n\t\t\tcls.On_Key_Press.invoke(key_pressed)\n\n\t\tcls.On_Main_Loop.invoke()\n\t\tEvent.handle_invoke_queue()\n\n\t\tcursor_x, cursor_y = cls.Cursor_End_Pos\n\n\t\tset_cursor_pos(cursor_x, cursor_y)\n\t\tshow_cursor() if cls.Cursor_End_Active else hide_cursor()\n\n\t\tflush_screen()\n\n\t\tRenderable.Ignore_Dirty = False\n\t\treturn (not cls.Exiting) and (len(cls.On_Main_Loop.subscribers) != 0)","repo_name":"BobbyShmurner/sorting_visualisation","sub_path":"core/mainloop.py","file_name":"mainloop.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"739889567","text":"from node import Node\nfrom mcts import MCTS\nimport argparse\nimport yaml\nimport random\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pdb\n\ndef make_binary_tree(depth=12):\n all_nodes = []\n for i in range(depth + 1):\n nodes_at_depth = []\n num_of_nodes = pow(2, i)\n for j in range(num_of_nodes):\n nodes_at_depth.append(Node(str(i) + \"_\" + str(j)))\n all_nodes.append(nodes_at_depth)\n\n leaf_nodes_dict = dict()\n for level, nodes in enumerate(all_nodes):\n for loc, n in enumerate(nodes):\n if level >= len(all_nodes) - 1:\n # Assign reward value to leaf nodes of the tree\n n.value = random.uniform(0, 100)\n leaf_nodes_dict[n] = n.value\n else:\n left = all_nodes[level + 1][2 * loc]\n right = all_nodes[level + 1][2 * loc + 1]\n n.left = left\n n.right = right\n root = all_nodes[0][0]\n return root, leaf_nodes_dict\n\nif __name__ == \"__main__\":\n with open(\"config.yml\", \"r\") as file:\n config = yaml.safe_load(file)\n\n depth = config[\"depth\"]\n num_iter = config[\"num_iter\"]\n num_rollout = config[\"num_rollout\"]\n exploration_weight = config[\"exploration_weight\"]\n\n # Build tree with random values on leaf nodes\n root, leaf_nodes_dict = make_binary_tree(depth=depth)\n\n # Calculate ground truth\n leaf_nodes_dict_sorted = sorted(leaf_nodes_dict.items(), key=lambda x: x[1], reverse=True)\n print(\"Expected (max) leaf node: {}, value: {}\".format(leaf_nodes_dict_sorted[0][0],\n leaf_nodes_dict_sorted[0][1]))\n\n # Use MCTS\n mcts = MCTS(exploration_weight=exploration_weight)\n while True:\n for _ in range(num_iter):\n mcts.run(root, num_rollout=num_rollout)\n \n root = mcts.policy(root)\n\n if root.is_terminal():\n print(\"Found optimal (max) leaf node: {}, value: {}\".format(root, root.value))\n break","repo_name":"journeyman9/planning-algorithms","sub_path":"MCTS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"69887098544","text":"T = int(input())\n\nfor case in range(1, T+1):\n goal = int(input())\n days = [int(x) for x in input().split()]\n dp=[[0,0,0,0,0,0,0]for _ in range(7)]\n for i in range(7):\n dp[i][6]=sum(days)\n for j in range(6):\n for k in range(j+1):\n dp[i][j] += days[(i+k)%7]\n ans=goal//sum(days)*7\n goal %= sum(days)\n if(goal==0):\n ans -= 7\n goal=sum(days)\n add=7\n for i in range(7):\n for j in range(7):\n if(goal==dp[i][j]):\n if(add>j):\n add=j+1\n print(f\"#{case} {ans+add}\")","repo_name":"zjvlzld/algoritm","sub_path":"2022/swea13038.py","file_name":"swea13038.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"15728306025","text":"N_1, N_2 = map(int,input().split())\n\nN = max(N_1, N_2)\nlst = []\n\nfor i in 
range(1, N+1):\n if N_1 % i == 0 and N_2 % i == 0:\n lst.append(i)\n \nleast = lst[-1]\nprint(least)\nprint(least * (N_1//least) * (N_2//least))","repo_name":"Yoonsik-Shin/TIL","sub_path":"Algorism/BAEKJOON/Phase15/2609.py","file_name":"2609.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"32815707882","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nProblem definition:\n\nhttps://www.hackerrank.com/challenges/compress-the-string/problem\n\n\"\"\"\n\nfrom itertools import groupby\nimport sys\n\n\nclass ProblemAttributes(object):\n input_string = \"\"\n results = list()\n\n\ndef command_line():\n parse_first_line()\n\n\ndef parse_first_line():\n nm = input()\n ProblemAttributes.input_string = nm\n assert 1 <= len(ProblemAttributes.input_string) <= 1e4, \\\n \"The size of the input string must be within range 1 <= size of string < 1e4\"\n\n\ndef calculate():\n for key, group in groupby(ProblemAttributes.input_string):\n try:\n ProblemAttributes.results.append((len(list(group)), int(key)))\n except ValueError:\n ProblemAttributes.results.append((len(list(group)), key))\n\n print(*ProblemAttributes.results)\n\n\ndef main():\n command_line()\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"mikeleppane/CodeDrill","sub_path":"problem_13.py","file_name":"problem_13.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"17061859991","text":"import sys\r\nsys.path.append(\"intabot\");\r\nfrom instabot import Bot\r\nfrom time import sleep\r\n\r\n\r\ndef follow(bot, followers, follow_count, amt):\r\n lower = follow_count\r\n count = 0\r\n for user in followers[lower:]:\r\n if count >= amt:\r\n return count\r\n username = bot.get_username_from_user_id(user)\r\n print(\"Following\", username)\r\n if bot.follow(username):\r\n print(\"Followed\", username)\r\n count += 1\r\n else:\r\n print(\"Not followed\", username)\r\n sleep(5)\r\n return count\r\n\r\ndef unfollow(bot, amt):\r\n following = bot.get_user_following('')\r\n i = 0\r\n for user in reversed(following[:100]):\r\n username = bot.get_username_from_user_id(user)\r\n print(\"Unfollowing\", username)\r\n if bot.unfollow(username):\r\n i += 1\r\n print(\"Unfollowed -\", username)\r\n else:\r\n print(\"Not Unfollowed\", username)\r\n if i >= amt:\r\n break\r\n sleep(5)\r\n \r\n return True;\r\n\r\n\r\ndef filter(bot, amt):\r\n f = open('followed.txt', 'r')\r\n following = f.read().splitlines()\r\n f.close()\r\n \r\n print(following)\r\n add_to_acc = []\r\n \r\n for user in reversed(following[-amt:]):\r\n username = bot.get_username_from_user_id(user)\r\n user_info = bot.get_user_info(user)\r\n bio = user_info['biography']\r\n name = user_info['full_name']\r\n followers = user_info['follow_count']\r\n following = user_info['following_count']\r\n \r\n if \"rapper\" in bio.lower() or \"rapper\" in name.lower() or \"rapper\" in username.lower():\r\n if 1000 < followers < 10000 and following / followers < 1.4:\r\n add_to_acc.append(username)\r\n sleep(5)\r\n \r\n return add_to_acc\r\n\r\nbot = Bot(filter_users=False, max_following_to_followers_ratio=100, max_follower_to_following_ratio=100, follow_delay = 10, unfollow_delay = 10, max_followers_per_day = 1000, max_unfollow_per_day = 1000);\r\n\r\nusername = \"your_username\"\r\npassword = \"your_password\"\r\n\r\n# Log in to your Instagram 
account\r\nbot.login(username=username, password=password)\r\n\r\naccounts = []\r\naccounts.append(\"keyword\")\r\n\r\namt = 150\r\n\r\nfor acc in accounts:\r\n followers = bot.get_user_followers(acc)\r\n follow_count = 1200\r\n \r\n while follow_count <= len(followers):\r\n follow_count += follow(bot, followers, follow_count, amt)\r\n sleep(3600)\r\n unfollow(bot, amt)\r\n sleep(3600)\r\n accounts.append(filter(bot, amt))\r\n","repo_name":"joshuaonuoha/AI_Chatbot-","sub_path":"automation_igbot.py","file_name":"automation_igbot.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73530034222","text":"'''\r\nProgrammer Name: Joesph D. Aguilar\r\nDescription: This program helps analyze data for runners\r\nDate: 08/09/2023\r\n'''\r\n\r\n#Project Instructions\r\n\r\n'''\r\n\r\nPlease make sure to have all the inputs be entered by the user not hardcoded. \r\n\r\nMike, Tina, Jason, Vicky, and Tammy are preparing for an upcoming marathon. \r\nEach day of the week, they run a certain number of miles and write them into a notebook. \r\nAt the end of the week, they would like to know the number of miles run each day, the total miles for the week, \r\nand average miles run each day, Write a program to help them analyze their data. Your program must contain parallel \r\nlists: a list to store the the names of the runners and a two-dimensional list of five rows and seven columns to store \r\nthe number of miles run by each runner each day. \r\n\r\nSample Output:\r\n\r\nName Day 1 Day 2 Day 3 Day 4 Day 5 Day 6 Day 7 Average\r\n\r\n=============================================\r\nMike 10.00 15.00 20.00 25.00 18.00 20.00 26.00 19.14\r\nTina 15.00 18.00 29.00 16.00 26.00 20.00 23.00 21.00\r\nJason 20.00 26.00 18.00 29.00 10.00 12.00 20.00 19.29\r\nVicky 17.00 20.00 15.00 26.00 18.00 25.00 12.00 19.00\r\nTammy 16.00 8.00 28.00 20.00 11.00 25.00 21.00 18.43\r\n\r\n'''\r\n\r\n#importing mean() to use calculate the average runtime later in program\r\nfrom statistics import mean\r\n\r\n#Defining main variables\r\nrunName = ['runner1', 'runner2', 'runner3', 'runner4', 'runner5']\r\nweekTime = ['time1', 'time2', 'time3', 'time4', 'time5', 'time6', 'time7']\r\ndaysAve = [['Day 1', 'Day 2', 'Day 3', 'Day 4', 'Day 5', 'Day 6', 'Day 7'],[]]\r\nrunLen = len(runName)\r\nweekTime = len(weekTime)\r\ndays = len(daysAve[0])\r\nuserName = None\r\nuserTime = None\r\naverage = None\r\naverageTotal = None\r\n\r\n#Runners times\r\nrunnerOne = []\r\nrunnerTwo = []\r\nrunnerThree = []\r\nrunnerFour = []\r\nrunnerFive = []\r\n\r\n#Getting runner's names\r\nfor x in range(runLen):\r\n while True:\r\n print('Please enter your name.')\r\n userName = input('Name: ')\r\n \r\n if not userName.isdigit():\r\n runName[x] = userName.capitalize()\r\n print('Thank you. Next')\r\n break\r\n else:\r\n print('Error: Please enter letters only!')\r\n \r\n#Getting runner one's time\r\nfor x in range(weekTime):\r\n while True:\r\n print('\\nRunner One please enter your times. 
Ex(00.00)')\r\n userTime = input(f'Day {x + 1}: ')\r\n userTime = float(userTime)\r\n userTime = round(userTime, 2)\r\n \r\n if (isinstance(userTime, float)):\r\n runnerOne.append(userTime)\r\n average = mean(runnerOne)\r\n averageTotal = round(average, 2)\r\n break\r\n else:\r\n print('Error: Please enter numerical digits only!')\r\n \r\ndaysAve[1].append(averageTotal) \r\n\r\n#Getting runner two's time\r\nfor x in range(weekTime):\r\n while True:\r\n print('\\nRunner Two please enter your times. Ex(00.00)')\r\n userTime = input(f'Day {x + 1}: ')\r\n userTime = float(userTime)\r\n userTime = round(userTime, 2)\r\n \r\n if (isinstance(userTime, float)):\r\n runnerTwo.append(userTime)\r\n average = mean(runnerTwo)\r\n averageTotal = round(average, 2)\r\n break\r\n else:\r\n print('Error: Please enter numerical digits only!')\r\n \r\ndaysAve[1].append(averageTotal) \r\n\r\n#Getting runner three's time\r\nfor x in range(weekTime):\r\n while True:\r\n print('\\nRunner three please enter your times. Ex(00.00)')\r\n userTime = input(f'Day {x + 1}: ')\r\n userTime = float(userTime)\r\n userTime = round(userTime, 2)\r\n \r\n if (isinstance(userTime, float)):\r\n runnerThree.append(userTime)\r\n average = mean(runnerThree)\r\n averageTotal = round(average, 2)\r\n break\r\n else:\r\n print('Error: Please enter numerical digits only!')\r\n \r\ndaysAve[1].append(averageTotal) \r\n\r\n#Getting runner fours's time\r\nfor x in range(weekTime):\r\n while True:\r\n print('\\nRunner four please enter your times. Ex(00.00)')\r\n userTime = input(f'Day {x + 1}: ')\r\n userTime = float(userTime)\r\n userTime = round(userTime, 2)\r\n \r\n if (isinstance(userTime, float)):\r\n runnerFour.append(userTime)\r\n average = mean(runnerFour)\r\n averageTotal = round(average, 2)\r\n break\r\n else:\r\n print('Error: Please enter numerical digits only!')\r\n \r\ndaysAve[1].append(averageTotal)\r\n\r\n#Getting runner five's time\r\nfor x in range(weekTime):\r\n while True:\r\n print('\\nRunner five please enter your times. 
Ex(00.00)')\r\n userTime = input(f'Day {x + 1}: ')\r\n userTime = float(userTime)\r\n userTime = round(userTime, 2)\r\n \r\n if (isinstance(userTime, float)):\r\n runnerFive.append(userTime)\r\n average = mean(runnerFive)\r\n averageTotal = round(average, 2)\r\n break\r\n else:\r\n print('Error: Please enter numerical digits only!')\r\n \r\ndaysAve[1].append(averageTotal)\r\n\r\n#Displaying everything\r\nprint('\\n Name ', *daysAve[0], 'Average')\r\nprint(\" ========================================================\")\r\nprint(f' {runName[0]} ', *runnerOne, f' {daysAve[1][0]}')\r\nprint(f' {runName[1]} ', *runnerTwo, f' {daysAve[1][1]}')\r\nprint(f'{runName[2]} ', *runnerThree, f' {daysAve[1][2]}')\r\nprint(f'{runName[3]} ', *runnerFour, f' {daysAve[1][3]}')\r\nprint(f'{runName[4]} ', *runnerFive, f' {daysAve[1][4]}')","repo_name":"JoesphDAguilar/CS119PythonProjects","sub_path":"Project3_Program2.py","file_name":"Project3_Program2.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41350149915","text":"# encoding: utf-8\n\n\"\"\"\n\n@author: linchart\n@file: config.py\n@version: 1.0\n@time : 2019/1/20\n\n\"\"\"\n\nimport torch\n\ndevice = torch.device(\"cuda: 0\" if torch.cuda.is_available() else \"cpu\")\n\nTRAIN_BATCH_SIZE = 32\nDIM = 200\nHIDDEN_SIZE = 128\nNUM_LAYER = 1\ndrop_out = 0.3\nepochs = 30\nsilent = False\nlabel_class = 2\nbidirectional = True\nLR = 0.001\n\nfinetune_epochs=500\nfinetune_batch_size = 20\n\n\ntrain_file = '../output/train_char.csv'\ntest_file = '../output/test_char1.csv'\npredict_file = '../input/other_test_data_deal.csv'\n# predict_file = '../input/finetune_test_data.csv'\nfinetune_train_file = '../input/finetune_train_data.csv'\nfinetune_valid_file = '../input/finetune_validation_data.csv'\nfinetune_test_file = '../input/finetune_test_data.csv'\n\n\n\nvector_file = '../output/chars.vector'\n\n# model_path = '../output/model_best.pth.tar'\nmodel_path = '../output/finetune_model_best.pth.tar'\n","repo_name":"linchart/NLP","sub_path":"bigru_attention/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"69867179503","text":"from functools import partial\nfrom class_fungsi import StopWordRemovalTransformer, LemmatizeTransformer, DocEmbeddingVectorizer\nfrom sklearn.model_selection import RandomizedSearchCV, cross_val_score\nimport numpy as np\nfrom urllib.request import urlopen, urlretrieve\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.naive_bayes import BernoulliNB\nimport tarfile\nimport pandas as pd\nimport os.path\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.feature_selection import SelectKBest, mutual_info_classif\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport string\n\nSYMBOLS = \" \".join(string.punctuation).split(\" \") + [\"-\", \"...\", \"”\", \"”\"]\n\ndef iText(file):\n text = file[-1].strip() \n label = file[-1].strip() \n data_frame = (text, label)\n\n return data_frame\n\n\ndef fetch_URLSpam(data_home='data'):\n URL_LINGSPAM = 'http://nlp.cs.aueb.gr/software_and_datasets/lingspam_public.tar.gz'\n if not os.path.exists(data_home + '/lingspam_public.tar.gz'):\n urlretrieve(URL_LINGSPAM, data_home + '/lingspam_public.tar.gz')\n df = pd.DataFrame(columns=['text', 'spam?'])\n with tarfile.open(mode=\"r:gz\", name=data_home+'/lingspam_public.tar.gz') as f:\n # We load only the raw 
texts. \n folder = 'lingspam_public/bare/'\n files = [name for name in f.getnames() if name.startswith(folder) and name.endswith('.txt')]\n for name in files:\n m = f.extractfile(name)\n df = df.append({'text':str(m.read(), 'utf-8'), \n 'spam?':1 if 'spmsg' in name else 0}, \n ignore_index=True)\n return df \n\n\ndef create_pipelines_URLSpam():\n stop = ('stop', StopWordRemovalTransformer())\n lemma = ('lemma', LemmatizeTransformer())\n binz = ('binarizer', CountVectorizer())\n we = ('document embedding', DocEmbeddingVectorizer())\n sel = ('fsel', SelectKBest(score_func=mutual_info_classif, k=100))\n clf = ('cls', BernoulliNB()) # Binary features in the original paper. \n return Pipeline([binz, sel, clf]), \\\n Pipeline([stop, binz, sel, clf]), \\\n Pipeline([lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, binz, sel, clf]), \\\n Pipeline([stop, lemma, we, sel, clf])\n\n\ndef fetch_spambase(data_home='data'):\n URL_SPAMBASE = 'https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/'\n\n columns = []\n with urlopen(URL_SPAMBASE + 'spambase.names') as f:\n content = f.readlines()\n for line in content:\n if str(line,'utf-8').startswith(('word_freq', 'char_freq', 'capital_run')):\n columns.append(str(line,'utf-8').split(':')[0]) \n columns.append('spam?')\n df = pd.read_csv(URL_SPAMBASE + 'spambase.data', header=None)\n df.columns = columns\n return df\n\ndef create_pipeline_spambase():\n clf = ('cls', BernoulliNB()) # Has binary and frequencies. \n return Pipeline([clf])\n\n","repo_name":"zakyyusuff/zakar","sub_path":"zakar.py","file_name":"zakar.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"73373100464","text":"import cv2 as cv\nimport numpy as np \nimport time\nimport mediapipe as mp\n\nclass handDetector():\n def __init__(self,mode=False,maxHands=2,detectionCon=0.5,tractCon=0.5):\n self.mode = mode\n self.maxHands=maxHands\n self.detectionCon=detectionCon\n self.tractCon=tractCon\n\n self.hands=mp.solutions.hands\n self.hands_mesh=self.hands.Hands(self.mode,self.maxHands,self.detectionCon,self.tractCon)\n self.mpDraw=mp.solutions.drawing_utils\n\n def findHands(self,frm,draw=True):\n rgb=cv.cvtColor(frm,cv.COLOR_BGR2RGB)\n self.op=self.hands_mesh.process(rgb)\n if self.op.multi_hand_landmarks:\n for i in self.op.multi_hand_landmarks:\n self.mpDraw.draw_landmarks(frm,i,self.hands.HAND_CONNECTIONS)\n return frm\n\n\n def findPosition(self,frm,handNo=0,draw=True):\n lmList=[]\n if self.op.multi_hand_landmarks:\n myHand=self.op.multi_hand_landmarks[handNo]\n for id, lm in enumerate(myHand.landmark):\n h,w,c=frm.shape\n cx,cy=int(lm.x*w),int(lm.y*h)\n print(id,cx,cy)\n lmList.append([id,cx,cy])\n if draw:\n cv.circle(frm,(cx,cy),15,(255,0,255),cv.FILLED)\n return lmList\n\n\ndef main():\n pTime=0\n\n cTime=0\n\n cap=cv.VideoCapture(0)\n detector=handDetector()\n while True:\n _,frm=cap.read()\n frm=detector.findHands(frm)\n lmList=detector.findPosition(frm)\n if len(lmList)!=0:\n print(lmList[4])\n cTime=time.time()\n fps=1/(cTime-pTime)\n pTime=cTime\n\n cv.putText(frm,str(int(fps)),(10,70),cv.FONT_HERSHEY_SCRIPT_SIMPLEX,3,(255,0,255),3)\n image=cv.flip(frm,1)\n cv.imshow('windows',image)\n\n if cv.waitKey(1)==27:\n cv.destroyAllWindows()\n cap.release()\n break\n\nif __name__ == '__main__':\n 
main()","repo_name":"golesuman/66daysofdata","sub_path":"Day23/handtrackingmodule.py","file_name":"handtrackingmodule.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"74067697903","text":"import sys\n\n# Intrapackage imports\nimport pyx12.errors\nimport pyx12.segment\nfrom pyx12.rawx12file import RawX12File\n\n\nclass X12Base(object):\n \"\"\"\n Base class of X12 Reader and X12 Writer\n Common X12 validation\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize the X12 file\n \"\"\"\n self.err_list = []\n self.loops = []\n self.hl_stack = []\n self.gs_count = 0\n self.st_count = 0\n self.hl_count = 0\n self.seg_count = 0\n self.cur_line = 0\n self.isa_ids = []\n self.gs_ids = []\n self.st_ids = []\n self.lx_count = 0\n self.check_837_lx = False\n self.isa_usage = None\n self.seg_term = None\n self.ele_term = None\n self.subele_term = None\n self.repetition_term = None\n\n def Close(self):\n \"\"\"\n Complete any outstanding tasks\n \"\"\"\n pass\n\n def _parse_segment(self, seg_data):\n \"\"\"\n Catch segment issues common to both readers and writers\n\n @param seg_data: Segment data instance\n @type seg_data: L{segment}\n \"\"\"\n if seg_data.is_empty():\n err_str = 'Segment \"{}\" is empty'.format(seg_data)\n self._seg_error('8', err_str, None, src_line=self.cur_line + 1)\n if not seg_data.is_seg_id_valid():\n err_str = 'Segment identifier \"{}\" is invalid'.format(seg_data.get_seg_id())\n self._seg_error('1', err_str, None, src_line=self.cur_line + 1)\n seg_id = seg_data.get_seg_id()\n if seg_id == 'ISA':\n if len(seg_data) != 16:\n err_str = 'The ISA segment must have 16 elements ({})'.format(seg_data)\n raise pyx12.errors.X12Error(err_str)\n interchange_control_number = seg_data.get_value('ISA13')\n if interchange_control_number in self.isa_ids:\n err_str = 'ISA Interchange Control Number '\n err_str += '{} not unique within file'.format(interchange_control_number)\n self._isa_error('025', err_str)\n self.loops.append(('ISA', interchange_control_number))\n self.isa_ids.append(interchange_control_number)\n self.gs_count = 0\n self.gs_ids = []\n self.isa_usage = seg_data.get_value('ISA15')\n elif seg_id == 'GS':\n group_control_number = seg_data.get_value('GS06')\n if group_control_number in self.gs_ids:\n err_str = 'GS Interchange Control Number '\n err_str += '{} not unique within file'.format(group_control_number)\n self._gs_error('6', err_str)\n self.gs_count += 1\n self.gs_ids.append(group_control_number)\n self.loops.append(('GS', group_control_number))\n self.st_count = 0\n self.st_ids = []\n elif seg_id == 'ST':\n self.hl_stack = []\n self.hl_count = 0\n transaction_control_number = seg_data.get_value('ST02')\n if transaction_control_number in self.st_ids:\n err_str = 'ST Interchange Control Number '\n err_str += '{} not unique within file'.format(transaction_control_number)\n self._st_error('23', err_str)\n self.st_count += 1\n self.st_ids.append(transaction_control_number)\n self.loops.append(('ST', transaction_control_number))\n self.seg_count = 1\n self.hl_count = 0\n #elif seg_id == 'LS':\n # self.seg_count += 1\n # self.loops.append(('LS', seg_data.get_value('LS06')))\n #elif seg_id == 'LE':\n # self.seg_count += 1\n # del self.loops[-1]\n elif seg_id == 'HL':\n self.hl_count += 1\n hl_count = seg_data.get_value('HL01')\n if self.hl_count != self._int(hl_count):\n #raise pyx12.errors.X12Error, \\\n # 'My HL count %i does not match your HL count %s' \\\n # % 
(self.hl_count, seg[1])\n err_str = 'My HL count {:d} does not match your HL count {}'.format(self.hl_count, hl_count)\n self._seg_error('HL1', err_str)\n if seg_data.get_value('HL02') != '':\n hl_parent = self._int(seg_data.get_value('HL02'))\n if hl_parent not in self.hl_stack:\n err_str = 'HL parent ({:d}) is not a valid parent'.format(hl_parent)\n self._seg_error('HL2', err_str)\n while self.hl_stack and hl_parent != self.hl_stack[-1]:\n del self.hl_stack[-1]\n else:\n if len(self.hl_stack) != 0:\n pass\n #err_str = 'HL parent is blank, but stack not empty'\n #self._seg_error('HL2', err_str)\n self.hl_stack.append(self.hl_count)\n elif self.check_837_lx and seg_id == 'CLM':\n self.lx_count = 0\n elif self.check_837_lx and seg_id == 'LX':\n self.lx_count += 1\n if seg_data.get_value('LX01') != '{:d}'.format(self.lx_count):\n err_str = 'Your 2400/LX01 Service Line Number {} does not match my count of {:d}'.format(\\\n seg_data.get_value('LX01'), self.lx_count)\n self._seg_error('LX', err_str)\n # count all regular segments\n if seg_id not in ('ISA', 'IEA', 'GS', 'GE', 'ST', 'SE'):\n self.seg_count += 1\n self.cur_line += 1\n\n def pop_errors(self):\n \"\"\"\n Pop error list\n @return: List of errors\n \"\"\"\n tmp = self.err_list\n self.err_list = []\n return tmp\n\n def _isa_error(self, err_cde, err_str):\n \"\"\"\n @param err_cde: ISA level error code\n @type err_cde: string\n @param err_str: Description of the error\n @type err_str: string\n \"\"\"\n self.err_list.append(('isa', err_cde, err_str, None, None))\n\n def _gs_error(self, err_cde, err_str):\n \"\"\"\n @param err_cde: GS level error code\n @type err_cde: string\n @param err_str: Description of the error\n @type err_str: string\n \"\"\"\n self.err_list.append(('gs', err_cde, err_str, None, None))\n\n def _st_error(self, err_cde, err_str):\n \"\"\"\n @param err_cde: Segment level error code\n @type err_cde: string\n @param err_str: Description of the error\n @type err_str: string\n \"\"\"\n self.err_list.append(('st', err_cde, err_str, None, None))\n\n def _seg_error(self, err_cde, err_str, err_value=None, src_line=None):\n \"\"\"\n @param err_cde: Segment level error code\n @type err_cde: string\n @param err_str: Description of the error\n @type err_str: string\n \"\"\"\n self.err_list.append(('seg', err_cde, err_str, err_value, src_line))\n\n def _int(self, str_val):\n \"\"\"\n Converts a string to an integer\n @type str_val: string\n @return: Int value if successful, None if not\n @rtype: int\n \"\"\"\n try:\n return int(str_val)\n except ValueError:\n return None\n return None\n\n def get_isa_id(self):\n \"\"\"\n Get the current ISA identifier\n\n @rtype: string\n \"\"\"\n for loop in self.loops:\n if loop[0] == 'ISA':\n return loop[1]\n return None\n\n def get_gs_id(self):\n \"\"\"\n Get the current GS identifier\n\n @rtype: string\n \"\"\"\n for loop in self.loops:\n if loop[0] == 'GS':\n return loop[1]\n return None\n\n def get_st_id(self):\n \"\"\"\n Get the current ST identifier\n\n @rtype: string\n \"\"\"\n for loop in self.loops:\n if loop[0] == 'ST':\n return loop[1]\n return None\n\n def get_ls_id(self):\n \"\"\"\n Get the current LS identifier\n\n @rtype: string\n \"\"\"\n for loop in self.loops:\n if loop[0] == 'LS':\n return loop[1]\n return None\n\n def get_seg_count(self):\n \"\"\"\n Get the current segment count\n\n @rtype: int\n \"\"\"\n return self.seg_count\n\n def get_cur_line(self):\n \"\"\"\n Get the current line\n\n @rtype: int\n \"\"\"\n return self.cur_line\n\n def get_term(self):\n \"\"\"\n Get the 
original terminators\n\n @rtype: tuple(string, string, string, string)\n \"\"\"\n return (self.seg_term, self.ele_term, self.subele_term, '\\n', self.repetition_term)\n\n\nclass X12Reader(X12Base):\n \"\"\"\n Read an X12 data file\n\n Errors found when reading the segment such as loop counting or ID\n errors can be retrieved using the pop_errors function\n \"\"\"\n\n def __init__(self, src_file_obj):\n \"\"\"\n Initialize the file X12 file reader\n\n @param src_file_obj: absolute path of source file or an open,\n readable file object\n @type src_file_obj: string or open file object\n \"\"\"\n self.fd_in = None\n self.need_to_close = False\n try:\n res = src_file_obj.closed\n self.fd_in = src_file_obj\n except AttributeError:\n if src_file_obj == '-':\n self.fd_in = sys.stdin\n else:\n self.fd_in = open(src_file_obj, 'U', encoding='ascii')\n self.need_to_close = True\n X12Base.__init__(self)\n try:\n self.raw = RawX12File(self.fd_in)\n except pyx12.errors.X12Error:\n raise\n (seg_term, ele_term, subele_term, eol, repetition_term) = self.raw.get_term()\n self.seg_term = seg_term\n self.ele_term = ele_term\n self.subele_term = subele_term\n self.repetition_term = repetition_term\n self.icvn = self.raw.icvn\n\n def __del__(self):\n try:\n if self.need_to_close:\n self.fd_in.close()\n except Exception:\n pass\n\n def _parse_segment(self, seg_data):\n \"\"\"\n Catch segment issues\n\n @param seg_data: Segment data instance\n @type seg_data: L{segment}\n \"\"\"\n X12Base._parse_segment(self, seg_data)\n seg_id = seg_data.get_seg_id()\n if seg_id == 'IEA':\n if self.loops[-1][0] != 'ISA':\n # Unterminated GS loop\n err_str = 'Unterminated Loop {}'.format(self.loops[-1][0])\n self._isa_error('024', err_str)\n del self.loops[-1]\n if self.loops[-1][1] != seg_data.get_value('IEA02'):\n err_str = 'IEA id={} does not match ISA id={}'.format(\\\n seg_data.get_value('IEA02'), self.loops[-1][1])\n self._isa_error('001', err_str)\n if self._int(seg_data.get_value('IEA01')) != self.gs_count:\n err_str = 'IEA count for IEA02={} is wrong'.format(\\\n seg_data.get_value('IEA02'))\n self._isa_error('021', err_str)\n del self.loops[-1]\n elif seg_id == 'GE':\n if self.loops[-1][0] != 'GS':\n err_str = 'Unterminated segment {}'.format(self.loops[-1][1])\n self._gs_error('3', err_str)\n del self.loops[-1]\n if self.loops[-1][1] != seg_data.get_value('GE02'):\n err_str = 'GE id={} does not match GS id={}'.format(\\\n seg_data.get_value('GE02'), self.loops[-1][1])\n self._gs_error('4', err_str)\n if self._int(seg_data.get_value('GE01')) != self.st_count:\n err_str = 'GE count of {} for GE02={} is wrong. I count {}'.format(\\\n seg_data.get_value('GE01'),\n seg_data.get_value('GE02'), \n self.st_count)\n self._gs_error('5', err_str)\n del self.loops[-1]\n elif seg_id == 'SE':\n se_trn_control_num = seg_data.get_value('SE02')\n if self.loops[-1][0] != 'ST' or \\\n self.loops[-1][1] != se_trn_control_num:\n err_str = 'SE id={} does not match ST id={}'.format(\\\n se_trn_control_num, self.loops[-1][1])\n self._st_error('3', err_str)\n if self._int(seg_data.get_value('SE01')) != self.seg_count + 1:\n err_str = 'SE count of {} for SE02={} is wrong. 
I count {}'.format(\\\n seg_data.get_value('SE01'),\n se_trn_control_num, \n self.seg_count + 1)\n self._st_error('4', err_str)\n del self.loops[-1]\n\n def __iter__(self):\n \"\"\"\n Iterate over input segments\n \"\"\"\n self.err_list = []\n for line in self.raw:\n # We have not yet incremented cur_line\n if line.startswith(' '):\n err_str = 'Segment contains a leading space'\n self._seg_error('1', err_str, None, src_line=self.cur_line + 1)\n line = line.lstrip()\n if line[-1] == self.ele_term:\n err_str = 'Segment contains trailing element terminators'\n self._seg_error('SEG1', err_str, None, src_line=self.cur_line + 1)\n seg_data = pyx12.segment.Segment(line, self.seg_term, self.ele_term, self.subele_term)\n self._parse_segment(seg_data)\n yield(seg_data)\n #yield(None)\n\n def cleanup(self):\n \"\"\"\n At EOF, check for missing loop trailers\n \"\"\"\n if self.loops:\n for (seg, id1) in self.loops:\n if seg == 'ST':\n err_str = 'Mandatory segment \"Transaction Set Trailer\" '\n err_str += '(SE={}) missing'.format(id1)\n self._st_error('2', err_str)\n elif seg == 'GS':\n err_str = 'Mandatory segment \"Functional Group Trailer\" '\n err_str += '(GE={}) missing'.format(id1)\n self._gs_error('3', err_str)\n elif seg == 'ISA':\n err_str = 'Mandatory segment \"Interchange Control Trailer\" '\n err_str += '(IEA={}) missing'.format(id1)\n self._isa_error('023', err_str)\n #elif self.loops[-1][0] == 'LS':\n # err_str = 'LS id=%s was not closed with a LE' % \\\n # (id1, self.loops[-1][1])\n\n# Backward compatible name\nX12file = X12Reader\n\n\nclass X12Writer(X12Base):\n \"\"\"\n X12 file and stream writer\n \"\"\"\n\n def __init__(self, src_file_obj, seg_term='~', ele_term='*', subele_term='\\\\', eol='\\n', repetition_term='^'):\n \"\"\"\n Initialize the file X12 file writer\n\n @param src_file_obj: absolute path of source file or an open,\n readable file object\n @type src_file_obj: string or open file object\n \"\"\"\n self.fd_out = None\n try:\n res = src_file_obj.write\n # isinstance(f, file)\n self.fd_out = src_file_obj\n except AttributeError:\n if src_file_obj == '-':\n self.fd_out = sys.stdout\n else:\n self.fd_out = open(src_file_obj, mode='w', encoding='ascii')\n #assert self.fd_out.encoding in ('ascii', 'US-ASCII'), 'Outfile file must have ASCII encoding, is %s' % (self.fd_out.encoding)\n X12Base.__init__(self)\n #terms = set([seg_term, ele_term, subele_term, repetition_term])\n self.seg_term = seg_term\n self.ele_term = ele_term\n self.subele_term = subele_term\n self.repetition_term = repetition_term\n self.eol = eol\n\n def Close(self):\n \"\"\"\n End any open loops. 
Should be called at the end of writing.\n \"\"\"\n self._popToLoop('ISA')\n X12Base.Close(self)\n\n def Write(self, seg_data):\n \"\"\"\n Write the segment to the stream given current separators\n\n @param seg_data: Segment data instance\n @type seg_data: L{segment}\n \"\"\"\n self._parse_segment(seg_data)\n # If we have hit a loop closing segment, generate any missing, containing, closing segments\n # then generate this segment\n seg_id = seg_data.get_seg_id()\n if seg_id == 'IEA':\n self._popToLoop('ISA')\n elif seg_id == 'GE':\n self._popToLoop('GS')\n elif seg_id == 'SE':\n self._popToLoop('ST')\n elif self.check_837_lx and seg_id == 'LX':\n # Write our own LX counter\n seg_data.set('01', '{:d}'.format(self.lx_count))\n self._write_segment(seg_data)\n elif seg_id == 'ISA':\n # Replace terminators\n self._write_isa_segment(seg_data)\n else:\n self._write_segment(seg_data)\n\n def _close_loop(self, loop_type, loop_id):\n if loop_type == 'ISA':\n self._close_iea(loop_id)\n elif loop_type == 'GS':\n self._close_ge(loop_id)\n elif loop_type == 'ST':\n self._close_se(loop_id)\n\n def _popToLoop(self, loop_type):\n \"\"\"\n Move up the loop open loops, up to and including the given loop\n\n @param loop_type: The current ending loop\n @type loop_type: string\n \"\"\"\n while len(self.loops) > 0 and self.loops[-1][0] != loop_type:\n loop = self.loops.pop()\n self._close_loop(loop[0], loop[1])\n if len(self.loops) > 0:\n loop = self.loops.pop()\n self._close_loop(loop[0], loop[1])\n\n def _close_iea(self, id):\n \"\"\"\n Close a ISA/IEA loop, reset GS counter\n\n @param id: ISA loop ID\n @type id: string\n \"\"\"\n seg_temp = self._get_trailer_segment('IEA', self.gs_count, id)\n self._write_segment(seg_temp)\n self.gs_count = 0\n\n def _close_ge(self, id):\n \"\"\"\n Close a GS/GE loop, reset ST counter\n\n @param id: GS loop ID\n @type id: string\n \"\"\"\n seg_temp = self._get_trailer_segment('GE', self.st_count, id)\n self._write_segment(seg_temp)\n self.st_count = 0\n\n def _close_se(self, id):\n \"\"\"\n Close a ST/SE loop, reset segment counter\n\n @param id: ST loop ID\n @type id: string\n \"\"\"\n seg_temp = self._get_trailer_segment('SE', self.seg_count + 1, id)\n self._write_segment(seg_temp)\n self.seg_count = 0\n\n def _write_segment(self, seg_data):\n \"\"\"\n Write the given segment, using the current delimiters and end of line\n\n @param seg_data: segment to write\n @type seg_data: L{segment}\n \"\"\"\n out = seg_data.format(self.seg_term, self.ele_term, self.subele_term) + self.eol\n # self.fd_out.write(out.decode('ascii'))\n self.fd_out.write(out)\n\n def _write_isa_segment(self, seg_data):\n \"\"\"\n Write the ISA segment, using the current delimiters and end of line\n\n ISA*03*SENDER *01* *ZZ*SENDER *ZZ*RECEIVER *040608*1333*U*00401*000000288*0*P*:~\n ISA*03*SENDER *01* *ZZ*SENDER *ZZ*RECEIVER *040611*1333*^*00501*000000125*0*P*\\\\~\n\n @param seg_data: ISA segment to write\n @type seg_data: L{segment}\n \"\"\"\n icvn = seg_data.get_value('ISA12')\n if icvn == '00501':\n seg_data.set('ISA11', self.repetition_term)\n seg_data.set('ISA16', self.subele_term)\n out = seg_data.format(\n self.seg_term, self.ele_term, self.subele_term) + self.eol\n # self.fd_out.write(out.decode('ascii'))\n self.fd_out.write(out)\n\n def _get_trailer_segment(self, seg_id, count, id):\n \"\"\"\n Create a loop trailer segment, using the matching loop start and current count\n\n @param seg_id: end loop segment id\n @type seg_id: string\n @param count: count of loop members\n @type count: non-negative 
int\n @param id: loop id, should come from loop header\n @type id: string\n \"\"\"\n ele_term = self.ele_term\n seg_str = '{seg_id}{ele_term}{count:d}{ele_term}{id}'.format(\\\n seg_id=seg_id, ele_term=ele_term, count=count, id=id)\n return pyx12.segment.Segment(seg_str, self.seg_term, self.ele_term,\n self.subele_term)\n","repo_name":"azoner/pyx12","sub_path":"pyx12/x12file.py","file_name":"x12file.py","file_ext":"py","file_size_in_byte":20435,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"91"} +{"seq_id":"25076058715","text":"\nfrom app.errors.custom.exceptions import MissingGroupError\nfrom app.settings.settings import Config\nfrom app.correct.dbwebb_manager import DbwebbManager\nfrom app.correct import grader\nfrom flask import current_app\n\ndef course(canvas, course_id, course_name, assignment_to_correct=None, workflow_state=\"submitted\"):\n current_app.logger.debug(f\"Course {course_id}, name {course_name}\")\n\n course = canvas.get_course(course_id)\n groups = course.get_groups(include=[\"users\"])\n submissions = course.get_multiple_submissions(\n assignment_ids=[] if assignment_to_correct is None else [assignment_to_correct],\n student_ids=\"all\",\n workflow_state=workflow_state,\n include=[\"assignment\", \"user\"]\n )\n current_app.logger.debug(f\"submissions {submissions}\")\n\n config = Config(course_name)\n\n users_to_skip = {}\n for sub in submissions:\n current_app.logger.info(f\"Starting correcting {sub.user['login_id']} {sub.assignment['name']}\")\n try:\n assignment = sub.assignment\n if should_grade(sub, sub.assignment, users_to_skip, config):\n # test\n CM = DbwebbManager(course_name, assignment, sub, config)\n result = CM.test()\n\n # grade\n grader.grade_submission(sub, result)\n\n CM.clean_up_students_code()\n # handle group submissions\n if assignment[\"group_category_id\"] is not None and not assignment[\"grade_group_students_individually\"]:\n add_users_to_skip_from_group(sub.user_id, groups, assignment, users_to_skip)\n else:\n current_app.logger.info(f\"Skipped correcting {sub.user['login_id']} {assignment['name']}\")\n except MissingGroupError:\n current_app.logger.error(f\"Skipped correcting {sub.user['login_id']} {assignment['name']} because no group for group_category_id {assignment['group_category_id']} and user_id {sub.user['login_id']} found\")\n CM = \"\"\n result = \"\"\n\n\n\n\ndef should_grade(sub, assignment, users_to_skip, config):\n \"\"\"\n should skip if its ignored in config or if group partner has already been graded.\n \"\"\"\n skip = (\n assignment[\"group_category_id\"] is not None\n and not assignment[\"grade_group_students_individually\"]\n and sub.user_id in users_to_skip.get(assignment[\"id\"], [])\n ) or assignment[\"name\"] in config[\"ignore_assignments\"]\n\n return not skip\n\n\n\n\ndef add_users_to_skip_from_group(user_id, groups, assignment, users_to_skip):\n group = get_group_using_category_and_user(\n groups,\n assignment[\"group_category_id\"],\n user_id\n )\n partners = get_users_group_partners(group, user_id)\n if assignment[\"id\"] in users_to_skip:\n users_to_skip[assignment[\"id\"]].extend(partners)\n else:\n users_to_skip[assignment[\"id\"]] = partners\n\n\n\ndef get_users_group_partners(group, user_id):\n \"\"\"\n Return a list with a groups users without a user\n \"\"\"\n current_app.logger.debug(f\"get group partners in group {group} for user: {user_id}\")\n\n return [user[\"id\"] for user in group.users if user[\"id\"] != user_id]\n\n\n\ndef 
get_group_using_category_and_user(groups, group_category_id, user_id):\n for group in groups:\n if group.group_category_id == group_category_id:\n if user_id in [user[\"id\"] for user in group.users]:\n return group\n raise MissingGroupError(\"No group for group_category_id {group_category_id} and user_id {user_id} found\")\n","repo_name":"dbwebb-se/umbridge","sub_path":"app/correct/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"23698375136","text":"# Solitaire Deck\n\nimport allCards, random, Board\n\n\ndef shuffle(deck):\n population = allCards.Cards.keys()\n deck = random.sample(population, 52)\n return deck\n\ndef move(amt, src, dest, hidden=False):\n if amt > 0: # Pulls cards from the top\n moving = src[0:amt]\n hiddenSwap(moving, hidden)\n del src[0:amt]\n dest.extend(moving)\n if amt < 0: # Pulls cards from the bottom\n moving = src[amt:]\n hiddenSwap(moving, hidden)\n del src[amt:]\n dest.extend(moving)\n\n\ndef move_to_ace(amt, src, dest):\n working = allCards.Cards[src[-1]]\n if working['suit'] == dest['suit']:\n if working['value'] == (allCards.Cards[dest['contents'][-1]]['value']) + 1:\n move(amt, src, dest)\n\n## use info from the link\n## https://stackoverflow.com/questions/15210148/get-parents-keys-from-nested-dictionary\n## to figure out how to identify the dict or whatever\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef hiddenSwap(moving, hidden):\n if hidden == False:\n for card in moving:\n if allCards.Cards[card]['hidden'] == 1:\n allCards.Cards[card]['hidden'] = 0\n if hidden == True:\n for card in moving:\n if allCards.Cards[card]['hidden'] == 0:\n allCards.Cards[card]['hidden'] = 1\n\n\n\ndeck = []\ndeck = shuffle(deck)\n","repo_name":"spark-c/abnd-mySolitaire","sub_path":"Deck.py","file_name":"Deck.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1210782136","text":"import random\r\nplay = True\r\nwhile play == True:\r\n proba = 1\r\n liczba = random.randint(0, 100)\r\n liczba_player = int(input(\"Twoja liczba: \"))\r\n while liczba_player != liczba:\r\n if liczba_player > liczba:\r\n print(\"liczba jest mniejsza\")\r\n elif liczba_player < liczba:\r\n print(\"liczba jest większa\")\r\n liczba_player = int(input(\"Twoja liczba: \"))\r\n proba+=1\r\n print(f\"Udało się! Zajęło ci to {proba} prób\")\r\n if input(\"Czy chcesz zagrać jeszcze raz ? wpisz Yes jęsli chcesz \") != \"Yes\":\r\n play = False\r\n","repo_name":"FISU02/Python-part-2","sub_path":"ify/1.3.py","file_name":"1.3.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"11723006232","text":"from odoo import models, fields, api\n\n\nclass ResUsers(models.Model):\n _inherit = 'res.users'\n\n warehouse_location = fields.Selection([('madrid1', 'Madrid - Avd. 
del Sol'),\n ('madrid2', 'Madrid - Casablanca'),\n ('madrid3', 'Madrid - Vicálvaro'),\n ('italia', 'Italia - Arcore'),\n ('transit', 'In transit')], \"Warehouse Location\")\n","repo_name":"Albertocanal/CMNT_004_15","sub_path":"project-addons/crm_claim_rma_custom/models/res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"7660040992","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 10 11:07:28 2020\n\n@author: Philipe_Leal\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os, sys\nimport geopandas as gpd\nimport cartopy.crs as ccrs\nimport matplotlib.ticker as mticker\npd.set_option('display.max_rows', 5000)\npd.set_option('display.max_columns', 5000)\nimport xarray as xr\n\n\nfrom IDW_over_xarray import (get_coord_limits_from_dataarray, \n create_output_grid, \n xr_to_2D_array)\n\nfrom scipy.interpolate import LSQSphereBivariateSpline\n\n\n\ndef rebuild_dataarray(arr, xcoor, ycoor, xdim, ydim):\n \n return xr.DataArray(arr.T, \n coords={ydim:ycoor,\n xdim:xcoor},\n dims=[ydim,\n xdim])\n \ndef plot_dataArray(arr, transform=ccrs.PlateCarree(), x='lon', y='lat', figsup=None):\n \n fig, ax = plt.subplots(subplot_kw={'projection':ccrs.PlateCarree()})\n \n arr.plot(ax=ax, transform=transform, \n x=x, y=y, \n add_colorbar=True)\n ax.gridlines(draw_labels=True)\n ax.coastlines()\n \n fig.suptitle(figsup)\n \n fig.show()\n\n\n\ndef fix_radian_limits(arr, is_lon=True):\n \n if is_lon:\n \n arr = np.arccos(np.cos(arr))\n else:\n arr = np.arcsin(np.sin(arr))\n \n return arr\n\ndef interpolate_using_spherical_coords(points,\n point_values, \n output_grid, \n skip_nans=True,\n ):\n \n if skip_nans:\n nanmean = np.nanmean(point_values)\n \n point_values = point_values - nanmean\n \n point_values[np.isnan(point_values)] = 0\n \n \n lons , lats = np.deg2rad(points.T)\n lons = fix_radian_limits(lons, is_lon=True)\n lats = fix_radian_limits(lats, is_lon=False)\n \n \n new_lon, new_lat = [np.deg2rad(x) for x in output_grid]\n \n new_lon = fix_radian_limits(new_lon, is_lon=True)\n new_lat = fix_radian_limits(new_lat, is_lon=False)\n \n \n print('lon min: ', lons.min(), \n 'lon max: ', lons.max(), \n 'lat min: ', lats.min(), \n 'lat max: ', lats.max(),\n 'new_lon min: ', new_lon.min(), \n 'new_lon max: ', new_lon.max(),\n 'new_lat min: ', new_lat.min(), \n 'new_lat max: ', new_lat.max()\n )\n \n \n knotst, knotsp = new_lat, new_lon\n knotst[0] += .0001\n knotst[-1] -= .0001\n knotsp[0] += .0001\n knotsp[-1] -= .0001\n \n \n \n \n lut = LSQSphereBivariateSpline(np.sort(lats), \n np.sort(lons),\n point_values, \n knotst, knotsp)\n \n \n data_interp = lut(new_lat, new_lon)\n \n if skip_nans:\n data_interp = data_interp + nanmean\n \n return data_interp\n \nif '__main__' == __name__:\n \n ds = xr.tutorial.open_dataset('rasm').load()\n Tair = ds['Tair']\n \n Tair_T1 = Tair.isel(time=0)\n #Tair_T1.coords['xc'] = (Tair_T1.coords['xc'] + 180) % 360 - 180\n #Tair_T1.coords['yc'] = (Tair_T1.coords['yc'] + 90) % 180 - 90\n \n \n \n points, point_values = xr_to_2D_array(Tair_T1)\n \n\n \n xres = 20\n yres = 20\n \n xmin, xmax, ymin, ymax = get_coord_limits_from_dataarray(Tair_T1)\n \n \n output_grid, output_shape, Xcoords, Ycoords = create_output_grid(xmin, xmax, xres, ymin, ymax, yres)\n \n \n \n R = interpolate_using_spherical_coords(points,\n point_values, \n (Xcoords, Ycoords), \n skip_nans=True,\n )\n \n \n \n dataArray_interpolated = 
rebuild_dataarray(R, \n Xcoords, \n Ycoords,\n xdim='lon', \n ydim='lat')\n \n \n \n plot_dataArray(Tair_T1, transform=ccrs.PlateCarree(), \n x='xc', y='yc', figsup='Data Original')\n \n \n \n plot_dataArray(dataArray_interpolated, transform=ccrs.PlateCarree(), \n x='lon', y='lat', figsup='Interp - Spherical nearest')\n \n\n \n \n ","repo_name":"PhilipeRLeal/xarray_case_studies","sub_path":"scripts/regridding/spherical/scipy_spherical_interpolation.py","file_name":"scipy_spherical_interpolation.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"4267781520","text":"import sys\nimport xml.etree.ElementTree as ET\nimport subprocess\nimport json\n\ndef createCurlRequest(body):\n request1 = '''curl -X POST --header \"Content-Type: application/json\" --header \"Accept: application/json\" -d \"{\n \\\\\"id\\\\\": \\\\\"string\\\\\",\n \\\\\"instanceAlignments\\\\\": [\n {\n \\\\\"alignments\\\\\": [\n {\n \\\\\"sourceTextEnd\\\\\": 0,\n \\\\\"sourceTextStart\\\\\": 0,\n \\\\\"targetTextEnd\\\\\": 0,\n \\\\\"targetTextStart\\\\\": 0\n }\n ],\n \\\\\"confidence\\\\\": 0,\n \\\\\"sourceLanguage\\\\\": \\\\\"string\\\\\",\n \\\\\"targetLanguage\\\\\": \\\\\"string\\\\\"\n }\n ],\n \\\\\"instances\\\\\": [\n {\n \\\\\"body\\\\\":'''\n request2 = '''\n \\\\\"metadata\\\\\": {\n \\\\\"date\\\\\": \\\\\"string\\\\\",\n \\\\\"language\\\\\": \\\\\"en\\\\\",\n \\\\\"originalLanguage\\\\\": \\\\\"en\\\\\",\n \\\\\"summary\\\\\": \\\\\"something\\\\\",\n \\\\\"tags\\\\\": [\n \\\\\"string\\\\\"\n ],\n \\\\\"tokenizedText\\\\\": [\n {\n \\\\\"tokens\\\\\": [\n {\n \\\\\"offset\\\\\": 0,\n \\\\\"sourceDocument\\\\\": {\n \\\\\"id\\\\\": \\\\\"string\\\\\",\n \\\\\"language\\\\\": \\\\\"string\\\\\"\n },\n \\\\\"token\\\\\": \\\\\"string\\\\\"\n }\n ]\n }\n ],\n \\\\\"topics\\\\\": [\n \\\\\"string\\\\\"\n ]\n },\n \\\\\"title\\\\\": \\\\\"string\\\\\"\n }\n ],\n \\\\\"source\\\\\": {\n \\\\\"type\\\\\": \\\\\"string\\\\\",\n \\\\\"url\\\\\": \\\\\"string\\\\\"\n }\n}\" \"http://localhost:5001/EntityTagging/api/v2.0/processDocument?applyCoreference=true&applyEntityLinking=true\"'''\n\n request = request1 + '\\\\\"' + body.replace('\"', '\\\\\"') + '\\\\\",' + request2\n return request\n\n# curlAndRecordResponse:\n# Sends the curl request containing body, and stores the result in the responseFile\ndef curlAndRecordResponse(body, responseFile):\n request = createCurlRequest(body)\n\n # requires python 3.5+\n result = subprocess.run(request, stdout=subprocess.PIPE, shell=True)\n responseFile.write(result.stdout.decode(\"utf-8\") + \"\\\\n\")\n\ndef main():\n\n # Read the file as a soup object\n fname = sys.argv[1]\n tree = ET.parse(fname)\n root = tree.getroot()\n docs = root.findall(\"DOC\")\n\n prefix = fname.rstrip(\".dat\")\n prefix = prefix.split(\"/\")[-1]\n outputFname = prefix + \"_output.dat\"\n responseFname = prefix + \"_response.dat\"\n outputFile = open(outputFname, \"w+\")\n responseFile = open(responseFname, \"w+\")\n\n docDict = dict()\n\n for doc in docs:\n no = doc.find(\"DOCNO\").text\n hl = doc.find(\"HL\").text\n hl = \" \".join(hl.split()) + \".\"\n # ln = doc.find(\"LN\").text # now dealing with 88\n text = doc.find(\"TEXT\").text\n text = \" \".join(text.split())\n\n\n # Process headline\n hls = [x.strip() for x in hl.split(\"----\")]\n newhl = \"\"\n for hl in hls:\n newhl += hl\n newhl += \" \"\n\n usefulText = newhl + text\n\n docDict[no] = usefulText\n\n 
outputFile.write(usefulText+\"\\n\")\n curlAndRecordResponse(usefulText, responseFile)\n\n outputFile.close()\n responseFile.close()\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"mikiihuang/entityLinking","sub_path":"txtParser.py","file_name":"txtParser.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"14030846000","text":"def to_number(digits):\n\tresult = 0\n\tlength = len(digits)\n\ttemp = 1\n\tfor i in digits:\n\t\twhile length > 0:\n\t\t\tf = digits[length - 1] * temp\n\t\t\tlength -= 1\n\t\t\ttemp *= 10\n\t\t\tresult += f\n\treturn result \n\nprint (to_number([1,2,3,5,6]))\nprint (to_number([9,9,9]))","repo_name":"ivelintod/Programming-101-v3","sub_path":"week1/List_of_Digits_into_a_Number.py","file_name":"List_of_Digits_into_a_Number.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37948090772","text":"\nfrom keras.models import Model\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n#import matplotlib\n#matplotlib.use('Agg')\nfrom recombinator_cnn import MSCNN\nimport tensorflow as tf\n\nclass Evaluation:\n\n def __init__(self, model, n_classes, test_imgs_file, test_masks_file):\n\n self.model = model\n self.n_classes = n_classes\n # self.test_imgs = np.load(test_imgs_file)['arr_0']\n # self.test_masks = np.load(test_masks_file)['arr_0']\n\n self.test_imgs = np.load(test_imgs_file)\n self.test_masks = np.load(test_masks_file)\n\n self.mIoU = 0\n\n self.IoU_class = [0 for c in range(n_classes)]\n self.class_count = [0 for c in range(n_classes)]\n self.Acc_class = [0 for c in range(n_classes)]\n self.Prec_class = [0 for c in range(n_classes)]\n\n self.confusion_matrix = np.zeros((n_classes, n_classes))\n\n self.categorical_loss = 0\n\n self.class_names = ['background', 'person', 'car',\n 'bird', 'cat', 'dog']\n\n #self.class_weights = np.load('class_weights.npy')\n\n def categorical_crossentropy(self, y_true, y_pred):\n \"\"\"\n computes the categorical crossentropy over the ground truth mask divided by size of mask\n :param y_true: 4D tensor of shape batch_size, w, h, n_classes\n :param y_pred:\n :return:\n \"\"\"\n epsilon = 10e-8\n y_pred = np.clip(y_pred, epsilon, 1. 
- epsilon)\n\n L_mask = np.multiply(y_true, np.log(y_pred)) # returns shape batch_size, w, h, n_classes\n L_mask = np.sum(L_mask, axis=(0, 1))\n\n L_mask = L_mask * self.class_weights[np.newaxis, :]\n\n self.categorical_loss += -np.sum(L_mask) / (256**2 * self.test_imgs.shape[0])\n\n\n def classIoU(self, y_true, y_pred):\n \"\"\"\n computes the intersection over union for a label-output pair\n :param y_true: ground truth tensor\n :param y_pred: predicted tensor\n :return:\n \"\"\"\n\n # change y_pred to a binary mask\n\n y_pred = np.around(y_pred, decimals=0).astype(np.uint8)\n class_list = []\n # find all the gt classes\n for k in range(y_true.shape[-1]):\n ind = np.where(y_true[:, :, k] > 0)\n if len(ind[0]) > 0:\n class_list.append(k)\n\n for c in class_list:\n I = np.sum(np.multiply(y_true[:, :, c], y_pred[:, :, c]))\n U = np.sum(np.bitwise_or(y_true[:, :, c], y_pred[:, :, c]))\n IoU = I / U\n self.mIoU += IoU\n self.IoU_class[c] += IoU\n self.class_count[c] += 1\n\n\n\n\n def conf_matrix(self, y_true, y_pred):\n \"\"\"\n builds a confusion matrix where entry i, j denotes how many pixels of class i were classified as class j\n :param y_true: ground truth tensor\n :param y_pred: predicted tensor\n :return:\n \"\"\"\n y_true = np.argmax(y_true, axis=-1).flatten()\n y_pred = np.argmax(y_pred, axis=-1).flatten()\n\n for k in range(y_true.shape[0]):\n self.confusion_matrix[y_true[k]][y_pred[k]] += 1\n\n def normalize_conf_matrix(self):\n \"\"\"\n scales the confusion matrix such that it is row stochastic\n :return:\n \"\"\"\n for r in range(self.n_classes):\n sum = np.sum(self.confusion_matrix[r, :])\n self.confusion_matrix[r, :] = self.confusion_matrix[r, :] / sum\n\n def normalize_IoU(self):\n \"\"\"\n compute mean IoU and mean IoU per class\n :return:\n \"\"\"\n n_inst = sum(self.class_count)\n self.mIoU /= n_inst\n for c in range(self.n_classes):\n if self.class_count[c] > 0:\n self.IoU_class[c] /= self.class_count[c]\n else:\n self.IoU_class[c] = 0\n\n\n def evaluate_segmentation(self, visualize, conf_mat_filename):\n\n # mean rgb values on Imagenet as described by VGG16 people\n mean_pixel = [103.939, 116.779, 123.68]\n\n mean_img = np.ones((256, 256, 3))\n mean_img[:, :, 0] = mean_pixel[0]\n mean_img[:, :, 1] = mean_pixel[1]\n mean_img[:, :, 2] = mean_pixel[2]\n\n for idx in range(len(self.test_imgs)):\n img = self.test_imgs[idx]\n\n # convert image to 4D tensor so the model can read it\n img = img.reshape(1, img.shape[0], img.shape[1], img.shape[2])\n\n output_tensor = self.model.predict(x=img, batch_size=1)[0, :, :, :]\n #prediction = np.argmax(output_tensor, axis=-1).astype(np.uint8)\n prediction = np.around(output_tensor, decimals=0).astype(np.uint8)\n prediction = np.argmax(prediction, axis=-1)\n label = self.test_masks[idx]\n\n if visualize:\n # change one-hot encoded label to mask\n label_img = np.argmax(label, axis = -1)\n\n img = img + mean_img\n img = img.astype(np.uint8, copy=False)\n plt.imshow(img[0])\n plt.show()\n\n sns.heatmap(label_img, cmap=sns.hls_palette(self.n_classes, l=.2, s=.8), cbar=True, vmin=0, vmax=self.n_classes-1)\n plt.show()\n\n sns.heatmap(prediction, cmap=sns.hls_palette(self.n_classes, l=.2, s=.8), cbar=True, vmin=0, vmax=self.n_classes-1)\n plt.show()\n\n self.classIoU(label, output_tensor)\n #self.categorical_crossentropy(label, output_tensor)\n self.conf_matrix(label, output_tensor)\n\n self.normalize_conf_matrix()\n self.normalize_IoU()\n\n print(\"mean IoU: \", self.mIoU)\n #print('categorical loss ', self.categorical_loss)\n\n for c in 
range(self.n_classes):\n print('IoU of class '+ self.class_names[c] + \": \", self.IoU_class[c])\n\n conf_mat = sns.heatmap(self.confusion_matrix, annot=True, xticklabels=self.class_names, yticklabels=self.class_names)\n plt.show()\n # conf_mat.savefig(conf_mat_filename)\n\n #np.save(conf_mat_filename, self.confusion_matrix)\n\nif __name__ == \"__main__\":\n\n #matplotlib.use('Agg')\n\n model = MSCNN()\n\n end2end = model.load_from_json('recombinator_model.json', 'new_weights.h5')\n eval = Evaluation(model=end2end, n_classes=model.n_classes, test_imgs_file='test_set_imgs.npy',\n test_masks_file='test_set_masks.npy')\n\n eval.evaluate_segmentation(visualize=True, conf_mat_filename='test_conf_mat')\n\n # eval = Evaluation(model=end2end, n_classes=model.n_classes, test_imgs_file='val_imgs.npz',\n # test_masks_file='val_masks_full.npz')\n # eval.evaluate_segmentation(visualize=False, conf_mat_filename='val_conf_mat')\n #\n # eval = Evaluation(model=end2end, n_classes=model.n_classes, test_imgs_file='train_imgs.npz',\n # test_masks_file='train_masks_full.npz')\n # eval.evaluate_segmentation(visualize=False, conf_mat_filename='train_conf_mat')\n","repo_name":"Emr03/HonoursThesis","sub_path":"segmentation/evaluate_model.py","file_name":"evaluate_model.py","file_ext":"py","file_size_in_byte":6967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"32816512199","text":"from simtools.ModBuilder import SingleSimulationBuilder\nfrom simtools.SetupParser import SetupParser\nfrom simtools.ExperimentManager.ExperimentManagerFactory import ExperimentManagerFactory\nfrom dtk.generic import serialization\nfrom dtk.tools.serialization import combine_inset_charts\nfrom dtk.tools.serialization import ks_channel_testing\n\nfrom os import path\nimport json\n\nclass SerializationKsTest(object):\n def __init__(self, inset_channels=[],\n property_channels=[],\n config_builder=None,\n experiment_name='Serialization test',\n experiment_tags={'role':'serialization_test'},\n timesteps_to_serialize = []):\n self.inset_channels = inset_channels\n self.property_channels = property_channels\n self.cb = config_builder\n self.exp_name = experiment_name\n self.exp_tags = experiment_tags\n\n self.exp_manager = None\n self.baseline_sim = None\n self.timesteps_to_serialize = timesteps_to_serialize\n self.s_timestep_tagname = 'Serialization_Timestep'\n\n def run_test(self):\n SetupParser.init()\n self.exp_manager = ExperimentManagerFactory.init()\n exp_manager = self.exp_manager\n\n for tag in self.exp_tags:\n exp_manager.experiment_tags[tag] = self.exp_tags[tag]\n\n cfg_builder = self.cb\n cfg_builder.set_param(\"Config_Name\", \"Full run sim\")\n fullrun_builder = SingleSimulationBuilder()\n fullrun_builder.tags['role'] = 'fullrun'\n exp_manager.run_simulations(config_builder=cfg_builder,\n exp_builder=fullrun_builder,\n exp_name=self.exp_name)\n\n old_sim_duration = cfg_builder.params[\"Simulation_Duration\"]\n for ts in self.timesteps_to_serialize:\n ts_string = str(ts)\n serialization.add_SerializationTimesteps(config_builder=cfg_builder,\n timesteps=[ts],\n end_at_final=True)\n cfg_builder.set_param(\"Config_Name\",\"Serializing sim at {0}\".format(ts_string))\n serialized_builder = SingleSimulationBuilder()\n serialized_builder.tags['role'] = 'serializer'\n serialized_builder.tags[self.s_timestep_tagname] = ts_string\n exp_manager.run_simulations(config_builder=cfg_builder,\n exp_builder=serialized_builder,\n exp_name=self.exp_name)\n\n 
exp_manager.wait_for_finished()\n\n self.baseline_sim = exp_manager.experiment.get_simulations_with_tag('role','fullrun')[0]\n cfg_builder.params.pop(\"Serialization_Time_Steps\")\n\n for ts in self.timesteps_to_serialize:\n ts_string = str(ts)\n serialized_sim = exp_manager.experiment.get_simulations_with_tag(self.s_timestep_tagname,ts_string)[0]\n serialized_output_path = path.join(serialized_sim.get_path(), 'output')\n\n # build s_pop_filename\n prefix_len = 5 - len(ts_string)\n prefix = '0' * prefix_len\n s_pop_filename = 'state-{0}{1}.dtk'.format(prefix, ts_string)\n\n cfg_builder.set_param(\"Config_Name\",\"Reloading sim at {0}\".format(ts_string))\n reloaded_builder = SingleSimulationBuilder()\n reloaded_builder.tags['role'] = 'reloader'\n reloaded_builder.tags[self.s_timestep_tagname] = ts_string\n cfg_builder.params[\"Start_Time\"] = ts\n cfg_builder.params[\"Simulation_Duration\"] = old_sim_duration - ts\n\n serialization.load_Serialized_Population(config_builder=cfg_builder,\n population_filenames=[s_pop_filename],\n population_path=serialized_output_path)\n exp_manager.run_simulations(config_builder=cfg_builder,\n exp_builder=reloaded_builder,\n exp_name=self.exp_name)\n exp_manager.wait_for_finished()\n self.create_combined_charts()\n if self.inset_channels:\n self.test_channels(self.inset_channels, original_report_name='InsetChart.json')\n\n def create_combined_charts(self):\n merged_insets = False\n merged_property_charts = False\n for ts in self.timesteps_to_serialize:\n ts_string = str(ts)\n sims_to_combine = self.exp_manager.experiment.get_simulations_with_tag(self.s_timestep_tagname,ts_string)\n s_sim = None\n r_sim = None\n print(\"sims to combine length: {0}\\n\".format(len(sims_to_combine)))\n print(\"sim[0] tags: {0}\\n\".format(sims_to_combine[0].tags))\n if 'serializer' == sims_to_combine[0].tags['role']:\n s_sim = sims_to_combine[0]\n r_sim = sims_to_combine[1]\n else:\n r_sim = sims_to_combine[0]\n s_sim = sims_to_combine[1]\n s_output_folder = path.join(s_sim.get_path(), 'output')\n r_output_folder = path.join(r_sim.get_path(), 'output')\n\n if self.inset_channels:\n merged_insets = True\n self.merge_json_charts(serialized_output_folder=s_output_folder,\n reloaded_output_folder=r_output_folder,\n original_report_name='InsetChart.json')\n if self.property_channels:\n merged_property_charts = True\n self.merge_json_charts(serialized_output_folder=s_output_folder,\n reloaded_output_folder=r_output_folder,\n original_report_name='PropertyReport.json')\n if merged_insets or merged_property_charts:\n baseline_output = path.join(self.baseline_sim.get_path(), 'output')\n if merged_insets: # Create a 'combined' report so you can look in COMPS\n self.merge_json_charts(serialized_output_folder=None,\n reloaded_output_folder=baseline_output,\n original_report_name='InsetChart.json')\n if merged_property_charts:\n self.merge_json_charts(serialized_output_folder=None,\n reloaded_output_folder=baseline_output,\n original_report_name='PropertyReport.json')\n\n def merge_json_charts(self, serialized_output_folder, reloaded_output_folder, original_report_name='InsetChart.json'):\n r_inset_chart = path.join(reloaded_output_folder, original_report_name)\n combined_chart_name = 'combined_{0}'.format(original_report_name)\n combined_chart_path = path.join(reloaded_output_folder, combined_chart_name)\n if serialized_output_folder:\n s_inset_chart = path.join(serialized_output_folder, original_report_name)\n combined_chart = combine_inset_charts.combine_channels(s_inset_chart, 
r_inset_chart)\n else:\n with open(r_inset_chart) as infile:\n combined_chart = json.load(infile)\n self.baseline_charts = {}\n self.baseline_charts[original_report_name] = r_inset_chart\n print (\"combined chart path is {0}\".format(combined_chart_path))\n with open(combined_chart_path, 'w') as outfile:\n json.dump(combined_chart, outfile, indent=4, sort_keys=True)\n\n def test_channels(self, channels_to_test, original_report_name=\"InsetChart.json\"):\n for ts in self.timesteps_to_serialize:\n ts_string = str(ts)\n ts_sims = self.exp_manager.experiment.get_simulations_with_tag(self.s_timestep_tagname, ts_string)\n ts_reloaded = None\n for sim in ts_sims:\n if 'role' in sim.tags and 'reloader' == sim.tags['role']:\n ts_reloaded = sim\n break\n if not ts_reloaded:\n raise Exception(\"Couldn't find the reloader sim for timestep {0}\".format(ts_string))\n test_output_folder = path.join(sim.get_path(), 'output')\n test_report_path = path.join(test_output_folder, original_report_name)\n KsTester = ks_channel_testing.KsChannelTester(\n ref_path=self.baseline_charts[original_report_name],\n test_path=test_report_path,\n channel_list=channels_to_test)\n new_tags = ts_reloaded.tags\n for c in channels_to_test:\n stat, p_val = KsTester.test_channel(c)\n tag_name = c.replace(' ','_')\n tag_name = tag_name.replace(':','_')\n tag_name = \"step_{0}_{1}_{2}\".format(ts_string, tag_name, 'PVal')\n new_tags[tag_name] = p_val\n ts_reloaded.set_tags(new_tags)\n ts_reloaded.description = \"\"\n ts_reloaded.save()\n # get the reloaded sim\n # # for c in channels_to_test\n\n","repo_name":"mooresea/chikv-colombia-emod","sub_path":"dtk-tools-fork/dtk-tools/test/CWiswell-test/Serialization_KS_Testing.py","file_name":"Serialization_KS_Testing.py","file_ext":"py","file_size_in_byte":9170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"9030832792","text":"import os, discord\nfrom os import system\nimport time\nimport asyncio\nimport pytz\nimport datetime \n#import urllib2\nimport urllib.request\nimport requests\nimport gzip\nfrom datetime import datetime, timedelta, timezone\nfrom datetime import time as tme\n#from keep_alive import keep_alive\nfrom discord.ext import commands, tasks\nfrom discord.utils import get\nfrom discord import Member\nfrom discord.ext.commands import has_permissions, MissingPermissions\nfrom json import loads, dumps\nfrom backup import backup\nfrom startup import startup\n\n#Lists\nimport lists\n#Auth For Leadership Commands\n#authorized = lists.authorized\nbanned = lists.banned\ndevelopers = lists.developers\n#Authorized Based On Clan\nnlcauth = lists.nlcauth\nbocauth = lists.bocauth\ndsrauth = lists.dsrauth\ntsauth = lists.tsauth\nffauth = lists.ffauth\n#Server IDs\nNLC = lists.NLC\nBOC = lists.BOC\nTestSrvr = lists.TestSrvr\nDSR = lists.DSR\nFRF = lists.FRF\n\nutc=timezone.utc\ntmes=tme(hour=0,minute=20,tzinfo=utc)\n\nclass EconCmds(commands.Cog, name=\"Dredark Economy Dump Commands\",description=\"All Commands relating to the Econ Dumps\"):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n self.exchangeRatesUpdater.start()\n def cog_unload(self):\n self.exchangeRatesUpdater.cancel()\n #pass\n\n def get_gzipped_json(url):\n return loads(gzip.decompress(requests.get(url).content))\n \n @commands.command(name='readecon',brief='Test Command for devs.',hidden=True,help=\"Testing Command For Dredark Econ Data Dumps!\")\n @commands.has_role('Developer')\n async def econdump(self,ctx,msg):\n #msgparts, datax = msg.split(\" 
\")\n await ctx.send(\"Hi\")\n #response = requests.get(\"https://pub.drednot.io/test/econ/2022_11_13/log.json.gz\")#,params={'q': 'requests+language:python'})\n #data = response.content\n #json_response = response.json()\n #repository = json_response[0]\n #await ctx.send(repository)\n #with gzip.open(data, 'rb') as f:\n #file_content = f.read(33)\n #json_resb = file_content.json\n #repository = json_resb[0 \n jsondata = lists.get_gzipped_json(\"https://pub.drednot.io/test/econ/2022_11_18/log.json.gz\")\n await ctx.send(len(jsondata))\n await ctx.send(jsondata[int(msg)])\n #for i, line in enumerate(data.split('\\n')):\n #await ctx.send(f'{i} {line}')\n\n @commands.command(name=\"econitemsnew\",help=\"This will call from the Dredark Public Economy Data Dumps. This command only handles the items_new key. \\n -Command Format: n!econdata VERSION YEAR MONTHNUMBER DAY KEYS; \\n-VERSION is test or prod (main server); \\n-The DATE formating for November 13th, 2022 is as follows: n!econdata 2022 11 13; \\n-KEY FORMATING: The key you input is for what item will be called from, this is a value from 0->30; \\n-Formatting Keys: To format search keys you must use data provided by Cogg at https://drednot.io/c/coder-docs in the Econ Dumps item;\",aliases=['ecin','ecnewitems','eitemsnew'])\n async def econitemsnew(self,ctx,version,year,monthnumber,day,keys):\n if str(ctx.message.author.id) in banned:\n await ctx.send('Your ID Is In The Banned List and you cannot use New Light. If you think this is an error please contact JaWarrior#6752.')\n return False\n elif str(ctx.message.author.id) not in banned:\n msgb = version + \" \" + year + \" \" + monthnumber + \" \" + day + \" \" + keys\n #lists.logback(ctx,msgb)\n #keyparts, datax = keys.split(\" \")\n response = requests.get(f'https://pub.drednot.io/{version}/econ/{int(year)}_{int(monthnumber)}_{int(day)}/summary.json',params={'q': 'requests+language:python'})\n json_response = response.json()\n repository = json_response[\"items_new\"][int(keys)]\n await ctx.send(repository)\n else:\n await ctx.send(\"I Had An Error Checking My Banned User List, Please Try Running The Command Again.\")\n return False\n\n @commands.command(name=\"itemschema\",help=\"This searches the item_schema.json file for the provided item_id Format: n!itemschema 1, ex: n!itemschema id 1 returns the entry for Iron because Iron's Item ID is 1.\")\n async def itemschema(self,ctx,target_item):\n if str(ctx.message.author.id) in banned:\n await ctx.send('Your ID Is In The Banned List and you cannot use New Light. 
If you think this is an error please contact JaWarrior#6752.')\n elif str(ctx.message.author.id) not in banned:\n msgb = target_item #target_key + \" \" + target_item\n #lists.logback(ctx,msgb)\n url = \"https://pub.drednot.io/test/econ/item_schema.json\"\n response = loads(requests.get(url).content)\n def find_route(data, route_no):\n return list(filter(lambda x: x.get('id') == route_no, data))\n #if target_key == \"name\":\n #item = str(target_item)\n #elif target_key == \"id\":\n #item = int(target_item)\n #else:\n #await ctx.send(\"Target_key error\")\n route = find_route(response,int(target_item))\n #embed=discord.Embed(title=f'{}', description=f'Clan Full Name: {route[0][\"name\"]}\\nClan Abbreviation: {route[0][\"abrv\"]}\\nClan Emoji: {route[0][\"emoji\"]}\\nClan Relation: {route[0][\"relation\"]}', color=0xFF5733)\n #await ctx.send(embed=embed)\n await ctx.send(route)\n else:\n await ctx.send(\"I Had An Error Checking My Banned User List, Please Try Running The Command Again.\")\n return False\n\n @commands.command(name=\"econdata\",help=\"Calls data from one of the following keys: count_ships/count_logs/items_held/items_moved\")\n async def econdata(self,ctx,version,year,monthnumber,day,key):\n if str(ctx.message.author.id) in banned:\n await ctx.send('Your ID Is In The Banned List and you cannot use New Light. If you think this is an error please contact JaWarrior#6752.')\n elif str(ctx.message.author.id) not in banned:\n msgb = version + \" \" + year + \" \" + monthnumber + \" \" + day + \" \" + key\n #lists.logback(ctx,msgb)\n response = requests.get(f'https://pub.drednot.io/{version}/econ/{int(year)}_{int(monthnumber)}_{int(day)}/summary.json',params={'q': 'requests+language:python'})\n json_response = response.json()\n repository = json_response[key]\n await ctx.send(repository)\n else:\n await ctx.send(\"I Had An Error Checking My Banned User List, Please Try Running The Command Again.\")\n return False\n\n @commands.command(name=\"readlogs\",help=\"Reads the log file from the Dred Public Econ Dumps. \\nFormatting:\\n -Version is test or prod(main server)\\n -Year,MonthNumber,Day follow date formatting (2022_11(month,november)_13(day)\\n -Key is a number 1-50000 to start (acutal max will be provided once yiu run the command so you can run again with real data.\")\n async def readligs(self,ctx,version,year,monthnumber,day,key):\n if str(ctx.message.author.id) in banned:\n await ctx.send('Your ID Is In The Banned List and you cannot use New Light. If you think this is an error please contact JaWarrior#6752.')\n elif str(ctx.message.author.id) not in banned:\n msgb = version + \" \" + year + \" \" + monthnumber + \" \" + day + \" \" + key\n #lists.logback(ctx,msgb)\n jsondata = lists.get_gzipped_json(f'https://pub.drednot.io/{version}/econ/{year}_{monthnumber}_{day}/log.json.gz')\n await ctx.send(len(jsondata))\n await ctx.send(jsondata[int(key)])\n else:\n await ctx.send(\"Error\")\n\n @commands.command(name=\"readships\",help=\"Reads the ships file from the Dred Public Econ Dumps. \\nFormatting:\\n -Version is test or prod(main server)\\n -Year,MonthNumber,Day follow date formatting (2022 11(month,november) 13(day)\\n -Key is the list item index, so Key=10 responds with item 10 in the list, the actual length will be send also.\")\n async def readships(self,ctx,version,year,monthnumber,day,key):\n if str(ctx.message.author.id) in banned:\n await ctx.send('Your ID Is In The Banned List and you cannot use New Light. 
If you think this is an error please contact JaWarrior#6752.')\n elif str(ctx.message.author.id) not in banned:\n msgb = version + \" \" + year + \" \" + monthnumber + \" \" + day + \" \" + key\n #lists.logback(ctx,msgb)\n jsondata = lists.get_gzipped_json(f'https://pub.drednot.io/{version}/econ/{year}_{monthnumber}_{day}/ships.json.gz')\n await ctx.send(len(jsondata))\n await ctx.send(jsondata[int(key)])\n else:\n await ctx.send(\"Error\")\n\n @commands.command(name=\"searchhexcode\",hidden=False,aliases=['shex','hex','hexcode'],help=\"This searches either the ships or log file for all entries containing the provided HEX CODE!\\n -Command Formart: n!searchhexcode .\\n -The HEX CODE is a ships hex code in Dredark.\\n -The EXTRA_KEY is for calling data from log.json, enter src or dst depending on if you want the source or destination, EXTRA_KEY is only if you pick the log file.\\n -LOG_COUNT is for log.json only, it is how many items you want, as the logs could be upwards of 200+, in TEST SERVER the log can be 1,000+ items.\")\n async def writeconfile(self,ctx,version,year,month,day,file,hex_code,extra_key=\"hex_code\",log_count=\"none\"):\n if str(ctx.message.author.id) in banned:\n await ctx.send('Your ID Is In The Banned List and you cannot use New Light. If you think this is an error please contact JaWarrior#6752.')\n elif str(ctx.message.author.id) not in banned:\n msgb = version + \" \" + year + \" \" + month + \" \" + day + \" \" + file + \" \" + hex_code +\" \"+ extra_key+\" \"+log_count\n #lists.logback(ctx,msgb)\n jsondata = lists.get_gzipped_json(f'https://pub.drednot.io/{version}/econ/{year}_{month}_{day}/{file}.json.gz')\n if file == \"log\":\n hexcode=\"{\"+hex_code+\"}\"\n else:\n hexcode=hex_code\n def find_route(data, route_no):\n return list(filter(lambda x: x.get(extra_key) == route_no, data))\n route = find_route(jsondata,hexcode)\n #print(route)\n if len(route) <= 10:\n await ctx.send(route)\n else:\n await ctx.send(f'The length of the list I want to give you is too long to send in discord as one message. There are {len(route)} items and I can only send lists of less than or equal to 10 items (Discord Character Limit). I will now send the list in chunks of ~10ish. 
Amount of items sent is based on the LOG_COUNT key.')\n i=0\n if log_count == \"none\":\n log_count=100\n else:\n pass\n b=int(log_count)\n for x in route:\n routeb=route[i:i+9]\n await ctx.send(\"----------\")\n await ctx.send(routeb)\n i=i+10\n if int(i) >= int(b):\n break\n elif int(i) == int(b):\n break\n elif int(i) <= int(b):\n continue\n else:\n break\n else:\n await ctx.send(\"Error\")\n \n @tasks.loop(time=tmes)\n #@commands.command(name=\"ert\")\n async def exchangeRatesUpdater(self,ctx):\n print(\"Updating Exchange Rates\")\n myguild = self.bot.get_guild(1031900634741473280)\n mychannel = await myguild.fetch_channel(1150474219021410357)\n threads=mychannel.threads\n year=datetime.today().year\n month=datetime.today().month\n day=datetime.today().day\n alldat = requests.get(f'https://pub.drednot.io/prod/econ/{int(year)}_{int(month)}_{(int(day)-1)}/summary.json').json()\n #alldat = requests.get(f'https://pub.drednot.io/prod/econ/2023_9_1/summary.json').json()\n data=alldat[\"items_held\"]\n datab=alldat[\"items_moved\"]\n keys=list(data.keys())\n flux=float(data[\"5\"])\n tracked=[1,2,3,4,5,51,53,55,56,102,104,108,109,110,111,112,113,114,115,116,120,122,162,164,226,228,229,242,243,246,252,253,256,257,258,305,306,307]\n for x in keys:\n if int(x) in tracked:\n if x == \"5\":\n continue\n else:\n item=float(data[x])\n #ib=float(datab[x])\n rate=(flux/item)*0.5\n divrate=(float(rate)*float(4))*0.5\n ratefinal=\"%.2f\" % round(rate, 2)\n divfinal=\"%.5f\" % round(divrate, 5)\n #rb=(flux/ib)*0.5\n #db=(float(rate)*float(16))*0.5\n #rbf=\"%.2f\" % round(rb, 2)\n #dbf=\"%.2f\" % round(db, 2)\n #avgrt=(float(ratefinal)+float(rbf))/float(2)\n #fxrt=\"%.2f\" % round(avgrt, 2)\n #avgdv=(float(divfinal)+float(dbf))/float(2)\n #fxdv=\"%.2f\" % round(avgdv, 2)\n itemname=lists.itemNameById(int(x))\n def find_route(lst, route_no):\n found=[]\n for z in lst:\n if z.name==route_no:\n found.append(z)\n return found\n thd=find_route(threads,itemname)\n if len(thd)==0:\n await mychannel.create_thread(name=itemname,content=f'Exchange Rate For {itemname}')\n await asyncio.sleep(0.1)\n upmc=await myguild.fetch_channel(1150474219021410357)\n newthread=upmc.get_thread(upmc.last_message_id)\n await newthread.send(content=f\"Rate : `{ratefinal}`;\\nDivRate : `{divfinal}`;\\nName: `{itemname}`;\\nId : `{int(x)}`;\\nDate : `{datetime.today()}`\")\n else:\n thrd=mychannel.get_thread(thd[0].id)\n cntnt=thrd.last_message.content\n cntnt=cntnt.replace(\"`\",\"\").replace(\"\\n\",\"\")\n ctnt=cntnt.split(\";\")\n oldrate=(ctnt[0].split(\":\"))\n olddiv=(ctnt[1].split(\":\"))\n ratechange=float(oldrate[1])-float(ratefinal)\n divchange=float(olddiv[1])-float(divfinal)\n await thrd.purge(limit=100)\n #print(len(f\"Rate : `{ratefinal}`;\\nDiv Rate : `{divfinal}`;\\nName: `{itemname}`;\\nId : `{int(x)}`;\\nDate : `{datetime.today()}`\\n\\nChange:\\n-Rate Change: `{ratechange}`\\n-DivRate Change: `{divchange}`\\nYesterday's Rates:\\n-Yesterday's Rate: `{float(oldrate[1])}`\\n-Yesterday's DivRate: `{float(olddiv[1])}`\"))\n try:\n await thrd.send(f\"Rate : `{ratefinal}`;\\nDiv Rate : `{divfinal}`;\\nName: `{itemname}`;\\nId : `{int(x)}`;\\nDate : `{datetime.today()}`\\n\\nChange:\\n-Rate Change: `{ratechange}`\\n-DivRate Change: `{divchange}`\\n\\nYesterday's Rates:\\n-Yesterday's Rate: `{float(oldrate[1])}`\\n-Yesterday's DivRate: `{float(olddiv[1])}`\")\n except:\n print(\"Error\")\n else:\n continue\n print(\"Exchange Rates Updated\")\n\nasync def setup(bot: commands.Bot):\n await 
bot.add_cog(EconCmds(bot))","repo_name":"JaWarrior12/New-Light","sub_path":"cogs/econcmds.py","file_name":"econcmds.py","file_ext":"py","file_size_in_byte":14257,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"37127761822","text":"\"\"\"\nCurve fitting routines\n\ncurve + bounds\noptimization algorithm\ncost function\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Callable\n\nimport numpy as np\nimport pandas as pd\n\n\ndef fit_parametric_power_curve(\n x: np.ndarray | pd.Series,\n y: np.ndarray | pd.Series,\n curve: Callable,\n optimization_algorithm: Callable,\n cost_function: Callable,\n bounds: tuple[\n tuple[float, float],\n tuple[float, float],\n tuple[float, float],\n tuple[float, float],\n tuple[float, float],\n ],\n return_params: bool = False,\n):\n \"\"\"\n Fit curve to filtered power-windspeed data.\n\n Args:\n x(:obj:`numpy.ndarray` | `pandas.Series`): independent variable\n y(:obj:`numpy.ndarray` | `pandas.Series`): dependent variable\n curve(:obj:`Callable`): function/lambda name for power curve desired default is curves.logistic5param\n optimization_algorithm(Function): scipy.optimize style optimization algorithm\n cost_function(:obj:`Callable`): Python function that takes two np.array 1D of real numbers and returns a real numeric\n cost.\n bounds(:obj:`tuple[tuple[float, float], tuple[float, float], tuple[float, float], tuple[float, float], tuple[float, float]]`):\n bounds on parameters for power curve, default is for logistic5param, with power in kw and windspeed in m/s\n return_params(:obj:`bool`): If True, return a tuple of (Callable, scipy.optimize.fit), and if\n False return only the Callable.\n\n Returns:\n Callable(np.array -> np.array): function handle to optimized power curve\n \"\"\"\n\n # Build opt function as a closure on \"x\" and \"y\"\n def f(opt_params):\n return cost_function(curve(x, *opt_params), y)\n\n # Run the optimization algorithm\n fit = optimization_algorithm(f, bounds)\n\n # Create closure of curve function with fit params\n def fit_curve(x_2):\n return curve(x_2, *fit.x)\n\n # Return values based on flag\n if return_params:\n return lambda x_2: fit_curve, fit\n else:\n return fit_curve\n\n\n\"\"\"\nCost Functions\n\"\"\"\n\n\ndef least_squares(x: np.ndarray | pd.Series, y: np.ndarray | pd.Series):\n \"\"\"Least Squares loss function\n\n Args:\n x(:obj:`np.ndarray` | `pandas.Series`): 1-D array of numbers representing x\n y(:obj:`np.ndarray` | `pandas.Series`): 1-D array of numbers representing y\n\n Returns:\n The least square of x and y.\n \"\"\"\n return np.sum((x - y) ** 2)\n","repo_name":"NREL/OpenOA","sub_path":"openoa/utils/power_curve/parametric_optimize.py","file_name":"parametric_optimize.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"91"} +{"seq_id":"37879041082","text":"import requests\nimport urllib.parse\nimport os\nimport time\n\n'''\n整体思路是定义一个类,输入下载的个数和搜索内容\n'''\n\n\nclass Music:\n def __init__(self, music_list, search_url, download_url, headers1, headers2, path):\n self.url = search_url\n self.download_url = download_url\n self.search_headers = headers1\n self.download_headers = headers2\n self.music_info = []\n self.music = {}\n self.path = path\n self.c = []\n self.music_list = music_list\n\n def parse_list(self, id):\n playlist_id = id\n addr = 
'https://c.y.qq.com/qzone/fcg-bin/fcg_ucc_getcdinfo_byids_cp.fcg?type=1&json=1&utf8=1&onlysong=0&new_format=1&disstid=7766417428&g_tk_new_20200303=5381&g_tk=5381&loginUin=0&hostUin=0&format=json&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq.json&needNewCode=0'\n header = {\n 'origin': 'https://y.qq.com',\n 'referer': 'https://y.qq.com/',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'\n\n }\n response = requests.get(url=addr, headers=header)\n info = eval(response.text)\n print(1)\n name = []\n singer = []\n for i in range(len(info['cdlist'][0]['songlist'])):\n name.append(info['cdlist'][0]['songlist'][i]['name'])\n singer.append((info['cdlist'][0]['songlist'][i]['singer'][0]['name']))\n\n self.c = list(zip(name, singer))\n # print(self.c)\n # print(len(self.c))\n\n def search(self, name):\n # print(name)\n # print(urllib.parse.quote(name))\n # 这里是字符串格式化的,第一次已经填充过了,所以第二次就不会填充了\n self.url[1] = [urllib.parse.quote(name)]\n # print(type(self.url[1]))\n s_url = self.url[0][0] + self.url[1][0] + self.url[2][0]\n # print(self.url)\n response = requests.get(url=s_url, headers=self.search_headers)\n rec = response.json()\n # print(rec)\n '''\n 得到歌曲信息 name id\n '''\n self.music['name'] = rec['data']['list'][0]['name']\n self.music['id'] = rec['data']['list'][0]['rid']\n self.music['singer'] = rec['data']['list'][0]['artist']\n self.music_info.append(self.music)\n print(self.music)\n\n def download(self):\n self.parse_list(self.music_list)\n for i in range(len(self.c)):\n self.search(self.c[i][0] + ' ' + self.c[i][1])\n file_name = str(self.music_info[0]['singer']) + ' - ' + str(\n self.music_info[0]['name'])\n # download(music_info[int(index[i]) - 1]['id'], file_name)\n info_url = self.download_url.format(format(self.music_info[0]['id']))\n get_download_url = requests.get(url=info_url, headers=self.download_headers)\n\n download_url = get_download_url.json()['url']\n if download_url == '':\n print('没有资源')\n else:\n time.sleep(1)\n download_music = requests.get(url=download_url)\n self.mkdir()\n with open(r'D:\\MUSIC\\kuwo\\{0}.mp3'.format(file_name), 'wb') as f:\n f.write(download_music.content)\n print('{} 下载完成!'.format(file_name))\n\n def mkdir(self):\n folder = os.path.exists(self.path)\n if not folder: # 判断是否存在文件夹如果不存在则创建为文件夹\n os.makedirs(self.path) # makedirs 创建文件时如果路径不存在会创建这个路径\n\n\nif __name__ == '__main__':\n # search_name, number, search_url, download_url, headers1, headers2\n playlist_id = '7766417428'\n search_url = [['http://www.kuwo.cn/api/www/search/searchMusicBykeyWord?key='], [], ['&pn=1&rn=30&reqId=24b2acb0-e1f4-11e9-9c10-df22c38d9fb9']]\n download_url = 'http://www.kuwo.cn/url?format=mp3&rid={}&response=url&type=convert_url3&br=320kmp3&from=web&t=1569668134762&reqId=717bbfe5-a891-49c2-b549-c923ea781cec'\n headers1 = {\n 'Cookie': 'Cookie: _ga=GA1.2.1209502700.1578305434; _gid=GA1.2.570101734.1578305434; _gat=1; Hm_lvt_cdb524f42f0ce19b169a8071123a4797=1578305434; Hm_lpvt_cdb524f42f0ce19b169a8071123a4797=1578305434; kw_token=EQ4EIPN5TE7',\n 'Host': 'www.kuwo.cn',\n 'csrf': 'EQ4EIPN5TE7',\n 'Referer': 'http://www.kuwo.cn/search/list?key=%E8%AE%B8%E5%B5%A9',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'}\n headers2 = {\n 'Cookie': 'Hm_lvt_cdb524f42f0ce19b169a8071123a4797=1569666496,1569668107; Hm_lpvt_cdb524f42f0ce19b169a8071123a4797=1569677402',\n 'Host': 'www.kuwo.cn',\n 'User-Agent': 'Mozilla/5.0 (Windows 
NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'}\n\n path = r'D:\\MUSIC\\kuwo'\n music = Music(playlist_id, search_url, download_url, headers1, headers2, path)\n music.download()\n\n\n\n","repo_name":"ChasionNo1/learn_tensorflow","sub_path":"音乐下载/music_list.py","file_name":"music_list.py","file_ext":"py","file_size_in_byte":5069,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"26314652992","text":"from sqlalchemy import insert\nfrom .base import BaseRepository\nfrom db.secretdata import secretdata\nfrom models.secretdata import Secretdata, SecretdataIn\n\nfrom typing import List, Optional\n\nfrom datetime import datetime\n\nimport requests\nfrom requests.auth import HTTPBasicAuth\nimport json\n\n\nclass SecretdataRepository(BaseRepository):\n \n # Достаем все\n async def get_all(self, limit: int=100, skip: int=0) -> List[Secretdata]:\n query = secretdata.select().limit(limit).offset(skip)\n return await self.database.fetch_all(query)\n \n # Достаем по id\n async def get_by_id(self, id: int) -> Optional[Secretdata]:\n query = secretdata.select().where(secretdata.c.id==id)\n secret_data = await self.database.fetch_one(query)\n if secret_data is None:\n return None\n return Secretdata.parse_obj(secret_data)\n \n # Создаем данные\n async def create_sd(self, sd: SecretdataIn) -> Secretdata:\n secret_data = Secretdata(\n encrypted_text = sd.encrypted_text,\n decrypted_text = sd.decrypted_text,\n created_at = datetime.utcnow(),\n updated_at = datetime.utcnow()\n )\n values = {**secret_data.dict()}\n values.pop(\"id\", None)\n values.pop(\"decrypted_text\", None)\n query = secretdata.insert().values(**values)\n secret_data.id = await self.database.execute(query=query)\n return secret_data\n \n # Обновляем данные\n async def update_sd(self, id: int, sd: SecretdataIn) -> Secretdata:\n secret_data = Secretdata(\n id=id,\n encrypted_text = sd.encrypted_text,\n decrypted_text = sd.decrypted_text,\n created_at = datetime.utcnow(),\n updated_at = datetime.utcnow()\n )\n values = {**secret_data.dict()}\n values.pop(\"id\", None)\n values.pop(\"created_at\", None)\n values.pop(\"encrypted_text\", None)\n query = secretdata.update().where(secretdata.c.id==id).values(**values)\n await self.database.execute(query=query)\n return secret_data\n \n # Удаляем данные\n async def delete_sd(self, id: int) -> bool:\n try:\n query = secretdata.delete().where(secretdata.c.id==id)\n await self.database.execute(query=query)\n return True\n except:\n return False\n \n # Достаем зашифрованные данные\n async def encrypted(self) -> List:\n response = requests.get('http://yarlikvid.ru:9999/api/top-secret-data') \n return json.loads(response.text)\n \n # Добавление нескольких строк\n async def create_many(self, item_list: List) -> bool:\n items = await self.create_list(item_list)\n try:\n await self.database.execute_many(secretdata.insert(), items)\n return True\n except:\n return False\n\n # Вспомогательный метод для формирования данных для запроса\n async def create_list(self, items) -> List:\n item_list = []\n for item in items:\n item_list.append({\n 'encrypted_text': item,\n 'created_at': datetime.utcnow(),\n 'updated_at': datetime.utcnow()\n })\n return item_list\n \n # Расшифруем данные\n async def decrypted(self, data: List) -> List:\n decrypted_list = []\n for item in data:\n encrypted_item = []\n parsed_item = Secretdata.parse_obj(item)\n encrypted_item.append(parsed_item.encrypted_text)\n decrypt = 
requests.post('http://yarlikvid.ru:9999/api/decrypt',\n auth=HTTPBasicAuth('qummy', 'GiVEmYsecReT!'),\n json=encrypted_item)\n query = secretdata.update().where(\n secretdata.c.encrypted_text==parsed_item.encrypted_text\n ).values(decrypted_text=json.loads(decrypt.text)[0])\n decrypted_list.append(json.loads(decrypt.text)[0])\n await self.database.execute(query=query)\n return decrypted_list\n \n # Отправляем репозиторий\n async def send_gitrepo(self, data: List):\n decrypted_list = []\n for item in data:\n parsed_item = Secretdata.parse_obj(item)\n decrypted_list.append(parsed_item.decrypted_text)\n my_info = {\n \"name\": \"Сердюк Евгений\",\n \"repo_url\": \"https://github.com/realowner/ForQummy\",\n \"result\": decrypted_list\n }\n send = requests.post('http://yarlikvid.ru:9999/api/result',\n auth=HTTPBasicAuth('qummy', 'GiVEmYsecReT!'),\n json=my_info)\n return my_info","repo_name":"realowner/fastapi-encode-decode","sub_path":"repositories/secretdata.py","file_name":"secretdata.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33320212002","text":"# -*- coding: utf-8 -*-\n\"\"\"\nBase classes and constants.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom collections import namedtuple\n\n# keys\n\n# enable calmjs compatibility - i.e. the dynamic import feature\nCALMJS_COMPAT = 'calmjs_compat'\n# the map from a module name to the loader needed; used by the various\n# functions and methods in the loaderplugin module\n# see definition of WebpackModuleLoaderRegistryKey later\nCALMJS_WEBPACK_MODNAME_LOADER_MAP = 'calmjs_webpack_modname_loader_map'\n\n# The spec key for storing the base webpack configuration.\nWEBPACK_CONFIG = 'webpack_config'\n# The spec key for storing the webpack.devtool\nWEBPACK_DEVTOOL = 'webpack_devtool'\n# The key for the webpack.output.library\nWEBPACK_OUTPUT_LIBRARY = 'webpack_output_library'\n# The key for generating a combined single test index.\nWEBPACK_SINGLE_TEST_BUNDLE = 'webpack_single_test_bundle'\n# The key for webpack externals\nWEBPACK_EXTERNALS = 'webpack_externals'\n# The key for specifying the raw entry point - the alias will need to be\n# resolved to the actual webpack_entry.\nWEBPACK_ENTRY_POINT = 'webpack_entry_point'\n# For webpack loaderplugin integration - this is the spec key - note that\n# this is NOT for webpack plugins which are a separate type of things\nWEBPACK_LOADERPLUGINS = 'webpack_loaderplugins'\n# webpack mode; determines development/production settings.\nWEBPACK_MODE = 'webpack_mode'\n# for the module.rules section; used by loaderplugin module\nWEBPACK_MODULE_RULES = 'webpack_module_rules'\n# for the configuration in webpack config\nWEBPACK_RESOLVELOADER_ALIAS = 'webpack_resolveloader_alias'\n\n# Enable the --optimize-minimize option for webpack\nWEBPACK_OPTIMIZE_MINIMIZE = 'webpack_optimize_minimize'\n# option for enabling the checking of imports; defaults to True.\nVERIFY_IMPORTS = 'verify_imports'\n\n# constants\n\n# the default calmjs.webpack loaderplugins registry name\nCALMJS_WEBPACK_LOADERPLUGINS = 'calmjs.webpack.loaderplugins'\n\n# The calmjs loader name\nDEFAULT_CALMJS_EXPORT_NAME = '__calmjs_loader__'\n\n# The webpack.library.export default name\nDEFAULT_BOOTSTRAP_EXPORT = '__calmjs__'\n\n# The bootstrap for commonjs global module usage - this has a number of\n# caveats and really not recommended for usage. 
To use the resulting\n# artifact directly within node, the 'global' package from npm must be\n# installed, and the result of the import must be assigned to __calmjs__\n# in order for the chaining to work. Example:\n#\n# > var __calmjs__ = require('./framework.js');\n# ...\n# > __calmjs__ = require('./widget.js');\n# { require: [Function],\n# modules:\n# { 'widget/core': { Core: 'framework.lib.Core/widget.core.Core' },\n# ... } }\n#\n# Naturally, this is NOT supported, but done so to make it possible to\n# use.\nDEFAULT_BOOTSTRAP_COMMONJS = ('global', DEFAULT_BOOTSTRAP_EXPORT)\n\n# the compelted default config.\nDEFAULT_BOOTSTRAP_EXPORT_CONFIG = {\n \"commonjs\": list(DEFAULT_BOOTSTRAP_COMMONJS),\n \"commonjs2\": list(DEFAULT_BOOTSTRAP_COMMONJS),\n \"root\": DEFAULT_BOOTSTRAP_EXPORT,\n \"amd\": DEFAULT_BOOTSTRAP_EXPORT,\n}\n\nDEFAULT_WEBPACK_MODES = ('none', 'development', 'production')\n# default webpack mode is production (and a warning) if unset\nDEFAULT_WEBPACK_MODE = DEFAULT_WEBPACK_MODES[0]\nDEFAULT_WEBPACK_DEVTOOL = False\n\n\n# due to webpack specific requirements, a special type for the key is\n# needed for the WebpackModuleLoaderRegistry such that the correct\n# handling mechanism may be done.\nCALMJS_WEBPACK_MODULE_LOADER_SUFFIX = '.webpackloader'\nWebpackModuleLoaderRegistryKey = namedtuple(\n 'WebpackModuleLoaderRegistryKey', ['loader', 'modname'])\n","repo_name":"calmjs/calmjs.webpack","sub_path":"src/calmjs/webpack/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"72399739182","text":"class Model:\n def __init__(self, name, inputs, outputs, gpu=0, cpu=1, max_batch_size=0):\n self.name = name\n self.inputs = inputs\n self.outputs = outputs\n self.gpu = gpu\n self.cpu = cpu\n self.max_batch_size = max_batch_size\n\n class TensorIO:\n def __init__(self, name, datatype, dims, label_filename=None):\n self.name = name\n self.datatype = datatype\n self.dims = dims\n self.label_filename = label_filename\n\n\nclass TFLiteTritonModel(Model):\n def __init__(\n self,\n name,\n inputs,\n outputs,\n gpu=0,\n cpu=1,\n max_batch_size=0,\n armnn_cpu=False,\n armnn_gpu=False,\n armnn_cpu_parameters={},\n armnn_gpu_parameters={},\n xnnpack=False,\n xnnpack_parameters={},\n ):\n super(TFLiteTritonModel, self).__init__(\n name, inputs, outputs, gpu, cpu, max_batch_size\n )\n self.armnn_cpu = armnn_cpu\n self.armnn_gpu = armnn_gpu\n self.armnn_cpu_parameters = armnn_cpu_parameters\n self.armnn_gpu_parameters = armnn_gpu_parameters\n self.xnnpack = xnnpack\n self.xnnpack_parameters = xnnpack_parameters\n","repo_name":"hmahadik/tflite-backend","sub_path":"qa/helpers/triton_model_config.py","file_name":"triton_model_config.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1362971659","text":"import os\nimport pprint\nimport jsonref\nfrom pathlib import Path\nimport shutil\n\nfrom logs import logDecorator as lD\n\n# ----------------------------------\n\nconfig = jsonref.load(open(\"../config/config.json\"))\nlogBase = config[\"logging\"][\"logBase\"] + \".modules.splitFullMask.splitFullMask\"\nconfig_splitFullMask = jsonref.load(open(\"../config/modules/splitFullMask.json\"))\n\n\n@lD.log(logBase + \".splitFullMask\")\ndef splitFullMask(logger, top, FULL_or_MASK, extension, copy_to):\n\n \"\"\"\n This function recursively walks through a given directory\n (`top`) 
using depth-first search (bottom up), finds file names\n containing the `FULL_or_MASK` substring and copies it to the\n target directory `copy_to`.\n\n Parameters\n ----------\n top : {str}\n The directory to look in.\n FULL_or_MASK : {str}\n The substring to look for, either \"FULL\" or \"MASK\".\n extension : {str}\n The extension of the file to look for. e.g. \".png\".\n copy_to : {str}\n The directory to copy to.\n\n Returns\n -------\n files_moved : {int}\n The number of files moved.\n \"\"\"\n\n try:\n files_moved = 0\n\n # Count number of .dcm files in ../data/Mass/Test.\n for curdir, _, files in os.walk(top):\n\n files.sort()\n\n for f in files:\n\n if f.endswith(extension) and FULL_or_MASK in f:\n\n source = os.path.join(curdir, f)\n dest = os.path.join(copy_to, f)\n shutil.move(source, dest)\n\n files_moved += 1\n # if files_moved == 1:\n # break\n\n except Exception as e:\n # logger.error(f'Unable to splitFullMask!\\n{e}')\n print((f\"Unable to splitFullMaskk!\\n{e}\"))\n\n return files_moved\n\n\n# ----------------------------------\n\n\n@lD.log(logBase + \".main\")\ndef main(logger, resultsDict):\n \"\"\"main function for countDicom module.\n\n This function recursively counts the number of .dcm files in\n the given directory (i.e. includes all its subdirectories).\n\n Parameters\n ----------\n logger : {logging.Logger}\n The logger used for logging error information\n resultsDict: {dict}\n A dictionary containing information about the\n command line arguments. These can be used for\n overwriting command line arguments as needed.\n \"\"\"\n\n print(\"=\" * 30)\n print(\"Main function of splitFullMask module.\")\n print(\"=\" * 30)\n print(\"We get a copy of the result dictionary over here ...\")\n pprint.pprint(resultsDict)\n\n # Get the path to the folder that contains all the nested .dcm files.\n top = Path(config_splitFullMask[\"top\"])\n FULL_or_MASK = config_splitFullMask[\"FULL_or_MASK\"]\n extension = config_splitFullMask[\"extension\"]\n copy_to = Path(config_splitFullMask[\"copy_to\"])\n\n # Count.\n files_moved = splitFullMask(\n top=top, FULL_or_MASK=FULL_or_MASK, extension=extension, copy_to=copy_to\n )\n\n print(f\"Number of files with '{FULL_or_MASK}' moved: {files_moved}\")\n print()\n print(\"Getting out of countFileType.\")\n print(\"-\" * 30)\n print()\n\n return\n","repo_name":"CleonWong/Can-You-Find-The-Tumour","sub_path":"src/modules/splitFullMask/splitFullMask.py","file_name":"splitFullMask.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"91"} +{"seq_id":"12853815071","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 21 04:20:17 2021\n\n@author: pon01\n\"\"\"\n\nt1 = (1, 'two', 3)\nt2 = (t1, 3.25)\nprint(t2)\nprint((t1 + t2))\nprint((t1 + t2)[3])\nprint((t1 + t2)[2:4])\n\ndef intersect(t1, t2):\n \"\"\"t1과 t2는 tuple.\n t1과 t2 모두에 포함되는 elements를 포함한 튜플을 리턴\"\"\"\n result = ()\n for e in t1:\n if e in t2:\n result += (e,)\n return result\nprint(intersect((1, 'a', 2), ('b', 2, 'a')))\n\n\ndef find_extreme_divisors(n1, n2):\n \"\"\"n1과 n2는 양의 정수\n n1과 n2의 1보다 큰 최소공약수와 최대공약수 튜플을 리턴 \n 만약 1을 제외한 공약수가 없다면 (None, None)을 리턴\"\"\"\n min_val, max_val = None, None\n for i in range(2, min(n1, n2) + 1):\n if n1%i == 0 and n2%i == 0:\n if min_val == None:\n min_val = i\n max_val = i\n return min_val, max_val\n\n#Finger exercise\ntup = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\ntupsum = 0\nfor i in tup:\n tupsum = tupsum + i\ntupmean = tupsum / len(tup)\n\nL = ['I did it all', 4, 
'love']\nfor e in L:\n print(e)\n\nL1 = [1, 2, 3]\nL2 = L1[-1::-1]\nfor i in range(len(L1)):\n print(L1[i]*L2[i])\n \nL = ['I did it all', 4, 'love']\nfor e in L:\n print(e)\n","repo_name":"pon01095/python-Guttag","sub_path":"ch5 data structure.py","file_name":"ch5 data structure.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"31611685404","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom models.base import BaseNet\n\nclass Framework(BaseNet):\n\n def __init__(self, cfg, net):\n super(Framework, self).__init__()\n\n self.cfg = cfg\n self.fast_net = net\n self.eye = None\n\n def parameter_groups(self, base_lr, wd):\n return self.fast_net.parameter_groups(base_lr, wd)\n\n def _align(self, x, t):\n tf = F.affine_grid(t, size=x.size(), align_corners=False)\n return F.grid_sample(x, tf, align_corners=False, mode=\"nearest\")\n\n def _key_val(self, ctr, q):\n \"\"\"\n Args:\n ctr: [N,K]\n q: [BHW,K]\n Returns:\n val: [BHW,N]\n \"\"\"\n\n # [BHW,K] x [N,K].t -> [BHWxN]\n vals = torch.mm(q, ctr.t()) # [BHW,N]\n\n # normalising attention\n return vals / self.cfg.TEST.TEMP\n\n def _sample_index(self, x, T, N):\n \"\"\"Sample indices of the anchors\n\n Args:\n x: [BT,K,H,W]\n Returns:\n index: [B,N*N,K]\n \"\"\"\n\n BT,K,H,W = x.shape\n B = x.view(-1,T,K,H*W).shape[0]\n\n # sample indices from a uniform grid\n xs, ys = W // N, H // N\n x_sample = torch.arange(0, W, xs).view(1, 1, N)\n y_sample = torch.arange(0, H, ys).view(1, N, 1)\n\n # Random offsets\n # [B x 1 x N]\n x_sample = x_sample + torch.randint(0, xs, (B, 1, 1))\n # [B x N x 1]\n y_sample = y_sample + torch.randint(0, ys, (B, 1, 1))\n\n # batch index\n # [B x N x N]\n hw_index = torch.LongTensor(x_sample + y_sample * W)\n\n return hw_index\n\n def _sample_from(self, x, index, T, N):\n \"\"\"Gather the features based on the index\n\n Args:\n x: [BT,K,H,W]\n index: [B,N,N] defines the indices of NxN grid for a single\n frame in each of B video clips\n Returns:\n anchors: [BNN,K] sampled features given by index from x\n \"\"\"\n\n BT,K,H,W = x.shape\n x = x.view(-1,T,K,H*W)\n B = x.shape[0]\n\n # > [B,T,K,HW] > [B,T,HW,K] > [B,THW,K]\n x = x.permute([0,1,3,2]).reshape(B,-1,K)\n\n # every video clip will have the same samples\n # on the grid\n # [B x N x N] -> [B x N*N x 1] -> [B x N*N x K]\n index = index.view(B,-1,1).expand(-1,-1,K)\n\n # selecting from the uniform grid\n y = x.gather(1, index.to(x.device))\n\n # [BNN,K]\n return y.flatten(0,1)\n\n def _mark_from(self, x, index, T, N, fill_value=0):\n \"\"\"This is analogous to _sample_from except that\n here we simply \"mark\" the sampled positions in the tensor\n Used for visualisation only.\n Since it is a binary mask, K == 1\n\n Args:\n x: [BT,1,H,W] binary mask\n index: [B,N,N] defines the indices of NxN grid for a single\n frame in each of B video clips\n Returns:\n y: [BT,1,H,W] marked sample positions\n \"\"\"\n\n BT,K,H,W = x.shape\n assert K == 1, \"Expected binary mask\"\n x = x.view(-1,T,K,H*W)\n B = x.shape[0]\n\n # > [B,T,K,HW] > [B,T,HW,K] > [B,THW,K]\n x = x.permute([0,1,3,2]).reshape(B,-1,K)\n\n # every video clip will have the same samples\n # on the grid\n # [B x N x N] -> [B x N*N x 1] -> [B x N*N x K]\n index = index.view(B,-1,1).expand(-1,-1,K)\n\n # selecting from the uniform grid\n # [B x T*H*W x K]\n y = x.scatter(1, index.to(x.device), fill_value)\n\n # [B x T*H*W x K] -> [BT x K x H x W]\n return 
y.view(-1,H*W,K).permute([0,2,1]).view(-1,K,H,W)\n\n def _cluster_grid(self, k1, k2, aff1, aff2, T, index=None):\n \"\"\" Selecting clusters within a sequence\n Args:\n k1: [BT,K,H,W]\n k2: [BT,K,H,W]\n \"\"\"\n\n BT,K,H,W = k1.shape\n assert BT % T == 0, \"Batch not divisible by sequence length\"\n B = BT // T\n\n # N = [G x G]\n N = self.cfg.MODEL.GRID_SIZE ** 2\n\n # [BT,K,H,W] -> [BTHW,K]\n flatten = lambda x: x.flatten(2,3).permute([0,2,1]).flatten(0,1)\n\n # [BTHW,BN] -> [BT,BN,H,W]\n def unflatten(x, aff=None):\n x = x.view(BT,H*W,-1).permute([0,2,1]).view(BT,-1,H,W)\n if aff is None:\n return x\n return self._align(x, aff)\n\n index = self._sample_index(k1, T, N = self.cfg.MODEL.GRID_SIZE)\n query1 = self._sample_from(k1, index, T, N = self.cfg.MODEL.GRID_SIZE)\n\n \"\"\"Computing the distances and pseudo labels\"\"\"\n\n # [BTHW,K]\n k1_ = flatten(k1)\n k2_ = flatten(k2)\n\n # [BTHW,BN] -> [BTHW,BN] -> [BT,BN,H,W]\n vals_soft = unflatten(self._key_val(query1, k1_), aff1)\n vals_pseudo = unflatten(self._key_val(query1, k2_), aff2)\n\n # [BT,BN,H,W]\n probs_pseudo = self._pseudo_mask(vals_pseudo, T)\n probs_pseudo2 = self._pseudo_mask(vals_soft, T)\n\n pseudo = probs_pseudo.argmax(1)\n pseudo2 = probs_pseudo2.argmax(1)\n\n # mask\n def grid_mask():\n grid_mask = torch.ones(BT,1,H,W).to(pseudo.device)\n return self._mark_from(grid_mask, index, T, N = self.cfg.MODEL.GRID_SIZE)\n\n return vals_soft, pseudo, index, [vals_pseudo, pseudo2, grid_mask]\n\n # sampling affinity\n def _aff_sample(self, k1, k2, T):\n BT,K,h,w = k1.size()\n B = BT // T\n hw = h*w\n\n def gen(query):\n grid_mask = torch.ones(B,1,hw).to(k1.device)\n # generating random indices\n indices = torch.randint(0, hw, (B,1,1)).to(k1.device)\n grid_mask.scatter_(2, indices, 0)\n\n # [B,K,H,W] -> [B,K,1]\n query_ = query[::T].view(B,K,-1).gather(2, indices.expand(-1,K,-1))\n\n def aff(keys):\n k = keys.view(B,T,K,-1)\n # [B,T,K,HW] x [B,1,K,HW] -> [B,T,HW]\n aff = (k * query_[:,None,:,:]).sum(2)\n return (aff + 1) / 2\n\n\n aff1 = aff(k1)\n aff2 = aff(k2)\n\n return grid_mask.view(B,h,w), aff1.view(BT,h,w), aff2.view(BT,h,w)\n\n grid_mask1, aff1_1, aff1_2 = gen(k1)\n grid_mask2, aff2_1, aff2_2 = gen(k2)\n\n return grid_mask1, aff1_1, aff1_2, \\\n grid_mask2, aff2_1, aff2_2\n\n def _pseudo_mask(self, logits, T):\n BT,K,h,w = logits.shape\n assert BT % T == 0, \"Batch not divisible by sequence length\"\n B = BT // T\n\n # N = [G x G]\n N = self.cfg.MODEL.GRID_SIZE ** 2\n\n # generating a pseudo label\n # first we need to mask out the affinities across the batch\n if self.eye is None or self.eye.shape[0] != B*T \\\n or self.eye.shape[1] != B*N:\n eye = torch.eye(B)[:,:,None].expand(-1,-1,N).reshape(B,-1)\n eye = eye.unsqueeze(1).expand(-1,T,-1).reshape(B*T, B*N, 1, 1)\n self.eye = eye.to(logits.device)\n\n probs = F.softmax(logits, 1)\n return probs * self.eye\n\n def _ref_loss(self, x, y, N = 4):\n B,_,h,w = x.shape\n\n index = self._sample_index(x, T=1, N=N)\n x1 = self._sample_from(x, index, T=1, N=N)\n y1 = self._sample_from(y, index, T=1, N=N)\n logits = torch.mm(x1, y1.t()) / self.cfg.TEST.TEMP\n\n labels = torch.arange(logits.size(1)).to(logits.device)\n return F.cross_entropy(logits, labels)\n\n def _ce_loss(self, x, pseudo_map, T, eps=1e-5):\n error_map = F.cross_entropy(x, pseudo_map, reduction=\"none\", ignore_index=-1)\n\n BT,h,w = error_map.shape\n errors = error_map.view(-1,T,h,w)\n error_ref, error_t = errors[:,0], errors[:,1:]\n\n return error_ref.mean(), error_t.mean(), error_map\n\n def _forward_reg(self, 
frames2, norm):\n losses = {}\n\n if not self.cfg.TRAIN.STOP_GRAD:\n k2, res3, res4 = self.fast_net(frames2, norm)\n return k2, res3, res4, losses\n\n training = self.fast_net.training\n if self.cfg.TRAIN.BLOCK_BN:\n self.fast_net.eval()\n\n with torch.no_grad():\n k2, res3, res4 = self.fast_net(frames2, norm)\n\n if self.cfg.TRAIN.BLOCK_BN:\n self.fast_net.train(training)\n\n return k2, res3, res4, losses\n\n def fetch_first(self, x1, x2, T):\n assert x1.shape[1:] == x2.shape[1:]\n c,h,w = x1.shape[1:]\n\n x1 = x1.view(-1,T+1,c,h,w)\n x2 = x2.view(-1,T-1,c,h,w)\n\n x2 = torch.cat((x1[:,-1:], x2), 1)\n x1 = x1[:,:-1]\n\n return x1.flatten(0,1), x2.flatten(0,1)\n\n def forward(self, frames, frames2=None, mask=None, T=None, affine=None, affine2=None, embd_only=False, norm=True, dbg=False):\n \"\"\"Extract temporal correspondences\n Args:\n frames: [B,T,C,H,W]\n\n Returns:\n losses: a dictionary with the embedding loss\n net_outs: feature embeddings\n \n \"\"\"\n\n # embedding for self-supervised learning\n key1, res3, res4 = self.fast_net(frames, norm)\n\n outs, losses = {}, {}\n if embd_only: # only embedding\n return res3, res4, key1\n else:\n key2, res3_2, res4_2, losses = self._forward_reg(frames2, norm)\n\n # fetching the first frame from the second view\n key1, key2 = self.fetch_first(key1, key2, T)\n\n vals, pseudo, index, dbg_info = self._cluster_grid(key1, key2, affine, affine2, T)\n\n vals_pseudo, pseudo2, grid_mask = dbg_info\n\n key1_aligned = self._align(key1, affine)\n key2_aligned = self._align(key2, affine2)\n\n n_ref = self.cfg.MODEL.GRID_SIZE_REF\n losses[\"cross_key\"] = self._ref_loss(key1_aligned[::T], key2_aligned[::T], N = n_ref)\n\n # losses\n _, losses[\"temp\"], outs[\"error_map\"] = self._ce_loss(vals, pseudo, T)\n\n # computing the main loss\n losses[\"main\"] = self.cfg.MODEL.CE_REF * losses[\"cross_key\"] + losses[\"temp\"]\n\n if dbg:\n vals = F.softmax(vals, 1)\n vals_pseudo = F.softmax(vals_pseudo, 1)\n\n frames, frames2 = self.fetch_first(frames, frames2, T)\n outs[\"frames_orig\"] = frames\n outs[\"frames\"] = self._align(frames, affine)\n outs[\"frames2\"] = self._align(frames2, affine2)\n\n outs[\"map_soft\"] = vals\n outs[\"map\"] = pseudo\n outs[\"map_target_soft\"] = vals_pseudo\n outs[\"map_target\"] = pseudo2\n outs[\"grid_mask\"] = grid_mask()\n\n outs[\"aff_mask1\"], outs[\"aff11\"], outs[\"aff12\"], \\\n outs[\"aff_mask2\"], outs[\"aff21\"], outs[\"aff22\"] = self._aff_sample(key1, key2, T)\n\n return losses, outs\n","repo_name":"visinf/dense-ulearn-vos","sub_path":"models/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":10788,"program_lang":"python","lang":"en","doc_type":"code","stars":182,"dataset":"github-code","pt":"91"} +{"seq_id":"11925477317","text":"import os\nimport subprocess\nfrom filecmp import dircmp\nfrom progress.bar import IncrementalBar\nimport numpy as np\n\n_FNULL = open(os.devnull, 'w')\n\nshapenet_category_to_id = {\n'airplane'\t: '02691156',\n'bench'\t\t: '02828884',\n'cabinet'\t: '02933112',\n'car'\t\t: '02958343',\n'chair'\t\t: '03001627',\n'lamp'\t\t: '03636649',\n'monitor'\t: '03211117',\n'rifle'\t\t: '04090263',\n'sofa'\t\t: '04256520',\n'speaker'\t: '03691459',\n'table'\t\t: '04379243',\n'telephone'\t: '04401088',\n'vessel'\t: '04530566'\n}\n\ndef get_path(cat_id):\n obj_prefix = os.path.join('/media/tree/data1/data/ShapeNetCore.v1',cat_id)\n view_prefix = os.path.join('/media/tree/data1/projects/PointGAN/3d-lmnet/data/ShapeNetRendering',cat_id)\n output_prefix = 
os.path.join('/media/tree/data1/projects/AttentionBased/data/image_256_256_12_with_texture',cat_id)\n\n view_path = '/media/tree/data1/projects/AttentionBased/PSGN/data_generate/rendering_metadata.txt'\n if not os.path.isdir(output_prefix):\n os.makedirs(output_prefix)\n obj_path = []\n # view_path = []\n output_folder = []\n\n for i in dircmp(obj_prefix,view_prefix).common_dirs:\n obj_path.append(os.path.join(obj_prefix, i, 'model.obj'))\n # view_path.append(os.path.join(view_prefix, i, 'rendering/rendering_metadata.txt'))\n output_folder.append(os.path.join(output_prefix, i))\n \n for i in output_folder:\n if not os.path.isdir(i):\n os.makedirs(i)\n return obj_path, view_path, output_folder\n\ndef render_all():\n script_path = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'render_single.py')\n for cat, cat_id in shapenet_category_to_id.items():\n obj_path, view_path, output_folder = get_path(cat_id)\n print('Rendering %d images for cat %s' % (len(obj_path),cat_id))\n bar = IncrementalBar(max=len(obj_path))\n call_kwargs = dict(stdout=_FNULL, stderr=subprocess.STDOUT)\n for i in range(len(obj_path)):\n bar.next()\n subprocess.call([\n 'blender',\n '--background',\n '--python', script_path, '--',\n '--obj_path', str(obj_path[i]),\n '--view_path', str(view_path),\n '--output_folder', str(output_folder[i]),\n ],**call_kwargs)\n bar.finish()\n print('All cats rendered!')\n\n\nif __name__ == '__main__':\n render_all()\n\n\n\n","repo_name":"VIM-Lab/AttentionDPCR","sub_path":"data_generate/render_all.py","file_name":"render_all.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"91"} +{"seq_id":"34053589393","text":"# -*- coding: ISO-8859-1 -*-\n\nfrom odoo import api, fields, models\n\nimport logging\n\nlogger = logging.getLogger(__name__)\ntry:\n # import unicodecsv\n import csv\nexcept ImportError:\n logger.debug('Cannot import csv')\n\n\nclass HrAttendance(models.Model):\n _inherit = 'hr.attendance'\n\n @api.model\n def crud(self, values):\n employee_to_update = self.env['hr.employee'].with_context(active_test=False).search([\n ('barcode', '=', values.get('barcode')), ('company_id', '=', values.get('company_id'))], limit=1)\n task_id = self.env['hr.attendance.task'].search([('id_activity', '=', values.get('attendance_task', 0))],\n limit=1)\n\n if employee_to_update and values.get('check_in'):\n employee_id = employee_to_update.id\n check_in = (values.get(\"check_in\"))\n check_out = (values.get(\"check_out\")) if values.get(\"check_out\") else None\n attendance_task = task_id.id if task_id else None\n att_obj = self.search([('employee_id', '=', employee_id), ('check_in', '=', check_in)], limit=1)\n if not att_obj:\n self.sudo().create({\n 'employee_id': employee_id,\n 'check_in': check_in,\n 'check_out': check_out,\n 'attendance_task': attendance_task,\n 'company_id': values.get(\"company_id\"),\n })\n","repo_name":"rafaelaxcelere/jpl","sub_path":"hr_attendance_import_csv/models/hr_attendance.py","file_name":"hr_attendance.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"26503800427","text":"from flask import Flask, render_template, request\nimport requests\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef main():\n myname = \"Sparta\"\n return render_template(\"index.html\", name=myname)\n\n\n@app.route('/detail/')\ndef detail(keyword):\n r = 
requests.get(f\"https://owlbot.info/api/v4/dictionary/{keyword}\", headers={\"Authorization\": \"Token e92b6d41d3548ec32e875ef446e8e83ee254d827\"})\n result = r.json()\n print(result)\n word_receive = request.args.get(\"word_give\")\n print(word_receive)\n return render_template(\"detail.html\", word=keyword)\n\n\nif __name__ == '__main__':\n app.run('0.0.0.0', port=5000, debug=True)","repo_name":"lilclown97/TIL","sub_path":"항해99/practice/searchword/prac/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36766709943","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request, jsonify\nfrom flask import current_app as app\nfrom application.models import Category, Product, Managers, Users, Orders_Desc, Order_Details\nfrom .database import db\nfrom datetime import datetime\n\n@app.route(\"//cart_addition/prod_\")\ndef define_item_qty(usr, pid):\n p = Product.query.filter(Product.PID == pid).first()\n c = Category.query.filter(Category.CID == p.CID).first()\n return render_template(\"add_to_cart.html\", user=usr, prod=p, cat = c)\n\n@app.route(\"/add_item\", methods=['POST'])\ndef add_to_cart():\n uname = request.json['uid']\n pid = request.json['pid']\n qty = request.json['qty']\n\n status = \"\"\n u = Users.query.filter(Users.Uname == uname).first()\n active_order = None\n for o in u.orders:\n if o.Status == 1:\n active_order = o\n print(active_order.OID)\n if active_order is None:\n try:\n new_order = Orders_Desc(Status = 1, Uname = uname)\n db.session.add(new_order)\n db.session.flush()\n \n new_line_item = Order_Details(OID = new_order.OID, PID = pid, Qty = qty)\n db.session.add(new_line_item)\n db.session.flush()\n except:\n status = \"Invalid addition\"\n print(\"Rolling back\")\n db.session.rollback()\n else:\n status=\"success\"\n db.session.commit()\n print(\"Commit\")\n else:\n try: \n new_line_item = Order_Details(OID = active_order.OID, PID = pid, Qty = qty)\n db.session.add(new_line_item)\n db.session.flush()\n except:\n status = \"This item exists in your cart\\n You can change your quantity by editing the cart\"\n print(\"Rolling back\")\n db.session.rollback()\n else:\n status=\"success\"\n db.session.commit()\n print(\"Commit\")\n \n return jsonify(stat=status)\n\n@app.route(\"//cart\")\ndef show_cart(usr):\n u = Users.query.filter(Users.Uname == usr).first()\n active_order = None\n for o in u.orders:\n if o.Status == 1:\n active_order = o\n if active_order is None:\n return render_template(\"cart.html\", user=usr, order=None)\n else:\n details = None\n #details = Product.query.join(Order_Details, Product.PID == Order_Details.PID).filter(Order_Details.OID == active_order.OID)\n details = db.session.query(Order_Details, Product).join(Product, Product.PID == Order_Details.PID).filter(Order_Details.OID == active_order.OID).all()\n if len(details) == 0:\n return render_template(\"cart.html\", user=usr, order=None) \n return render_template(\"cart.html\", user=usr, order=details)\n \n@app.route(\"/update_item\", methods = ['POST'])\ndef update_order():\n oid = request.json['oid']\n pid = request.json['pid']\n qty = request.json['qty']\n\n status = \"\"\n item = Order_Details.query.filter(Order_Details.OID == oid, Order_Details.PID == pid).first()\n \n if item is None:\n status = \"failure\"\n else:\n try:\n item.Qty = qty;\n db.session.flush()\n\n except Exception as e:\n status = \"Invalid request\"\n print(\"Rolling 
back\")\n \n else:\n status=\"success\"\n db.session.commit()\n print(\"Commit\")\n \n return jsonify(stat=status)\n\n@app.route(\"/del_item\", methods = ['POST'])\ndef del_from_order():\n oid = request.json['oid']\n pid = request.json['pid']\n\n item = Order_Details.query.filter(Order_Details.OID == oid, Order_Details.PID == pid).first()\n\n try:\n if item is not None:\n \n db.session.delete(item)\n db.session.commit()\n return jsonify(stat='success')\n else:\n return jsonify(error='Product not found')\n except Exception as e:\n db.session.rollback()\n return jsonify(error='An error occurred while deleting the product')\n \n@app.route(\"/checkout_cart\", methods = ['POST'])\ndef checkout():\n oid = request.json['oid']\n iso_datetime = request.json['date']\n datetime_obj = datetime.strptime(iso_datetime, '%Y-%m-%dT%H:%M:%S.%fZ')\n\n status = \"\"\n order = Orders_Desc.query.filter(Orders_Desc.OID == oid).first()\n \n if order is None:\n status = \"failure\"\n else:\n try:\n order.Status = 0\n order.Date = datetime_obj\n db.session.flush()\n \n except Exception as e:\n status = \"Invalid request\"\n print(\"Rolling back\")\n \n else:\n status=\"success\"\n db.session.commit()\n update_stock(oid)\n print(\"Commit\")\n \n return jsonify(stat=status)\n\ndef update_stock(oid):\n order_items = Order_Details.query.filter(Order_Details.OID == oid).all()\n for item in order_items:\n try:\n prod = Product.query.filter(Product.PID == item.PID).first()\n prod.Stock = prod.Stock - item.Qty\n db.session.flush()\n #print(prod.PID, prod.Stock)\n \n except Exception as e:\n status = \"Invalid request\"\n print(\"Rolling back\")\n \n status=\"success\"\n db.session.commit()\n #print(\"here----Commit\")\n \n return jsonify(stat=status)\n\n \n","repo_name":"subhashree211002/Grocery-Store","sub_path":"application/user_actions.py","file_name":"user_actions.py","file_ext":"py","file_size_in_byte":5466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"10659105852","text":"from datetime import datetime, timedelta, timezone\nimport math\nfrom math import floor, sqrt\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.regression.mixed_linear_model as sm # statsmodels version >= 0.8 !\nimport ephem\n\n__author__ = \"Eric Dose :: New Mexico Mira Project, Albuquerque\"\n\n\nclass Timespan:\n \"\"\" Holds one (start, end) span of time. 
Immutable.\n Input: 2 python datetimes (in UTC), defining start and end of timespan.\n methods:\n ts2 = ts.copy()\n ts2 == ts # only if both start and end are equal\n ts2 = ts.delay_seconds(120) # returns new Timespan, offset in both start and end\n ts.intersect(other) # returns True iff any overlap at all\n ts2 = ts.subtract(other) # returns new Timespan; longer of 2 possible spans if ambiguous.\n ts.contains_time(t) # returns True iff ts.start <= t <= ts.end\n ts.contains_timespan(other) # returns True iff ts wholly contains other\n str(ts) # returns string describing Timespan's start, end, and duration in seconds.\n \"\"\"\n def __init__(self, start_utc, end_utc):\n self.start = start_utc\n self.end = max(start_utc, end_utc)\n self.seconds = (self.end-self.start).seconds\n self.midpoint = self.start + timedelta(seconds=self.seconds / 2)\n\n def copy(self):\n return Timespan(self.start, self.end)\n\n def __eq__(self, other):\n return self.start == other.start and self.end == other.end\n\n def delay_seconds(self, seconds):\n delay = timedelta(seconds=seconds)\n return Timespan(self.start+delay, self.end+delay)\n\n def expand_seconds(self, seconds):\n # Use negative seconds to contract Timespan. New Timespan will have non-negative duration.\n expansion = timedelta(seconds=seconds)\n new_start = min(self.start - expansion, self.midpoint)\n new_end = max(self.end + expansion, self.midpoint)\n return Timespan(new_start, new_end)\n\n def intersect(self, other):\n new_start = max(self.start, other.start)\n new_end = min(self.end, other.end)\n return Timespan(new_start, new_end)\n\n def subtract(self, other):\n if self.intersect(other).seconds == 0: # case: no overlap/intersection.\n return self\n if other.contains_timespan(self): # case: self entirely subtracted away.\n return Timespan(self.start, self.start)\n if self.contains_timespan(other): # case: 2 timespans -> take the longer.\n diff_early = Timespan(self.start, other.start)\n diff_late = Timespan(other.end, self.end)\n if diff_early.seconds >= diff_late.seconds:\n return diff_early\n else:\n return diff_late\n if self.start < other.start: # remaining case: partial overlap.\n return Timespan(self.start, other.start)\n else:\n return Timespan(other.end, self.end)\n\n def contains_time(self, time_utc):\n return self.start <= time_utc <= self.end\n\n def contains_timespan(self, other):\n return (self.start <= other.start) & (self.end >= other.end)\n\n @staticmethod\n def longer(ts1, ts2, on_tie=\"earlier\"):\n \"\"\"\n Returns Timespan with longer duration (larger .seconds).\n If equal duration:\n if_tie==\"earlier\", return earlier.\n if_tie==\"first\", return ts1.\n [TODO: add \"random\" option later to return randomly chosen ts1 or ts2.]\n :param ts1: input Timespan object.\n :param ts2: input Timespan object.\n :param on_tie: \"earlier\" or \"first\". Any other string behaves as \"first\".\n :return: the Timespan object with longer duration.\n \"\"\"\n if ts1.seconds > ts2.seconds:\n return ts1\n if ts2.seconds > ts1.seconds:\n return ts2\n # here: equal length cases. First, try to break duration tie with earlier midpoint.\n if on_tie.lower() == \"earlier\" and ts1.midpoint != ts2.midpoint:\n if ts1.midpoint < ts2.midpoint:\n return ts1\n return ts2\n # here, tie-breaking has failed. 
So simply return first of 2 input Timespans.\n return ts1\n\n def __str__(self):\n return \"Timespan '\" + str(self.start) + \"' to '\" + str(self.end) + \"' = \" + \\\n str(self.seconds) + \" seconds.\"\n\n\nclass RaDec:\n \"\"\"\n Holds one Right Ascension, Declination sky position (internally as degrees).\n Parameters:\n ra : (hours hex string, or degrees)\n dec : (degrees hex string, or degrees)\n Methods:\n\n \"\"\"\n def __init__(self, ra, dec):\n if isinstance(ra, str):\n self.ra = ra_as_degrees(ra)\n else:\n self.ra = ra\n if isinstance(dec, str):\n self.dec = dec_as_degrees(dec)\n else:\n self.dec = dec\n self.as_degrees = self.ra, self.dec # stored internally as degrees\n self.as_hex = ra_as_hours(self.ra), dec_as_hex(self.dec)\n\n def degrees_from(self, other):\n deg_per_radian = 180.0 / math.pi\n diff_ra = abs(self.ra - other.ra) / deg_per_radian\n cos_dec_1 = math.cos(self.dec / deg_per_radian)\n cos_dec_2 = math.cos(other.dec / deg_per_radian)\n diff_dec = abs(self.dec - other.dec) / deg_per_radian\n arg = math.sqrt(math.sin(diff_dec/2.0)**2 + cos_dec_1*cos_dec_2*math.sin(diff_ra/2.0)**2)\n if arg > 0.001:\n return deg_per_radian * (2.0 * math.asin(arg)) # haversine formula\n else:\n # spherical law of cosines\n sin_dec_1 = math.sin(self.dec / deg_per_radian)\n sin_dec_2 = math.sin(other.dec / deg_per_radian)\n return deg_per_radian * \\\n math.acos(sin_dec_1*sin_dec_2 + cos_dec_1*cos_dec_2*math.cos(diff_ra))\n\n def farther_from(self, other_ra_dec, degrees_limit):\n return self.degrees_from(other_ra_dec) > degrees_limit\n\n def __eq__(self, other):\n return (self.ra == other.ra) and (self.dec == other.dec)\n\n def __str__(self):\n ra_hex, dec_hex = self.as_hex\n return \"RaDec object: \" + ra_hex + \" \" + dec_hex\n\n def __repr__(self):\n ra_hex, dec_hex = self.as_hex\n return \"RaDec('\" + ra_hex + \"', '\" + dec_hex + \"')\"\n\n\ndef parse_hex(hex_string):\n \"\"\"\n Helper function for RA and Dec parsing, takes hex string, returns list of floats.\n :param hex_string: string in either full hex (\"12:34:56.7777\" or \"12 34 56.7777\"),\n or degrees (\"234.55\")\n :return: list of strings representing floats (hours:min:sec or deg:arcmin:arcsec).\n \"\"\"\n colon_list = hex_string.split(':')\n space_list = hex_string.split() # multiple spaces act as one delimiter\n if len(colon_list) >= len(space_list):\n return [x.strip() for x in colon_list]\n return space_list\n\n\ndef ra_as_degrees(ra_string):\n \"\"\"\n :param ra_string: string in either full hex (\"12:34:56.7777\" or \"12 34 56.7777\"),\n or degrees (\"234.55\")\n :return float of Right Ascension in degrees between 0 and 360.\n \"\"\"\n ra_list = parse_hex(ra_string)\n if len(ra_list) == 1:\n ra_degrees = float(ra_list[0]) # input assumed to be in degrees.\n elif len(ra_list) == 2:\n ra_degrees = 15 * (float(ra_list[0]) + float(ra_list[1])/60.0) # input assumed in hex.\n else:\n ra_degrees = 15 * (float(ra_list[0]) + float(ra_list[1]) / 60.0 +\n float(ra_list[2])/3600.0) # input assumed in hex.\n if (ra_degrees < 0) | (ra_degrees > 360):\n ra_degrees = None\n return ra_degrees\n\n\ndef hex_degrees_as_degrees(hex_degrees_string):\n \"\"\"\n :param hex_degrees_string: string in either full hex (\"-12:34:56.7777\", or \"-12 34 56.7777\"),\n or degrees (\"-24.55\")\n :return float of degrees (not limited)\n \"\"\"\n # dec_list = hex_degrees_string.split(\":\")\n dec_list = parse_hex(hex_degrees_string)\n # dec_list = [dec.strip() for dec in dec_list]\n if dec_list[0].startswith(\"-\"):\n sign = -1\n else:\n sign = 1\n if 
len(dec_list) == 1:\n dec_degrees = float(dec_list[0]) # input assumed to be in degrees.\n elif len(dec_list) == 2:\n dec_degrees = sign * (abs(float(dec_list[0])) + float(dec_list[1])/60.0) # input is hex.\n else:\n dec_degrees = sign * (abs(float(dec_list[0])) + float(dec_list[1]) / 60.0 +\n float(dec_list[2])/3600.0) # input is hex.\n return dec_degrees\n\n\ndef dec_as_degrees(dec_string):\n \"\"\" Input: string in either full hex (\"-12:34:56.7777\") or degrees (\"-24.55\")\n Returns: float of Declination in degrees, required to be -90 to +90, inclusive.\n \"\"\"\n dec_degrees = hex_degrees_as_degrees(dec_string)\n if (dec_degrees < -90) | (dec_degrees > +90):\n dec_degrees = None\n return dec_degrees\n\n\ndef ra_as_hours(ra_degrees):\n \"\"\" Input: float of Right Ascension in degrees.\n Returns: string of RA as hours, in hex, to the nearest 0.001 RA seconds.\n \"\"\"\n if (ra_degrees < 0) | (ra_degrees > 360):\n return None\n n_ra_milliseconds = round((ra_degrees * 3600 * 1000) / 15)\n ra_hours, remainder = divmod(n_ra_milliseconds, 3600 * 1000)\n ra_minutes, remainder = divmod(remainder, 60 * 1000)\n ra_seconds = round(remainder / 1000, 3)\n format_string = \"{0:02d}:{1:02d}:{2:06.3f}\"\n ra_str = format_string.format(int(ra_hours), int(ra_minutes), ra_seconds)\n if ra_str[:3] == \"24:\":\n ra_str = format_string.format(0, 0, 0)\n return ra_str\n\n\ndef dec_as_hex(dec_degrees):\n \"\"\" Input: float of Declination in degrees.\n Returns: string of Declination in hex, to the nearest 0.01 arcsecond.\n \"\"\"\n if (dec_degrees < -90) | (dec_degrees > +90):\n return None\n dec_string = degrees_as_hex(dec_degrees, seconds_decimal_places=2)\n return dec_string\n\n\ndef degrees_as_hex(angle_degrees, seconds_decimal_places=2):\n \"\"\"\n :param angle_degrees: any angle as degrees\n :return: same angle in hex notation, unbounded.\n \"\"\"\n if angle_degrees < 0:\n sign = \"-\"\n else:\n sign = \"+\"\n abs_degrees = abs(angle_degrees)\n milliseconds = round(abs_degrees * 3600 * 1000)\n degrees, remainder = divmod(milliseconds, 3600 * 1000)\n minutes, remainder = divmod(remainder, 60 * 1000)\n seconds = round(remainder / 1000, 2)\n format_string = '{0}{1:02d}:{2:02d}:{3:0' + str(int(seconds_decimal_places)+3) + \\\n '.0' + str(int(seconds_decimal_places)) + 'f}'\n hex_string = format_string.format(sign, int(degrees), int(minutes), seconds)\n return hex_string\n\n\ndef weighted_mean(values, weights):\n \"\"\"\n Returns weighted mean, weighted std deviation of values, and weighted std deviation of the mean.\n :param values: list (or other iterable) of values to be averaged\n :param weights: list (or other iterable) of weights; length must = length of values\n :return: 3-tuple (weighted mean, weighted std dev (population), weighted std dev of mean)\n \"\"\"\n if (len(values) != len(weights)) or (len(values) == 0) or (len(weights) == 0):\n raise ValueError('lengths of values & weights must be equal & non-zero.')\n if sum(weights) <= 0:\n raise ValueError('sum of weights must be positive.')\n value_list = list(values) # py list comprehension often misunderstands pandas Series indices.\n weight_list = list(weights) # \"\n norm_weights = [wt/sum(weights) for wt in weight_list]\n w_mean = sum([nwt * val for (nwt, val) in zip(norm_weights, value_list)])\n n_nonzero_weights = sum([w != 0 for w in weight_list])\n\n if n_nonzero_weights == 1:\n w_stdev_pop = 0\n w_stdev_w_mean = 0\n else:\n resid2 = [(val-w_mean)**2 for val in value_list]\n nwt2 = sum([nwt**2 for nwt in norm_weights])\n rel_factor = 1.0 / 
(1.0 - nwt2) # reliability factor (better than N'/(N'-1))\n w_stdev_pop = sqrt(rel_factor * sum([nwt * r2 for (nwt, r2) in zip(norm_weights, resid2)]))\n w_stdev_w_mean = sqrt(nwt2) * w_stdev_pop\n return w_mean, w_stdev_pop, w_stdev_w_mean\n\n\nDEFAULT_LADDER = [1.0, 1.25, 1.6, 2.0, 2.5, 3.2, 4.0, 5.0, 6.4, 8.0, 10.0]\n\n\ndef ladder_round(raw_value, ladder=DEFAULT_LADDER, direction=\"nearest\"):\n \"\"\"\n Rounds to a near-log scale value. May be useful for familiar exposure times.\n Can handle negative numbers, too. Zero returns zero.\n :param raw_value: the value we want to round\n :param ladder: ascending list of values from 1 to 10 to which to round.\n :param direction: \"nearest\" or \"down\" or \"up\"\n :return: raw_valued rounded to nearest ladder value, not counting powers of 10,\n e.g., 32.5 -> 32, 111 -> 100, 6321 -> 6400, -126 -> -125\n \"\"\"\n if raw_value == 0:\n return 0\n base = math.copysign(10**(math.floor(math.log10(math.fabs(raw_value)))), raw_value)\n target = math.fabs(raw_value / base)\n if target in ladder:\n return raw_value\n for i, val in enumerate(ladder[1:]):\n if target < val:\n ratio_below = target / ladder[i]\n ratio_above = ladder[i+1] / target\n if direction == \"down\":\n return base * ladder[i]\n if direction == \"up\":\n return base * ladder[i+1]\n if ratio_below <= ratio_above: # default case \"nearest\"\n return base * ladder[i] # round downward\n else:\n return base * ladder[i+1] # round upward\n\n\ndef get_phase(jd, jd_epoch, period):\n phase = math.modf((jd - jd_epoch) / period)[0]\n if phase < 0:\n phase += 1\n return phase\n\n\n\ndef jd_from_datetime_utc(datetime_utc=None):\n if datetime_utc is None:\n return None\n datetime_j2000 = datetime(2000, 1, 1, 0, 0, 0).replace(tzinfo=timezone.utc)\n jd_j2000 = 2451544.5\n seconds_since_j2000 = (datetime_utc - datetime_j2000).total_seconds()\n return jd_j2000 + seconds_since_j2000 / (24*3600)\n\n\ndef datetime_utc_from_jd(jd=None):\n if jd is None:\n return datetime.now(timezone.utc)\n datetime_j2000 = datetime(2000, 1, 1, 0, 0, 0).replace(tzinfo=timezone.utc)\n jd_j2000 = 2451544.5\n seconds_since_j2000 = 24 * 3600 * (jd - jd_j2000)\n return datetime_j2000 + timedelta(seconds=seconds_since_j2000)\n\n\ndef hhmm_from_datetime_utc(datetime_utc):\n minutes = round(datetime_utc.hour*60 # NB: banker's rounding (nearest even)\n + datetime_utc.minute\n + datetime_utc.second/60\n + datetime_utc.microsecond/(60*1000000)) % 1440\n hh = minutes // 60\n mm = minutes % 60\n return '{0:0>4d}'.format(100 * hh + mm)\n\n\ndef az_alt_at_datetime_utc(longitude, latitude, target_radec, datetime_utc):\n obs = ephem.Observer() # for local use.\n if isinstance(longitude, str):\n obs.lon = longitude\n else:\n # next line wrong?: if string should be in deg not radians?? (masked by long passed as hex string?)\n obs.lon = str(longitude * math.pi / 180)\n if isinstance(latitude, str):\n obs.lat = latitude\n else:\n # next line wrong?: if string should be in deg not radians?? 
(masked by long passed as hex string?)\n obs.lat = str(latitude * math.pi / 180)\n obs.date = datetime_utc\n target_ephem = ephem.FixedBody() # so named to suggest restricting its use to ephem.\n target_ephem._epoch = '2000'\n target_ephem._ra, target_ephem._dec = target_radec.as_hex # text: RA in hours, Dec in deg\n target_ephem.compute(obs)\n return target_ephem.az * 180 / math.pi, target_ephem.alt * 180 / math.pi\n\n\ndef isfloat(string):\n try:\n float(string)\n return True\n except ValueError:\n return False\n\n\ndef float_or_none(string):\n try:\n return float(string)\n except ValueError:\n return None\n\n\ndef event_utcs_in_timespan(jd_reference, period, timespan):\n \"\"\"\n Returns a list of UTC times of period events within a given Timespan.\n A generalization of (and replacing) fn find_minima_in_timespan()\n :param jd_reference: Julian Date of any occurence of the period event (e.g., Mira max) [float]\n :param period: in days [float]\n :param timespan: target timespan (start and end datetimes) [Timespan object]\n :return: list of up to 10 UTCs of periodic events within the target timespan [list of datetimes]\n Return None if jd_reference or period are invalid. Return empty list if no such events.\n \"\"\"\n if jd_reference is None or period is None:\n return None\n jd_ts_start = jd_from_datetime_utc(timespan.start)\n jd_ts_end = jd_from_datetime_utc(timespan.end)\n n_prior = floor((jd_ts_start - jd_reference) / period)\n jd_prior = jd_reference + n_prior * period\n utc_list = []\n for i in range(10):\n jd_test = jd_prior + i * period\n if jd_test > jd_ts_end:\n return utc_list\n if jd_test >= jd_ts_start:\n utc_list.append(datetime_utc_from_jd(jd_test))\n return utc_list\n\n\nclass MixedModelFit:\n \"\"\"\n Object: holds info for one mixed-model (py::statsmodel) fit. 
\n Generic in nature--NOT tied to astronomical usage.\n Uses formula form, i.e., statsmodel::sm.MixedLM.from_formula()\n \"\"\"\n def __init__(self, data, dep_var=None, fixed_vars=None, group_var=None):\n \"\"\"\n Executes mixed-model fit & makes data available.\n :param data: input data, one variable per column, one point per row [pandas Dataframe]\n :param dep_var: one column name as dependent 'Y' variable [string] \n :param fixed_vars: one or more column names as independent 'X' variable [string or\n list of strings]\n :param group_var: one column name as group (category; random-effect) variable [string]\n Usage: fit = MixedModel(df_input, 'Y', ['X1', 'X2'], 'a_group_type']\n fit = MixedModel(df_input, 'Y', 'X1', 'a_group_type'] (OK if only one indep var)\n \"\"\"\n if not isinstance(data, pd.DataFrame):\n print('Parameter \\'data\\' must be a pandas Dataframe of input data.')\n return\n if dep_var is None or fixed_vars is None or group_var is None:\n print('Provide all parameters: dep_var, fixed_vars, and group_var.')\n return\n if not isinstance(dep_var, str) or not isinstance(group_var, str):\n print('Parameters \\'dep_var\\' and \\'group_var\\' must both be strings.')\n return\n fixed_vars_valid = False # default if not validated\n if isinstance(fixed_vars, str):\n fixed_vars = list(fixed_vars)\n fixed_vars_valid = True\n if isinstance(fixed_vars, list):\n if len(fixed_vars) >= 1:\n if all([isinstance(var, str) for var in fixed_vars]):\n fixed_vars_valid = True\n if not fixed_vars_valid:\n print('Parameter \\'fixed_vars\\' must be a string or a list of strings.')\n return\n formula = dep_var + ' ~ ' + ' + '.join(fixed_vars)\n\n model = sm.MixedLM.from_formula(formula, groups=data[group_var], data=data)\n fit = model.fit()\n\n self.statsmodels_object = fit # instance of class MixedLMResults (py pkg statsmodels)\n\n # Scalar and naming attributes:\n self.converged = fit.converged # bool\n self.nobs = fit.nobs # number of observations used in fit\n self.likelihood = fit.llf\n self.dep_var = dep_var\n self.fixed_vars = fixed_vars\n self.group_var = group_var\n self.sigma = sqrt(sum(fit.resid**2)/(fit.nobs-len(fixed_vars)-2))\n\n # Fixed-effects dataframe (joins so we don't count on consistent input ordering):\n df = pd.DataFrame({'Value': fit.fe_params})\n df = df.join(pd.DataFrame({'Stdev': fit.bse_fe})) # join on index (enforce consistency)\n df = df.join(pd.DataFrame({'Tvalue': fit.tvalues})) # \" & any random effect discarded\n df = df.join(pd.DataFrame({'Pvalue': fit.pvalues})) # \" & \"\n df['Name'] = df.index\n self.df_fixed_effects = df.copy()\n\n # Random-effect dataframe, index=GroupName, cols=GroupName, GroupValue:\n df = pd.DataFrame(fit.random_effects).transpose() # DataFrame, 1 row/group\n df = df.rename(columns={'groups': 'Group'}) # was 'GroupValue'\n df['GroupName'] = df.index\n self.df_random_effects = df.copy()\n\n # Observation dataframe (safe to count on consistent input ordering -> easier construction):\n df = pd.DataFrame({'FittedValue': fit.fittedvalues})\n df['Residual'] = fit.resid\n self.df_observations = df.copy()\n\n def predict(self, df_predict_input, include_random_effect=True):\n \"\"\"\n Takes new_data and renders predicted dependent-variable values.\n Optionally includes effect of groups (random effects), unlike py::statsmodels.\n :param: new_data: new input data used to render predictions. \n Extra (unused) columns OK; model selects only needed columns. 
[pandas DataFrame] \n :param: include_random_effect: True to include them, False to omit/ignore [bool]\n :return: predictions of dependent-variable values matching rows of new data (pandas Series)\n \"\"\"\n\n # Get predicted values on fixed effects only (per statsmodels' weird def. of 'predicted'):\n fixed_effect_inputs = df_predict_input[self.fixed_vars] # 1 col per fixed effect variable\n predicted_on_fixed_only = self.statsmodels_object.predict(exog=fixed_effect_inputs)\n\n # If requested, add RE contibs (that were not included in MixedModels object 'fit'):\n if include_random_effect:\n df_random_effect_inputs = pd.DataFrame(df_predict_input[self.group_var])\n df_random_effect_values = self.df_random_effects[['Group']] # was ['GroupValue']\n predicted_on_random_only = pd.merge(df_random_effect_inputs, df_random_effect_values,\n left_on=self.group_var,\n right_index=True, how='left',\n sort=False)['Group'] # was 'GroupValue'\n total_prediction = predicted_on_fixed_only + predicted_on_random_only\n else:\n total_prediction = predicted_on_fixed_only\n\n return total_prediction\n","repo_name":"edose/photrix","sub_path":"photrix/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":22271,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"36513125479","text":"# -*- coding: utf-8 -*-\n# constants used throughout the API code\n\nimport numpy as np\n\n###################\n# filepaths and folder paths:\nMODEL_FOLDERPATH = \"./model/\"\nRAW_DATA_FILEPATH = \"./data/creditcard.csv\"\nTRAIN_DATA_FILEPATH = \"./data/train.csv\"\nTEST_DATA_FILEPATH = \"./data/test.csv\"\n\n###################\n# for data validation:\nX_DATA_COLUMNS = [\"Time\"] + [\"V\" + str(i) for i in range(1, 29)] + [\"Amount\"]\nX_DATA_DTYPES = [np.dtype(\"float64\") for col in X_DATA_COLUMNS]\nRAW_DATA_COLUMNS = [\"Time\"] + [\"V\" + str(i) for i in range(1, 29)] + [\"Amount\", \"Class\"]\nRAW_DATA_DTYPES = X_DATA_DTYPES + [np.dtype(\"int64\")]\n","repo_name":"pfpimenta/mlops_binary_classification","sub_path":"src/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"42289604726","text":"from django.shortcuts import render\nfrom . 
import form\nimport pandas as pd # libreria que permite manipular archivos\nimport matplotlib.pyplot as plt # libreria para graficar datos\nimport numpy as np # provee multidimensional soporte para objetos\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import metrics\nfrom django.core.mail import send_mail\n\n\n# Create your views here.\n\n\ndef index(request):\n return render(request, 'index.html')\n\n\ndef training_model(array_to_train):\n df = pd.read_csv(\"./mastitis.csv\") # cargar los datos sobre mastitis\n df = df.set_index('ID_muestra')\n feature_col_names = ['ED',\n 'DEL',\n 'NP',\n 'PL',\n 'CE',\n 'CCS',\n 'SCCS']\n predicted_class_name = ['Resultado']\n X = df[feature_col_names].values\n y = df[predicted_class_name].values\n split_test_size = 0.30\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=split_test_size, random_state=45)\n nb_model = GaussianNB()\n nb_model.fit(X_train, y_train.ravel())\n nb_predict_test = nb_model.predict(X_test)\n print(\"Accuracy: {0:.4f}\".format(metrics.accuracy_score(y_test, nb_predict_test)))\n print(\"Confusion matrix\")\n print(\"{0}\".format(metrics.confusion_matrix(y_test, nb_predict_test)))\n print(\"\")\n print(\"Classification Report\")\n print(metrics.classification_report(y_test, nb_predict_test))\n # using the passed data to predict\n nb_predict_test = nb_model.predict([array_to_train])\n print(nb_predict_test)\n\n\ndef form_view(request):\n form_to_fill = form.Decease()\n if request.method == 'POST':\n form_to_fill = form.Decease(request.POST)\n\n if form_to_fill.is_valid():\n # DO SOMETHING CODE\n print(\"VALIDATION SUCCESS!\")\n array_of_values = [\n int(form_to_fill.cleaned_data['ED']),\n int(form_to_fill.cleaned_data['DEL']),\n int(form_to_fill.cleaned_data['NP']),\n int(form_to_fill.cleaned_data['PL']),\n int(form_to_fill.cleaned_data['CE']),\n int(form_to_fill.cleaned_data['CCS']),\n int(form_to_fill.cleaned_data['SCCS']),\n ]\n training_model(array_of_values)\n prediction_result_view(request, \"test\")\n\n return render(request, 'form_view.html', {'form': form_to_fill})\n\n\ndef prediction_result_view(request, pred=\"waiting data\"):\n print(\"this was called\", pred)\n return render(request, 'result.html', {\"result\": \"pred\"})\n","repo_name":"cesargamboa/predictingData","sub_path":"basic_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2675,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"71153431567","text":"#!/usr/bin/python3\nimport requests\n\n\nUSER_NAME = 'your user name here '\nTOKEN = 'your token here'\n\ndef extract_assets(handler):\n\n url = 'https://api.hackerone.com/v1/hackers/programs/'+handler\n headers = {\n 'Accept': 'application/json'\n }\n r = requests.get(url, auth=(USER_NAME, TOKEN), headers=headers)\n scope = []\n print('extracting assets')\n for domain in r.json()['relationships']['structured_scopes']['data']:\n scope.append(domain['attributes']['asset_identifier'])\n print(f\"Total asset : {len(scope)}\")\n for asset in scope:\n print(asset)\n return scope\n\n\ndef parse_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--target', type=str, required=True, help='handler of target to extract i')\n return parser.parse_args()\n\n\ndef banner():\n print(\"=\"*10)\n print(r\"\"\"\n▀▀█▀▀ █▀▀█ █▀▀█ █▀▀▀ █▀▀ ▀▀█▀▀ █▀▀ █░█ \n░░█░░ █▄▄█ █▄▄▀ █░▀█ █▀▀ ░░█░░ █▀▀ ▄▀▄ \n░░▀░░ ▀░░▀ ▀░▀▀ ▀▀▀▀ ▀▀▀ ░░▀░░ ▀▀▀ 
▀░▀\n\"\"\")\n print(\"Extract assets from targets\")\n\n\ndef make_project(target, scope):\n import os\n os.mkdir(target)\n with open(target+'/wildcard', 'w') as file:\n for asset in scope:\n file.write(asset.replace(\"*.\", \"\") + \"\\n\")\n\n\ndef main():\n banner()\n args = parse_args()\n scope = []\n if args.target:\n scope = extract_assets(args.target)\n make_project(args.target, scope)\n else:\n print(\"python inscope.py -t \")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Al-Haimy/in-scope-hackerone","sub_path":"inscope.py","file_name":"inscope.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"65"} +{"seq_id":"70034390608","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom ..attention import (NoAttention, BiLinearSoftmaxAttention, \n BiLinearSigmoidAttention)\n\nimport argparse\n\n\nclass Seq2SeqSentenceExtractor(nn.Module):\n def __init__(self, input_size, hidden_size=100, num_layers=1, \n cell=\"gru\", rnn_dropout=0.0, bidirectional=False,\n mlp_layers=[100], mlp_dropouts=[.25],\n attention=\"bilinear-softmax\"):\n\n super(Seq2SeqSentenceExtractor, self).__init__()\n\n if cell not in [\"gru\", \"lstm\", \"rnn\"]:\n raise Exception((\"cell expected one of 'gru', 'lstm', or 'rnn' \"\n \"but got {}\").format(cell))\n if cell == \"gru\":\n self.encoder_rnn = nn.GRU(\n input_size, hidden_size, num_layers=num_layers, \n bidirectional=bidirectional,\n dropout=rnn_dropout if num_layers > 1 else 0.)\n self.decoder_rnn = nn.GRU(\n input_size, hidden_size, num_layers=num_layers, \n bidirectional=bidirectional,\n dropout=rnn_dropout if num_layers > 1 else 0.)\n elif cell == \"lstm\":\n self.encoder_rnn = nn.LSTM(\n input_size, hidden_size, num_layers=num_layers,\n bidirectional=bidirectional,\n dropout=rnn_dropout if num_layers > 1 else 0.)\n self.decoder_rnn = nn.LSTM(\n input_size, hidden_size, num_layers=num_layers,\n bidirectional=bidirectional,\n dropout=rnn_dropout if num_layers > 1 else 0.)\n else:\n self.encoder_rnn = nn.RNN(\n input_size, hidden_size, num_layers=num_layers,\n bidirectional=bidirectional,\n dropout=rnn_dropout if num_layers > 1 else 0.)\n self.decoder_rnn = nn.RNN(\n input_size, hidden_size, num_layers=num_layers,\n bidirectional=bidirectional,\n dropout=rnn_dropout if num_layers > 1 else 0.)\n\n self.decoder_start = nn.Parameter(\n torch.FloatTensor(input_size).normal_())\n\n self.rnn_dropout = rnn_dropout\n\n if attention == \"bilinear-softmax\":\n self.attention = BiLinearSoftmaxAttention()\n elif attention == \"bilinear-sigmoid\":\n self.attention = BiLinearSigmoidAttention()\n elif attention == \"none\":\n self.attention = NoAttention()\n else:\n raise Exception(\"attention must be 'none', 'bilinear-softmax', \"\n \"or 'bilinear-sigmoid'.\")\n\n self.teacher_forcing = True\n\n inp_size = hidden_size\n if bidirectional:\n inp_size *= 2\n\n if attention != \"none\":\n inp_size *= 2\n mlp = []\n for out_size, dropout in zip(mlp_layers, mlp_dropouts):\n mlp.append(nn.Linear(inp_size, out_size))\n mlp.append(nn.ReLU())\n mlp.append(nn.Dropout(p=dropout, inplace=True))\n inp_size = out_size \n mlp.append(nn.Linear(inp_size, 1))\n self.mlp = nn.Sequential(*mlp)\n\n @staticmethod\n def argparser():\n parser = argparse.ArgumentParser(usage=argparse.SUPPRESS)\n parser.add_argument(\n \"--hidden-size\", default=300, type=int)\n parser.add_argument(\n \"--bidirectional\", action=\"store_true\", default=False)\n parser.add_argument(\n 
\"--rnn-dropout\", default=.25, type=float)\n parser.add_argument(\n \"--num-layers\", default=1, type=int)\n parser.add_argument(\"--cell\", choices=[\"rnn\", \"gru\", \"lstm\"],\n default=\"gru\", type=str)\n parser.add_argument(\n \"--mlp-layers\", default=[100], type=int, nargs=\"+\")\n parser.add_argument(\n \"--mlp-dropouts\", default=[.25], type=float, nargs=\"+\")\n return parser\n\n def _start_decoder(self, batch_size, rnn_state):\n start_emb = self.decoder_start.view(1, 1, -1).repeat(1, batch_size, 1)\n _, updated_rnn_state = self.decoder_rnn(start_emb, rnn_state)\n return updated_rnn_state\n\n def _apply_rnn(self, rnn, packed_input, rnn_state=None):\n packed_output, updated_rnn_state = rnn(packed_input, rnn_state)\n output, _ = nn.utils.rnn.pad_packed_sequence(\n packed_output, \n batch_first=True)\n output = F.dropout(\n output, p=self.rnn_dropout, training=self.training, inplace=True)\n return output, updated_rnn_state\n\n def forward(self, sentence_embeddings, num_sentences, targets=None):\n\n batch_size = sentence_embeddings.size(0)\n\n packed_sentence_embeddings = nn.utils.rnn.pack_padded_sequence(\n sentence_embeddings, \n num_sentences.data.tolist(), \n batch_first=True)\n\n encoder_output, rnn_state = self._apply_rnn(\n self.encoder_rnn, \n packed_sentence_embeddings)\n\n rnn_state = self._start_decoder(batch_size, rnn_state)\n \n decoder_output, rnn_state = self._apply_rnn(\n self.decoder_rnn,\n packed_sentence_embeddings,\n rnn_state=rnn_state)\n\n mlp_input, scores = self.attention(\n encoder_output, decoder_output, num_sentences)\n\n return self.mlp(mlp_input).squeeze(2), scores\n\n\n def initialize_parameters(self, logger=None):\n if logger:\n logger.info(\" Seq2SeqSentenceExtractor initialization started.\")\n for name, p in self.named_parameters():\n if \"weight\" in name:\n if logger:\n logger.info(\" {} ({}): Xavier normal init.\".format(\n name, \",\".join([str(x) for x in p.size()])))\n nn.init.xavier_normal_(p) \n elif \"bias\" in name:\n if logger:\n logger.info(\" {} ({}): constant (0) init.\".format(\n name, \",\".join([str(x) for x in p.size()])))\n nn.init.constant_(p, 0) \n else:\n if logger:\n logger.info(\" {} ({}): random normal init.\".format(\n name, \",\".join([str(x) for x in p.size()])))\n nn.init.normal_(p) \n if logger:\n logger.info(\" Seq2SeqSentenceExtractor initialization finished.\")\n","repo_name":"kedz/nnsum","sub_path":"nnsum/module/sentence_extractor/seq2seq_sentence_extractor.py","file_name":"seq2seq_sentence_extractor.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"65"} +{"seq_id":"33893261926","text":"from scipy.optimize import curve_fit\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec, pylab\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ndef sigmoid_b(x, x0, a, b):\n return a*sigmoid(x-x0) + b\n\ndef sigprime(x, x0, a):\n return a*sigmoid(x-x0)*(1 - sigmoid(x-x0))\n\ndef gaus(x, x0, sigma):\n return (1/(sigma*np.sqrt(2*np.pi))) * np.exp( (pow(x-x0, 2)) / (-2*pow(sigma, 2)) )\n\ndef gaus_b(x, A, x0, sigma, b):\n return A*gaus(x, x0, sigma) + b\n\ndef LSE(x, y, a, x0, sigma, b, err):\n return sum( [(pow(gaus_b(ix, a, x0, sigma, b) - iy, 2))/err for ix, iy in zip(x, y)])/len(y)\n\ndef Norm(X):\n m = X.mean()\n s = X.std()\n return (X-m)/s\n\ndef GetBkg(y):\n mu = y.mean()\n std = y.std()\n y = np.array([i for i in y if i < mu + 3*std])\n return y.mean(), y.std()\n\ndef GetSeed(x, y): \n mean = 
x[np.argmax(y)]\n    peak = y.max()\n    minFW = 1000\n    for arg, i in enumerate(y):\n        if abs(i - peak/2) < minFW:\n            minFW = abs(i - peak/2)\n            argFW = arg\n    sigma = abs(x[argFW]-mean) / 2.355 \n    area = sigma*peak\n    return area, mean, sigma\n\ndef GetVars(x, y, inds, bkg):\n    areas = []\n    means = []\n    sigmas = []\n    bkgs = []\n    for i, ind in enumerate(inds):\n        imin = ind-50 if ind>50 else 0\n        imax = ind+50 if ind